-rw-r--r--Documentation/ABI/stable/sysfs-class-ubi9
-rw-r--r--Documentation/DocBook/gpu.tmpl6
-rw-r--r--Documentation/devicetree/bindings/gpio/microchip,pic32-gpio.txt2
-rw-r--r--Documentation/devicetree/bindings/mips/cpu_irq.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/atmel-nand.txt2
-rw-r--r--Documentation/devicetree/bindings/mtd/nand.txt3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/microchip,pic32-pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/serial/microchip,pic32-uart.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/max98371.txt17
-rw-r--r--Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt5
-rw-r--r--Documentation/devicetree/bindings/sound/mt8173-rt5650.txt10
-rw-r--r--Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt48
-rw-r--r--Documentation/devicetree/bindings/sound/tas571x.txt10
-rw-r--r--Documentation/devicetree/bindings/sound/tas5720.txt25
-rw-r--r--Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt12
-rw-r--r--Documentation/devicetree/bindings/thermal/rcar-thermal.txt1
-rw-r--r--Documentation/devicetree/bindings/thermal/tango-thermal.txt17
-rw-r--r--Documentation/devicetree/bindings/thermal/thermal-generic-adc.txt89
-rw-r--r--Documentation/devicetree/bindings/watchdog/microchip,pic32-dmt.txt4
-rw-r--r--Documentation/devicetree/bindings/watchdog/microchip,pic32-wdt.txt4
-rw-r--r--Documentation/filesystems/dax.txt32
-rw-r--r--Documentation/filesystems/directory-locking32
-rw-r--r--Documentation/filesystems/overlayfs.txt9
-rw-r--r--Documentation/filesystems/porting7
-rw-r--r--Documentation/hwmon/max344406
-rw-r--r--Documentation/infiniband/sysfs.txt12
-rw-r--r--Documentation/kbuild/kconfig-language.txt22
-rw-r--r--Documentation/scsi/tcm_qla2xxx.txt22
-rw-r--r--Documentation/sysctl/kernel.txt14
-rwxr-xr-xDocumentation/target/tcm_mod_builder.py16
-rw-r--r--Documentation/thermal/sysfs-api.txt44
-rw-r--r--MAINTAINERS36
-rw-r--r--Makefile72
-rw-r--r--arch/Kconfig8
-rw-r--r--arch/arc/kernel/perf_event.c6
-rw-r--r--arch/arm/boot/Makefile1
-rw-r--r--arch/arm/boot/bootp/Makefile3
-rw-r--r--arch/arm/include/asm/kvm_host.h6
-rw-r--r--arch/arm/include/asm/kvm_mmio.h3
-rw-r--r--arch/arm/kernel/perf_callchain.c10
-rw-r--r--arch/arm/kvm/Kconfig7
-rw-r--r--arch/arm/kvm/Makefile11
-rw-r--r--arch/arm/kvm/arm.c37
-rw-r--r--arch/arm/kvm/mmio.c24
-rw-r--r--arch/arm/vdso/Makefile2
-rw-r--r--arch/arm64/include/asm/kvm_host.h6
-rw-r--r--arch/arm64/include/asm/kvm_mmio.h3
-rw-r--r--arch/arm64/kernel/perf_callchain.c14
-rw-r--r--arch/arm64/kvm/Kconfig7
-rw-r--r--arch/arm64/kvm/Makefile12
-rw-r--r--arch/arm64/kvm/inject_fault.c2
-rw-r--r--arch/h8300/Kconfig1
-rw-r--r--arch/h8300/boot/compressed/Makefile1
-rw-r--r--arch/h8300/include/asm/hash.h53
-rw-r--r--arch/ia64/Makefile6
-rw-r--r--arch/m32r/boot/compressed/Makefile1
-rw-r--r--arch/m68k/Kconfig.cpu1
-rw-r--r--arch/m68k/include/asm/hash.h59
-rw-r--r--arch/metag/kernel/perf_callchain.c10
-rw-r--r--arch/microblaze/Kconfig1
-rw-r--r--arch/microblaze/include/asm/hash.h81
-rw-r--r--arch/mips/Kconfig1
-rw-r--r--arch/mips/boot/dts/ingenic/jz4740.dtsi2
-rw-r--r--arch/mips/boot/dts/ralink/mt7620a.dtsi2
-rw-r--r--arch/mips/boot/dts/ralink/rt2880.dtsi2
-rw-r--r--arch/mips/boot/dts/ralink/rt3050.dtsi2
-rw-r--r--arch/mips/boot/dts/ralink/rt3883.dtsi2
-rw-r--r--arch/mips/boot/dts/xilfpga/nexys4ddr.dts2
-rw-r--r--arch/mips/cavium-octeon/smp.c2
-rw-r--r--arch/mips/include/asm/asmmacro.h99
-rw-r--r--arch/mips/include/asm/hazards.h8
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h2
-rw-r--r--arch/mips/include/asm/mach-au1x00/gpio-au1300.h2
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h2
-rw-r--r--arch/mips/include/asm/mach-ip27/dma-coherence.h2
-rw-r--r--arch/mips/include/asm/mach-ip32/dma-coherence.h2
-rw-r--r--arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h2
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h2
-rw-r--r--arch/mips/include/asm/mach-loongson64/loongson_hwmon.h2
-rw-r--r--arch/mips/include/asm/mach-malta/kernel-entry-init.h6
-rw-r--r--arch/mips/include/asm/mips_mt.h2
-rw-r--r--arch/mips/include/asm/mipsregs.h188
-rw-r--r--arch/mips/include/asm/msa.h21
-rw-r--r--arch/mips/include/asm/octeon/cvmx-cmd-queue.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-board.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-ipd.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pow.h2
-rw-r--r--arch/mips/include/asm/sgi/hpc3.h2
-rw-r--r--arch/mips/kernel/branch.c4
-rw-r--r--arch/mips/kernel/cps-vec.S15
-rw-r--r--arch/mips/kernel/cpu-probe.c4
-rw-r--r--arch/mips/kernel/elf.c2
-rw-r--r--arch/mips/kernel/irq.c3
-rw-r--r--arch/mips/kernel/mips-r2-to-r6-emul.c2
-rw-r--r--arch/mips/kernel/perf_event.c12
-rw-r--r--arch/mips/kernel/process.c2
-rw-r--r--arch/mips/kernel/signal.c8
-rw-r--r--arch/mips/kernel/smp-cps.c8
-rw-r--r--arch/mips/lasat/picvue_proc.c4
-rw-r--r--arch/mips/lib/ashldi3.c2
-rw-r--r--arch/mips/lib/ashrdi3.c2
-rw-r--r--arch/mips/lib/bswapdi.c2
-rw-r--r--arch/mips/lib/bswapsi.c2
-rw-r--r--arch/mips/lib/cmpdi2.c2
-rw-r--r--arch/mips/lib/lshrdi3.c2
-rw-r--r--arch/mips/lib/memcpy.S2
-rw-r--r--arch/mips/lib/ucmpdi2.c2
-rw-r--r--arch/mips/loongson64/loongson-3/hpet.c2
-rw-r--r--arch/mips/math-emu/dsemul.c4
-rw-r--r--arch/mips/mm/tlbex.c22
-rw-r--r--arch/mips/oprofile/op_impl.h2
-rw-r--r--arch/mips/pci/ops-bridge.c4
-rw-r--r--arch/mips/pistachio/init.c8
-rw-r--r--arch/mips/ralink/mt7620.c112
-rw-r--r--arch/mips/sgi-ip27/ip27-hubio.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-nmi.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-xtalk.c2
-rw-r--r--arch/mips/sni/rm200.c2
-rw-r--r--arch/mips/vdso/Makefile4
-rw-r--r--arch/mips/vr41xx/common/cmu.c2
-rw-r--r--arch/mn10300/boot/compressed/Makefile1
-rw-r--r--arch/nios2/boot/compressed/Makefile1
-rw-r--r--arch/powerpc/perf/callchain.c20
-rw-r--r--arch/powerpc/sysdev/axonram.c2
-rw-r--r--arch/s390/boot/compressed/Makefile1
-rw-r--r--arch/s390/configs/default_defconfig44
-rw-r--r--arch/s390/configs/gcov_defconfig34
-rw-r--r--arch/s390/configs/performance_defconfig36
-rw-r--r--arch/s390/configs/zfcpdump_defconfig4
-rw-r--r--arch/s390/defconfig44
-rw-r--r--arch/s390/kernel/perf_event.c4
-rw-r--r--arch/s390/mm/fault.c1
-rw-r--r--arch/s390/net/bpf_jit.h4
-rw-r--r--arch/s390/net/bpf_jit_comp.c4
-rw-r--r--arch/sh/boot/compressed/Makefile1
-rw-r--r--arch/sh/boot/romimage/Makefile1
-rw-r--r--arch/sh/kernel/perf_callchain.c4
-rw-r--r--arch/sparc/kernel/perf_event.c14
-rw-r--r--arch/tile/kernel/perf_event.c6
-rw-r--r--arch/um/include/shared/registers.h2
-rw-r--r--arch/um/kernel/process.c2
-rw-r--r--arch/um/os-Linux/signal.c28
-rw-r--r--arch/unicore32/boot/Makefile2
-rw-r--r--arch/unicore32/boot/compressed/Makefile1
-rw-r--r--arch/x86/boot/compressed/Makefile1
-rw-r--r--arch/x86/entry/thunk_64.S11
-rw-r--r--arch/x86/entry/vdso/Makefile4
-rw-r--r--arch/x86/events/core.c14
-rw-r--r--arch/x86/events/intel/p4.c2
-rw-r--r--arch/x86/events/intel/uncore.c2
-rw-r--r--arch/x86/ia32/ia32_aout.c17
-rw-r--r--arch/x86/include/asm/bugs.h8
-rw-r--r--arch/x86/include/asm/cpufeature.h12
-rw-r--r--arch/x86/include/asm/disabled-features.h6
-rw-r--r--arch/x86/include/asm/intel_telemetry.h2
-rw-r--r--arch/x86/include/asm/pmc_core.h27
-rw-r--r--arch/x86/include/uapi/asm/svm.h44
-rw-r--r--arch/x86/kernel/cpu/common.c7
-rw-r--r--arch/x86/kernel/cpu/intel.c37
-rw-r--r--arch/x86/kernel/process_64.c4
-rw-r--r--arch/x86/kernel/ptrace.c4
-rw-r--r--arch/x86/kernel/tsc_msr.c3
-rw-r--r--arch/x86/kvm/svm.c4
-rw-r--r--arch/x86/kvm/vmx.c48
-rw-r--r--arch/x86/mm/fault.c11
-rw-r--r--arch/x86/platform/efi/efi_stub_64.S9
-rw-r--r--arch/x86/purgatory/Makefile2
-rw-r--r--arch/x86/realmode/rm/Makefile1
-rw-r--r--arch/x86/um/os-Linux/registers.c49
-rw-r--r--arch/x86/um/ptrace_32.c5
-rw-r--r--arch/x86/um/ptrace_64.c16
-rw-r--r--arch/x86/um/shared/sysdep/ptrace_64.h4
-rw-r--r--arch/x86/um/signal.c37
-rw-r--r--arch/x86/um/user-offsets.c2
-rw-r--r--arch/xtensa/kernel/perf_event.c10
-rw-r--r--block/blk-mq.c4
-rw-r--r--block/ioctl.c1
-rw-r--r--crypto/asymmetric_keys/Kconfig1
-rw-r--r--drivers/acpi/acpi_dbg.c22
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/device_pm.c1
-rw-r--r--drivers/ata/sata_highbank.c2
-rw-r--r--drivers/base/power/main.c5
-rw-r--r--drivers/block/brd.c2
-rw-r--r--drivers/block/rbd.c305
-rw-r--r--drivers/clk/tegra/clk-tegra210.c2
-rw-r--r--drivers/cpufreq/cpufreq.c84
-rw-r--r--drivers/cpufreq/intel_pstate.c11
-rw-r--r--drivers/cpufreq/mt8173-cpufreq.c11
-rw-r--r--drivers/cpufreq/omap-cpufreq.c2
-rw-r--r--drivers/cpuidle/cpuidle.c2
-rw-r--r--drivers/crypto/caam/ctrl.c2
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-xts.c17
-rw-r--r--drivers/crypto/omap-sham.c2
-rw-r--r--drivers/dma/sun4i-dma.c16
-rw-r--r--drivers/gpio/gpio-lpc32xx.c48
-rw-r--r--drivers/gpio/gpio-xlp.c2
-rw-r--r--drivers/gpio/gpiolib.c51
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/amd/acp/Kconfig1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c111
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c45
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c38
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c44
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c16
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c2
-rw-r--r--drivers/gpu/drm/drm_dp_dual_mode_helper.c366
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h1
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c11
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h1
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c36
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c16
-rw-r--r--drivers/gpu/drm/i915/intel_display.c6
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c6
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h6
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c141
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c104
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c146
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h1
-rw-r--r--drivers/gpu/drm/i915/intel_mocs.c12
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c2
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c55
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c207
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h6
-rw-r--r--drivers/gpu/drm/i915/intel_vbt_defs.h12
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c8
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c2
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c4
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c2
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c2
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c5
-rw-r--r--drivers/hwmon/emc2103.c2
-rw-r--r--drivers/hwmon/lm75.c10
-rw-r--r--drivers/hwmon/ntc_thermistor.c12
-rw-r--r--drivers/hwmon/scpi-hwmon.c48
-rw-r--r--drivers/hwmon/tmp102.c8
-rw-r--r--drivers/i2c/busses/Kconfig3
-rw-r--r--drivers/i2c/busses/i2c-at91.c2
-rw-r--r--drivers/i2c/busses/i2c-rcar.c2
-rw-r--r--drivers/i2c/i2c-dev.c25
-rw-r--r--drivers/infiniband/Kconfig2
-rw-r--r--drivers/infiniband/core/Makefile12
-rw-r--r--drivers/infiniband/core/addr.c226
-rw-r--r--drivers/infiniband/core/core_priv.h16
-rw-r--r--drivers/infiniband/core/device.c58
-rw-r--r--drivers/infiniband/core/mad.c13
-rw-r--r--drivers/infiniband/core/multicast.c23
-rw-r--r--drivers/infiniband/core/sa_query.c211
-rw-r--r--drivers/infiniband/core/sysfs.c366
-rw-r--r--drivers/infiniband/hw/Makefile1
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c147
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c58
-rw-r--r--drivers/infiniband/hw/hfi1/Kconfig (renamed from drivers/staging/rdma/hfi1/Kconfig)0
-rw-r--r--drivers/infiniband/hw/hfi1/Makefile (renamed from drivers/staging/rdma/hfi1/Makefile)2
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c (renamed from drivers/staging/rdma/hfi1/affinity.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.h (renamed from drivers/staging/rdma/hfi1/affinity.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/aspm.h (renamed from drivers/staging/rdma/hfi1/aspm.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c (renamed from drivers/staging/rdma/hfi1/chip.c)41
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h (renamed from drivers/staging/rdma/hfi1/chip.h)6
-rw-r--r--drivers/infiniband/hw/hfi1/chip_registers.h (renamed from drivers/staging/rdma/hfi1/chip_registers.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/common.h (renamed from drivers/staging/rdma/hfi1/common.h)5
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c (renamed from drivers/staging/rdma/hfi1/debugfs.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.h (renamed from drivers/staging/rdma/hfi1/debugfs.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/device.c (renamed from drivers/staging/rdma/hfi1/device.c)18
-rw-r--r--drivers/infiniband/hw/hfi1/device.h (renamed from drivers/staging/rdma/hfi1/device.h)3
-rw-r--r--drivers/infiniband/hw/hfi1/dma.c (renamed from drivers/staging/rdma/hfi1/dma.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c (renamed from drivers/staging/rdma/hfi1/driver.c)2
-rw-r--r--drivers/infiniband/hw/hfi1/efivar.c (renamed from drivers/staging/rdma/hfi1/efivar.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/efivar.h (renamed from drivers/staging/rdma/hfi1/efivar.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/eprom.c102
-rw-r--r--drivers/infiniband/hw/hfi1/eprom.h (renamed from drivers/staging/rdma/hfi1/eprom.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c (renamed from drivers/staging/rdma/hfi1/file_ops.c)549
-rw-r--r--drivers/infiniband/hw/hfi1/firmware.c (renamed from drivers/staging/rdma/hfi1/firmware.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h (renamed from drivers/staging/rdma/hfi1/hfi.h)7
-rw-r--r--drivers/infiniband/hw/hfi1/init.c (renamed from drivers/staging/rdma/hfi1/init.c)22
-rw-r--r--drivers/infiniband/hw/hfi1/intr.c (renamed from drivers/staging/rdma/hfi1/intr.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.h (renamed from drivers/staging/rdma/hfi1/iowait.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c (renamed from drivers/staging/rdma/hfi1/mad.c)99
-rw-r--r--drivers/infiniband/hw/hfi1/mad.h (renamed from drivers/staging/rdma/hfi1/mad.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.c (renamed from drivers/staging/rdma/hfi1/mmu_rb.c)22
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.h (renamed from drivers/staging/rdma/hfi1/mmu_rb.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/opa_compat.h (renamed from drivers/staging/rdma/hfi1/opa_compat.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c (renamed from drivers/staging/rdma/hfi1/pcie.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c (renamed from drivers/staging/rdma/hfi1/pio.c)3
-rw-r--r--drivers/infiniband/hw/hfi1/pio.h (renamed from drivers/staging/rdma/hfi1/pio.h)8
-rw-r--r--drivers/infiniband/hw/hfi1/pio_copy.c (renamed from drivers/staging/rdma/hfi1/pio_copy.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/platform.c (renamed from drivers/staging/rdma/hfi1/platform.c)27
-rw-r--r--drivers/infiniband/hw/hfi1/platform.h (renamed from drivers/staging/rdma/hfi1/platform.h)1
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c (renamed from drivers/staging/rdma/hfi1/qp.c)9
-rw-r--r--drivers/infiniband/hw/hfi1/qp.h (renamed from drivers/staging/rdma/hfi1/qp.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/qsfp.c (renamed from drivers/staging/rdma/hfi1/qsfp.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/qsfp.h (renamed from drivers/staging/rdma/hfi1/qsfp.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c (renamed from drivers/staging/rdma/hfi1/rc.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c (renamed from drivers/staging/rdma/hfi1/ruc.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c (renamed from drivers/staging/rdma/hfi1/sdma.c)4
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h (renamed from drivers/staging/rdma/hfi1/sdma.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/sdma_txreq.h (renamed from drivers/staging/rdma/hfi1/sdma_txreq.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c (renamed from drivers/staging/rdma/hfi1/sysfs.c)4
-rw-r--r--drivers/infiniband/hw/hfi1/trace.c (renamed from drivers/staging/rdma/hfi1/trace.c)8
-rw-r--r--drivers/infiniband/hw/hfi1/trace.h (renamed from drivers/staging/rdma/hfi1/trace.h)5
-rw-r--r--drivers/infiniband/hw/hfi1/twsi.c (renamed from drivers/staging/rdma/hfi1/twsi.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/twsi.h (renamed from drivers/staging/rdma/hfi1/twsi.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c (renamed from drivers/staging/rdma/hfi1/uc.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c (renamed from drivers/staging/rdma/hfi1/ud.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c (renamed from drivers/staging/rdma/hfi1/user_exp_rcv.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.h (renamed from drivers/staging/rdma/hfi1/user_exp_rcv.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/user_pages.c (renamed from drivers/staging/rdma/hfi1/user_pages.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c (renamed from drivers/staging/rdma/hfi1/user_sdma.c)18
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.h (renamed from drivers/staging/rdma/hfi1/user_sdma.h)0
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c (renamed from drivers/staging/rdma/hfi1/verbs.c)4
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h (renamed from drivers/staging/rdma/hfi1/verbs.h)1
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.c (renamed from drivers/staging/rdma/hfi1/verbs_txreq.c)0
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.h (renamed from drivers/staging/rdma/hfi1/verbs_txreq.h)0
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c145
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c15
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h1
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c1
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c4
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c30
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c109
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c140
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c48
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c2
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c11
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c11
-rw-r--r--drivers/input/joystick/xpad.c18
-rw-r--r--drivers/input/misc/pwm-beeper.c69
-rw-r--r--drivers/input/misc/uinput.c6
-rw-r--r--drivers/input/touchscreen/sun4i-ts.c9
-rw-r--r--drivers/iommu/arm-smmu-v3.c18
-rw-r--r--drivers/iommu/arm-smmu.c8
-rw-r--r--drivers/iommu/intel-iommu.c318
-rw-r--r--drivers/iommu/iova.c417
-rw-r--r--drivers/irqchip/irq-clps711x.c2
-rw-r--r--drivers/irqchip/irq-gic.c2
-rw-r--r--drivers/irqchip/irq-hip04.c2
-rw-r--r--drivers/irqchip/irq-mips-gic.c10
-rw-r--r--drivers/irqchip/spear-shirq.c2
-rw-r--r--drivers/md/bcache/alloc.c2
-rw-r--r--drivers/md/bcache/btree.c2
-rw-r--r--drivers/md/bcache/writeback.c3
-rw-r--r--drivers/media/i2c/adp1653.c10
-rw-r--r--drivers/media/platform/s5p-tv/mixer_drv.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c2
-rw-r--r--drivers/mfd/twl4030-irq.c2
-rw-r--r--drivers/mmc/card/block.c24
-rw-r--r--drivers/mmc/core/core.c4
-rw-r--r--drivers/mmc/core/host.c24
-rw-r--r--drivers/mmc/core/mmc.c4
-rw-r--r--drivers/mmc/host/dw_mmc-rockchip.c72
-rw-r--r--drivers/mmc/host/dw_mmc.c12
-rw-r--r--drivers/mmc/host/sdhci-acpi.c13
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c2
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c2
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c5
-rw-r--r--drivers/mmc/host/sdhci.c4
-rw-r--r--drivers/mtd/nand/atmel_nand.c35
-rw-r--r--drivers/mtd/nand/atmel_nand_nfc.h3
-rw-r--r--drivers/mtd/ubi/build.c5
-rw-r--r--drivers/mtd/ubi/debug.c3
-rw-r--r--drivers/mtd/ubi/eba.c47
-rw-r--r--drivers/mtd/ubi/fastmap.c1
-rw-r--r--drivers/mtd/ubi/kapi.c2
-rw-r--r--drivers/mtd/ubi/ubi.h2
-rw-r--r--drivers/mtd/ubi/vmt.c2
-rw-r--r--drivers/mtd/ubi/wl.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_muram.c4
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_muram.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c4
-rw-r--r--drivers/nvdimm/pmem.c10
-rw-r--r--drivers/nvme/host/core.c37
-rw-r--r--drivers/nvme/host/nvme.h1
-rw-r--r--drivers/nvme/host/pci.c23
-rw-r--r--drivers/nvmem/core.c22
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c10
-rw-r--r--drivers/platform/chrome/Kconfig10
-rw-r--r--drivers/platform/chrome/Makefile15
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c22
-rw-r--r--drivers/platform/chrome/chromeos_pstore.c55
-rw-r--r--drivers/platform/chrome/cros_ec_dev.c7
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c10
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c4
-rw-r--r--drivers/platform/chrome/cros_kbd_led_backlight.c122
-rw-r--r--drivers/platform/x86/Kconfig12
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/asus-laptop.c15
-rw-r--r--drivers/platform/x86/asus-wmi.c5
-rw-r--r--drivers/platform/x86/dell-rbtn.c56
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c63
-rw-r--r--drivers/platform/x86/ideapad-laptop.c19
-rw-r--r--drivers/platform/x86/intel_menlow.c49
-rw-r--r--drivers/platform/x86/intel_pmc_core.c200
-rw-r--r--drivers/platform/x86/intel_pmc_core.h51
-rw-r--r--drivers/platform/x86/intel_telemetry_core.c6
-rw-r--r--drivers/platform/x86/intel_telemetry_pltdrv.c2
-rw-r--r--drivers/platform/x86/sony-laptop.c3
-rw-r--r--drivers/platform/x86/surfacepro3_button.c9
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c43
-rw-r--r--drivers/s390/block/dcssblk.c4
-rw-r--r--drivers/scsi/aacraid/aacraid.h5
-rw-r--r--drivers/scsi/aacraid/linit.c11
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c3
-rw-r--r--drivers/scsi/qla2xxx/Kconfig9
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c56
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h4
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c59
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h1
-rw-r--r--drivers/scsi/scsi_lib.c7
-rw-r--r--drivers/scsi/sd.c9
-rw-r--r--drivers/spi/spi-ep93xx.c2
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h4
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c6
-rw-r--r--drivers/staging/rdma/Kconfig27
-rw-r--r--drivers/staging/rdma/Makefile2
-rw-r--r--drivers/staging/rdma/hfi1/TODO6
-rw-r--r--drivers/staging/rdma/hfi1/diag.c1925
-rw-r--r--drivers/staging/rdma/hfi1/eprom.c471
-rw-r--r--drivers/target/iscsi/Kconfig2
-rw-r--r--drivers/target/iscsi/Makefile1
-rw-r--r--drivers/target/iscsi/cxgbit/Kconfig7
-rw-r--r--drivers/target/iscsi/cxgbit/Makefile6
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit.h353
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_cm.c2086
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_ddp.c325
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_lro.h72
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_main.c702
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_target.c1561
-rw-r--r--drivers/target/iscsi/iscsi_target.c701
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c158
-rw-r--r--drivers/target/iscsi/iscsi_target_datain_values.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c19
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c1
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c5
-rw-r--r--drivers/target/loopback/tcm_loop.c12
-rw-r--r--drivers/target/sbp/sbp_target.c12
-rw-r--r--drivers/target/target_core_alua.c6
-rw-r--r--drivers/target/target_core_configfs.c70
-rw-r--r--drivers/target/target_core_internal.h6
-rw-r--r--drivers/target/target_core_pr.c2
-rw-r--r--drivers/target/target_core_rd.c4
-rw-r--r--drivers/target/target_core_tpg.c83
-rw-r--r--drivers/target/target_core_transport.c26
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h1
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c1
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c12
-rw-r--r--drivers/thermal/Kconfig31
-rw-r--r--drivers/thermal/Makefile4
-rw-r--r--drivers/thermal/gov_bang_bang.c8
-rw-r--r--drivers/thermal/hisi_thermal.c45
-rw-r--r--drivers/thermal/int340x_thermal/processor_thermal_device.c108
-rw-r--r--drivers/thermal/intel_powerclamp.c47
-rw-r--r--drivers/thermal/mtk_thermal.c12
-rw-r--r--drivers/thermal/of-thermal.c10
-rw-r--r--drivers/thermal/qcom-spmi-temp-alarm.c3
-rw-r--r--drivers/thermal/rcar_thermal.c2
-rw-r--r--drivers/thermal/rockchip_thermal.c280
-rw-r--r--drivers/thermal/tango_thermal.c109
-rw-r--r--drivers/thermal/tegra/Kconfig13
-rw-r--r--drivers/thermal/tegra/Makefile6
-rw-r--r--drivers/thermal/tegra/soctherm-fuse.c169
-rw-r--r--drivers/thermal/tegra/soctherm.c685
-rw-r--r--drivers/thermal/tegra/soctherm.h127
-rw-r--r--drivers/thermal/tegra/tegra124-soctherm.c196
-rw-r--r--drivers/thermal/tegra/tegra132-soctherm.c196
-rw-r--r--drivers/thermal/tegra/tegra210-soctherm.c197
-rw-r--r--drivers/thermal/tegra_soctherm.c476
-rw-r--r--drivers/thermal/thermal-generic-adc.c182
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c5
-rw-r--r--drivers/thermal/x86_pkg_temp_thermal.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c2
-rw-r--r--drivers/tty/serial/sprd_serial.c2
-rw-r--r--drivers/usb/gadget/function/f_tcm.c11
-rw-r--r--drivers/vhost/scsi.c12
-rw-r--r--drivers/video/fbdev/da8xx-fb.c4
-rw-r--r--drivers/xen/xen-scsiback.c11
-rw-r--r--fs/9p/acl.c6
-rw-r--r--fs/9p/xattr.c5
-rw-r--r--fs/Kconfig1
-rw-r--r--fs/affs/super.c5
-rw-r--r--fs/afs/write.c4
-rw-r--r--fs/bad_inode.c4
-rw-r--r--fs/binfmt_aout.c19
-rw-r--r--fs/binfmt_elf.c11
-rw-r--r--fs/binfmt_flat.c6
-rw-r--r--fs/block_dev.c114
-rw-r--r--fs/btrfs/backref.c2
-rw-r--r--fs/btrfs/btrfs_inode.h2
-rw-r--r--fs/btrfs/check-integrity.c2
-rw-r--r--fs/btrfs/ctree.c14
-rw-r--r--fs/btrfs/ctree.h6
-rw-r--r--fs/btrfs/delayed-ref.h2
-rw-r--r--fs/btrfs/dev-replace.c2
-rw-r--r--fs/btrfs/disk-io.c14
-rw-r--r--fs/btrfs/extent-tree.c48
-rw-r--r--fs/btrfs/extent_io.c94
-rw-r--r--fs/btrfs/extent_io.h34
-rw-r--r--fs/btrfs/extent_map.c2
-rw-r--r--fs/btrfs/file-item.c2
-rw-r--r--fs/btrfs/file.c31
-rw-r--r--fs/btrfs/free-space-cache.c2
-rw-r--r--fs/btrfs/free-space-cache.h2
-rw-r--r--fs/btrfs/inode.c27
-rw-r--r--fs/btrfs/ioctl.c30
-rw-r--r--fs/btrfs/ordered-data.h2
-rw-r--r--fs/btrfs/qgroup.c24
-rw-r--r--fs/btrfs/raid56.c6
-rw-r--r--fs/btrfs/relocation.c19
-rw-r--r--fs/btrfs/root-tree.c4
-rw-r--r--fs/btrfs/scrub.c11
-rw-r--r--fs/btrfs/send.c6
-rw-r--r--fs/btrfs/struct-funcs.c2
-rw-r--r--fs/btrfs/super.c8
-rw-r--r--fs/btrfs/tests/extent-io-tests.c10
-rw-r--r--fs/btrfs/tests/free-space-tests.c7
-rw-r--r--fs/btrfs/tests/inode-tests.c2
-rw-r--r--fs/btrfs/tests/qgroup-tests.c2
-rw-r--r--fs/btrfs/transaction.c2
-rw-r--r--fs/btrfs/transaction.h2
-rw-r--r--fs/btrfs/tree-log.c10
-rw-r--r--fs/btrfs/ulist.c2
-rw-r--r--fs/btrfs/volumes.c12
-rw-r--r--fs/btrfs/xattr.c12
-rw-r--r--fs/ceph/addr.c214
-rw-r--r--fs/ceph/cache.c2
-rw-r--r--fs/ceph/caps.c51
-rw-r--r--fs/ceph/debugfs.c2
-rw-r--r--fs/ceph/dir.c376
-rw-r--r--fs/ceph/file.c89
-rw-r--r--fs/ceph/inode.c159
-rw-r--r--fs/ceph/ioctl.c14
-rw-r--r--fs/ceph/mds_client.c140
-rw-r--r--fs/ceph/mds_client.h17
-rw-r--r--fs/ceph/mdsmap.c43
-rw-r--r--fs/ceph/super.c47
-rw-r--r--fs/ceph/super.h12
-rw-r--r--fs/ceph/xattr.c32
-rw-r--r--fs/cifs/xattr.c9
-rw-r--r--fs/dax.c845
-rw-r--r--fs/dcache.c3
-rw-r--r--fs/direct-io.c14
-rw-r--r--fs/ecryptfs/crypto.c9
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h4
-rw-r--r--fs/ecryptfs/inode.c7
-rw-r--r--fs/ecryptfs/mmap.c3
-rw-r--r--fs/ext2/file.c4
-rw-r--r--fs/ext2/inode.c12
-rw-r--r--fs/ext2/super.c11
-rw-r--r--fs/ext2/xattr_security.c7
-rw-r--r--fs/ext2/xattr_trusted.c7
-rw-r--r--fs/ext2/xattr_user.c9
-rw-r--r--fs/ext4/file.c4
-rw-r--r--fs/ext4/super.c11
-rw-r--r--fs/ext4/xattr_security.c7
-rw-r--r--fs/ext4/xattr_trusted.c7
-rw-r--r--fs/ext4/xattr_user.c9
-rw-r--r--fs/f2fs/xattr.c12
-rw-r--r--fs/fuse/dir.c6
-rw-r--r--fs/gfs2/dir.c15
-rw-r--r--fs/gfs2/xattr.c6
-rw-r--r--fs/hfs/attr.c6
-rw-r--r--fs/hfs/hfs_fs.h2
-rw-r--r--fs/hfsplus/xattr.c12
-rw-r--r--fs/hfsplus/xattr.h2
-rw-r--r--fs/hfsplus/xattr_security.c7
-rw-r--r--fs/hfsplus/xattr_trusted.c7
-rw-r--r--fs/hfsplus/xattr_user.c7
-rw-r--r--fs/hpfs/super.c42
-rw-r--r--fs/jffs2/security.c7
-rw-r--r--fs/jffs2/xattr_trusted.c7
-rw-r--r--fs/jffs2/xattr_user.c7
-rw-r--r--fs/jfs/xattr.c14
-rw-r--r--fs/kernfs/inode.c11
-rw-r--r--fs/kernfs/kernfs-internal.h3
-rw-r--r--fs/libfs.c5
-rw-r--r--fs/namei.c163
-rw-r--r--fs/nfs/callback_proc.c9
-rw-r--r--fs/nfs/callback_xdr.c17
-rw-r--r--fs/nfs/delegation.c9
-rw-r--r--fs/nfs/delegation.h2
-rw-r--r--fs/nfs/direct.c17
-rw-r--r--fs/nfs/filelayout/filelayout.c6
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c200
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.h17
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayoutdev.c119
-rw-r--r--fs/nfs/internal.h1
-rw-r--r--fs/nfs/nfs42.h1
-rw-r--r--fs/nfs/nfs42proc.c107
-rw-r--r--fs/nfs/nfs42xdr.c146
-rw-r--r--fs/nfs/nfs4_fs.h12
-rw-r--r--fs/nfs/nfs4file.c23
-rw-r--r--fs/nfs/nfs4proc.c204
-rw-r--r--fs/nfs/nfs4state.c18
-rw-r--r--fs/nfs/nfs4trace.h10
-rw-r--r--fs/nfs/nfs4xdr.c43
-rw-r--r--fs/nfs/pagelist.c6
-rw-r--r--fs/nfs/pnfs.c349
-rw-r--r--fs/nfs/pnfs.h17
-rw-r--r--fs/nfs/pnfs_nfs.c60
-rw-r--r--fs/nfs/super.c9
-rw-r--r--fs/nfs/write.c64
-rw-r--r--fs/ocfs2/cluster/heartbeat.c180
-rw-r--r--fs/ocfs2/cluster/tcp_internal.h5
-rw-r--r--fs/ocfs2/inode.c7
-rw-r--r--fs/ocfs2/xattr.c23
-rw-r--r--fs/orangefs/xattr.c10
-rw-r--r--fs/overlayfs/copy_up.c26
-rw-r--r--fs/overlayfs/dir.c67
-rw-r--r--fs/overlayfs/inode.c5
-rw-r--r--fs/overlayfs/overlayfs.h6
-rw-r--r--fs/overlayfs/readdir.c18
-rw-r--r--fs/overlayfs/super.c37
-rw-r--r--fs/posix_acl.c6
-rw-r--r--fs/readdir.c12
-rw-r--r--fs/reiserfs/xattr_security.c9
-rw-r--r--fs/reiserfs/xattr_trusted.c9
-rw-r--r--fs/reiserfs/xattr_user.c9
-rw-r--r--fs/ubifs/debug.c2
-rw-r--r--fs/ubifs/xattr.c7
-rw-r--r--fs/xattr.c13
-rw-r--r--fs/xfs/kmem.c26
-rw-r--r--fs/xfs/kmem.h2
-rw-r--r--fs/xfs/libxfs/xfs_attr.c58
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c22
-rw-r--r--fs/xfs/libxfs/xfs_dir2_sf.c9
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.c99
-rw-r--r--fs/xfs/libxfs/xfs_inode_fork.h1
-rw-r--r--fs/xfs/libxfs/xfs_log_format.h5
-rw-r--r--fs/xfs/libxfs/xfs_sb.c8
-rw-r--r--fs/xfs/libxfs/xfs_shared.h102
-rw-r--r--fs/xfs/xfs_aops.c353
-rw-r--r--fs/xfs/xfs_aops.h15
-rw-r--r--fs/xfs/xfs_attr.h4
-rw-r--r--fs/xfs/xfs_attr_inactive.c16
-rw-r--r--fs/xfs/xfs_attr_list.c85
-rw-r--r--fs/xfs/xfs_bmap_util.c60
-rw-r--r--fs/xfs/xfs_buf.c12
-rw-r--r--fs/xfs/xfs_buf.h20
-rw-r--r--fs/xfs/xfs_buf_item.c121
-rw-r--r--fs/xfs/xfs_dquot.c9
-rw-r--r--fs/xfs/xfs_file.c15
-rw-r--r--fs/xfs/xfs_fsops.c14
-rw-r--r--fs/xfs/xfs_icache.c290
-rw-r--r--fs/xfs/xfs_inode.c167
-rw-r--r--fs/xfs/xfs_inode.h5
-rw-r--r--fs/xfs/xfs_inode_item.c6
-rw-r--r--fs/xfs/xfs_ioctl.c31
-rw-r--r--fs/xfs/xfs_iomap.c53
-rw-r--r--fs/xfs/xfs_iops.c117
-rw-r--r--fs/xfs/xfs_log.c62
-rw-r--r--fs/xfs/xfs_log.h3
-rw-r--r--fs/xfs/xfs_log_cil.c1
-rw-r--r--fs/xfs/xfs_log_priv.h1
-rw-r--r--fs/xfs/xfs_log_recover.c12
-rw-r--r--fs/xfs/xfs_mount.c23
-rw-r--r--fs/xfs/xfs_mount.h34
-rw-r--r--fs/xfs/xfs_pnfs.c7
-rw-r--r--fs/xfs/xfs_qm.c9
-rw-r--r--fs/xfs/xfs_qm_syscalls.c26
-rw-r--r--fs/xfs/xfs_rtalloc.c21
-rw-r--r--fs/xfs/xfs_super.c77
-rw-r--r--fs/xfs/xfs_symlink.c37
-rw-r--r--fs/xfs/xfs_sysfs.c291
-rw-r--r--fs/xfs/xfs_sysfs.h3
-rw-r--r--fs/xfs/xfs_trace.h16
-rw-r--r--fs/xfs/xfs_trans.c88
-rw-r--r--fs/xfs/xfs_trans.h8
-rw-r--r--fs/xfs/xfs_xattr.c26
-rw-r--r--include/asm-generic/preempt.h4
-rw-r--r--include/drm/drm_dp_dual_mode_helper.h92
-rw-r--r--include/dt-bindings/thermal/tegra124-soctherm.h1
-rw-r--r--include/kvm/arm_arch_timer.h11
-rw-r--r--include/kvm/arm_vgic.h20
-rw-r--r--include/kvm/vgic/vgic.h246
-rw-r--r--include/linux/blkdev.h15
-rw-r--r--include/linux/ceph/ceph_frag.h4
-rw-r--r--include/linux/ceph/ceph_fs.h20
-rw-r--r--include/linux/ceph/decode.h2
-rw-r--r--include/linux/ceph/libceph.h57
-rw-r--r--include/linux/ceph/mon_client.h23
-rw-r--r--include/linux/ceph/osd_client.h231
-rw-r--r--include/linux/ceph/osdmap.h158
-rw-r--r--include/linux/ceph/rados.h34
-rw-r--r--include/linux/dax.h41
-rw-r--r--include/linux/dcache.h27
-rw-r--r--include/linux/err.h2
-rw-r--r--include/linux/errno.h1
-rw-r--r--include/linux/export.h33
-rw-r--r--include/linux/fs.h4
-rw-r--r--include/linux/hash.h108
-rw-r--r--include/linux/iova.h23
-rw-r--r--include/linux/irqchip/arm-gic-v3.h6
-rw-r--r--include/linux/irqchip/arm-gic.h2
-rw-r--r--include/linux/kvm_host.h7
-rw-r--r--include/linux/memory_hotplug.h2
-rw-r--r--include/linux/mfd/cros_ec.h6
-rw-r--r--include/linux/mfd/twl6040.h1
-rw-r--r--include/linux/mm.h9
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/mmc/host.h4
-rw-r--r--include/linux/nfs4.h28
-rw-r--r--include/linux/nfs_fs_sb.h1
-rw-r--r--include/linux/nfs_xdr.h30
-rw-r--r--include/linux/perf_event.h34
-rw-r--r--include/linux/platform_data/at24.h2
-rw-r--r--include/linux/rwsem.h2
-rw-r--r--include/linux/sched.h5
-rw-r--r--include/linux/seqlock.h4
-rw-r--r--include/linux/slub_def.h16
-rw-r--r--include/linux/stringhash.h76
-rw-r--r--include/linux/sunrpc/auth.h26
-rw-r--r--include/linux/sunrpc/clnt.h1
-rw-r--r--include/linux/sunrpc/msg_prot.h4
-rw-r--r--include/linux/sunrpc/svcauth.h40
-rw-r--r--include/linux/sunrpc/xprt.h1
-rw-r--r--include/linux/sunrpc/xprtrdma.h4
-rw-r--r--include/linux/thermal.h1
-rw-r--r--include/linux/xattr.h7
-rw-r--r--include/rdma/ib_mad.h60
-rw-r--r--include/rdma/ib_pack.h5
-rw-r--r--include/rdma/ib_sa.h12
-rw-r--r--include/rdma/ib_verbs.h126
-rw-r--r--include/rdma/rdma_vt.h13
-rw-r--r--include/rdma/rdmavt_qp.h5
-rw-r--r--include/target/iscsi/iscsi_target_core.h27
-rw-r--r--include/target/iscsi/iscsi_transport.h41
-rw-r--r--include/target/target_core_base.h2
-rw-r--r--include/target/target_core_fabric.h6
-rw-r--r--include/trace/events/kvm.h6
-rw-r--r--include/uapi/linux/nvme_ioctl.h1
-rw-r--r--include/uapi/linux/perf_event.h1
-rw-r--r--include/uapi/rdma/hfi/hfi1_user.h80
-rw-r--r--include/uapi/rdma/rdma_netlink.h10
-rw-r--r--include/uapi/sound/asoc.h44
-rw-r--r--include/video/imx-ipu-v3.h2
-rw-r--r--init/Kconfig29
-rw-r--r--init/main.c3
-rw-r--r--kernel/bpf/stackmap.c3
-rw-r--r--kernel/events/callchain.c36
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/gcov/Kconfig1
-rw-r--r--kernel/locking/rwsem.c16
-rw-r--r--kernel/pid.c2
-rw-r--r--kernel/sched/core.c18
-rw-r--r--kernel/sched/cpufreq_schedutil.c4
-rw-r--r--kernel/sysctl.c11
-rw-r--r--lib/Kconfig.debug14
-rw-r--r--lib/Makefile2
-rw-r--r--lib/dma-debug.c2
-rw-r--r--lib/iov_iter.c2
-rw-r--r--lib/test_hash.c250
-rw-r--r--lib/test_uuid.c133
-rw-r--r--lib/uuid.c4
-rw-r--r--mm/Kconfig3
-rw-r--r--mm/cma.c7
-rw-r--r--mm/filemap.c30
-rw-r--r--mm/kasan/kasan.h1
-rw-r--r--mm/memcontrol.c41
-rw-r--r--mm/memory.c40
-rw-r--r--mm/memory_hotplug.c4
-rw-r--r--mm/mmap.c16
-rw-r--r--mm/nommu.c2
-rw-r--r--mm/oom_kill.c32
-rw-r--r--mm/page_ext.c4
-rw-r--r--mm/rmap.c2
-rw-r--r--mm/shmem.c7
-rw-r--r--mm/truncate.c62
-rw-r--r--mm/zsmalloc.c35
-rw-r--r--net/9p/client.c8
-rw-r--r--net/ceph/ceph_common.c2
-rw-r--r--net/ceph/ceph_strings.c16
-rw-r--r--net/ceph/debugfs.c147
-rw-r--r--net/ceph/mon_client.c393
-rw-r--r--net/ceph/osd_client.c4032
-rw-r--r--net/ceph/osdmap.c651
-rw-r--r--net/sunrpc/auth.c9
-rw-r--r--net/sunrpc/auth_generic.c13
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c6
-rw-r--r--net/sunrpc/auth_unix.c6
-rw-r--r--net/sunrpc/clnt.c17
-rw-r--r--net/sunrpc/xdr.c2
-rw-r--r--net/sunrpc/xprtrdma/backchannel.c16
-rw-r--r--net/sunrpc/xprtrdma/fmr_ops.c134
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c214
-rw-r--r--net/sunrpc/xprtrdma/physical_ops.c39
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c517
-rw-r--r--net/sunrpc/xprtrdma/transport.c16
-rw-r--r--net/sunrpc/xprtrdma/verbs.c78
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h47
-rw-r--r--net/sunrpc/xprtsock.c6
-rw-r--r--scripts/Kbuild.include45
-rw-r--r--scripts/Makefile.build44
-rw-r--r--scripts/Makefile.extrawarn1
-rw-r--r--scripts/Makefile.lib8
-rwxr-xr-xscripts/adjust_autoksyms.sh101
-rw-r--r--scripts/basic/fixdep.c61
-rwxr-xr-xscripts/coccicheck2
-rw-r--r--scripts/coccinelle/api/setup_timer.cocci4
-rw-r--r--scripts/coccinelle/misc/compare_const_fl.cocci171
-rw-r--r--scripts/genksyms/genksyms.c3
-rw-r--r--scripts/kconfig/confdata.c4
-rw-r--r--scripts/kconfig/symbol.c14
-rw-r--r--scripts/package/Makefile4
-rwxr-xr-xscripts/package/builddeb5
-rwxr-xr-xscripts/package/mkspec5
-rw-r--r--security/smack/smack_lsm.c2
-rw-r--r--security/yama/yama_lsm.c69
-rw-r--r--sound/pci/hda/patch_realtek.c16
-rw-r--r--sound/soc/codecs/Kconfig38
-rw-r--r--sound/soc/codecs/Makefile11
-rw-r--r--sound/soc/codecs/ak4642.c3
-rw-r--r--sound/soc/codecs/max98371.c441
-rw-r--r--sound/soc/codecs/max98371.h67
-rw-r--r--sound/soc/codecs/rt298.c51
-rw-r--r--sound/soc/codecs/rt298.h2
-rw-r--r--sound/soc/codecs/rt5677.c24
-rw-r--r--sound/soc/codecs/tas571x.c141
-rw-r--r--sound/soc/codecs/tas571x.h22
-rw-r--r--sound/soc/codecs/tas5720.c620
-rw-r--r--sound/soc/codecs/tas5720.h90
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c10
-rw-r--r--sound/soc/codecs/tlv320aic32x4-i2c.c74
-rw-r--r--sound/soc/codecs/tlv320aic32x4-spi.c76
-rw-r--r--sound/soc/codecs/tlv320aic32x4.c279
-rw-r--r--sound/soc/codecs/tlv320aic32x4.h7
-rw-r--r--sound/soc/codecs/twl6040.c16
-rw-r--r--sound/soc/codecs/wm8962.c9
-rw-r--r--sound/soc/codecs/wm8962.h6
-rw-r--r--sound/soc/generic/simple-card.c1
-rw-r--r--sound/soc/kirkwood/Kconfig1
-rw-r--r--sound/soc/mediatek/Kconfig1
-rw-r--r--sound/soc/mediatek/mt8173-rt5650-rt5676.c27
-rw-r--r--sound/soc/mediatek/mt8173-rt5650.c50
-rw-r--r--sound/soc/mediatek/mtk-afe-pcm.c2
-rw-r--r--sound/soc/omap/mcbsp.c8
-rw-r--r--sound/soc/omap/omap-pcm.c2
-rw-r--r--sound/soc/pxa/brownstone.c1
-rw-r--r--sound/soc/pxa/mioa701_wm9713.c1
-rw-r--r--sound/soc/pxa/mmp-pcm.c1
-rw-r--r--sound/soc/pxa/mmp-sspa.c1
-rw-r--r--sound/soc/pxa/palm27x.c1
-rw-r--r--sound/soc/pxa/pxa-ssp.c1
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c1
-rw-r--r--sound/soc/pxa/pxa2xx-pcm.c1
-rw-r--r--sound/soc/qcom/lpass-platform.c8
-rw-r--r--sound/soc/sh/rcar/adg.c8
-rw-r--r--sound/soc/sh/rcar/dma.c12
-rw-r--r--sound/soc/sh/rcar/rsnd.h13
-rw-r--r--sound/soc/sh/rcar/src.c4
-rw-r--r--sound/soc/soc-topology.c48
-rw-r--r--sound/soc/sti/sti_uniperif.c144
-rw-r--r--sound/soc/sti/uniperif.h220
-rw-r--r--sound/soc/sti/uniperif_player.c182
-rw-r--r--sound/soc/sti/uniperif_reader.c229
-rw-r--r--tools/Makefile6
-rw-r--r--tools/build/Makefile.build8
-rw-r--r--tools/kvm/kvm_stat/Makefile41
-rwxr-xr-xtools/kvm/kvm_stat/kvm_stat1127
-rw-r--r--tools/kvm/kvm_stat/kvm_stat.txt63
-rw-r--r--tools/objtool/Makefile4
-rw-r--r--tools/objtool/elf.h5
-rw-r--r--tools/perf/Documentation/perf-report.txt5
-rw-r--r--tools/perf/Documentation/perf-script.txt2
-rw-r--r--tools/perf/Documentation/perf-trace.txt3
-rw-r--r--tools/perf/builtin-annotate.c5
-rw-r--r--tools/perf/builtin-buildid-cache.c8
-rw-r--r--tools/perf/builtin-diff.c5
-rw-r--r--tools/perf/builtin-record.c81
-rw-r--r--tools/perf/builtin-report.c7
-rw-r--r--tools/perf/builtin-script.c7
-rw-r--r--tools/perf/builtin-stat.c22
-rw-r--r--tools/perf/builtin-timechart.c5
-rw-r--r--tools/perf/builtin-top.c6
-rw-r--r--tools/perf/builtin-trace.c274
-rw-r--r--tools/perf/perf.c3
-rw-r--r--tools/perf/util/annotate.c32
-rw-r--r--tools/perf/util/build-id.c2
-rw-r--r--tools/perf/util/db-export.c3
-rw-r--r--tools/perf/util/dso.c7
-rw-r--r--tools/perf/util/evlist.c34
-rw-r--r--tools/perf/util/evlist.h4
-rw-r--r--tools/perf/util/evsel.c13
-rw-r--r--tools/perf/util/evsel.h1
-rw-r--r--tools/perf/util/hist.c9
-rw-r--r--tools/perf/util/hist.h2
-rw-r--r--tools/perf/util/machine.c35
-rw-r--r--tools/perf/util/machine.h1
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c3
-rw-r--r--tools/perf/util/sort.c84
-rw-r--r--tools/perf/util/sort.h2
-rw-r--r--tools/perf/util/stat-shadow.c8
-rw-r--r--tools/perf/util/symbol.c33
-rw-r--r--tools/perf/util/symbol.h7
-rw-r--r--tools/perf/util/top.h1
-rw-r--r--tools/perf/util/util.c3
-rw-r--r--tools/perf/util/util.h3
-rw-r--r--virt/kvm/arm/arch_timer.c68
-rw-r--r--virt/kvm/arm/hyp/timer-sr.c5
-rw-r--r--virt/kvm/arm/hyp/vgic-v2-sr.c17
-rw-r--r--virt/kvm/arm/pmu.c25
-rw-r--r--virt/kvm/arm/vgic-v2.c4
-rw-r--r--virt/kvm/arm/vgic-v3.c8
-rw-r--r--virt/kvm/arm/vgic.c86
-rw-r--r--virt/kvm/arm/vgic/vgic-init.c452
-rw-r--r--virt/kvm/arm/vgic/vgic-irqfd.c52
-rw-r--r--virt/kvm/arm/vgic/vgic-kvm-device.c431
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v2.c446
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c455
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c526
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.h150
-rw-r--r--virt/kvm/arm/vgic/vgic-v2.c352
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c330
-rw-r--r--virt/kvm/arm/vgic/vgic.c619
-rw-r--r--virt/kvm/arm/vgic/vgic.h131
-rw-r--r--virt/kvm/kvm_main.c187
942 files changed, 34001 insertions, 13505 deletions
diff --git a/Documentation/ABI/stable/sysfs-class-ubi b/Documentation/ABI/stable/sysfs-class-ubi
index 18d471d9faea..a6b324014692 100644
--- a/Documentation/ABI/stable/sysfs-class-ubi
+++ b/Documentation/ABI/stable/sysfs-class-ubi
@@ -107,6 +107,15 @@ Contact: Artem Bityutskiy <dedekind@infradead.org>
Description:
Number of physical eraseblocks reserved for bad block handling.
+What: /sys/class/ubi/ubiX/ro_mode
+Date: April 2016
+KernelVersion: 4.7
+Contact: linux-mtd@lists.infradead.org
+Description:
+ Contains ASCII "1\n" if the read-only flag is set on this
+ device, and "0\n" if it is cleared. UBI devices mark themselves
+ as read-only when they detect an unrecoverable error.
+
What: /sys/class/ubi/ubiX/total_eraseblocks
Date: July 2006
KernelVersion: 2.6.22
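For illustration only (this check is not part of the patch above), userspace can
read the new ro_mode attribute with a few lines of C; the ubi0 device name is an
assumption:

#include <stdio.h>

/* Print whether ubi0 was forced read-only; reads the sysfs attribute
 * documented above ("1\n" when the flag is set, "0\n" when cleared). */
int main(void)
{
	char flag = '0';
	FILE *f = fopen("/sys/class/ubi/ubi0/ro_mode", "r");

	if (!f) {
		perror("ro_mode");
		return 1;
	}
	if (fread(&flag, 1, 1, f) != 1)
		flag = '?';
	fclose(f);
	printf("ubi0 is %s\n", flag == '1' ? "read-only" : "read-write");
	return 0;
}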
diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl
index 4a0c599b6a6d..7586bf75f62e 100644
--- a/Documentation/DocBook/gpu.tmpl
+++ b/Documentation/DocBook/gpu.tmpl
@@ -1628,6 +1628,12 @@ void intel_crt_init(struct drm_device *dev)
!Edrivers/gpu/drm/drm_dp_helper.c
</sect2>
<sect2>
+ <title>Display Port Dual Mode Adaptor Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_dp_dual_mode_helper.c dp dual mode helpers
+!Iinclude/drm/drm_dp_dual_mode_helper.h
+!Edrivers/gpu/drm/drm_dp_dual_mode_helper.c
+ </sect2>
+ <sect2>
<title>Display Port MST Helper Functions Reference</title>
!Pdrivers/gpu/drm/drm_dp_mst_topology.c dp mst helper
!Iinclude/drm/drm_dp_mst_helper.h
diff --git a/Documentation/devicetree/bindings/gpio/microchip,pic32-gpio.txt b/Documentation/devicetree/bindings/gpio/microchip,pic32-gpio.txt
index ef3752889496..dd031fc93b55 100644
--- a/Documentation/devicetree/bindings/gpio/microchip,pic32-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/microchip,pic32-gpio.txt
@@ -33,7 +33,7 @@ gpio0: gpio0@1f860000 {
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&PBCLK4>;
+ clocks = <&rootclk PB4CLK>;
microchip,gpio-bank = <0>;
gpio-ranges = <&pic32_pinctrl 0 0 16>;
};
diff --git a/Documentation/devicetree/bindings/mips/cpu_irq.txt b/Documentation/devicetree/bindings/mips/cpu_irq.txt
index fc149f326dae..f080f06da6d8 100644
--- a/Documentation/devicetree/bindings/mips/cpu_irq.txt
+++ b/Documentation/devicetree/bindings/mips/cpu_irq.txt
@@ -13,7 +13,7 @@ Required properties:
- compatible : Should be "mti,cpu-interrupt-controller"
Example devicetree:
- cpu-irq: cpu-irq@0 {
+ cpu-irq: cpu-irq {
#address-cells = <0>;
interrupt-controller;
diff --git a/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt b/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt
index 71ad57e050b1..3149297b3933 100644
--- a/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt
+++ b/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt
@@ -20,7 +20,7 @@ Example:
compatible = "microchip,pic32mzda-sdhci";
reg = <0x1f8ec000 0x100>;
interrupts = <191 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&REFCLKO4>, <&PBCLK5>;
+ clocks = <&rootclk REF4CLK>, <&rootclk PB5CLK>;
clock-names = "base_clk", "sys_clk";
bus-width = <4>;
cap-sd-highspeed;
diff --git a/Documentation/devicetree/bindings/mtd/atmel-nand.txt b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
index d53aba98fbc9..3e7ee99d3949 100644
--- a/Documentation/devicetree/bindings/mtd/atmel-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/atmel-nand.txt
@@ -39,7 +39,7 @@ Optional properties:
Nand Flash Controller(NFC) is an optional sub-node
Required properties:
-- compatible : "atmel,sama5d3-nfc" or "atmel,sama5d4-nfc".
+- compatible : "atmel,sama5d3-nfc".
- reg : should specify the address and size used for NFC command registers,
NFC registers and NFC SRAM. NFC SRAM address and size can be absent
if you don't want to use it.
diff --git a/Documentation/devicetree/bindings/mtd/nand.txt b/Documentation/devicetree/bindings/mtd/nand.txt
index 68342eac2383..3733300de8dd 100644
--- a/Documentation/devicetree/bindings/mtd/nand.txt
+++ b/Documentation/devicetree/bindings/mtd/nand.txt
@@ -53,7 +53,8 @@ Example:
nand@0 {
reg = <0>;
- nand-ecc-mode = "soft_bch";
+ nand-ecc-mode = "soft";
+ nand-ecc-algo = "bch";
/* controller specific properties */
};
diff --git a/Documentation/devicetree/bindings/pinctrl/microchip,pic32-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/microchip,pic32-pinctrl.txt
index 4b5efa51bec7..29b72e303ebf 100644
--- a/Documentation/devicetree/bindings/pinctrl/microchip,pic32-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/microchip,pic32-pinctrl.txt
@@ -34,7 +34,7 @@ pic32_pinctrl: pinctrl@1f801400{
#size-cells = <1>;
compatible = "microchip,pic32mzda-pinctrl";
reg = <0x1f801400 0x400>;
- clocks = <&PBCLK1>;
+ clocks = <&rootclk PB1CLK>;
pinctrl_uart2: pinctrl_uart2 {
uart2-tx {
diff --git a/Documentation/devicetree/bindings/serial/microchip,pic32-uart.txt b/Documentation/devicetree/bindings/serial/microchip,pic32-uart.txt
index 65b38bf60ae0..7a34345d0ca3 100644
--- a/Documentation/devicetree/bindings/serial/microchip,pic32-uart.txt
+++ b/Documentation/devicetree/bindings/serial/microchip,pic32-uart.txt
@@ -20,7 +20,7 @@ Example:
interrupts = <112 IRQ_TYPE_LEVEL_HIGH>,
<113 IRQ_TYPE_LEVEL_HIGH>,
<114 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&PBCLK2>;
+ clocks = <&rootclk PB2CLK>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_uart1
&pinctrl_uart1_cts
diff --git a/Documentation/devicetree/bindings/sound/max98371.txt b/Documentation/devicetree/bindings/sound/max98371.txt
new file mode 100644
index 000000000000..6c285235e64b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/max98371.txt
@@ -0,0 +1,17 @@
+max98371 codec
+
+This device supports I2C mode only.
+
+Required properties:
+
+- compatible : "maxim,max98371"
+- reg : The I2C address of the device
+
+Example:
+
+&i2c {
+ max98371: max98371@31 {
+ compatible = "maxim,max98371";
+ reg = <0x31>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt b/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
index f205ce9e31dd..ac28cdb4910e 100644
--- a/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
+++ b/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
@@ -1,15 +1,16 @@
-MT8173 with RT5650 RT5676 CODECS
+MT8173 with RT5650 RT5676 CODECS and HDMI via I2S
Required properties:
- compatible : "mediatek,mt8173-rt5650-rt5676"
- mediatek,audio-codec: the phandles of rt5650 and rt5676 codecs
+ and of the hdmi encoder node
- mediatek,platform: the phandle of MT8173 ASoC platform
Example:
sound {
compatible = "mediatek,mt8173-rt5650-rt5676";
- mediatek,audio-codec = <&rt5650 &rt5676>;
+ mediatek,audio-codec = <&rt5650 &rt5676 &hdmi0>;
mediatek,platform = <&afe>;
};
diff --git a/Documentation/devicetree/bindings/sound/mt8173-rt5650.txt b/Documentation/devicetree/bindings/sound/mt8173-rt5650.txt
index fe5a5ef1714d..5bfa6b60530b 100644
--- a/Documentation/devicetree/bindings/sound/mt8173-rt5650.txt
+++ b/Documentation/devicetree/bindings/sound/mt8173-rt5650.txt
@@ -5,11 +5,21 @@ Required properties:
- mediatek,audio-codec: the phandles of rt5650 codecs
- mediatek,platform: the phandle of MT8173 ASoC platform
+Optional subnodes:
+- codec-capture : subnode describing the rt5650 codec capture path
+Required codec-capture subnode properties:
+- sound-dai: audio codec DAI name on the capture path
+ <&rt5650 0> : Default setting. Connect rt5650 I2S1 for capture. (dai_name = rt5645-aif1)
+ <&rt5650 1> : Connect rt5650 I2S2 for capture. (dai_name = rt5645-aif2)
+
Example:
sound {
compatible = "mediatek,mt8173-rt5650";
mediatek,audio-codec = <&rt5650>;
mediatek,platform = <&afe>;
+ codec-capture {
+ sound-dai = <&rt5650 1>;
+ };
};
diff --git a/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt b/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
index 028fa1c82f50..4d9a83d9a017 100644
--- a/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
+++ b/Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
@@ -37,17 +37,18 @@ Required properties:
- dai-name: DAI name that describes the IP.
+ - IP mode: IP working mode depending on associated codec.
+ "HDMI" connected to HDMI codec and supports IEC HDMI formats (player only).
+ "SPDIF" connected to SPDIF codec and supports SPDIF formats (player only).
+ "PCM" PCM standard mode for I2S or TDM bus.
+ "TDM" TDM mode for TDM bus.
+
Required properties ("st,sti-uni-player" compatibility only):
- clocks: CPU_DAI IP clock source, listed in the same order as the
CPU_DAI properties.
- uniperiph-id: internal SOC IP instance ID.
- - IP mode: IP working mode depending on associated codec.
- "HDMI" connected to HDMI codec IP and IEC HDMI formats.
- "SPDIF"connected to SPDIF codec and support SPDIF formats.
- "PCM" PCM standard mode for I2S or TDM bus.
-
Optional properties:
- pinctrl-0: defined for CPU_DAI@1 and CPU_DAI@4 to describe I2S PIOs for
external codecs connection.
@@ -56,6 +57,22 @@ Optional properties:
Example:
+ sti_uni_player1: sti-uni-player@1 {
+ compatible = "st,sti-uni-player";
+ status = "okay";
+ #sound-dai-cells = <0>;
+ st,syscfg = <&syscfg_core>;
+ clocks = <&clk_s_d0_flexgen CLK_PCM_1>;
+ reg = <0x8D81000 0x158>;
+ interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
+ dmas = <&fdma0 3 0 1>;
+ st,dai-name = "Uni Player #1 (I2S)";
+ dma-names = "tx";
+ st,uniperiph-id = <1>;
+ st,version = <5>;
+ st,mode = "TDM";
+ };
+
sti_uni_player2: sti-uni-player@2 {
compatible = "st,sti-uni-player";
status = "okay";
@@ -65,7 +82,7 @@ Example:
reg = <0x8D82000 0x158>;
interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
dmas = <&fdma0 4 0 1>;
- dai-name = "Uni Player #1 (DAC)";
+ dai-name = "Uni Player #2 (DAC)";
dma-names = "tx";
uniperiph-id = <2>;
version = <5>;
@@ -82,7 +99,7 @@ Example:
interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>;
dmas = <&fdma0 7 0 1>;
dma-names = "tx";
- dai-name = "Uni Player #1 (PIO)";
+ dai-name = "Uni Player #3 (SPDIF)";
uniperiph-id = <3>;
version = <5>;
mode = "SPDIF";
@@ -99,6 +116,7 @@ Example:
dma-names = "rx";
dai-name = "Uni Reader #1 (HDMI RX)";
version = <3>;
+ st,mode = "PCM";
};
2) sti-sas-codec: internal audio codec IPs driver
@@ -152,4 +170,20 @@ Example of audio card declaration:
sound-dai = <&sti_sasg_codec 0>;
};
};
+ simple-audio-card,dai-link@2 {
+ /* TDM playback */
+ format = "left_j";
+ frame-inversion = <1>;
+ cpu {
+ sound-dai = <&sti_uni_player1>;
+ dai-tdm-slot-num = <16>;
+ dai-tdm-slot-width = <16>;
+ dai-tdm-slot-tx-mask =
+ <1 1 1 1 0 0 0 0 0 0 1 1 0 0 1 1>;
+ };
+
+ codec {
+ sound-dai = <&sti_sasg_codec 3>;
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/sound/tas571x.txt b/Documentation/devicetree/bindings/sound/tas571x.txt
index 0ac31d8d5ac4..b4959f10b74b 100644
--- a/Documentation/devicetree/bindings/sound/tas571x.txt
+++ b/Documentation/devicetree/bindings/sound/tas571x.txt
@@ -1,4 +1,4 @@
-Texas Instruments TAS5711/TAS5717/TAS5719 stereo power amplifiers
+Texas Instruments TAS5711/TAS5717/TAS5719/TAS5721 stereo power amplifiers
The codec is controlled through an I2C interface. It also has two other
signals that can be wired up to GPIOs: reset (strongly recommended), and
@@ -6,7 +6,11 @@ powerdown (optional).
Required properties:
-- compatible: "ti,tas5711", "ti,tas5717", or "ti,tas5719"
+- compatible: should be one of the following:
+ - "ti,tas5711",
+ - "ti,tas5717",
+ - "ti,tas5719",
+ - "ti,tas5721"
- reg: The I2C address of the device
- #sound-dai-cells: must be equal to 0
@@ -25,6 +29,8 @@ Optional properties:
- PVDD_B-supply: regulator phandle for the PVDD_B supply (5711)
- PVDD_C-supply: regulator phandle for the PVDD_C supply (5711)
- PVDD_D-supply: regulator phandle for the PVDD_D supply (5711)
+- DRVDD-supply: regulator phandle for the DRVDD supply (5721)
+- PVDD-supply: regulator phandle for the PVDD supply (5721)
Example:
diff --git a/Documentation/devicetree/bindings/sound/tas5720.txt b/Documentation/devicetree/bindings/sound/tas5720.txt
new file mode 100644
index 000000000000..806ea7381483
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/tas5720.txt
@@ -0,0 +1,25 @@
+Texas Instruments TAS5720 Mono Audio amplifier
+
+The TAS5720 serial control bus communicates through the I2C protocol only. The
+serial bus is also used for periodic codec fault checking/reporting during
+audio playback. For more product information please see the links below:
+
+http://www.ti.com/product/TAS5720L
+http://www.ti.com/product/TAS5720M
+
+Required properties:
+
+- compatible : "ti,tas5720"
+- reg : I2C slave address
+- dvdd-supply : phandle to a 3.3-V supply for the digital circuitry
+- pvdd-supply : phandle to a supply used for the Class-D amp and the analog circuitry
+
+Example:
+
+tas5720: tas5720@6c {
+ status = "okay";
+ compatible = "ti,tas5720";
+ reg = <0x6c>;
+ dvdd-supply = <&vdd_3v3_reg>;
+ pvdd-supply = <&amp_supply_reg>;
+};
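As a purely hypothetical sketch of the fault polling mentioned above, a raw
register read over i2c-dev could look like the following; the I2C bus number
and the fault-register offset 0x08 are illustrative assumptions, not taken
from this binding (in practice the kernel driver performs this checking
itself):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/i2c-dev.h>

int main(void)
{
	unsigned char reg = 0x08, val = 0;	/* 0x08: placeholder offset */
	int fd = open("/dev/i2c-1", O_RDWR);	/* bus number assumed */

	if (fd < 0 || ioctl(fd, I2C_SLAVE, 0x6c) < 0)	/* 0x6c from the example */
		return 1;
	/* Write the register offset, then read back one byte. */
	if (write(fd, &reg, 1) != 1 || read(fd, &val, 1) != 1)
		return 1;
	printf("fault register: 0x%02x\n", val);
	close(fd);
	return 0;
}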
diff --git a/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt b/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt
index 6908d3aca598..edebfa0a985e 100644
--- a/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt
+++ b/Documentation/devicetree/bindings/thermal/nvidia,tegra124-soctherm.txt
@@ -26,6 +26,10 @@ Required properties :
of this property. See <dt-bindings/thermal/tegra124-soctherm.h> for a
list of valid values when referring to thermal sensors.
+Note:
+- "critical" type trip points will be programmed into the SOC_THERM hardware
+as the shutdown temperature. Once the temperature of this thermal zone rises
+above it, the system will be shut down or reset by the hardware.
Example :
@@ -51,5 +55,13 @@ Example: referring to thermal sensors :
thermal-sensors =
<&soctherm TEGRA124_SOCTHERM_SENSOR_CPU>;
+
+ trips {
+ cpu_shutdown_trip: shutdown-trip {
+ temperature = <102500>;
+ hysteresis = <1000>;
+ type = "critical";
+ };
+ };
};
};
diff --git a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
index e5ee3f159893..a8e52c8ccfcc 100644
--- a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
@@ -11,7 +11,6 @@ Required properties:
- "renesas,thermal-r8a7791" (R-Car M2-W)
- "renesas,thermal-r8a7792" (R-Car V2H)
- "renesas,thermal-r8a7793" (R-Car M2-N)
- - "renesas,thermal-r8a7794" (R-Car E2)
- reg : Address range of the thermal registers.
The 1st reg will be recognized as common register
if it has "interrupts".
diff --git a/Documentation/devicetree/bindings/thermal/tango-thermal.txt b/Documentation/devicetree/bindings/thermal/tango-thermal.txt
new file mode 100644
index 000000000000..212198d4b937
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/tango-thermal.txt
@@ -0,0 +1,17 @@
+* Tango Thermal
+
+The SMP8758 SoC includes 3 instances of this temperature sensor
+(in the CPU, video decoder, and PCIe controller).
+
+Required properties:
+- #thermal-sensor-cells: Should be 0 (see thermal.txt)
+- compatible: "sigma,smp8758-thermal"
+- reg: Address range of the thermal registers
+
+Example:
+
+ cpu_temp: thermal@920100 {
+ #thermal-sensor-cells = <0>;
+ compatible = "sigma,smp8758-thermal";
+ reg = <0x920100 12>;
+ };
diff --git a/Documentation/devicetree/bindings/thermal/thermal-generic-adc.txt b/Documentation/devicetree/bindings/thermal/thermal-generic-adc.txt
new file mode 100644
index 000000000000..d72355502b78
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/thermal-generic-adc.txt
@@ -0,0 +1,89 @@
+General Purpose Analog To Digital Converter (ADC) based thermal sensor.
+
+On some platforms, a thermal sensor such as a thermistor is connected to one
+of the ADC channels, and the sensor resistance is read via the voltage across
+the sensor resistor. The voltage read across the sensor is mapped to
+temperature using a voltage-temperature lookup table.
+
+Required properties:
+===================
+- compatible: Must be "generic-adc-thermal".
+- temperature-lookup-table: Two-dimensional array of integers; a lookup
+ table mapping ADC values to temperatures. When the ADC
+ is read, the value is looked up in this table to get the
+ equivalent temperature (interpolation between rows is
+ sketched after the example below).
+ The first value of each row is the temperature in
+ millicelsius and the second value of each row is the
+ ADC reading.
+- #thermal-sensor-cells: Should be 0. See ./thermal.txt for a description
+ of this property.
+
+Example :
+#include <dt-bindings/thermal/thermal.h>
+
+i2c@7000c400 {
+ ads1015: ads1015@4a {
+ reg = <0x4a>;
+ compatible = "ads1015";
+ sampling-frequency = <3300>;
+ #io-channel-cells = <1>;
+ };
+};
+
+tboard_thermistor: thermal-sensor {
+ compatible = "generic-adc-thermal";
+ #thermal-sensor-cells = <0>;
+ io-channels = <&ads1015 1>;
+ io-channel-names = "sensor-channel";
+ temperature-lookup-table = < (-40000) 2578
+ (-39000) 2577
+ (-38000) 2576
+ (-37000) 2575
+ (-36000) 2574
+ (-35000) 2573
+ (-34000) 2572
+ (-33000) 2571
+ (-32000) 2569
+ (-31000) 2568
+ (-30000) 2567
+ ::::::::::
+ 118000 254
+ 119000 247
+ 120000 240
+ 121000 233
+ 122000 226
+ 123000 220
+ 124000 214
+ 125000 208>;
+};
+
+dummy_cool_dev: dummy-cool-dev {
+ compatible = "dummy-cooling-dev";
+ #cooling-cells = <2>; /* min followed by max */
+};
+
+thermal-zones {
+ Tboard {
+ polling-delay = <15000>; /* milliseconds */
+ polling-delay-passive = <0>; /* milliseconds */
+ thermal-sensors = <&tboard_thermistor>;
+
+ trips {
+ therm_est_trip: therm_est_trip {
+ temperature = <40000>;
+ type = "active";
+ hysteresis = <1000>;
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&therm_est_trip>;
+ cooling-device = <&dummy_cool_dev THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ contribution = <100>;
+ };
+
+ };
+ };
+};
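
For illustration, here is one way a consumer could turn an ADC reading into a
temperature using such a table. This is a hedged sketch in plain C, not the
in-kernel driver's code: the type and function names are invented, and it
assumes rows are sorted by rising temperature and strictly falling ADC value.

struct lookup_row {
	int temp_mc;	/* temperature in millicelsius */
	int adc;	/* corresponding ADC reading */
};

/* Linear interpolation between the two rows that bracket 'adc'. */
static int adc_to_temp(const struct lookup_row *tbl, int nrows, int adc)
{
	int i;

	if (adc >= tbl[0].adc)			/* colder than first row */
		return tbl[0].temp_mc;

	for (i = 1; i < nrows; i++) {
		if (adc >= tbl[i].adc) {	/* rows i-1 and i bracket it */
			int da = tbl[i - 1].adc - tbl[i].adc;
			int dt = tbl[i].temp_mc - tbl[i - 1].temp_mc;

			return tbl[i - 1].temp_mc +
			       dt * (tbl[i - 1].adc - adc) / da;
		}
	}
	return tbl[nrows - 1].temp_mc;		/* hotter than last row */
}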
diff --git a/Documentation/devicetree/bindings/watchdog/microchip,pic32-dmt.txt b/Documentation/devicetree/bindings/watchdog/microchip,pic32-dmt.txt
index 852f694f3177..49485f831373 100644
--- a/Documentation/devicetree/bindings/watchdog/microchip,pic32-dmt.txt
+++ b/Documentation/devicetree/bindings/watchdog/microchip,pic32-dmt.txt
@@ -8,12 +8,12 @@ Required properties:
- compatible: must be "microchip,pic32mzda-dmt".
- reg: physical base address of the controller and length of memory mapped
region.
-- clocks: phandle of parent clock (should be &PBCLK7).
+- clocks: phandle of source clk. Should be <&rootclk PB7CLK>.
Example:
watchdog@1f800a00 {
compatible = "microchip,pic32mzda-dmt";
reg = <0x1f800a00 0x80>;
- clocks = <&PBCLK7>;
+ clocks = <&rootclk PB7CLK>;
};
diff --git a/Documentation/devicetree/bindings/watchdog/microchip,pic32-wdt.txt b/Documentation/devicetree/bindings/watchdog/microchip,pic32-wdt.txt
index d1401030e75c..f03a29a1b323 100644
--- a/Documentation/devicetree/bindings/watchdog/microchip,pic32-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/microchip,pic32-wdt.txt
@@ -7,12 +7,12 @@ Required properties:
- compatible: must be "microchip,pic32mzda-wdt".
- reg: physical base address of the controller and length of memory mapped
region.
-- clocks: phandle of source clk. should be <&LPRC> clk.
+- clocks: phandle of source clk. Should be <&rootclk LPRCCLK>.
Example:
watchdog@1f800800 {
compatible = "microchip,pic32mzda-wdt";
reg = <0x1f800800 0x200>;
- clocks = <&LPRC>;
+ clocks = <&rootclk LPRCCLK>;
};
diff --git a/Documentation/filesystems/dax.txt b/Documentation/filesystems/dax.txt
index 7bde64014a89..ce4587d257d2 100644
--- a/Documentation/filesystems/dax.txt
+++ b/Documentation/filesystems/dax.txt
@@ -79,6 +79,38 @@ These filesystems may be used for inspiration:
- ext4: the fourth extended filesystem, see Documentation/filesystems/ext4.txt
+Handling Media Errors
+---------------------
+
+The libnvdimm subsystem stores a record of known media error locations for
+each pmem block device (in gendisk->badblocks). If a fault occurs at such a
+location, or at one with a latent error not yet discovered, the application
+can expect to receive a SIGBUS. Libnvdimm also allows these errors to be
+cleared by simply
+writing the affected sectors (through the pmem driver, and if the underlying
+NVDIMM supports the clear_poison DSM defined by ACPI).
+
+Since DAX IO normally doesn't go through the driver/bio path, applications or
+sysadmins have an option to restore the lost data from a prior backup/inbuilt
+redundancy in the following ways:
+
+1. Delete the affected file, and restore from a backup (sysadmin route):
+ This frees the filesystem blocks that were used by the file; the next
+ time they are allocated, they will be zeroed first (which happens
+ through the driver), clearing the bad sectors.
+
+2. Truncate or hole-punch the part of the file that has a bad-block (at least
+ an entire aligned sector has to be hole-punched, but not necessarily an
+ entire filesystem block).
+
+These are the two basic paths that allow DAX filesystems to continue operating
+in the presence of media errors. More robust error recovery mechanisms can be
+built on top of this in the future, for example, involving redundancy/mirroring
+provided at the block layer through DM, or additionally, at the filesystem
+level. These would have to rely on the above two tenets, that error clearing
+can happen either by sending an IO through the driver, or zeroing (also through
+the driver).
+
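As a concrete illustration of option 2, the userspace sketch below punches out
one aligned 512-byte sector at an assumed bad offset; the path and offsets are
invented for the example. A later write to that range allocates fresh, zeroed
blocks through the driver, clearing the error.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	off_t bad_off = 4096;	/* assumed sector-aligned bad offset */
	off_t bad_len = 512;	/* at least one entire aligned sector */
	int fd = open("/mnt/pmem/datafile", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Deallocate the range without changing the file size. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      bad_off, bad_len) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}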
+
Shortcomings
------------
diff --git a/Documentation/filesystems/directory-locking b/Documentation/filesystems/directory-locking
index 09bbf9a54f80..c314badbcfc6 100644
--- a/Documentation/filesystems/directory-locking
+++ b/Documentation/filesystems/directory-locking
@@ -1,30 +1,37 @@
Locking scheme used for directory operations is based on two
-kinds of locks - per-inode (->i_mutex) and per-filesystem
+kinds of locks - per-inode (->i_rwsem) and per-filesystem
(->s_vfs_rename_mutex).
- When taking the i_mutex on multiple non-directory objects, we
+ When taking the i_rwsem on multiple non-directory objects, we
always acquire the locks in order by increasing address. We'll call
that "inode pointer" order in the following.
For our purposes all operations fall in 6 classes:
1) read access. Locking rules: caller locks directory we are accessing.
+The lock is taken shared.
-2) object creation. Locking rules: same as above.
+2) object creation. Locking rules: same as above, but the lock is taken
+exclusive.
3) object removal. Locking rules: caller locks parent, finds victim,
-locks victim and calls the method.
+locks victim and calls the method. Locks are exclusive.
4) rename() that is _not_ cross-directory. Locking rules: caller locks
-the parent and finds source and target. If target already exists, lock
-it. If source is a non-directory, lock it. If that means we need to
-lock both, lock them in inode pointer order.
+the parent and finds source and target. In case of exchange (with
+RENAME_EXCHANGE in rename2() flags argument) lock both. In any case,
+if the target already exists, lock it. If the source is a non-directory,
+lock it. If we need to lock both, lock them in inode pointer order.
+Then call the method. All locks are exclusive.
+NB: we might get away with locking the source (and target in exchange
+case) shared.
5) link creation. Locking rules:
* lock parent
* check that source is not a directory
* lock source
* call the method.
+All locks are exclusive.
6) cross-directory rename. The trickiest in the whole bunch. Locking
rules:
@@ -35,11 +42,12 @@ rules:
fail with -ENOTEMPTY
* if new parent is equal to or is a descendent of source
fail with -ELOOP
- * If target exists, lock it. If source is a non-directory, lock
- it. In case that means we need to lock both source and target,
- do so in inode pointer order.
+ * If it's an exchange, lock both the source and the target.
+ * If the target exists, lock it. If the source is a non-directory,
+ lock it. If we need to lock both, do so in inode pointer order.
* call the method.
-
+All ->i_rwsem are taken exclusive. Again, we might get away with locking
+the source (and target in exchange case) shared.
The rules above obviously guarantee that all directories that are going to be
read, modified or removed by method will be locked by caller.
@@ -73,7 +81,7 @@ objects - A < B iff A is an ancestor of B.
attempt to acquire some lock and already holds at least one lock. Let's
consider the set of contended locks. First of all, filesystem lock is
not contended, since any process blocked on it is not holding any locks.
-Thus all processes are blocked on ->i_mutex.
+Thus all processes are blocked on ->i_rwsem.
By (3), any process holding a non-directory lock can only be
waiting on another non-directory lock with a larger address. Therefore
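
The "inode pointer order" rule above is the part most easily gotten wrong; the
kernel-style sketch below shows the idea, in the spirit of (but not copied
from) the VFS helper lock_two_nondirectories(). Names are illustrative.

#include <linux/fs.h>
#include <linux/kernel.h>

/* Lock two non-directory inodes exclusive without risking an ABBA
 * deadlock: always take the lower-addressed ->i_rwsem first. */
static void lock_inode_pair(struct inode *a, struct inode *b)
{
	if (a == b) {
		down_write(&a->i_rwsem);
		return;
	}
	if (a > b)
		swap(a, b);		/* lower address goes first */
	down_write(&a->i_rwsem);
	down_write_nested(&b->i_rwsem, I_MUTEX_NONDIR2);
}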
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index 28091457b71a..d6259c786316 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -194,15 +194,6 @@ If a file with multiple hard links is copied up, then this will
"break" the link. Changes will not be propagated to other names
referring to the same inode.
-Symlinks in /proc/PID/ and /proc/PID/fd which point to a non-directory
-object in overlayfs will not contain valid absolute paths, only
-relative paths leading up to the filesystem's root. This will be
-fixed in the future.
-
-Some operations are not atomic, for example a crash during copy_up or
-rename will leave the filesystem in an inconsistent state. This will
-be addressed in the future.
-
Changes to underlying filesystems
---------------------------------
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 46f3bb7a02f5..a5fb89cac615 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -578,3 +578,10 @@ in your dentry operations instead.
--
[mandatory]
->atomic_open() calls without O_CREAT may happen in parallel.
+--
+[mandatory]
+ ->setxattr() and xattr_handler.set() get dentry and inode passed separately.
+ The dentry might not yet be attached to the inode, so do _not_ use its
+ ->d_inode in the instances. Rationale: security_d_instantiate() needs to
+ be called before we attach dentry to inode, and smack ->d_instantiate()
+ uses not just ->getxattr() but ->setxattr() as well.
diff --git a/Documentation/hwmon/max34440 b/Documentation/hwmon/max34440
index f5b1fcaa9e4e..9ba6587b7657 100644
--- a/Documentation/hwmon/max34440
+++ b/Documentation/hwmon/max34440
@@ -5,17 +5,17 @@ Supported chips:
* Maxim MAX34440
Prefixes: 'max34440'
Addresses scanned: -
- Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34440.pdf
+ Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34440.pdf
* Maxim MAX34441
PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller
Prefixes: 'max34441'
Addresses scanned: -
- Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34441.pdf
+ Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34441.pdf
* Maxim MAX34446
PMBus Power-Supply Data Logger
Prefixes: 'max34446'
Addresses scanned: -
- Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34446.pdf
+ Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34446.pdf
* Maxim MAX34460
PMBus 12-Channel Voltage Monitor & Sequencer
Prefix: 'max34460'
diff --git a/Documentation/infiniband/sysfs.txt b/Documentation/infiniband/sysfs.txt
index 3ecf0c3a133f..45bcafe6ff8a 100644
--- a/Documentation/infiniband/sysfs.txt
+++ b/Documentation/infiniband/sysfs.txt
@@ -56,6 +56,18 @@ SYSFS FILES
ports/1/pkeys/10 contains the value at index 10 in port 1's P_Key
table.
+ There is an optional "hw_counters" subdirectory that may appear under
+ either the parent device or the port subdirectories, or both. If
+ present, it contains a list of counters provided by the hardware. They
+ may match some of the counters in the counters directory, but they
+ often include many other counters. In addition to the various counters,
+ there will be a file named "lifespan" that configures how frequently
+ the core should update the counters when they are being accessed
+ (counters are not updated if they are not being accessed). The lifespan
+ is in milliseconds and defaults to 10 unless set to something else by
+ the driver. Users may echo a value between 0 and 10000 to the lifespan
+ file to set the length of time between updates in milliseconds.
+
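For instance, a monitoring tool could request updates at most every
5 milliseconds with the small program below; the device name mlx4_0 is only an
example and depends on the installed HCA.

#include <stdio.h>

int main(void)
{
	/* Hypothetical path; substitute the actual device and port. */
	const char *path =
		"/sys/class/infiniband/mlx4_0/ports/1/hw_counters/lifespan";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "5\n");	/* update interval in milliseconds */
	return fclose(f) ? 1 : 0;
}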
MTHCA
The Mellanox HCA driver also creates the files:
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index c52856da0cad..db101857b2c9 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -241,9 +241,8 @@ comment "module support disabled"
depends on !MODULES
MODVERSIONS directly depends on MODULES, this means it's only visible if
-MODULES is different from 'n'. The comment on the other hand is always
-visible when MODULES is visible (the (empty) dependency of MODULES is
-also part of the comment dependencies).
+MODULES is different from 'n'. The comment on the other hand is only
+visible when MODULES is set to 'n'.
Kconfig syntax
@@ -285,12 +284,17 @@ choices:
"endchoice"
This defines a choice group and accepts any of the above attributes as
-options. A choice can only be of type bool or tristate, while a boolean
-choice only allows a single config entry to be selected, a tristate
-choice also allows any number of config entries to be set to 'm'. This
-can be used if multiple drivers for a single hardware exists and only a
-single driver can be compiled/loaded into the kernel, but all drivers
-can be compiled as modules.
+options. A choice can only be of type bool or tristate. If no type is
+specified for a choice, its type will be determined by the type of the
+first choice element in the group, or will remain unknown if none of
+the choice elements have a type specified.
+
+While a boolean choice only allows a single config entry to be
+selected, a tristate choice also allows any number of config entries
+to be set to 'm'. This can be used if multiple drivers for a single
+piece of hardware exist and only a single driver can be compiled/loaded
+into the kernel, but all drivers can be compiled as modules.
+
A choice accepts another option "optional", which allows to set the
choice to 'n' and no entry needs to be selected.
If no [symbol] is associated with a choice, then you can not have multiple
diff --git a/Documentation/scsi/tcm_qla2xxx.txt b/Documentation/scsi/tcm_qla2xxx.txt
new file mode 100644
index 000000000000..c3a670a25e2b
--- /dev/null
+++ b/Documentation/scsi/tcm_qla2xxx.txt
@@ -0,0 +1,22 @@
+tcm_qla2xxx jam_host attribute
+------------------------------
+There is a new module endpoint attribute called jam_host:
+
+    jam_host: boolean=0/1
+
+This attribute and its accompanying code are only included if the
+Kconfig parameter TCM_QLA2XXX_DEBUG is set to Y.
+By default the jammer code and functionality are disabled.
+
+Use this attribute to control the discarding of SCSI commands to a
+selected host.
+This may be useful for testing error handling and simulating slow drain
+and other fabric issues.
+
+Setting the jam_host attribute to 1 for a particular host will discard
+the SCSI commands for that host.
+Reset it back to 0 to stop the jamming.
+
+Enable jamming on host 4:
+echo 1 > /sys/kernel/config/target/qla2xxx/21:00:00:24:ff:27:8f:ae/tpgt_1/attrib/jam_host
+
+Disable jamming on host 4:
+echo 0 > /sys/kernel/config/target/qla2xxx/21:00:00:24:ff:27:8f:ae/tpgt_1/attrib/jam_host
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index daabdd7ee543..a3683ce2a2f3 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -61,6 +61,7 @@ show up in /proc/sys/kernel:
- perf_cpu_time_max_percent
- perf_event_paranoid
- perf_event_max_stack
+- perf_event_max_contexts_per_stack
- pid_max
- powersave-nap [ PPC only ]
- printk
@@ -668,6 +669,19 @@ The default value is 127.
==============================================================
+perf_event_max_contexts_per_stack:
+
+Controls the maximum number of stack frame context entries for
+(attr.sample_type & PERF_SAMPLE_CALLCHAIN) configured events, for
+instance, when using 'perf record -g' or 'perf trace --call-graph fp'.
+
+This value can only be changed while no events with callchains enabled
+are in use; otherwise writing to this file will return -EBUSY.
+
+The default value is 8.
+
+==============================================================
+
pid_max:
PID allocation wrap value. When the kernel's next PID value
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index 7d370c9b1450..94bf6944bb1e 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -294,8 +294,6 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
- buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
- buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
@@ -467,20 +465,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
- if re.search('shutdown_session\)\(', fo):
- buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
- buf += "{\n"
- buf += " return 0;\n"
- buf += "}\n\n"
- bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
-
- if re.search('close_session\)\(', fo):
- buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
- buf += "{\n"
- buf += " return;\n"
- buf += "}\n\n"
- bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
-
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
diff --git a/Documentation/thermal/sysfs-api.txt b/Documentation/thermal/sysfs-api.txt
index ed419d6c8dec..efc3f3d293c4 100644
--- a/Documentation/thermal/sysfs-api.txt
+++ b/Documentation/thermal/sysfs-api.txt
@@ -69,8 +69,8 @@ temperature) and throttle appropriate devices.
1.1.2 void thermal_zone_device_unregister(struct thermal_zone_device *tz)
This interface function removes the thermal zone device.
- It deletes the corresponding entry form /sys/class/thermal folder and
- unbind all the thermal cooling devices it uses.
+ It deletes the corresponding entry from /sys/class/thermal folder and
+ unbinds all the thermal cooling devices it uses.
1.1.3 struct thermal_zone_device *thermal_zone_of_sensor_register(
struct device *dev, int sensor_id, void *data,
@@ -146,32 +146,32 @@ temperature) and throttle appropriate devices.
This interface function adds a new thermal cooling device (fan/processor/...)
to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself
- to all the thermal zone devices register at the same time.
+ to all the thermal zone devices registered at the same time.
name: the cooling device name.
devdata: device private data.
ops: thermal cooling devices call-backs.
.get_max_state: get the Maximum throttle state of the cooling device.
- .get_cur_state: get the Current throttle state of the cooling device.
+ .get_cur_state: get the Currently requested throttle state of the cooling device.
.set_cur_state: set the Current throttle state of the cooling device.
1.2.2 void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
- This interface function remove the thermal cooling device.
- It deletes the corresponding entry form /sys/class/thermal folder and
- unbind itself from all the thermal zone devices using it.
+ This interface function removes the thermal cooling device.
+ It deletes the corresponding entry from /sys/class/thermal folder and
+ unbinds itself from all the thermal zone devices using it.
1.3 interface for binding a thermal zone device with a thermal cooling device
1.3.1 int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
int trip, struct thermal_cooling_device *cdev,
unsigned long upper, unsigned long lower, unsigned int weight);
- This interface function bind a thermal cooling device to the certain trip
+ This interface function binds a thermal cooling device to a particular trip
point of a thermal zone device.
This function is usually called in the thermal zone device .bind callback.
tz: the thermal zone device
cdev: thermal cooling device
- trip: indicates which trip point the cooling devices is associated with
- in this thermal zone.
+ trip: indicates which trip point in this thermal zone the cooling device
+ is associated with.
upper:the Maximum cooling state for this trip point.
THERMAL_NO_LIMIT means no upper limit,
and the cooling device can be in max_state.
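
Put together, a thermal driver's .bind callback might use this interface
roughly as sketched below; the trip index, limits and weight are arbitrary
example values, not requirements.

#include <linux/thermal.h>

/* Example .bind callback: tie the cooling device to trip 0 of this
 * zone, with no state limits and the default weight. */
static int example_tz_bind(struct thermal_zone_device *tz,
			   struct thermal_cooling_device *cdev)
{
	return thermal_zone_bind_cooling_device(tz, 0, cdev,
						THERMAL_NO_LIMIT,
						THERMAL_NO_LIMIT,
						THERMAL_WEIGHT_DEFAULT);
}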
@@ -184,13 +184,13 @@ temperature) and throttle appropriate devices.
1.3.2 int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
int trip, struct thermal_cooling_device *cdev);
- This interface function unbind a thermal cooling device from the certain
+ This interface function unbinds a thermal cooling device from a particular
trip point of a thermal zone device. This function is usually called in
the thermal zone device .unbind callback.
tz: the thermal zone device
cdev: thermal cooling device
- trip: indicates which trip point the cooling devices is associated with
- in this thermal zone.
+ trip: indicates which trip point in this thermal zone the cooling device
+ is associated with.
1.4 Thermal Zone Parameters
1.4.1 struct thermal_bind_params
@@ -210,13 +210,13 @@ temperature) and throttle appropriate devices.
this thermal zone and cdev, for a particular trip point.
If nth bit is set, then the cdev and thermal zone are bound
for trip point n.
- .limits: This is an array of cooling state limits. Must have exactly
- 2 * thermal_zone.number_of_trip_points. It is an array consisting
- of tuples <lower-state upper-state> of state limits. Each trip
- will be associated with one state limit tuple when binding.
- A NULL pointer means <THERMAL_NO_LIMITS THERMAL_NO_LIMITS>
- on all trips. These limits are used when binding a cdev to a
- trip point.
+ .binding_limits: This is an array of cooling state limits. Must have
+ exactly 2 * thermal_zone.number_of_trip_points. It is an
+ array consisting of tuples <lower-state upper-state> of
+ state limits. Each trip will be associated with one state
+ limit tuple when binding. A NULL pointer means
+ <THERMAL_NO_LIMITS THERMAL_NO_LIMITS> on all trips.
+ These limits are used when binding a cdev to a trip point.
.match: This call back returns success(0) if the 'tz and cdev' need to
be bound, as per platform data.
1.4.2 struct thermal_zone_params
@@ -351,8 +351,8 @@ cdev[0-*]
RO, Optional
cdev[0-*]_trip_point
- The trip point with which cdev[0-*] is associated in this thermal
- zone; -1 means the cooling device is not associated with any trip
+ The trip point in this thermal zone which cdev[0-*] is associated
+ with; -1 means the cooling device is not associated with any trip
point.
RO, Optional
diff --git a/MAINTAINERS b/MAINTAINERS
index 7fb2603e3a95..7304d2e37a98 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2304,7 +2304,7 @@ BCACHE (BLOCK LAYER CACHE)
M: Kent Overstreet <kent.overstreet@gmail.com>
L: linux-bcache@vger.kernel.org
W: http://bcache.evilpiepirate.org
-S: Maintained
+S: Orphan
F: drivers/md/bcache/
BDISP ST MEDIA DRIVER
@@ -2505,6 +2505,7 @@ M: Hauke Mehrtens <hauke@hauke-m.de>
M: Rafał Miłecki <zajec5@gmail.com>
L: linux-mips@linux-mips.org
S: Maintained
+F: Documentation/devicetree/bindings/mips/brcm/
F: arch/mips/bcm47xx/*
F: arch/mips/include/asm/mach-bcm47xx/*
@@ -5308,6 +5309,13 @@ F: drivers/block/cciss*
F: include/linux/cciss_ioctl.h
F: include/uapi/linux/cciss_ioctl.h
+HFI1 DRIVER
+M: Mike Marciniszyn <mike.marciniszyn@intel.com>
+M: Dennis Dalessandro <dennis.dalessandro@intel.com>
+L: linux-rdma@vger.kernel.org
+S: Supported
+F: drivers/infiniband/hw/hfi1
+
HFS FILESYSTEM
L: linux-fsdevel@vger.kernel.org
S: Orphan
@@ -5837,7 +5845,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma.git
S: Supported
F: Documentation/infiniband/
F: drivers/infiniband/
-F: drivers/staging/rdma/
F: include/uapi/linux/if_infiniband.h
F: include/uapi/rdma/
F: include/rdma/
@@ -6096,6 +6103,14 @@ S: Maintained
F: arch/x86/include/asm/intel_telemetry.h
F: drivers/platform/x86/intel_telemetry*
+INTEL PMC CORE DRIVER
+M: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
+M: Vishwanath Somayaji <vishwanath.somayaji@intel.com>
+L: platform-driver-x86@vger.kernel.org
+S: Maintained
+F: arch/x86/include/asm/pmc_core.h
+F: drivers/platform/x86/intel_pmc_core*
+
IOC3 ETHERNET DRIVER
M: Ralf Baechle <ralf@linux-mips.org>
L: linux-mips@linux-mips.org
@@ -6413,8 +6428,9 @@ F: Documentation/kbuild/kconfig-language.txt
F: scripts/kconfig/
KDUMP
-M: Vivek Goyal <vgoyal@redhat.com>
-M: Haren Myneni <hbabu@us.ibm.com>
+M: Dave Young <dyoung@redhat.com>
+M: Baoquan He <bhe@redhat.com>
+R: Vivek Goyal <vgoyal@redhat.com>
L: kexec@lists.infradead.org
W: http://lse.sourceforge.net/kdump/
S: Maintained
@@ -6491,6 +6507,7 @@ F: arch/*/include/asm/kvm*
F: include/linux/kvm*
F: include/uapi/linux/kvm*
F: virt/kvm/
+F: tools/kvm/
KERNEL VIRTUAL MACHINE (KVM) FOR AMD-V
M: Joerg Roedel <joro@8bytes.org>
@@ -6559,7 +6576,7 @@ L: kexec@lists.infradead.org
S: Maintained
F: include/linux/kexec.h
F: include/uapi/linux/kexec.h
-F: kernel/kexec.c
+F: kernel/kexec*
KEYS/KEYRINGS:
M: David Howells <dhowells@redhat.com>
@@ -7505,6 +7522,7 @@ W: http://www.linux-mips.org/
T: git git://git.linux-mips.org/pub/scm/ralf/linux.git
Q: http://patchwork.linux-mips.org/project/linux-mips/list/
S: Supported
+F: Documentation/devicetree/bindings/mips/
F: Documentation/mips/
F: arch/mips/
@@ -8881,6 +8899,7 @@ F: arch/*/kernel/*/perf_event*.c
F: arch/*/kernel/*/*/perf_event*.c
F: arch/*/include/asm/perf_event.h
F: arch/*/kernel/perf_callchain.c
+F: arch/*/events/*
F: tools/perf/
PERSONALITY HANDLING
@@ -10909,12 +10928,6 @@ M: Arnaud Patard <arnaud.patard@rtp-net.org>
S: Odd Fixes
F: drivers/staging/xgifb/
-HFI1 DRIVER
-M: Mike Marciniszyn <infinipath@intel.com>
-L: linux-rdma@vger.kernel.org
-S: Supported
-F: drivers/staging/rdma/hfi1
-
STARFIRE/DURALAN NETWORK DRIVER
M: Ion Badulescu <ionut@badula.org>
S: Odd Fixes
@@ -11295,6 +11308,7 @@ F: drivers/platform/x86/thinkpad_acpi.c
TI BANDGAP AND THERMAL DRIVER
M: Eduardo Valentin <edubezval@gmail.com>
+M: Keerthy <j-keerthy@ti.com>
L: linux-pm@vger.kernel.org
L: linux-omap@vger.kernel.org
S: Maintained
diff --git a/Makefile b/Makefile
index 0f9cb36d45c2..0f70de63cfdb 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
VERSION = 4
-PATCHLEVEL = 6
+PATCHLEVEL = 7
SUBLEVEL = 0
-EXTRAVERSION =
-NAME = Charred Weasel
+EXTRAVERSION = -rc1
+NAME = Psychotic Stoned Sheep
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
@@ -128,6 +128,10 @@ _all:
# Cancel implicit rules on top Makefile
$(CURDIR)/Makefile Makefile: ;
+ifneq ($(words $(subst :, ,$(CURDIR))), 1)
+ $(error main directory cannot contain spaces nor colons)
+endif
+
ifneq ($(KBUILD_OUTPUT),)
# Invoke a second make in the output directory, passing relevant variables
# check that the output directory actually exists
@@ -142,7 +146,7 @@ PHONY += $(MAKECMDGOALS) sub-make
$(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
@:
-sub-make: FORCE
+sub-make:
$(Q)$(MAKE) -C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR) \
-f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS))
@@ -364,7 +368,7 @@ AFLAGS_MODULE =
LDFLAGS_MODULE =
CFLAGS_KERNEL =
AFLAGS_KERNEL =
-CFLAGS_GCOV = -fprofile-arcs -ftest-coverage
+CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im -Wno-maybe-uninitialized
CFLAGS_KCOV = -fsanitize-coverage=trace-pc
@@ -617,7 +621,11 @@ KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
KBUILD_CFLAGS += -Os $(call cc-disable-warning,maybe-uninitialized,)
else
-KBUILD_CFLAGS += -O2
+ifdef CONFIG_PROFILE_ALL_BRANCHES
+KBUILD_CFLAGS += -O2 $(call cc-disable-warning,maybe-uninitialized,)
+else
+KBUILD_CFLAGS += -O2
+endif
endif
# Tell gcc to never replace conditional load with a non-conditional one
@@ -697,9 +705,10 @@ KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
else
-# This warning generated too much noise in a regular build.
-# Use make W=1 to enable this warning (see scripts/Makefile.build)
+# These warnings generated too much noise in a regular build.
+# Use make W=1 to enable them (see scripts/Makefile.build)
KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
endif
ifdef CONFIG_FRAME_POINTER
@@ -926,27 +935,41 @@ export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) arch Doc
vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN)
-# Final link of vmlinux
- cmd_link-vmlinux = $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux)
-quiet_cmd_link-vmlinux = LINK $@
-
-# Include targets which we want to
-# execute if the rest of the kernel build went well.
-vmlinux: scripts/link-vmlinux.sh $(vmlinux-deps) FORCE
+# Include targets which we want to execute sequentially if the rest of the
+# kernel build went well. If CONFIG_TRIM_UNUSED_KSYMS is set, this might be
+# evaluated more than once.
+PHONY += vmlinux_prereq
+vmlinux_prereq: $(vmlinux-deps) FORCE
ifdef CONFIG_HEADERS_CHECK
$(Q)$(MAKE) -f $(srctree)/Makefile headers_check
endif
-ifdef CONFIG_SAMPLES
- $(Q)$(MAKE) $(build)=samples
-endif
ifdef CONFIG_BUILD_DOCSRC
$(Q)$(MAKE) $(build)=Documentation
endif
ifdef CONFIG_GDB_SCRIPTS
$(Q)ln -fsn `cd $(srctree) && /bin/pwd`/scripts/gdb/vmlinux-gdb.py
endif
+ifdef CONFIG_TRIM_UNUSED_KSYMS
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh \
+ "$(MAKE) KBUILD_MODULES=1 -f $(srctree)/Makefile vmlinux_prereq"
+endif
+
+# standalone target for easier testing
+include/generated/autoksyms.h: FORCE
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/adjust_autoksyms.sh true
+
+# Final link of vmlinux
+ cmd_link-vmlinux = $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux)
+quiet_cmd_link-vmlinux = LINK $@
+
+vmlinux: scripts/link-vmlinux.sh vmlinux_prereq $(vmlinux-deps) FORCE
+	+$(call if_changed,link-vmlinux)
+# Build samples along the rest of the kernel
+ifdef CONFIG_SAMPLES
+vmlinux-dirs += samples
+endif
+
# The actual objects are generated when descending,
# make sure no implicit rule kicks in
$(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
@@ -998,10 +1021,12 @@ prepare2: prepare3 outputmakefile asm-generic
prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
include/config/auto.conf
$(cmd_crmodverdir)
+ $(Q)test -e include/generated/autoksyms.h || \
+ touch include/generated/autoksyms.h
archprepare: archheaders archscripts prepare1 scripts_basic
-prepare0: archprepare FORCE
+prepare0: archprepare
$(Q)$(MAKE) $(build)=.
# All the preparing..
@@ -1061,7 +1086,7 @@ INSTALL_FW_PATH=$(INSTALL_MOD_PATH)/lib/firmware
export INSTALL_FW_PATH
PHONY += firmware_install
-firmware_install: FORCE
+firmware_install:
@mkdir -p $(objtree)/firmware
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.fwinst obj=firmware __fw_install
@@ -1081,7 +1106,7 @@ PHONY += archscripts
archscripts:
PHONY += __headers
-__headers: $(version_h) scripts_basic asm-generic archheaders archscripts FORCE
+__headers: $(version_h) scripts_basic asm-generic archheaders archscripts
$(Q)$(MAKE) $(build)=scripts build_unifdef
PHONY += headers_install_all
@@ -1192,7 +1217,8 @@ else # CONFIG_MODULES
# Modules not configured
# ---------------------------------------------------------------------------
-modules modules_install: FORCE
+PHONY += modules modules_install
+modules modules_install:
@echo >&2
@echo >&2 "The present kernel configuration has modules disabled."
@echo >&2 "Type 'make config' and enable loadable module support."
@@ -1283,6 +1309,7 @@ boards := $(sort $(notdir $(boards)))
board-dirs := $(dir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/*/*_defconfig))
board-dirs := $(sort $(notdir $(board-dirs:/=)))
+PHONY += help
help:
@echo 'Cleaning targets:'
@echo ' clean - Remove most generated files but keep the config and'
@@ -1453,6 +1480,7 @@ $(clean-dirs):
clean: rm-dirs := $(MODVERDIR)
clean: rm-files := $(KBUILD_EXTMOD)/Module.symvers
+PHONY += help
help:
@echo ' Building external modules.'
@echo ' Syntax: make -C path/to/kernel/src M=$$PWD target'
diff --git a/arch/Kconfig b/arch/Kconfig
index b16e74e4b5af..d794384a0404 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -598,6 +598,14 @@ config HAVE_STACK_VALIDATION
Architecture supports the 'objtool check' host tool command, which
performs compile-time stack metadata validation.
+config HAVE_ARCH_HASH
+ bool
+ default n
+ help
+ If this is set, the architecture provides an <asm/hash.h>
+ file which provides platform-specific implementations of some
+ functions in <linux/hash.h> or fs/namei.c.
+
#
# ABI hall of shame
#
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 8b134cfe5e1f..6fd48021324b 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -48,7 +48,7 @@ struct arc_callchain_trace {
static int callchain_trace(unsigned int addr, void *data)
{
struct arc_callchain_trace *ctrl = data;
- struct perf_callchain_entry *entry = ctrl->perf_stuff;
+ struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;
perf_callchain_store(entry, addr);
if (ctrl->depth++ < 3)
@@ -58,7 +58,7 @@ static int callchain_trace(unsigned int addr, void *data)
}
void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
struct arc_callchain_trace ctrl = {
.depth = 0,
@@ -69,7 +69,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
}
void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
/*
* User stack can't be unwound trivially with kernel dwarf unwinder
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 446705a4325a..5be33a2d59a9 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -82,7 +82,6 @@ $(obj)/uImage: $(obj)/zImage FORCE
$(obj)/bootp/bootp: $(obj)/zImage initrd FORCE
$(Q)$(MAKE) $(build)=$(obj)/bootp $@
- @:
$(obj)/bootpImage: $(obj)/bootp/bootp FORCE
$(call if_changed,objcopy)
diff --git a/arch/arm/boot/bootp/Makefile b/arch/arm/boot/bootp/Makefile
index 5761f0039133..5e4acd253b30 100644
--- a/arch/arm/boot/bootp/Makefile
+++ b/arch/arm/boot/bootp/Makefile
@@ -17,7 +17,6 @@ targets := bootp init.o kernel.o initrd.o
# Note that bootp.lds picks up kernel.o and initrd.o
$(obj)/bootp: $(src)/bootp.lds $(addprefix $(obj)/,init.o kernel.o initrd.o) FORCE
$(call if_changed,ld)
- @:
# kernel.o and initrd.o includes a binary image using
# .incbin, a dependency which is not tracked automatically
@@ -26,4 +25,4 @@ $(obj)/kernel.o: arch/arm/boot/zImage FORCE
$(obj)/initrd.o: $(INITRD) FORCE
-PHONY += $(INITRD) FORCE
+PHONY += $(INITRD)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 0df6b1fc9655..96387d477e91 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -41,6 +41,8 @@
#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
+#define KVM_REQ_VCPU_EXIT 8
+
u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -226,6 +228,10 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
+void kvm_arm_halt_guest(struct kvm *kvm);
+void kvm_arm_resume_guest(struct kvm *kvm);
+void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
+void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);
int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
index d8e90c8cb5fa..f3a7de71f515 100644
--- a/arch/arm/include/asm/kvm_mmio.h
+++ b/arch/arm/include/asm/kvm_mmio.h
@@ -28,6 +28,9 @@ struct kvm_decode {
bool sign_extend;
};
+void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
+unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
+
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa);
diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c
index 27563befa8a2..22bf1f64d99a 100644
--- a/arch/arm/kernel/perf_callchain.c
+++ b/arch/arm/kernel/perf_callchain.c
@@ -31,7 +31,7 @@ struct frame_tail {
*/
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
- struct perf_callchain_entry *entry)
+ struct perf_callchain_entry_ctx *entry)
{
struct frame_tail buftail;
unsigned long err;
@@ -59,7 +59,7 @@ user_backtrace(struct frame_tail __user *tail,
}
void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
struct frame_tail __user *tail;
@@ -75,7 +75,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
tail = (struct frame_tail __user *)regs->ARM_fp - 1;
- while ((entry->nr < sysctl_perf_event_max_stack) &&
+ while ((entry->nr < entry->max_stack) &&
tail && !((unsigned long)tail & 0x3))
tail = user_backtrace(tail, entry);
}
@@ -89,13 +89,13 @@ static int
callchain_trace(struct stackframe *fr,
void *data)
{
- struct perf_callchain_entry *entry = data;
+ struct perf_callchain_entry_ctx *entry = data;
perf_callchain_store(entry, fr->pc);
return 0;
}
void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
struct stackframe fr;
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 95a000515e43..02abfff68ee5 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -46,6 +46,13 @@ config KVM_ARM_HOST
---help---
Provides host support for ARM processors.
+config KVM_NEW_VGIC
+ bool "New VGIC implementation"
+ depends on KVM
+ default y
+ ---help---
+ uses the new VGIC implementation
+
source drivers/vhost/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index eb1bf4309c13..a596b58f6d37 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -21,7 +21,18 @@ obj-$(CONFIG_KVM_ARM_HOST) += hyp/
obj-y += kvm-arm.o init.o interrupts.o
obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
+
+ifeq ($(CONFIG_KVM_NEW_VGIC),y)
+obj-y += $(KVM)/arm/vgic/vgic.o
+obj-y += $(KVM)/arm/vgic/vgic-init.o
+obj-y += $(KVM)/arm/vgic/vgic-irqfd.o
+obj-y += $(KVM)/arm/vgic/vgic-v2.o
+obj-y += $(KVM)/arm/vgic/vgic-mmio.o
+obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
+obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o
+else
obj-y += $(KVM)/arm/vgic.o
obj-y += $(KVM)/arm/vgic-v2.o
obj-y += $(KVM)/arm/vgic-v2-emul.o
+endif
obj-y += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 237d5d82f0af..893941ec98dc 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -455,7 +455,7 @@ static void update_vttbr(struct kvm *kvm)
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
- int ret;
+ int ret = 0;
if (likely(vcpu->arch.has_run_once))
return 0;
@@ -478,9 +478,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
* interrupts from the virtual timer with a userspace gic.
*/
if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
- kvm_timer_enable(kvm);
+ ret = kvm_timer_enable(vcpu);
- return 0;
+ return ret;
}
bool kvm_arch_intc_initialized(struct kvm *kvm)
@@ -488,30 +488,37 @@ bool kvm_arch_intc_initialized(struct kvm *kvm)
return vgic_initialized(kvm);
}
-static void kvm_arm_halt_guest(struct kvm *kvm) __maybe_unused;
-static void kvm_arm_resume_guest(struct kvm *kvm) __maybe_unused;
-
-static void kvm_arm_halt_guest(struct kvm *kvm)
+void kvm_arm_halt_guest(struct kvm *kvm)
{
int i;
struct kvm_vcpu *vcpu;
kvm_for_each_vcpu(i, vcpu, kvm)
vcpu->arch.pause = true;
- force_vm_exit(cpu_all_mask);
+ kvm_make_all_cpus_request(kvm, KVM_REQ_VCPU_EXIT);
+}
+
+void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.pause = true;
+ kvm_vcpu_kick(vcpu);
}
-static void kvm_arm_resume_guest(struct kvm *kvm)
+void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu)
+{
+ struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
+
+ vcpu->arch.pause = false;
+ swake_up(wq);
+}
+
+void kvm_arm_resume_guest(struct kvm *kvm)
{
int i;
struct kvm_vcpu *vcpu;
- kvm_for_each_vcpu(i, vcpu, kvm) {
- struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
-
- vcpu->arch.pause = false;
- swake_up(wq);
- }
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_arm_resume_vcpu(vcpu);
}
static void vcpu_sleep(struct kvm_vcpu *vcpu)
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 0f6600f05137..10f80a6c797a 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -23,7 +23,7 @@
#include "trace.h"
-static void mmio_write_buf(char *buf, unsigned int len, unsigned long data)
+void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
{
void *datap = NULL;
union {
@@ -55,7 +55,7 @@ static void mmio_write_buf(char *buf, unsigned int len, unsigned long data)
memcpy(buf, datap, len);
}
-static unsigned long mmio_read_buf(char *buf, unsigned int len)
+unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
{
unsigned long data = 0;
union {
@@ -66,7 +66,7 @@ static unsigned long mmio_read_buf(char *buf, unsigned int len)
switch (len) {
case 1:
- data = buf[0];
+ data = *(u8 *)buf;
break;
case 2:
memcpy(&tmp.hword, buf, len);
@@ -87,11 +87,10 @@ static unsigned long mmio_read_buf(char *buf, unsigned int len)
/**
* kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
+ * or in-kernel IO emulation
+ *
* @vcpu: The VCPU pointer
* @run: The VCPU run struct containing the mmio data
- *
- * This should only be called after returning from userspace for MMIO load
- * emulation.
*/
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
@@ -104,7 +103,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
if (len > sizeof(unsigned long))
return -EINVAL;
- data = mmio_read_buf(run->mmio.data, len);
+ data = kvm_mmio_read_buf(run->mmio.data, len);
if (vcpu->arch.mmio_decode.sign_extend &&
len < sizeof(unsigned long)) {
@@ -190,7 +189,7 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
len);
trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
- mmio_write_buf(data_buf, len, data);
+ kvm_mmio_write_buf(data_buf, len, data);
ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
data_buf);
@@ -206,18 +205,19 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
run->mmio.is_write = is_write;
run->mmio.phys_addr = fault_ipa;
run->mmio.len = len;
- if (is_write)
- memcpy(run->mmio.data, data_buf, len);
if (!ret) {
/* We handled the access successfully in the kernel. */
+ if (!is_write)
+ memcpy(run->mmio.data, data_buf, len);
vcpu->stat.mmio_exit_kernel++;
kvm_handle_mmio_return(vcpu, run);
return 1;
- } else {
- vcpu->stat.mmio_exit_user++;
}
+ if (is_write)
+ memcpy(run->mmio.data, data_buf, len);
+ vcpu->stat.mmio_exit_user++;
run->exit_reason = KVM_EXIT_MMIO;
return 0;
}
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
index 1160434eece0..59a8fa7b8a3b 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -74,5 +74,5 @@ $(MODLIB)/vdso: FORCE
@mkdir -p $(MODLIB)/vdso
PHONY += vdso_install
-vdso_install: $(obj)/vdso.so.dbg $(MODLIB)/vdso FORCE
+vdso_install: $(obj)/vdso.so.dbg $(MODLIB)/vdso
$(call cmd,vdso_install)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e63d23bad36e..49095fc4b482 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -43,6 +43,8 @@
#define KVM_VCPU_MAX_FEATURES 4
+#define KVM_REQ_VCPU_EXIT 8
+
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
int kvm_arch_dev_ioctl_check_extension(long ext);
@@ -327,6 +329,10 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
+void kvm_arm_halt_guest(struct kvm *kvm);
+void kvm_arm_resume_guest(struct kvm *kvm);
+void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
+void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);
u64 __kvm_call_hyp(void *hypfn, ...);
#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h
index fe612a962576..75ea42079757 100644
--- a/arch/arm64/include/asm/kvm_mmio.h
+++ b/arch/arm64/include/asm/kvm_mmio.h
@@ -30,6 +30,9 @@ struct kvm_decode {
bool sign_extend;
};
+void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
+unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
+
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
phys_addr_t fault_ipa);
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 32c3c6e70119..713ca824f266 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -31,7 +31,7 @@ struct frame_tail {
*/
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
- struct perf_callchain_entry *entry)
+ struct perf_callchain_entry_ctx *entry)
{
struct frame_tail buftail;
unsigned long err;
@@ -76,7 +76,7 @@ struct compat_frame_tail {
static struct compat_frame_tail __user *
compat_user_backtrace(struct compat_frame_tail __user *tail,
- struct perf_callchain_entry *entry)
+ struct perf_callchain_entry_ctx *entry)
{
struct compat_frame_tail buftail;
unsigned long err;
@@ -106,7 +106,7 @@ compat_user_backtrace(struct compat_frame_tail __user *tail,
}
#endif /* CONFIG_COMPAT */
-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
@@ -122,7 +122,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
tail = (struct frame_tail __user *)regs->regs[29];
- while (entry->nr < sysctl_perf_event_max_stack &&
+ while (entry->nr < entry->max_stack &&
tail && !((unsigned long)tail & 0xf))
tail = user_backtrace(tail, entry);
} else {
@@ -132,7 +132,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
- while ((entry->nr < sysctl_perf_event_max_stack) &&
+ while ((entry->nr < entry->max_stack) &&
tail && !((unsigned long)tail & 0x3))
tail = compat_user_backtrace(tail, entry);
#endif
@@ -146,12 +146,12 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
*/
static int callchain_trace(struct stackframe *frame, void *data)
{
- struct perf_callchain_entry *entry = data;
+ struct perf_callchain_entry_ctx *entry = data;
perf_callchain_store(entry, frame->pc);
return 0;
}
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
struct stackframe frame;
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index aa2e34e99582..c4f26ef91e77 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -54,6 +54,13 @@ config KVM_ARM_PMU
Adds support for a virtual Performance Monitoring Unit (PMU) in
virtual machines.
+config KVM_NEW_VGIC
+ bool "New VGIC implementation"
+ depends on KVM
+ default y
+ ---help---
+ uses the new VGIC implementation
+
source drivers/vhost/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 122cff482ac4..a7a958ca29d5 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -20,10 +20,22 @@ kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o
+ifeq ($(CONFIG_KVM_NEW_VGIC),y)
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-init.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-irqfd.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v2.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v3.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v2.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v3.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-kvm-device.o
+else
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
+endif
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
kvm-$(CONFIG_KVM_ARM_PMU) += $(KVM)/arm/pmu.o
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 4d1ac81870d2..e9e0e6db73f6 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -162,7 +162,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);
if (!is_iabt)
- esr |= ESR_ELx_EC_DABT_LOW;
+ esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;
vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
}
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index aa232de2d4bc..3ae852507e57 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -20,6 +20,7 @@ config H8300
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZO
select HAVE_ARCH_KGDB
+ select HAVE_ARCH_HASH
select CPU_NO_EFFICIENT_FFS
config RWSEM_GENERIC_SPINLOCK
diff --git a/arch/h8300/boot/compressed/Makefile b/arch/h8300/boot/compressed/Makefile
index 7643633f1330..613bfe6f5272 100644
--- a/arch/h8300/boot/compressed/Makefile
+++ b/arch/h8300/boot/compressed/Makefile
@@ -23,7 +23,6 @@ LDFLAGS_vmlinux := -Ttext $(IMAGE_OFFSET) -estartup -T $(obj)/vmlinux.lds \
$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(LIBGCC) FORCE
$(call if_changed,ld)
- @:
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
diff --git a/arch/h8300/include/asm/hash.h b/arch/h8300/include/asm/hash.h
new file mode 100644
index 000000000000..04cfbd2bd850
--- /dev/null
+++ b/arch/h8300/include/asm/hash.h
@@ -0,0 +1,53 @@
+#ifndef _ASM_HASH_H
+#define _ASM_HASH_H
+
+/*
+ * The later H8SX models have a 32x32-bit multiply, but the H8/300H
+ * and H8S have only 16x16->32. Since it's tolerably compact, this is
+ * basically an inlined version of the __mulsi3 code. Since the inputs
+ * are not expected to be small, it's also simplified by skipping the
+ * early-out checks.
+ *
+ * (Since neither CPU has any multi-bit shift instructions, a
+ * shift-and-add version is a non-starter.)
+ *
+ * TODO: come up with an arch-specific version of the hashing in fs/namei.c,
+ * since that is heavily dependent on rotates. Which, as mentioned, suck
+ * horribly on H8.
+ */
+
+#if defined(CONFIG_CPU_H300H) || defined(CONFIG_CPU_H8S)
+
+#define HAVE_ARCH__HASH_32 1
+
+/*
+ * Multiply by k = 0x61C88647. Fitting this into three registers requires
+ * one extra instruction, but reducing register pressure will probably
+ * make that back and then some.
+ *
+ * GCC asm note: %e1 is the high half of operand %1, while %f1 is the
+ * low half. So if %1 is er4, then %e1 is e4 and %f1 is r4.
+ *
+ * This has been designed to modify x in place, since that's the most
+ * common usage, but preserve k, since hash_64() makes two calls in
+ * quick succession.
+ */
+static inline u32 __attribute_const__ __hash_32(u32 x)
+{
+ u32 temp;
+
+ asm( "mov.w %e1,%f0"
+ "\n mulxu.w %f2,%0" /* klow * xhigh */
+ "\n mov.w %f0,%e1" /* The extra instruction */
+ "\n mov.w %f1,%f0"
+ "\n mulxu.w %e2,%0" /* khigh * xlow */
+ "\n add.w %e1,%f0"
+ "\n mulxu.w %f2,%1" /* klow * xlow */
+ "\n add.w %f0,%e1"
+ : "=&r" (temp), "=r" (x)
+ : "%r" (GOLDEN_RATIO_32), "1" (x));
+ return x;
+}
+
+#endif
+#endif /* _ASM_HASH_H */
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 970d0bd99621..c100d780f1eb 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -95,8 +95,8 @@ define archhelp
echo '* unwcheck - Check vmlinux for invalid unwind info'
endef
-archprepare: make_nr_irqs_h FORCE
-PHONY += make_nr_irqs_h FORCE
+archprepare: make_nr_irqs_h
+PHONY += make_nr_irqs_h
-make_nr_irqs_h: FORCE
+make_nr_irqs_h:
$(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
diff --git a/arch/m32r/boot/compressed/Makefile b/arch/m32r/boot/compressed/Makefile
index 01729c2979ba..0606a727aab2 100644
--- a/arch/m32r/boot/compressed/Makefile
+++ b/arch/m32r/boot/compressed/Makefile
@@ -19,7 +19,6 @@ LDFLAGS_vmlinux := -T
$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS) $(obj)/piggy.o FORCE
$(call if_changed,ld)
- @:
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 8ace920ca24a..967260f2eb1c 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -41,6 +41,7 @@ config M68000
select CPU_HAS_NO_UNALIGNED
select GENERIC_CSUM
select CPU_NO_EFFICIENT_FFS
+ select HAVE_ARCH_HASH
help
The Freescale (was Motorola) 68000 CPU is the first generation of
the well known M68K family of processors. The CPU core as well as
diff --git a/arch/m68k/include/asm/hash.h b/arch/m68k/include/asm/hash.h
new file mode 100644
index 000000000000..6407af84a994
--- /dev/null
+++ b/arch/m68k/include/asm/hash.h
@@ -0,0 +1,59 @@
+#ifndef _ASM_HASH_H
+#define _ASM_HASH_H
+
+/*
+ * If CONFIG_M68000=y (original mc68000/010), this file is #included
+ * to work around the lack of a MULU.L instruction.
+ */
+
+#define HAVE_ARCH__HASH_32 1
+/*
+ * While it would be legal to substitute a different hash operation
+ * entirely, let's keep it simple and just use an optimized multiply
+ * by GOLDEN_RATIO_32 = 0x61C88647.
+ *
+ * The best way to do that appears to be to multiply by 0x8647 with
+ * shifts and adds, and use mulu.w to multiply the high half by 0x61C8.
+ *
+ * Because the 68000 has multi-cycle shifts, this addition chain is
+ * chosen to minimise the shift distances.
+ *
+ * Despite every attempt to spoon-feed it simple operations, GCC
+ * 6.1.1 doggedly insists on doing annoying things like converting
+ * "lsl.l #2,<reg>" (12 cycles) to two adds (8+8 cycles).
+ *
+ * It also likes to notice two shifts in a row, like "a = x << 2" and
+ * "a <<= 7", and convert that to "a = x << 9". But shifts longer
+ * than 8 bits are extra-slow on m68k, so that's a lose.
+ *
+ * Since the 68000 is a very simple in-order processor with no
+ * instruction scheduling effects on execution time, we can safely
+ * take it out of GCC's hands and write one big asm() block.
+ *
+ * Without calling overhead, this operation is 30 bytes (14 instructions
+ * plus one immediate constant) and 166 cycles.
+ *
+ * (Because %2 is fetched twice, it can't be postincrement, and thus it
+ * can't be a fully general "g" or "m". Register is preferred, but
+ * offsettable memory or immediate will work.)
+ */
+static inline u32 __attribute_const__ __hash_32(u32 x)
+{
+ u32 a, b;
+
+ asm( "move.l %2,%0" /* a = x * 0x0001 */
+ "\n lsl.l #2,%0" /* a = x * 0x0004 */
+ "\n move.l %0,%1"
+ "\n lsl.l #7,%0" /* a = x * 0x0200 */
+ "\n add.l %2,%0" /* a = x * 0x0201 */
+ "\n add.l %0,%1" /* b = x * 0x0205 */
+ "\n add.l %0,%0" /* a = x * 0x0402 */
+ "\n add.l %0,%1" /* b = x * 0x0607 */
+ "\n lsl.l #5,%0" /* a = x * 0x8040 */
+ : "=&d,d" (a), "=&r,r" (b)
+ : "r,roi?" (x)); /* a+b = x*0x8647 */
+
+ return ((u16)(x*0x61c8) << 16) + a + b;
+}
+
+#endif /* _ASM_HASH_H */
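
[Editor's aside, not part of the patch: the m68k addition chain above can be sanity-checked on any host by replaying it in portable C and comparing against a plain 32-bit multiply. Names below are illustrative; the chain itself is copied from the asm comments.]

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Host-side sketch: replay the m68k shift/add chain in portable C and
 * verify it equals x * GOLDEN_RATIO_32 for a sample of inputs. */
static uint32_t hash_m68k_chain(uint32_t x)
{
	uint32_t a, b;

	a = x << 2;	/* a = x * 0x0004 */
	b = a;
	a <<= 7;	/* a = x * 0x0200 */
	a += x;		/* a = x * 0x0201 */
	b += a;		/* b = x * 0x0205 */
	a += a;		/* a = x * 0x0402 */
	b += a;		/* b = x * 0x0607 */
	a <<= 5;	/* a = x * 0x8040; a + b = x * 0x8647 */

	return ((uint32_t)(uint16_t)(x * 0x61c8) << 16) + a + b;
}

int main(void)
{
	for (uint64_t i = 0; i <= 0xffffffffull; i += 0x10001)
		assert(hash_m68k_chain((uint32_t)i) ==
		       (uint32_t)i * 0x61C88647u);
	puts("chain matches GOLDEN_RATIO_32 multiply");
	return 0;
}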
diff --git a/arch/metag/kernel/perf_callchain.c b/arch/metag/kernel/perf_callchain.c
index 252abc12a5a3..3e8e048040df 100644
--- a/arch/metag/kernel/perf_callchain.c
+++ b/arch/metag/kernel/perf_callchain.c
@@ -29,7 +29,7 @@ static bool is_valid_call(unsigned long calladdr)
static struct metag_frame __user *
user_backtrace(struct metag_frame __user *user_frame,
- struct perf_callchain_entry *entry)
+ struct perf_callchain_entry_ctx *entry)
{
struct metag_frame frame;
unsigned long calladdr;
@@ -56,7 +56,7 @@ user_backtrace(struct metag_frame __user *user_frame,
}
void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
unsigned long sp = regs->ctx.AX[0].U0;
struct metag_frame __user *frame;
@@ -65,7 +65,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
--frame;
- while ((entry->nr < sysctl_perf_event_max_stack) && frame)
+ while ((entry->nr < entry->max_stack) && frame)
frame = user_backtrace(frame, entry);
}
@@ -78,13 +78,13 @@ static int
callchain_trace(struct stackframe *fr,
void *data)
{
- struct perf_callchain_entry *entry = data;
+ struct perf_callchain_entry_ctx *entry = data;
perf_callchain_store(entry, fr->pc);
return 0;
}
void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
struct stackframe fr;
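
[Editor's aside on the perf_callchain_entry_ctx conversion seen here and in the mips/powerpc hunks further down: the context now carries its own max_stack bound, so walkers compare entry->nr against entry->max_stack rather than the global sysctl_perf_event_max_stack. A minimal user-space analogue of the pattern; the names are illustrative, not the kernel's API.]

#include <stdint.h>
#include <stdio.h>

/* The context carries its own capacity, so every walker respects the
 * same per-event bound without consulting a global sysctl. */
struct callchain_ctx {
	uint64_t *ips;
	unsigned int nr;
	unsigned int max_stack;
};

static void callchain_store(struct callchain_ctx *ctx, uint64_t ip)
{
	if (ctx->nr < ctx->max_stack)
		ctx->ips[ctx->nr++] = ip;
}

int main(void)
{
	uint64_t buf[4];
	struct callchain_ctx ctx = { buf, 0, 4 };

	for (uint64_t ip = 0x1000; ip < 0x1010; ip += 2)
		callchain_store(&ctx, ip);	/* stops at max_stack */
	printf("captured %u frames\n", ctx.nr);
	return 0;
}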
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index f17c3a4fb697..636e0720fb20 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -16,6 +16,7 @@ config MICROBLAZE
select GENERIC_IRQ_SHOW
select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK
+ select HAVE_ARCH_HASH
select HAVE_ARCH_KGDB
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
diff --git a/arch/microblaze/include/asm/hash.h b/arch/microblaze/include/asm/hash.h
new file mode 100644
index 000000000000..753513ae8cb0
--- /dev/null
+++ b/arch/microblaze/include/asm/hash.h
@@ -0,0 +1,81 @@
+#ifndef _ASM_HASH_H
+#define _ASM_HASH_H
+
+/*
+ * Fortunately, most people who want to run Linux on Microblaze enable
+ * both multiplier and barrel shifter, but omitting them is technically
+ * a supported configuration.
+ *
+ * With just a barrel shifter, we can implement an efficient constant
+ * multiply using shifts and adds. GCC can find a 9-step solution, but
+ * this 6-step solution was found by Yevgen Voronenko's implementation
+ * of the Hcub algorithm at http://spiral.ece.cmu.edu/mcm/gen.html.
+ *
+ * That software is really not designed for a single multiplier this large,
+ * but if you run it enough times with different seeds, it'll find several
+ * 6-shift, 6-add sequences for computing x * 0x61C88647. They are all
+ * c = (x << 19) + x;
+ * a = (x << 9) + c;
+ * b = (x << 23) + a;
+ * return (a<<11) + (b<<6) + (c<<3) - b;
+ * with variations on the order of the final add.
+ *
+ * Without even a shifter, it's hopeless; any hash function will suck.
+ */
+
+#if CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL == 0
+
+#define HAVE_ARCH__HASH_32 1
+
+/* Multiply by GOLDEN_RATIO_32 = 0x61C88647 */
+static inline u32 __attribute_const__ __hash_32(u32 a)
+{
+#if CONFIG_XILINX_MICROBLAZE0_USE_BARREL
+ unsigned int b, c;
+
+ /* Phase 1: Compute three intermediate values */
+ b = a << 23;
+ c = (a << 19) + a;
+ a = (a << 9) + c;
+ b += a;
+
+ /* Phase 2: Compute (a << 11) + (b << 6) + (c << 3) - b */
+ a <<= 5;
+ a += b; /* (a << 5) + b */
+ a <<= 3;
+ a += c; /* (a << 8) + (b << 3) + c */
+ a <<= 3;
+ return a - b; /* (a << 11) + (b << 6) + (c << 3) - b */
+#else
+ /*
+ * "This is really going to hurt."
+ *
+ * Without a barrel shifter, left shifts are implemented as
+ * repeated additions, and the best we can do is an optimal
+ * addition-subtraction chain. This one is not known to be
+ * optimal, but at 37 steps, it's decent for a 31-bit multiplier.
+ *
+ * Question: given its size (37*4 = 148 bytes per instance),
+ * and slowness, is this worth having inline?
+ */
+ unsigned int b, c, d;
+
+ b = a << 4; /* 4 */
+ c = b << 1; /* 1 5 */
+ b += a; /* 1 6 */
+ c += b; /* 1 7 */
+ c <<= 3; /* 3 10 */
+ c -= a; /* 1 11 */
+ d = c << 7; /* 7 18 */
+ d += b; /* 1 19 */
+ d <<= 8; /* 8 27 */
+ d += a; /* 1 28 */
+ d <<= 1; /* 1 29 */
+ d += b; /* 1 30 */
+ d <<= 6; /* 6 36 */
+	return d + c;	/* 1 37 total instructions */
+#endif
+}
+
+#endif /* !CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL */
+#endif /* _ASM_HASH_H */
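
[Editor's aside, as with the m68k chain above: the 6-shift, 6-add Hcub sequence can be checked outside the kernel with a minimal sketch; the sequence is copied from the code above, the harness is illustrative.]

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Replay the barrel-shifter sequence from the comment block and check
 * it against a direct multiply by 0x61C88647. */
static uint32_t hash_hcub(uint32_t x)
{
	uint32_t a = x, b, c;

	b = a << 23;
	c = (a << 19) + a;
	a = (a << 9) + c;
	b += a;

	a <<= 5;
	a += b;
	a <<= 3;
	a += c;
	a <<= 3;
	return a - b;	/* (a << 11) + (b << 6) + (c << 3) - b */
}

int main(void)
{
	for (uint64_t i = 0; i <= 0xffffffffull; i += 0xfff1)
		assert(hash_hcub((uint32_t)i) == (uint32_t)i * 0x61C88647u);
	puts("Hcub sequence matches");
	return 0;
}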
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 46938847e794..ac91939b9b75 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -398,6 +398,7 @@ config MACH_PISTACHIO
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_MIPS_CPS
select SYS_SUPPORTS_MULTITHREADING
+ select SYS_SUPPORTS_RELOCATABLE
select SYS_SUPPORTS_ZBOOT
select SYS_HAS_EARLY_PRINTK
select USE_GENERIC_EARLY_PRINTK_8250
diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi
index 4a9c8f2a72d6..f6ae6ed9c4b1 100644
--- a/arch/mips/boot/dts/ingenic/jz4740.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi
@@ -5,7 +5,7 @@
#size-cells = <1>;
compatible = "ingenic,jz4740";
- cpuintc: interrupt-controller@0 {
+ cpuintc: interrupt-controller {
#address-cells = <0>;
#interrupt-cells = <1>;
interrupt-controller;
diff --git a/arch/mips/boot/dts/ralink/mt7620a.dtsi b/arch/mips/boot/dts/ralink/mt7620a.dtsi
index 08bf24fefe9f..793c0c7ca921 100644
--- a/arch/mips/boot/dts/ralink/mt7620a.dtsi
+++ b/arch/mips/boot/dts/ralink/mt7620a.dtsi
@@ -9,7 +9,7 @@
};
};
- cpuintc: cpuintc@0 {
+ cpuintc: cpuintc {
#address-cells = <0>;
#interrupt-cells = <1>;
interrupt-controller;
diff --git a/arch/mips/boot/dts/ralink/rt2880.dtsi b/arch/mips/boot/dts/ralink/rt2880.dtsi
index 182afde2f2e1..fb2faef0ab79 100644
--- a/arch/mips/boot/dts/ralink/rt2880.dtsi
+++ b/arch/mips/boot/dts/ralink/rt2880.dtsi
@@ -9,7 +9,7 @@
};
};
- cpuintc: cpuintc@0 {
+ cpuintc: cpuintc {
#address-cells = <0>;
#interrupt-cells = <1>;
interrupt-controller;
diff --git a/arch/mips/boot/dts/ralink/rt3050.dtsi b/arch/mips/boot/dts/ralink/rt3050.dtsi
index e3203d414fee..d3cb57f985da 100644
--- a/arch/mips/boot/dts/ralink/rt3050.dtsi
+++ b/arch/mips/boot/dts/ralink/rt3050.dtsi
@@ -9,7 +9,7 @@
};
};
- cpuintc: cpuintc@0 {
+ cpuintc: cpuintc {
#address-cells = <0>;
#interrupt-cells = <1>;
interrupt-controller;
diff --git a/arch/mips/boot/dts/ralink/rt3883.dtsi b/arch/mips/boot/dts/ralink/rt3883.dtsi
index 3b131dd0d5ac..3d6fc9afdaf6 100644
--- a/arch/mips/boot/dts/ralink/rt3883.dtsi
+++ b/arch/mips/boot/dts/ralink/rt3883.dtsi
@@ -9,7 +9,7 @@
};
};
- cpuintc: cpuintc@0 {
+ cpuintc: cpuintc {
#address-cells = <0>;
#interrupt-cells = <1>;
interrupt-controller;
diff --git a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
index 686ebd11386d..48d21127c3f3 100644
--- a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
+++ b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
@@ -10,7 +10,7 @@
reg = <0x0 0x08000000>;
};
- cpuintc: interrupt-controller@0 {
+ cpuintc: interrupt-controller {
#address-cells = <0>;
#interrupt-cells = <1>;
interrupt-controller;
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index dff88aa7e377..33aab89259f3 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -384,7 +384,7 @@ static int octeon_cpu_callback(struct notifier_block *nfb,
{
unsigned int cpu = (unsigned long)hcpu;
- switch (action) {
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
octeon_update_boot_vector(cpu);
break;
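
[Editor's note on the hunk above: in the kernel's hotplug notifier scheme, each CPU_* action has a _FROZEN variant formed by OR-ing in CPU_TASKS_FROZEN, so masking that bit off lets the existing case labels also cover the suspend/resume path. A trivial stand-alone illustration; constant values mirror linux/cpu.h of this era.]

#include <stdio.h>

#define CPU_UP_PREPARE		0x0003
#define CPU_TASKS_FROZEN	0x0010
#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)

int main(void)
{
	unsigned long action = CPU_UP_PREPARE_FROZEN;

	/* Without the mask, 0x0013 would match no plain case label. */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		puts("boot vector updated on the resume path too");
		break;
	}
	return 0;
}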
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 6741673c92ca..56584a659183 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -19,6 +19,28 @@
#include <asm/asmmacro-64.h>
#endif
+/*
+ * Helper macros for generating raw instruction encodings.
+ */
+#ifdef CONFIG_CPU_MICROMIPS
+ .macro insn32_if_mm enc
+ .insn
+ .hword ((\enc) >> 16)
+ .hword ((\enc) & 0xffff)
+ .endm
+
+ .macro insn_if_mips enc
+ .endm
+#else
+ .macro insn32_if_mm enc
+ .endm
+
+ .macro insn_if_mips enc
+ .insn
+ .word (\enc)
+ .endm
+#endif
+
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
.macro local_irq_enable reg=t0
ei
@@ -341,38 +363,6 @@
.endm
#else
-#ifdef CONFIG_CPU_MICROMIPS
-#define CFC_MSA_INSN 0x587e0056
-#define CTC_MSA_INSN 0x583e0816
-#define LDB_MSA_INSN 0x58000807
-#define LDH_MSA_INSN 0x58000817
-#define LDW_MSA_INSN 0x58000827
-#define LDD_MSA_INSN 0x58000837
-#define STB_MSA_INSN 0x5800080f
-#define STH_MSA_INSN 0x5800081f
-#define STW_MSA_INSN 0x5800082f
-#define STD_MSA_INSN 0x5800083f
-#define COPY_SW_MSA_INSN 0x58b00056
-#define COPY_SD_MSA_INSN 0x58b80056
-#define INSERT_W_MSA_INSN 0x59300816
-#define INSERT_D_MSA_INSN 0x59380816
-#else
-#define CFC_MSA_INSN 0x787e0059
-#define CTC_MSA_INSN 0x783e0819
-#define LDB_MSA_INSN 0x78000820
-#define LDH_MSA_INSN 0x78000821
-#define LDW_MSA_INSN 0x78000822
-#define LDD_MSA_INSN 0x78000823
-#define STB_MSA_INSN 0x78000824
-#define STH_MSA_INSN 0x78000825
-#define STW_MSA_INSN 0x78000826
-#define STD_MSA_INSN 0x78000827
-#define COPY_SW_MSA_INSN 0x78b00059
-#define COPY_SD_MSA_INSN 0x78b80059
-#define INSERT_W_MSA_INSN 0x79300819
-#define INSERT_D_MSA_INSN 0x79380819
-#endif
-
/*
* Temporary until all toolchains in use include MSA support.
*/
@@ -380,8 +370,8 @@
.set push
.set noat
SET_HARDFLOAT
- .insn
- .word CFC_MSA_INSN | (\cs << 11)
+ insn_if_mips 0x787e0059 | (\cs << 11)
+ insn32_if_mm 0x587e0056 | (\cs << 11)
move \rd, $1
.set pop
.endm
@@ -391,7 +381,8 @@
.set noat
SET_HARDFLOAT
move $1, \rs
- .word CTC_MSA_INSN | (\cd << 6)
+ insn_if_mips 0x783e0819 | (\cd << 6)
+ insn32_if_mm 0x583e0816 | (\cd << 6)
.set pop
.endm
@@ -400,7 +391,8 @@
.set noat
SET_HARDFLOAT
PTR_ADDU $1, \base, \off
- .word LDB_MSA_INSN | (\wd << 6)
+ insn_if_mips 0x78000820 | (\wd << 6)
+ insn32_if_mm 0x58000807 | (\wd << 6)
.set pop
.endm
@@ -409,7 +401,8 @@
.set noat
SET_HARDFLOAT
PTR_ADDU $1, \base, \off
- .word LDH_MSA_INSN | (\wd << 6)
+ insn_if_mips 0x78000821 | (\wd << 6)
+ insn32_if_mm 0x58000817 | (\wd << 6)
.set pop
.endm
@@ -418,7 +411,8 @@
.set noat
SET_HARDFLOAT
PTR_ADDU $1, \base, \off
- .word LDW_MSA_INSN | (\wd << 6)
+ insn_if_mips 0x78000822 | (\wd << 6)
+ insn32_if_mm 0x58000827 | (\wd << 6)
.set pop
.endm
@@ -427,7 +421,8 @@
.set noat
SET_HARDFLOAT
PTR_ADDU $1, \base, \off
- .word LDD_MSA_INSN | (\wd << 6)
+ insn_if_mips 0x78000823 | (\wd << 6)
+ insn32_if_mm 0x58000837 | (\wd << 6)
.set pop
.endm
@@ -436,7 +431,8 @@
.set noat
SET_HARDFLOAT
PTR_ADDU $1, \base, \off
- .word STB_MSA_INSN | (\wd << 6)
+ insn_if_mips 0x78000824 | (\wd << 6)
+ insn32_if_mm 0x5800080f | (\wd << 6)
.set pop
.endm
@@ -445,7 +441,8 @@
.set noat
SET_HARDFLOAT
PTR_ADDU $1, \base, \off
- .word STH_MSA_INSN | (\wd << 6)
+ insn_if_mips 0x78000825 | (\wd << 6)
+ insn32_if_mm 0x5800081f | (\wd << 6)
.set pop
.endm
@@ -454,7 +451,8 @@
.set noat
SET_HARDFLOAT
PTR_ADDU $1, \base, \off
- .word STW_MSA_INSN | (\wd << 6)
+ insn_if_mips 0x78000826 | (\wd << 6)
+ insn32_if_mm 0x5800082f | (\wd << 6)
.set pop
.endm
@@ -463,7 +461,8 @@
.set noat
SET_HARDFLOAT
PTR_ADDU $1, \base, \off
- .word STD_MSA_INSN | (\wd << 6)
+ insn_if_mips 0x78000827 | (\wd << 6)
+ insn32_if_mm 0x5800083f | (\wd << 6)
.set pop
.endm
@@ -471,8 +470,8 @@
.set push
.set noat
SET_HARDFLOAT
- .insn
- .word COPY_SW_MSA_INSN | (\n << 16) | (\ws << 11)
+ insn_if_mips 0x78b00059 | (\n << 16) | (\ws << 11)
+ insn32_if_mm 0x58b00056 | (\n << 16) | (\ws << 11)
.set pop
.endm
@@ -480,8 +479,8 @@
.set push
.set noat
SET_HARDFLOAT
- .insn
- .word COPY_SD_MSA_INSN | (\n << 16) | (\ws << 11)
+ insn_if_mips 0x78b80059 | (\n << 16) | (\ws << 11)
+ insn32_if_mm 0x58b80056 | (\n << 16) | (\ws << 11)
.set pop
.endm
@@ -489,7 +488,8 @@
.set push
.set noat
SET_HARDFLOAT
- .word INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6)
+ insn_if_mips 0x79300819 | (\n << 16) | (\wd << 6)
+ insn32_if_mm 0x59300816 | (\n << 16) | (\wd << 6)
.set pop
.endm
@@ -497,7 +497,8 @@
.set push
.set noat
SET_HARDFLOAT
- .word INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6)
+ insn_if_mips 0x79380819 | (\n << 16) | (\wd << 6)
+ insn32_if_mm 0x59380816 | (\n << 16) | (\wd << 6)
.set pop
.endm
#endif
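
[Editor's aside for readers unfamiliar with the encodings being folded into the macros above: each raw MSA opcode is a fixed base with the register number OR-ed into a field, which is why one macro invocation can carry the classic and microMIPS bases side by side. A small host-side sketch of the composition; opcode values are taken from the hunks above, the helpers are illustrative.]

#include <stdint.h>
#include <stdio.h>

/* Compose the raw ld.b encoding the way the asm macros do: fixed base
 * opcode, destination register OR-ed into bits [10:6]. */
static uint32_t msa_ldb_mips(unsigned int wd)
{
	return 0x78000820u | (wd << 6);	/* classic MIPS32 encoding */
}

static uint32_t msa_ldb_mm(unsigned int wd)
{
	return 0x58000807u | (wd << 6);	/* microMIPS encoding */
}

int main(void)
{
	printf("ld.b $w5: mips 0x%08x, micromips 0x%08x\n",
	       msa_ldb_mips(5), msa_ldb_mm(5));
	return 0;
}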
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index dbb1eb6e284f..e0fecf206f2c 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -58,8 +58,8 @@
* address of a label as argument to inline assembler. Gas otoh has the
* annoying difference between la and dla which are only usable for 32-bit
* rsp. 64-bit code, so can't be used without conditional compilation.
- * The alterantive is switching the assembler to 64-bit code which happens
- * to work right even for 32-bit code ...
+ * The alternative is switching the assembler to 64-bit code which happens
+ * to work right even for 32-bit code...
*/
#define instruction_hazard() \
do { \
@@ -133,8 +133,8 @@ do { \
* address of a label as argument to inline assembler. Gas otoh has the
* annoying difference between la and dla which are only usable for 32-bit
* rsp. 64-bit code, so can't be used without conditional compilation.
- * The alterantive is switching the assembler to 64-bit code which happens
- * to work right even for 32-bit code ...
+ * The alternative is switching the assembler to 64-bit code which happens
+ * to work right even for 32-bit code...
*/
#define __instruction_hazard() \
do { \
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
index ca8077afac4a..456ddba152c4 100644
--- a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
+++ b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
@@ -100,7 +100,7 @@ typedef volatile struct au1xxx_ddma_desc {
u32 dscr_nxtptr; /* Next descriptor pointer (mostly) */
/*
* First 32 bytes are HW specific!!!
- * Lets have some SW data following -- make sure it's 32 bytes.
+ * Let's have some SW data following -- make sure it's 32 bytes.
*/
u32 sw_status;
u32 sw_context;
diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1300.h b/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
index ce02894271c6..d607d643b973 100644
--- a/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
+++ b/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
@@ -140,7 +140,7 @@ static inline int au1300_gpio_getinitlvl(unsigned int gpio)
* Cases 1 and 3 are intended for boards which want to provide their own
* GPIO namespace and -operations (i.e. for example you have 8 GPIOs
* which are in part provided by spare Au1300 GPIO pins and in part by
-* an external FPGA but you still want them to be accssible in linux
+* an external FPGA but you still want them to be accessible in Linux
* as gpio0-7. The board can of course use the alchemy_gpioX_* functions
* as required).
*/
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
index 466fc85899f4..c4e856f27040 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
@@ -22,7 +22,7 @@ struct bcm63xx_enet_platform_data {
int has_phy_interrupt;
int phy_interrupt;
- /* if has_phy, use autonegociated pause parameters or force
+ /* if has_phy, use autonegotiated pause parameters or force
* them */
int pause_auto;
int pause_rx;
diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h
index 1daa64412569..04d862020ac9 100644
--- a/arch/mips/include/asm/mach-ip27/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip27/dma-coherence.h
@@ -64,7 +64,7 @@ static inline void plat_post_dma_flush(struct device *dev)
static inline int plat_device_is_coherent(struct device *dev)
{
- return 1; /* IP27 non-cohernet mode is unsupported */
+ return 1; /* IP27 non-coherent mode is unsupported */
}
#endif /* __ASM_MACH_IP27_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h b/arch/mips/include/asm/mach-ip32/dma-coherence.h
index 0a0b0e2ced60..7bdf212587a0 100644
--- a/arch/mips/include/asm/mach-ip32/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip32/dma-coherence.h
@@ -86,7 +86,7 @@ static inline void plat_post_dma_flush(struct device *dev)
static inline int plat_device_is_coherent(struct device *dev)
{
- return 0; /* IP32 is non-cohernet */
+ return 0; /* IP32 is non-coherent */
}
#endif /* __ASM_MACH_IP32_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
index 7023883ca50f..8e9b022c3594 100644
--- a/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
+++ b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
@@ -22,7 +22,7 @@
/*
* during early_printk no ioremap possible at this early stage
- * lets use KSEG1 instead
+ * let's use KSEG1 instead
*/
#define LTQ_ASC0_BASE_ADDR 0x1E100C00
#define LTQ_EARLY_ASC KSEG1ADDR(LTQ_ASC0_BASE_ADDR)
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
index f87310755319..17b41bb5991f 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
@@ -75,7 +75,7 @@ extern __iomem void *ltq_cgu_membase;
/*
* during early_printk no ioremap is possible
- * lets use KSEG1 instead
+ * let's use KSEG1 instead
*/
#define LTQ_ASC1_BASE_ADDR 0x1E100C00
#define LTQ_EARLY_ASC KSEG1ADDR(LTQ_ASC1_BASE_ADDR)
diff --git a/arch/mips/include/asm/mach-loongson64/loongson_hwmon.h b/arch/mips/include/asm/mach-loongson64/loongson_hwmon.h
index 4431fc54a36c..74230d0ca98b 100644
--- a/arch/mips/include/asm/mach-loongson64/loongson_hwmon.h
+++ b/arch/mips/include/asm/mach-loongson64/loongson_hwmon.h
@@ -24,7 +24,7 @@ struct temp_range {
u8 level;
};
-#define CONSTANT_SPEED_POLICY 0 /* at constent speed */
+#define CONSTANT_SPEED_POLICY 0 /* at constant speed */
#define STEP_SPEED_POLICY 1 /* use up/down arrays to describe policy */
#define KERNEL_HELPER_POLICY 2 /* kernel as a helper to fan control */
diff --git a/arch/mips/include/asm/mach-malta/kernel-entry-init.h b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
index 0cf8622db27f..ab03eb3fadac 100644
--- a/arch/mips/include/asm/mach-malta/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
@@ -56,7 +56,7 @@
(0 << MIPS_SEGCFG_PA_SHIFT) | \
(1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
or t0, t2
- mtc0 t0, $5, 2
+ mtc0 t0, CP0_SEGCTL0
/* SegCtl1 */
li t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
@@ -67,7 +67,7 @@
(0 << MIPS_SEGCFG_PA_SHIFT) | \
(1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
ins t0, t1, 16, 3
- mtc0 t0, $5, 3
+ mtc0 t0, CP0_SEGCTL1
/* SegCtl2 */
li t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
@@ -77,7 +77,7 @@
(4 << MIPS_SEGCFG_PA_SHIFT) | \
(1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
or t0, t2
- mtc0 t0, $5, 4
+ mtc0 t0, CP0_SEGCTL2
jal mips_ihb
mfc0 t0, $16, 5
diff --git a/arch/mips/include/asm/mips_mt.h b/arch/mips/include/asm/mips_mt.h
index f6ba004a7711..aa4cca060e0a 100644
--- a/arch/mips/include/asm/mips_mt.h
+++ b/arch/mips/include/asm/mips_mt.h
@@ -1,5 +1,5 @@
/*
- * Definitions and decalrations for MIPS MT support that are common between
+ * Definitions and declarations for MIPS MT support that are common between
* the VSMP, and AP/SP kernel models.
*/
#ifndef __ASM_MIPS_MT_H
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 25d01577d0b5..e1ca65c62f6a 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -48,6 +48,9 @@
#define CP0_CONF $3
#define CP0_CONTEXT $4
#define CP0_PAGEMASK $5
+#define CP0_SEGCTL0 $5, 2
+#define CP0_SEGCTL1 $5, 3
+#define CP0_SEGCTL2 $5, 4
#define CP0_WIRED $6
#define CP0_INFO $7
#define CP0_HWRENA $7, 0
@@ -726,6 +729,8 @@
#define MIPS_PWFIELD_PTEI_SHIFT 0
#define MIPS_PWFIELD_PTEI_MASK 0x0000003f
+#define MIPS_PWSIZE_PS_SHIFT 30
+#define MIPS_PWSIZE_PS_MASK 0x40000000
#define MIPS_PWSIZE_GDW_SHIFT 24
#define MIPS_PWSIZE_GDW_MASK 0x3f000000
#define MIPS_PWSIZE_UDW_SHIFT 18
@@ -739,6 +744,12 @@
#define MIPS_PWCTL_PWEN_SHIFT 31
#define MIPS_PWCTL_PWEN_MASK 0x80000000
+#define MIPS_PWCTL_XK_SHIFT 28
+#define MIPS_PWCTL_XK_MASK 0x10000000
+#define MIPS_PWCTL_XS_SHIFT 27
+#define MIPS_PWCTL_XS_MASK 0x08000000
+#define MIPS_PWCTL_XU_SHIFT 26
+#define MIPS_PWCTL_XU_MASK 0x04000000
#define MIPS_PWCTL_DPH_SHIFT 7
#define MIPS_PWCTL_DPH_MASK 0x00000080
#define MIPS_PWCTL_HUGEPG_SHIFT 6
@@ -1046,6 +1057,33 @@ static inline int mm_insn_16bit(u16 insn)
}
/*
+ * Helper macros for generating raw instruction encodings in inline asm.
+ */
+#ifdef CONFIG_CPU_MICROMIPS
+#define _ASM_INSN16_IF_MM(_enc) \
+ ".insn\n\t" \
+ ".hword (" #_enc ")\n\t"
+#define _ASM_INSN32_IF_MM(_enc) \
+ ".insn\n\t" \
+ ".hword ((" #_enc ") >> 16)\n\t" \
+ ".hword ((" #_enc ") & 0xffff)\n\t"
+#else
+#define _ASM_INSN_IF_MIPS(_enc) \
+ ".insn\n\t" \
+ ".word (" #_enc ")\n\t"
+#endif
+
+#ifndef _ASM_INSN16_IF_MM
+#define _ASM_INSN16_IF_MM(_enc)
+#endif
+#ifndef _ASM_INSN32_IF_MM
+#define _ASM_INSN32_IF_MM(_enc)
+#endif
+#ifndef _ASM_INSN_IF_MIPS
+#define _ASM_INSN_IF_MIPS(_enc)
+#endif
+
+/*
* TLB Invalidate Flush
*/
static inline void tlbinvf(void)
@@ -1053,7 +1091,9 @@ static inline void tlbinvf(void)
__asm__ __volatile__(
".set push\n\t"
".set noreorder\n\t"
- ".word 0x42000004\n\t" /* tlbinvf */
+ "# tlbinvf\n\t"
+ _ASM_INSN_IF_MIPS(0x42000004)
+ _ASM_INSN32_IF_MM(0x0000537c)
".set pop");
}
@@ -1274,9 +1314,9 @@ do { \
" .set push \n" \
" .set noat \n" \
" .set mips32r2 \n" \
- " .insn \n" \
" # mfhc0 $1, %1 \n" \
- " .word (0x40410000 | ((%1 & 0x1f) << 11)) \n" \
+ _ASM_INSN_IF_MIPS(0x40410000 | ((%1 & 0x1f) << 11)) \
+ _ASM_INSN32_IF_MM(0x002000f4 | ((%1 & 0x1f) << 16)) \
" move %0, $1 \n" \
" .set pop \n" \
: "=r" (__res) \
@@ -1292,8 +1332,8 @@ do { \
" .set mips32r2 \n" \
" move $1, %0 \n" \
" # mthc0 $1, %1 \n" \
- " .insn \n" \
- " .word (0x40c10000 | ((%1 & 0x1f) << 11)) \n" \
+ _ASM_INSN_IF_MIPS(0x40c10000 | ((%1 & 0x1f) << 11)) \
+ _ASM_INSN32_IF_MM(0x002002f4 | ((%1 & 0x1f) << 16)) \
" .set pop \n" \
: \
: "r" (value), "i" (register)); \
@@ -1743,7 +1783,8 @@ do { \
".set\tpush\n\t" \
".set\tnoat\n\t" \
"# mfgc0\t$1, $%1, %2\n\t" \
- ".word\t(0x40610000 | %1 << 11 | %2)\n\t" \
+ _ASM_INSN_IF_MIPS(0x40610000 | %1 << 11 | %2) \
+ _ASM_INSN32_IF_MM(0x002004fc | %1 << 16 | %2 << 11) \
"move\t%0, $1\n\t" \
".set\tpop" \
: "=r" (__res) \
@@ -1757,7 +1798,8 @@ do { \
".set\tpush\n\t" \
".set\tnoat\n\t" \
"# dmfgc0\t$1, $%1, %2\n\t" \
- ".word\t(0x40610100 | %1 << 11 | %2)\n\t" \
+ _ASM_INSN_IF_MIPS(0x40610100 | %1 << 11 | %2) \
+ _ASM_INSN32_IF_MM(0x582004fc | %1 << 16 | %2 << 11) \
"move\t%0, $1\n\t" \
".set\tpop" \
: "=r" (__res) \
@@ -1770,9 +1812,10 @@ do { \
__asm__ __volatile__( \
".set\tpush\n\t" \
".set\tnoat\n\t" \
- "move\t$1, %0\n\t" \
+ "move\t$1, %z0\n\t" \
"# mtgc0\t$1, $%1, %2\n\t" \
- ".word\t(0x40610200 | %1 << 11 | %2)\n\t" \
+ _ASM_INSN_IF_MIPS(0x40610200 | %1 << 11 | %2) \
+ _ASM_INSN32_IF_MM(0x002006fc | %1 << 16 | %2 << 11) \
".set\tpop" \
: : "Jr" ((unsigned int)(value)), \
"i" (register), "i" (sel)); \
@@ -1783,9 +1826,10 @@ do { \
__asm__ __volatile__( \
".set\tpush\n\t" \
".set\tnoat\n\t" \
- "move\t$1, %0\n\t" \
+ "move\t$1, %z0\n\t" \
"# dmtgc0\t$1, $%1, %2\n\t" \
- ".word\t(0x40610300 | %1 << 11 | %2)\n\t" \
+ _ASM_INSN_IF_MIPS(0x40610300 | %1 << 11 | %2) \
+ _ASM_INSN32_IF_MM(0x582006fc | %1 << 16 | %2 << 11) \
".set\tpop" \
: : "Jr" (value), \
"i" (register), "i" (sel)); \
@@ -2246,7 +2290,6 @@ do { \
#else
-#ifdef CONFIG_CPU_MICROMIPS
#define rddsp(mask) \
({ \
unsigned int __res; \
@@ -2255,8 +2298,8 @@ do { \
" .set push \n" \
" .set noat \n" \
" # rddsp $1, %x1 \n" \
- " .hword ((0x0020067c | (%x1 << 14)) >> 16) \n" \
- " .hword ((0x0020067c | (%x1 << 14)) & 0xffff) \n" \
+ _ASM_INSN_IF_MIPS(0x7c000cb8 | (%x1 << 16)) \
+ _ASM_INSN32_IF_MM(0x0020067c | (%x1 << 14)) \
" move %0, $1 \n" \
" .set pop \n" \
: "=r" (__res) \
@@ -2271,22 +2314,22 @@ do { \
" .set noat \n" \
" move $1, %0 \n" \
" # wrdsp $1, %x1 \n" \
- " .hword ((0x0020167c | (%x1 << 14)) >> 16) \n" \
- " .hword ((0x0020167c | (%x1 << 14)) & 0xffff) \n" \
+ _ASM_INSN_IF_MIPS(0x7c2004f8 | (%x1 << 11)) \
+ _ASM_INSN32_IF_MM(0x0020167c | (%x1 << 14)) \
" .set pop \n" \
: \
: "r" (val), "i" (mask)); \
} while (0)
-#define _umips_dsp_mfxxx(ins) \
+#define _dsp_mfxxx(ins) \
({ \
unsigned long __treg; \
\
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
- " .hword 0x0001 \n" \
- " .hword %x1 \n" \
+ _ASM_INSN_IF_MIPS(0x00000810 | %X1) \
+ _ASM_INSN32_IF_MM(0x0001007c | %x1) \
" move %0, $1 \n" \
" .set pop \n" \
: "=r" (__treg) \
@@ -2294,101 +2337,28 @@ do { \
__treg; \
})
-#define _umips_dsp_mtxxx(val, ins) \
+#define _dsp_mtxxx(val, ins) \
do { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" move $1, %0 \n" \
- " .hword 0x0001 \n" \
- " .hword %x1 \n" \
+ _ASM_INSN_IF_MIPS(0x00200011 | %X1) \
+ _ASM_INSN32_IF_MM(0x0001207c | %x1) \
" .set pop \n" \
: \
: "r" (val), "i" (ins)); \
} while (0)
-#define _umips_dsp_mflo(reg) _umips_dsp_mfxxx((reg << 14) | 0x107c)
-#define _umips_dsp_mfhi(reg) _umips_dsp_mfxxx((reg << 14) | 0x007c)
-
-#define _umips_dsp_mtlo(val, reg) _umips_dsp_mtxxx(val, ((reg << 14) | 0x307c))
-#define _umips_dsp_mthi(val, reg) _umips_dsp_mtxxx(val, ((reg << 14) | 0x207c))
-
-#define mflo0() _umips_dsp_mflo(0)
-#define mflo1() _umips_dsp_mflo(1)
-#define mflo2() _umips_dsp_mflo(2)
-#define mflo3() _umips_dsp_mflo(3)
-
-#define mfhi0() _umips_dsp_mfhi(0)
-#define mfhi1() _umips_dsp_mfhi(1)
-#define mfhi2() _umips_dsp_mfhi(2)
-#define mfhi3() _umips_dsp_mfhi(3)
+#ifdef CONFIG_CPU_MICROMIPS
-#define mtlo0(x) _umips_dsp_mtlo(x, 0)
-#define mtlo1(x) _umips_dsp_mtlo(x, 1)
-#define mtlo2(x) _umips_dsp_mtlo(x, 2)
-#define mtlo3(x) _umips_dsp_mtlo(x, 3)
+#define _dsp_mflo(reg) _dsp_mfxxx((reg << 14) | 0x1000)
+#define _dsp_mfhi(reg) _dsp_mfxxx((reg << 14) | 0x0000)
-#define mthi0(x) _umips_dsp_mthi(x, 0)
-#define mthi1(x) _umips_dsp_mthi(x, 1)
-#define mthi2(x) _umips_dsp_mthi(x, 2)
-#define mthi3(x) _umips_dsp_mthi(x, 3)
+#define _dsp_mtlo(val, reg) _dsp_mtxxx(val, ((reg << 14) | 0x1000))
+#define _dsp_mthi(val, reg) _dsp_mtxxx(val, ((reg << 14) | 0x0000))
#else /* !CONFIG_CPU_MICROMIPS */
-#define rddsp(mask) \
-({ \
- unsigned int __res; \
- \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " # rddsp $1, %x1 \n" \
- " .word 0x7c000cb8 | (%x1 << 16) \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__res) \
- : "i" (mask)); \
- __res; \
-})
-
-#define wrdsp(val, mask) \
-do { \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " move $1, %0 \n" \
- " # wrdsp $1, %x1 \n" \
- " .word 0x7c2004f8 | (%x1 << 11) \n" \
- " .set pop \n" \
- : \
- : "r" (val), "i" (mask)); \
-} while (0)
-
-#define _dsp_mfxxx(ins) \
-({ \
- unsigned long __treg; \
- \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " .word (0x00000810 | %1) \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__treg) \
- : "i" (ins)); \
- __treg; \
-})
-
-#define _dsp_mtxxx(val, ins) \
-do { \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " move $1, %0 \n" \
- " .word (0x00200011 | %1) \n" \
- " .set pop \n" \
- : \
- : "r" (val), "i" (ins)); \
-} while (0)
#define _dsp_mflo(reg) _dsp_mfxxx((reg << 21) | 0x0002)
#define _dsp_mfhi(reg) _dsp_mfxxx((reg << 21) | 0x0000)
@@ -2396,6 +2366,8 @@ do { \
#define _dsp_mtlo(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0002))
#define _dsp_mthi(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0000))
+#endif /* CONFIG_CPU_MICROMIPS */
+
#define mflo0() _dsp_mflo(0)
#define mflo1() _dsp_mflo(1)
#define mflo2() _dsp_mflo(2)
@@ -2416,7 +2388,6 @@ do { \
#define mthi2(x) _dsp_mthi(x, 2)
#define mthi3(x) _dsp_mthi(x, 3)
-#endif /* CONFIG_CPU_MICROMIPS */
#endif
/*
@@ -2556,28 +2527,32 @@ static inline void guest_tlb_probe(void)
{
__asm__ __volatile__(
"# tlbgp\n\t"
- ".word 0x42000010");
+ _ASM_INSN_IF_MIPS(0x42000010)
+ _ASM_INSN32_IF_MM(0x0000017c));
}
static inline void guest_tlb_read(void)
{
__asm__ __volatile__(
"# tlbgr\n\t"
- ".word 0x42000009");
+ _ASM_INSN_IF_MIPS(0x42000009)
+ _ASM_INSN32_IF_MM(0x0000117c));
}
static inline void guest_tlb_write_indexed(void)
{
__asm__ __volatile__(
"# tlbgwi\n\t"
- ".word 0x4200000a");
+ _ASM_INSN_IF_MIPS(0x4200000a)
+ _ASM_INSN32_IF_MM(0x0000217c));
}
static inline void guest_tlb_write_random(void)
{
__asm__ __volatile__(
"# tlbgwr\n\t"
- ".word 0x4200000e");
+ _ASM_INSN_IF_MIPS(0x4200000e)
+ _ASM_INSN32_IF_MM(0x0000317c));
}
/*
@@ -2587,7 +2562,8 @@ static inline void guest_tlbinvf(void)
{
__asm__ __volatile__(
"# tlbginvf\n\t"
- ".word 0x4200000c");
+ _ASM_INSN_IF_MIPS(0x4200000c)
+ _ASM_INSN32_IF_MM(0x0000517c));
}
#endif /* !TOOLCHAIN_SUPPORTS_VIRT */
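
[Editor's aside: the _ASM_INSN_* helpers introduced in this file rely on preprocessor stringization, so the same integer expression serves both build flavours of an inline asm template. A stand-alone sketch of the expansion; the macro body is copied from the hunk above, the use here is illustrative.]

#include <stdio.h>

/* Non-microMIPS flavour from the hunk above: stringize the encoding so
 * it lands inside the asm template as a .word directive. */
#define _ASM_INSN_IF_MIPS(_enc)		\
	".insn\n\t"			\
	".word (" #_enc ")\n\t"

int main(void)
{
	/* Prints the literal text the compiler would paste into asm(). */
	fputs(_ASM_INSN_IF_MIPS(0x42000004), stdout);	/* tlbinvf */
	return 0;
}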
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
index 6e4effa6f626..ddf496cb2a2a 100644
--- a/arch/mips/include/asm/msa.h
+++ b/arch/mips/include/asm/msa.h
@@ -192,13 +192,6 @@ static inline void write_msa_##name(unsigned int val) \
* allow compilation with toolchains that do not support MSA. Once all
* toolchains in use support MSA these can be removed.
*/
-#ifdef CONFIG_CPU_MICROMIPS
-#define CFC_MSA_INSN 0x587e0056
-#define CTC_MSA_INSN 0x583e0816
-#else
-#define CFC_MSA_INSN 0x787e0059
-#define CTC_MSA_INSN 0x783e0819
-#endif
#define __BUILD_MSA_CTL_REG(name, cs) \
static inline unsigned int read_msa_##name(void) \
@@ -207,11 +200,12 @@ static inline unsigned int read_msa_##name(void) \
__asm__ __volatile__( \
" .set push\n" \
" .set noat\n" \
- " .insn\n" \
- " .word %1 | (" #cs " << 11)\n" \
+ " # cfcmsa $1, $%1\n" \
+ _ASM_INSN_IF_MIPS(0x787e0059 | %1 << 11) \
+ _ASM_INSN32_IF_MM(0x587e0056 | %1 << 11) \
" move %0, $1\n" \
" .set pop\n" \
- : "=r"(reg) : "i"(CFC_MSA_INSN)); \
+ : "=r"(reg) : "i"(cs)); \
return reg; \
} \
\
@@ -221,10 +215,11 @@ static inline void write_msa_##name(unsigned int val) \
" .set push\n" \
" .set noat\n" \
" move $1, %0\n" \
- " .insn\n" \
- " .word %1 | (" #cs " << 6)\n" \
+ " # ctcmsa $%1, $1\n" \
+ _ASM_INSN_IF_MIPS(0x783e0819 | %1 << 6) \
+ _ASM_INSN32_IF_MM(0x583e0816 | %1 << 6) \
" .set pop\n" \
- : : "r"(val), "i"(CTC_MSA_INSN)); \
+ : : "r"(val), "i"(cs)); \
}
#endif /* !TOOLCHAIN_SUPPORTS_MSA */
diff --git a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
index 8d05d9069823..a07a36f7d814 100644
--- a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
+++ b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
@@ -146,7 +146,7 @@ typedef struct {
* This structure contains the global state of all command queues.
* It is stored in a bootmem named block and shared by all
 * applications running on Octeon. Tickets are stored in a different
- * cahce line that queue information to reduce the contention on the
+ * cache line than queue information to reduce the contention on the
* ll/sc used to get a ticket. If this is not the case, the update
* of queue state causes the ll/sc to fail quite often.
*/
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-board.h b/arch/mips/include/asm/octeon/cvmx-helper-board.h
index 893320375aef..cda93aee712c 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-board.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-board.h
@@ -94,7 +94,7 @@ extern int cvmx_helper_board_get_mii_address(int ipd_port);
* @phy_addr: The address of the PHY to program
* @link_flags:
* Flags to control autonegotiation. Bit 0 is autonegotiation
- * enable/disable to maintain backware compatibility.
+ * enable/disable to maintain backward compatibility.
* @link_info: Link speed to program. If the speed is zero and autonegotiation
* is enabled, all possible negotiation speeds are advertised.
*
diff --git a/arch/mips/include/asm/octeon/cvmx-ipd.h b/arch/mips/include/asm/octeon/cvmx-ipd.h
index e13490ebbb27..cbdc14b77435 100644
--- a/arch/mips/include/asm/octeon/cvmx-ipd.h
+++ b/arch/mips/include/asm/octeon/cvmx-ipd.h
@@ -39,7 +39,7 @@
enum cvmx_ipd_mode {
CVMX_IPD_OPC_MODE_STT = 0LL, /* All blocks DRAM, not cached in L2 */
- CVMX_IPD_OPC_MODE_STF = 1LL, /* All bloccks into L2 */
+ CVMX_IPD_OPC_MODE_STF = 1LL, /* All blocks into L2 */
CVMX_IPD_OPC_MODE_STF1_STT = 2LL, /* 1st block L2, rest DRAM */
CVMX_IPD_OPC_MODE_STF2_STT = 3LL /* 1st, 2nd blocks L2, rest DRAM */
};
diff --git a/arch/mips/include/asm/octeon/cvmx-pow.h b/arch/mips/include/asm/octeon/cvmx-pow.h
index 51531563f8dc..410bb70e5aac 100644
--- a/arch/mips/include/asm/octeon/cvmx-pow.h
+++ b/arch/mips/include/asm/octeon/cvmx-pow.h
@@ -2051,7 +2051,7 @@ static inline void cvmx_pow_tag_sw_desched(uint32_t tag,
}
/**
- * Descchedules the current work queue entry.
+ * Deschedules the current work queue entry.
*
* @no_sched: no schedule flag value to be set on the work queue
* entry. If this is set the entry will not be
diff --git a/arch/mips/include/asm/sgi/hpc3.h b/arch/mips/include/asm/sgi/hpc3.h
index 4a9c99050c13..c0e3dc0293a7 100644
--- a/arch/mips/include/asm/sgi/hpc3.h
+++ b/arch/mips/include/asm/sgi/hpc3.h
@@ -39,7 +39,7 @@ struct hpc3_pbus_dmacregs {
volatile u32 pbdma_dptr; /* pbus dma channel desc ptr */
u32 _unused0[0x1000/4 - 2]; /* padding */
volatile u32 pbdma_ctrl; /* pbus dma channel control register has
- * copletely different meaning for read
+ * completely different meaning for read
* compared with write */
/* read */
#define HPC3_PDMACTRL_INT 0x00000001 /* interrupt (cleared after read) */
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index ceca6cc41b2b..6dc3f1fdaccc 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -481,7 +481,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
/*
* OK we are here either because we hit a NAL
* instruction or because we are emulating an
- * old bltzal{,l} one. Lets figure out what the
+ * old bltzal{,l} one. Let's figure out what the
* case really is.
*/
if (!insn.i_format.rs) {
@@ -515,7 +515,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
/*
* OK we are here either because we hit a BAL
* instruction or because we are emulating an
- * old bgezal{,l} one. Lets figure out what the
+ * old bgezal{,l} one. Let's figure out what the
* case really is.
*/
if (!insn.i_format.rs) {
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 51b98dc371b3..59476a607add 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -441,6 +441,21 @@ LEAF(mips_cps_boot_vpes)
mfc0 t0, CP0_CONFIG
mttc0 t0, CP0_CONFIG
+ /*
+ * Copy the EVA config from this VPE if the CPU supports it.
+ * CONFIG3 must exist to be running MT startup - just read it.
+ */
+ mfc0 t0, CP0_CONFIG, 3
+ and t0, t0, MIPS_CONF3_SC
+ beqz t0, 3f
+ nop
+ mfc0 t0, CP0_SEGCTL0
+ mttc0 t0, CP0_SEGCTL0
+ mfc0 t0, CP0_SEGCTL1
+ mttc0 t0, CP0_SEGCTL1
+ mfc0 t0, CP0_SEGCTL2
+ mttc0 t0, CP0_SEGCTL2
+3:
/* Ensure no software interrupts are pending */
mttc0 zero, CP0_CAUSE
mttc0 zero, CP0_STATUS
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 5ac5c3e23460..a88d44247cc8 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -833,10 +833,8 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
c->options |= MIPS_CPU_MAAR;
if (config5 & MIPS_CONF5_LLB)
c->options |= MIPS_CPU_RW_LLB;
-#ifdef CONFIG_XPA
if (config5 & MIPS_CONF5_MVH)
- c->options |= MIPS_CPU_XPA;
-#endif
+ c->options |= MIPS_CPU_MVH;
if (cpu_has_mips_r6 && (config5 & MIPS_CONF5_VP))
c->options |= MIPS_CPU_VP;
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index c3c234dc0c07..891f5ee63983 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -88,7 +88,7 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
elf32 = ehdr->e32.e_ident[EI_CLASS] == ELFCLASS32;
flags = elf32 ? ehdr->e32.e_flags : ehdr->e64.e_flags;
- /* Lets see if this is an O32 ELF */
+ /* Let's see if this is an O32 ELF */
if (elf32) {
if (flags & EF_MIPS_FP64) {
/*
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 8eb5af805964..f25f7eab7307 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -54,6 +54,9 @@ void __init init_IRQ(void)
for (i = 0; i < NR_IRQS; i++)
irq_set_noprobe(i);
+ if (cpu_has_veic)
+ clear_c0_status(ST0_IM);
+
arch_init_irq();
}
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index 625ee770b1aa..7ff2a557f4aa 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -2202,7 +2202,7 @@ fpu_emul:
}
/*
- * Lets not return to userland just yet. It's constly and
+ * Let's not return to userland just yet. It's costly and
* it's likely we have more R2 instructions to emulate
*/
if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index 5021c546ad07..d64056e0bb56 100644
--- a/arch/mips/kernel/perf_event.c
+++ b/arch/mips/kernel/perf_event.c
@@ -25,8 +25,8 @@
* the user stack callchains, we will add it here.
*/
-static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
- unsigned long reg29)
+static void save_raw_perf_callchain(struct perf_callchain_entry_ctx *entry,
+ unsigned long reg29)
{
unsigned long *sp = (unsigned long *)reg29;
unsigned long addr;
@@ -35,14 +35,14 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
addr = *sp++;
if (__kernel_text_address(addr)) {
perf_callchain_store(entry, addr);
- if (entry->nr >= sysctl_perf_event_max_stack)
+ if (entry->nr >= entry->max_stack)
break;
}
}
}
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
- struct pt_regs *regs)
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
{
unsigned long sp = regs->regs[29];
#ifdef CONFIG_KALLSYMS
@@ -59,7 +59,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
}
do {
perf_callchain_store(entry, pc);
- if (entry->nr >= sysctl_perf_event_max_stack)
+ if (entry->nr >= entry->max_stack)
break;
pc = unwind_stack(current, &sp, pc, &ra);
} while (pc);
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 411c971e3417..813ed7829c61 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -345,7 +345,7 @@ static int get_frame_info(struct mips_frame_info *info)
return 0;
if (info->pc_offset < 0) /* leaf */
return 1;
- /* prologue seems boggus... */
+ /* prologue seems bogus... */
err:
return -1;
}
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index ab042291fbfd..ae4231452115 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -770,15 +770,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
sigset_t *oldset = sigmask_to_save();
int ret;
struct mips_abi *abi = current->thread.abi;
-#ifdef CONFIG_CPU_MICROMIPS
- void *vdso;
- unsigned long tmp = (unsigned long)current->mm->context.vdso;
-
- set_isa16_mode(tmp);
- vdso = (void *)tmp;
-#else
void *vdso = current->mm->context.vdso;
-#endif
if (regs->regs[0]) {
switch(regs->regs[2]) {
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 1061bd2e7e9c..4ed36f288d64 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -359,8 +359,12 @@ static void cps_init_secondary(void)
BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
}
- change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 |
- STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7);
+ if (cpu_has_veic)
+ clear_c0_status(ST0_IM);
+ else
+ change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
+ STATUSF_IP4 | STATUSF_IP5 |
+ STATUSF_IP6 | STATUSF_IP7);
}
static void cps_smp_finish(void)
diff --git a/arch/mips/lasat/picvue_proc.c b/arch/mips/lasat/picvue_proc.c
index b42095880667..27533c109f92 100644
--- a/arch/mips/lasat/picvue_proc.c
+++ b/arch/mips/lasat/picvue_proc.c
@@ -43,7 +43,7 @@ static int pvc_line_proc_show(struct seq_file *m, void *v)
{
int lineno = *(int *)m->private;
- if (lineno < 0 || lineno > PVC_NLINES) {
+ if (lineno < 0 || lineno >= PVC_NLINES) {
printk(KERN_WARNING "proc_read_line: invalid lineno %d\n", lineno);
return 0;
}
@@ -67,7 +67,7 @@ static ssize_t pvc_line_proc_write(struct file *file, const char __user *buf,
char kbuf[PVC_LINELEN];
size_t len;
- BUG_ON(lineno < 0 || lineno > PVC_NLINES);
+ BUG_ON(lineno < 0 || lineno >= PVC_NLINES);
len = min(count, sizeof(kbuf) - 1);
if (copy_from_user(kbuf, buf, len))
diff --git a/arch/mips/lib/ashldi3.c b/arch/mips/lib/ashldi3.c
index beb80f316095..927dc94a030f 100644
--- a/arch/mips/lib/ashldi3.c
+++ b/arch/mips/lib/ashldi3.c
@@ -2,7 +2,7 @@
#include "libgcc.h"
-long long __ashldi3(long long u, word_type b)
+long long notrace __ashldi3(long long u, word_type b)
{
DWunion uu, w;
word_type bm;
diff --git a/arch/mips/lib/ashrdi3.c b/arch/mips/lib/ashrdi3.c
index c884a912b660..9fdf1a598428 100644
--- a/arch/mips/lib/ashrdi3.c
+++ b/arch/mips/lib/ashrdi3.c
@@ -2,7 +2,7 @@
#include "libgcc.h"
-long long __ashrdi3(long long u, word_type b)
+long long notrace __ashrdi3(long long u, word_type b)
{
DWunion uu, w;
word_type bm;
diff --git a/arch/mips/lib/bswapdi.c b/arch/mips/lib/bswapdi.c
index 77e5f9c1f005..e3e77aa52c95 100644
--- a/arch/mips/lib/bswapdi.c
+++ b/arch/mips/lib/bswapdi.c
@@ -1,6 +1,6 @@
#include <linux/module.h>
-unsigned long long __bswapdi2(unsigned long long u)
+unsigned long long notrace __bswapdi2(unsigned long long u)
{
return (((u) & 0xff00000000000000ull) >> 56) |
(((u) & 0x00ff000000000000ull) >> 40) |
diff --git a/arch/mips/lib/bswapsi.c b/arch/mips/lib/bswapsi.c
index 2b302ff121d2..530a8afe6fda 100644
--- a/arch/mips/lib/bswapsi.c
+++ b/arch/mips/lib/bswapsi.c
@@ -1,6 +1,6 @@
#include <linux/module.h>
-unsigned int __bswapsi2(unsigned int u)
+unsigned int notrace __bswapsi2(unsigned int u)
{
return (((u) & 0xff000000) >> 24) |
(((u) & 0x00ff0000) >> 8) |
diff --git a/arch/mips/lib/cmpdi2.c b/arch/mips/lib/cmpdi2.c
index 8c1306437ed1..06857da96993 100644
--- a/arch/mips/lib/cmpdi2.c
+++ b/arch/mips/lib/cmpdi2.c
@@ -2,7 +2,7 @@
#include "libgcc.h"
-word_type __cmpdi2(long long a, long long b)
+word_type notrace __cmpdi2(long long a, long long b)
{
const DWunion au = {
.ll = a
diff --git a/arch/mips/lib/lshrdi3.c b/arch/mips/lib/lshrdi3.c
index dcf8d6810b7c..364547449c65 100644
--- a/arch/mips/lib/lshrdi3.c
+++ b/arch/mips/lib/lshrdi3.c
@@ -2,7 +2,7 @@
#include "libgcc.h"
-long long __lshrdi3(long long u, word_type b)
+long long notrace __lshrdi3(long long u, word_type b)
{
DWunion uu, w;
word_type bm;
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 9245e1705e69..6c303a94a196 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -256,7 +256,7 @@
/*
* Macro to build the __copy_user common code
- * Arguements:
+ * Arguments:
* mode : LEGACY_MODE or EVA_MODE
* from : Source operand. USEROP or KERNELOP
* to : Destination operand. USEROP or KERNELOP
diff --git a/arch/mips/lib/ucmpdi2.c b/arch/mips/lib/ucmpdi2.c
index bb4cb2f828ea..bd599f58234c 100644
--- a/arch/mips/lib/ucmpdi2.c
+++ b/arch/mips/lib/ucmpdi2.c
@@ -2,7 +2,7 @@
#include "libgcc.h"
-word_type __ucmpdi2(unsigned long long a, unsigned long long b)
+word_type notrace __ucmpdi2(unsigned long long a, unsigned long long b)
{
const DWunion au = {.ll = a};
const DWunion bu = {.ll = b};
diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c
index a2631a52ca99..249039af66c4 100644
--- a/arch/mips/loongson64/loongson-3/hpet.c
+++ b/arch/mips/loongson64/loongson-3/hpet.c
@@ -212,7 +212,7 @@ static void hpet_setup(void)
/* set hpet base address */
smbus_write(SMBUS_PCI_REGB4, HPET_ADDR);
- /* enable decodeing of access to HPET MMIO*/
+	/* enable decoding of access to HPET MMIO */
smbus_enable(SMBUS_PCI_REG40, (1 << 28));
/* HPET irq enable */
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index d4ceacd4fa12..47074887e64c 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -8,7 +8,7 @@
#include "ieee754.h"
/*
- * Emulate the arbritrary instruction ir at xcp->cp0_epc. Required when
+ * Emulate the arbitrary instruction ir at xcp->cp0_epc. Required when
* we have to emulate the instruction in a COP1 branch delay slot. Do
* not change cp0_epc due to the instruction
*
@@ -88,7 +88,7 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
fr = (struct emuframe __user *)
((regs->regs[29] - sizeof(struct emuframe)) & ~0x7);
- /* Verify that the stack pointer is not competely insane */
+ /* Verify that the stack pointer is not completely insane */
if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe))))
return SIGBUS;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 274da90adf0d..4004b659ce50 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -2361,8 +2361,9 @@ static void print_htw_config(void)
(config & MIPS_PWFIELD_PTEI_MASK) >> MIPS_PWFIELD_PTEI_SHIFT);
config = read_c0_pwsize();
- pr_debug("PWSize (0x%0*lx): GDW: 0x%02lx UDW: 0x%02lx MDW: 0x%02lx PTW: 0x%02lx PTEW: 0x%02lx\n",
+ pr_debug("PWSize (0x%0*lx): PS: 0x%lx GDW: 0x%02lx UDW: 0x%02lx MDW: 0x%02lx PTW: 0x%02lx PTEW: 0x%02lx\n",
field, config,
+ (config & MIPS_PWSIZE_PS_MASK) >> MIPS_PWSIZE_PS_SHIFT,
(config & MIPS_PWSIZE_GDW_MASK) >> MIPS_PWSIZE_GDW_SHIFT,
(config & MIPS_PWSIZE_UDW_MASK) >> MIPS_PWSIZE_UDW_SHIFT,
(config & MIPS_PWSIZE_MDW_MASK) >> MIPS_PWSIZE_MDW_SHIFT,
@@ -2370,9 +2371,12 @@ static void print_htw_config(void)
(config & MIPS_PWSIZE_PTEW_MASK) >> MIPS_PWSIZE_PTEW_SHIFT);
pwctl = read_c0_pwctl();
- pr_debug("PWCtl (0x%x): PWEn: 0x%x DPH: 0x%x HugePg: 0x%x Psn: 0x%x\n",
+ pr_debug("PWCtl (0x%x): PWEn: 0x%x XK: 0x%x XS: 0x%x XU: 0x%x DPH: 0x%x HugePg: 0x%x Psn: 0x%x\n",
pwctl,
(pwctl & MIPS_PWCTL_PWEN_MASK) >> MIPS_PWCTL_PWEN_SHIFT,
+ (pwctl & MIPS_PWCTL_XK_MASK) >> MIPS_PWCTL_XK_SHIFT,
+ (pwctl & MIPS_PWCTL_XS_MASK) >> MIPS_PWCTL_XS_SHIFT,
+ (pwctl & MIPS_PWCTL_XU_MASK) >> MIPS_PWCTL_XU_SHIFT,
(pwctl & MIPS_PWCTL_DPH_MASK) >> MIPS_PWCTL_DPH_SHIFT,
(pwctl & MIPS_PWCTL_HUGEPG_MASK) >> MIPS_PWCTL_HUGEPG_SHIFT,
(pwctl & MIPS_PWCTL_PSN_MASK) >> MIPS_PWCTL_PSN_SHIFT);
@@ -2427,15 +2431,25 @@ static void config_htw_params(void)
if (CONFIG_PGTABLE_LEVELS >= 3)
pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;
- pwsize |= ilog2(sizeof(pte_t)/4) << MIPS_PWSIZE_PTEW_SHIFT;
+ /* Set pointer size to size of directory pointers */
+ if (config_enabled(CONFIG_64BIT))
+ pwsize |= MIPS_PWSIZE_PS_MASK;
+ /* PTEs may be multiple pointers long (e.g. with XPA) */
+ pwsize |= ((PTE_T_LOG2 - PGD_T_LOG2) << MIPS_PWSIZE_PTEW_SHIFT)
+ & MIPS_PWSIZE_PTEW_MASK;
write_c0_pwsize(pwsize);
/* Make sure everything is set before we enable the HTW */
back_to_back_c0_hazard();
- /* Enable HTW and disable the rest of the pwctl fields */
+ /*
+ * Enable HTW (and only for XUSeg on 64-bit), and disable the rest of
+ * the pwctl fields.
+ */
config = 1 << MIPS_PWCTL_PWEN_SHIFT;
+ if (config_enabled(CONFIG_64BIT))
+ config |= MIPS_PWCTL_XU_MASK;
write_c0_pwctl(config);
pr_info("Hardware Page Table Walker enabled\n");
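
[Editor's note on config_htw_params() above: the PTEW field now encodes how many directory-pointer-sized slots one PTE occupies, computed as the log2 ratio PTE_T_LOG2 - PGD_T_LOG2. A quick illustration of that arithmetic; the sizes shown are examples (32-bit XPA doubles the PTE), not exhaustive.]

#include <stdio.h>

/* Illustrative only: PTEW as computed in the hunk above, i.e. the
 * log2 ratio of PTE size to directory-pointer size. */
static unsigned int ptew(unsigned int pte_t_log2, unsigned int pgd_t_log2)
{
	return pte_t_log2 - pgd_t_log2;
}

int main(void)
{
	/* Plain 32-bit: 4-byte PTE, 4-byte pgd pointer -> PTEW = 0. */
	printf("plain 32-bit: PTEW = %u\n", ptew(2, 2));
	/* 32-bit with XPA: 8-byte PTE, 4-byte pgd pointer -> PTEW = 1. */
	printf("32-bit + XPA: PTEW = %u\n", ptew(3, 2));
	return 0;
}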
diff --git a/arch/mips/oprofile/op_impl.h b/arch/mips/oprofile/op_impl.h
index 7c2da27ece04..a4e758a39af4 100644
--- a/arch/mips/oprofile/op_impl.h
+++ b/arch/mips/oprofile/op_impl.h
@@ -24,7 +24,7 @@ struct op_counter_config {
unsigned long unit_mask;
};
-/* Per-architecture configury and hooks. */
+/* Per-architecture configuration and hooks. */
struct op_mips_model {
void (*reg_setup) (struct op_counter_config *);
void (*cpu_setup) (void *dummy);
diff --git a/arch/mips/pci/ops-bridge.c b/arch/mips/pci/ops-bridge.c
index 438319465cb4..57e1463fcd02 100644
--- a/arch/mips/pci/ops-bridge.c
+++ b/arch/mips/pci/ops-bridge.c
@@ -33,9 +33,9 @@ static u32 emulate_ioc3_cfg(int where, int size)
* The Bridge ASIC supports both type 0 and type 1 access. Type 1 is
* not really documented, so right now I can't write code which uses it.
* Therefore we use type 0 accesses for now even though they won't work
- * correcly for PCI-to-PCI bridges.
+ * correctly for PCI-to-PCI bridges.
*
- * The function is complicated by the ultimate brokeness of the IOC3 chip
+ * The function is complicated by the ultimate brokenness of the IOC3 chip
* which is used in SGI systems. The IOC3 can only handle 32-bit PCI
 * accesses and only decodes parts of its address space.
*/
diff --git a/arch/mips/pistachio/init.c b/arch/mips/pistachio/init.c
index 956c92eabfab..ab79828230ab 100644
--- a/arch/mips/pistachio/init.c
+++ b/arch/mips/pistachio/init.c
@@ -83,12 +83,16 @@ static void __init plat_setup_iocoherency(void)
}
}
-void __init plat_mem_setup(void)
+void __init *plat_get_fdt(void)
{
if (fw_arg0 != -2)
panic("Device-tree not present");
+ return (void *)fw_arg1;
+}
- __dt_setup_arch((void *)fw_arg1);
+void __init plat_mem_setup(void)
+{
+ __dt_setup_arch(plat_get_fdt());
plat_setup_iocoherency();
}
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index 88b82fe21ae6..d40edda0ca3b 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -188,6 +188,41 @@ static struct rt2880_pmx_func gpio_grp_mt7628[] = {
FUNC("gpio", 0, 11, 1),
};
+static struct rt2880_pmx_func p4led_kn_grp_mt7628[] = {
+ FUNC("jtag", 3, 30, 1),
+ FUNC("util", 2, 30, 1),
+ FUNC("gpio", 1, 30, 1),
+ FUNC("p4led_kn", 0, 30, 1),
+};
+
+static struct rt2880_pmx_func p3led_kn_grp_mt7628[] = {
+ FUNC("jtag", 3, 31, 1),
+ FUNC("util", 2, 31, 1),
+ FUNC("gpio", 1, 31, 1),
+ FUNC("p3led_kn", 0, 31, 1),
+};
+
+static struct rt2880_pmx_func p2led_kn_grp_mt7628[] = {
+ FUNC("jtag", 3, 32, 1),
+ FUNC("util", 2, 32, 1),
+ FUNC("gpio", 1, 32, 1),
+ FUNC("p2led_kn", 0, 32, 1),
+};
+
+static struct rt2880_pmx_func p1led_kn_grp_mt7628[] = {
+ FUNC("jtag", 3, 33, 1),
+ FUNC("util", 2, 33, 1),
+ FUNC("gpio", 1, 33, 1),
+ FUNC("p1led_kn", 0, 33, 1),
+};
+
+static struct rt2880_pmx_func p0led_kn_grp_mt7628[] = {
+ FUNC("jtag", 3, 34, 1),
+ FUNC("rsvd", 2, 34, 1),
+ FUNC("gpio", 1, 34, 1),
+ FUNC("p0led_kn", 0, 34, 1),
+};
+
static struct rt2880_pmx_func wled_kn_grp_mt7628[] = {
FUNC("rsvd", 3, 35, 1),
FUNC("rsvd", 2, 35, 1),
@@ -195,16 +230,61 @@ static struct rt2880_pmx_func wled_kn_grp_mt7628[] = {
FUNC("wled_kn", 0, 35, 1),
};
+static struct rt2880_pmx_func p4led_an_grp_mt7628[] = {
+ FUNC("jtag", 3, 39, 1),
+ FUNC("util", 2, 39, 1),
+ FUNC("gpio", 1, 39, 1),
+ FUNC("p4led_an", 0, 39, 1),
+};
+
+static struct rt2880_pmx_func p3led_an_grp_mt7628[] = {
+ FUNC("jtag", 3, 40, 1),
+ FUNC("util", 2, 40, 1),
+ FUNC("gpio", 1, 40, 1),
+ FUNC("p3led_an", 0, 40, 1),
+};
+
+static struct rt2880_pmx_func p2led_an_grp_mt7628[] = {
+ FUNC("jtag", 3, 41, 1),
+ FUNC("util", 2, 41, 1),
+ FUNC("gpio", 1, 41, 1),
+ FUNC("p2led_an", 0, 41, 1),
+};
+
+static struct rt2880_pmx_func p1led_an_grp_mt7628[] = {
+ FUNC("jtag", 3, 42, 1),
+ FUNC("util", 2, 42, 1),
+ FUNC("gpio", 1, 42, 1),
+ FUNC("p1led_an", 0, 42, 1),
+};
+
+static struct rt2880_pmx_func p0led_an_grp_mt7628[] = {
+ FUNC("jtag", 3, 43, 1),
+ FUNC("rsvd", 2, 43, 1),
+ FUNC("gpio", 1, 43, 1),
+ FUNC("p0led_an", 0, 43, 1),
+};
+
static struct rt2880_pmx_func wled_an_grp_mt7628[] = {
- FUNC("rsvd", 3, 35, 1),
- FUNC("rsvd", 2, 35, 1),
- FUNC("gpio", 1, 35, 1),
- FUNC("wled_an", 0, 35, 1),
+ FUNC("rsvd", 3, 44, 1),
+ FUNC("rsvd", 2, 44, 1),
+ FUNC("gpio", 1, 44, 1),
+ FUNC("wled_an", 0, 44, 1),
};
#define MT7628_GPIO_MODE_MASK 0x3
+#define MT7628_GPIO_MODE_P4LED_KN 58
+#define MT7628_GPIO_MODE_P3LED_KN 56
+#define MT7628_GPIO_MODE_P2LED_KN 54
+#define MT7628_GPIO_MODE_P1LED_KN 52
+#define MT7628_GPIO_MODE_P0LED_KN 50
#define MT7628_GPIO_MODE_WLED_KN 48
+#define MT7628_GPIO_MODE_P4LED_AN 42
+#define MT7628_GPIO_MODE_P3LED_AN 40
+#define MT7628_GPIO_MODE_P2LED_AN 38
+#define MT7628_GPIO_MODE_P1LED_AN 36
+#define MT7628_GPIO_MODE_P0LED_AN 34
#define MT7628_GPIO_MODE_WLED_AN 32
#define MT7628_GPIO_MODE_PWM1 30
#define MT7628_GPIO_MODE_PWM0 28
@@ -223,9 +303,9 @@ static struct rt2880_pmx_func wled_an_grp_mt7628[] = {
#define MT7628_GPIO_MODE_GPIO 0
static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
- GRP_G("pmw1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ GRP_G("pwm1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
1, MT7628_GPIO_MODE_PWM1),
- GRP_G("pmw0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ GRP_G("pwm0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
1, MT7628_GPIO_MODE_PWM0),
GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK,
1, MT7628_GPIO_MODE_UART2),
@@ -251,8 +331,28 @@ static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
1, MT7628_GPIO_MODE_GPIO),
GRP_G("wled_an", wled_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
1, MT7628_GPIO_MODE_WLED_AN),
+ GRP_G("p0led_an", p0led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P0LED_AN),
+ GRP_G("p1led_an", p1led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P1LED_AN),
+ GRP_G("p2led_an", p2led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P2LED_AN),
+ GRP_G("p3led_an", p3led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P3LED_AN),
+ GRP_G("p4led_an", p4led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P4LED_AN),
GRP_G("wled_kn", wled_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
1, MT7628_GPIO_MODE_WLED_KN),
+ GRP_G("p0led_kn", p0led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P0LED_KN),
+ GRP_G("p1led_kn", p1led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P1LED_KN),
+ GRP_G("p2led_kn", p2led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P2LED_KN),
+ GRP_G("p3led_kn", p3led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P3LED_KN),
+ GRP_G("p4led_kn", p4led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P4LED_KN),
{ 0 }
};
diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c
index 328ceb3c86ec..2abe016a0ffc 100644
--- a/arch/mips/sgi-ip27/ip27-hubio.c
+++ b/arch/mips/sgi-ip27/ip27-hubio.c
@@ -105,7 +105,7 @@ static void hub_setup_prb(nasid_t nasid, int prbnum, int credits)
prb.iprb_ff = force_fire_and_forget ? 1 : 0;
/*
- * Set the appropriate number of PIO cresits for the widget.
+ * Set the appropriate number of PIO credits for the widget.
*/
prb.iprb_xtalkctr = credits;
diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
index a2358b44420c..cfceaea92724 100644
--- a/arch/mips/sgi-ip27/ip27-nmi.c
+++ b/arch/mips/sgi-ip27/ip27-nmi.c
@@ -23,7 +23,7 @@ typedef unsigned long machreg_t;
static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;
/*
- * Lets see what else we need to do here. Set up sp, gp?
+ * Let's see what else we need to do here. Set up sp, gp?
*/
void nmi_dump(void)
{
diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c
index 20f582a2137a..4fe5678ba74d 100644
--- a/arch/mips/sgi-ip27/ip27-xtalk.c
+++ b/arch/mips/sgi-ip27/ip27-xtalk.c
@@ -67,7 +67,7 @@ static int xbow_probe(nasid_t nasid)
return -ENODEV;
/*
- * Okay, here's a xbow. Lets arbitrate and find
+ * Okay, here's a xbow. Let's arbitrate and find
* out if we should initialize it. Set enabled
* hub connected at highest or lowest widget as
* master.
diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
index a046b302623e..160b88000b4b 100644
--- a/arch/mips/sni/rm200.c
+++ b/arch/mips/sni/rm200.c
@@ -263,7 +263,7 @@ spurious_8259A_irq:
static int spurious_irq_mask;
/*
* At this point we can be sure the IRQ is spurious,
- * lets ACK and report it. [once per IRQ]
+ * let's ACK and report it. [once per IRQ]
*/
if (!(spurious_irq_mask & irqmask)) {
printk(KERN_DEBUG
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index b369509e9753..3b4538ec0102 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -5,10 +5,12 @@ obj-vdso-y := elf.o gettimeofday.o sigreturn.o
ccflags-vdso := \
$(filter -I%,$(KBUILD_CFLAGS)) \
$(filter -E%,$(KBUILD_CFLAGS)) \
+ $(filter -mmicromips,$(KBUILD_CFLAGS)) \
$(filter -march=%,$(KBUILD_CFLAGS))
cflags-vdso := $(ccflags-vdso) \
$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
- -O2 -g -fPIC -fno-common -fno-builtin -G 0 -DDISABLE_BRANCH_PROFILING \
+ -O2 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
+ -DDISABLE_BRANCH_PROFILING \
$(call cc-option, -fno-stack-protector)
aflags-vdso := $(ccflags-vdso) \
$(filter -I%,$(KBUILD_CFLAGS)) \
diff --git a/arch/mips/vr41xx/common/cmu.c b/arch/mips/vr41xx/common/cmu.c
index 05302bfdd114..89bac9885695 100644
--- a/arch/mips/vr41xx/common/cmu.c
+++ b/arch/mips/vr41xx/common/cmu.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2001-2002 MontaVista Software Inc.
* Author: Yoichi Yuasa <source@mvista.com>
- * Copuright (C) 2003-2005 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mn10300/boot/compressed/Makefile b/arch/mn10300/boot/compressed/Makefile
index 08a95e171685..5f56f9de1061 100644
--- a/arch/mn10300/boot/compressed/Makefile
+++ b/arch/mn10300/boot/compressed/Makefile
@@ -8,7 +8,6 @@ LDFLAGS_vmlinux := -Ttext $(CONFIG_KERNEL_ZIMAGE_BASE_ADDRESS) -e startup_32
$(obj)/vmlinux: $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o FORCE
$(call if_changed,ld)
- @:
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
diff --git a/arch/nios2/boot/compressed/Makefile b/arch/nios2/boot/compressed/Makefile
index 5b0fb346d888..d5921c9a9726 100644
--- a/arch/nios2/boot/compressed/Makefile
+++ b/arch/nios2/boot/compressed/Makefile
@@ -11,7 +11,6 @@ LDFLAGS_vmlinux := -T
$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS) $(obj)/piggy.o FORCE
$(call if_changed,ld)
- @:
LDFLAGS_piggy.o := -r --format binary --oformat elf32-littlenios2 -T
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 26d37e6f924e..0fc26714780a 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -47,7 +47,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
}
void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
unsigned long sp, next_sp;
unsigned long next_ip;
@@ -76,7 +76,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
next_ip = regs->nip;
lr = regs->link;
level = 0;
- perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
+ perf_callchain_store_context(entry, PERF_CONTEXT_KERNEL);
} else {
if (level == 0)
@@ -232,7 +232,7 @@ static int sane_signal_64_frame(unsigned long sp)
puc == (unsigned long) &sf->uc;
}
-static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
unsigned long sp, next_sp;
@@ -247,7 +247,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
sp = regs->gpr[1];
perf_callchain_store(entry, next_ip);
- while (entry->nr < sysctl_perf_event_max_stack) {
+ while (entry->nr < entry->max_stack) {
fp = (unsigned long __user *) sp;
if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
return;
@@ -274,7 +274,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
read_user_stack_64(&uregs[PT_R1], &sp))
return;
level = 0;
- perf_callchain_store(entry, PERF_CONTEXT_USER);
+ perf_callchain_store_context(entry, PERF_CONTEXT_USER);
perf_callchain_store(entry, next_ip);
continue;
}
@@ -319,7 +319,7 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
return rc;
}
-static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static inline void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
}
@@ -439,7 +439,7 @@ static unsigned int __user *signal_frame_32_regs(unsigned int sp,
return mctx->mc_gregs;
}
-static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
unsigned int sp, next_sp;
@@ -453,7 +453,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
sp = regs->gpr[1];
perf_callchain_store(entry, next_ip);
- while (entry->nr < sysctl_perf_event_max_stack) {
+ while (entry->nr < entry->max_stack) {
fp = (unsigned int __user *) (unsigned long) sp;
if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
return;
@@ -473,7 +473,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
read_user_stack_32(&uregs[PT_R1], &sp))
return;
level = 0;
- perf_callchain_store(entry, PERF_CONTEXT_USER);
+ perf_callchain_store_context(entry, PERF_CONTEXT_USER);
perf_callchain_store(entry, next_ip);
continue;
}
@@ -487,7 +487,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
}
void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
if (current_is_64bit())
perf_callchain_user_64(entry, regs);
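The struct swap running through all of these hunks is an API change: the bare perf_callchain_entry is replaced by a context object that carries the per-event stack limit, so the arch walkers bound themselves with entry->max_stack instead of the global sysctl_perf_event_max_stack. A minimal sketch of what the wrapper must contain, inferred from the usage in these hunks (the real field layout may differ):

    struct perf_callchain_entry_ctx {
            struct perf_callchain_entry *entry; /* buffer being filled */
            u32 max_stack;  /* per-event limit, replaces the sysctl read */
            u32 nr;         /* entries stored so far */
    };

perf_callchain_store() keeps appending real frames, while the new perf_callchain_store_context() records markers such as PERF_CONTEXT_KERNEL and PERF_CONTEXT_USER, evidently so the markers can be accounted separately from the frames they delimit.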
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 0d112b94d91d..ff75d70f7285 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -143,7 +143,7 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
*/
static long
axon_ram_direct_access(struct block_device *device, sector_t sector,
- void __pmem **kaddr, pfn_t *pfn)
+ void __pmem **kaddr, pfn_t *pfn, long size)
{
struct axon_ram_bank *bank = device->bd_disk->private_data;
loff_t offset = (loff_t)sector << AXON_RAM_SECTOR_SHIFT;
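The extra argument tracks a tree-wide change to the ->direct_access() block device operation: callers now say how many bytes they want, and the implementation reports how much is actually available at *kaddr (or a negative errno). The member this driver now implements, per the signature above:

    long (*direct_access)(struct block_device *bdev, sector_t sector,
                          void __pmem **kaddr, pfn_t *pfn, long size);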
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index fac6ac9790fa..1dd210347e12 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -22,7 +22,6 @@ OBJECTS += $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o
LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
$(obj)/vmlinux: $(obj)/vmlinux.lds $(OBJECTS)
$(call if_changed,ld)
- @:
sed-sizes := -e 's/^\([0-9a-fA-F]*\) . \(__bss_start\|_end\)$$/\#define SZ\2 0x\1/p'
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 0ac42cc4f880..d5ec71b2ed02 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -1,8 +1,7 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -13,19 +12,19 @@ CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_NUMA_BALANCING=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
-CONFIG_CGROUP_HUGETLB=y
-CONFIG_CGROUP_PERF=y
+CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_RT_GROUP_SCHED=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CHECKPOINT_RESTORE=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_SCHED_AUTOGROUP=y
@@ -55,7 +54,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_DEFAULT_DEADLINE=y
CONFIG_LIVEPATCH=y
-CONFIG_MARCH_Z196=y
CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=256
CONFIG_NUMA=y
@@ -65,6 +63,15 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
+CONFIG_ZPOOL=m
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PCI=y
CONFIG_PCI_DEBUG=y
CONFIG_HOTPLUG_PCI=y
@@ -452,6 +459,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
+# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
@@ -537,6 +545,8 @@ CONFIG_DLM=m
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
CONFIG_FRAME_WARN=1024
CONFIG_READABLE_ASM=y
CONFIG_UNUSED_SYMBOLS=y
@@ -555,13 +565,17 @@ CONFIG_SLUB_DEBUG_ON=y
CONFIG_SLUB_STATS=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_VM_VMACACHE=y
CONFIG_DEBUG_VM_RB=y
+CONFIG_DEBUG_VM_PGFLAGS=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
CONFIG_DEBUG_PER_CPU_MAPS=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
+CONFIG_WQ_WATCHDOG=y
CONFIG_PANIC_ON_OOPS=y
+CONFIG_DEBUG_TIMEKEEPING=y
CONFIG_TIMER_STATS=y
CONFIG_DEBUG_RT_MUTEXES=y
CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
@@ -596,6 +610,8 @@ CONFIG_FTRACE_SYSCALLS=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
CONFIG_KPROBES_SANITY_TEST=y
@@ -607,7 +623,6 @@ CONFIG_TEST_STRING_HELPERS=y
CONFIG_TEST_KSTRTOX=y
CONFIG_DMA_API_DEBUG=y
CONFIG_TEST_BPF=m
-# CONFIG_STRICT_DEVMEM is not set
CONFIG_S390_PTDUMP=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
@@ -651,7 +666,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=y
CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
@@ -664,7 +678,7 @@ CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index a31dcd56f7c0..f46a35115d2d 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -1,8 +1,7 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -13,17 +12,17 @@ CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_NUMA_BALANCING=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CHECKPOINT_RESTORE=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_SCHED_AUTOGROUP=y
@@ -53,7 +52,6 @@ CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_DEFAULT_DEADLINE=y
-CONFIG_MARCH_Z196=y
CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=256
CONFIG_NUMA=y
@@ -62,6 +60,14 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_ZSWAP=y
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PCI=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
@@ -530,6 +536,8 @@ CONFIG_NLS_UTF8=m
CONFIG_DLM=m
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
CONFIG_UNUSED_SYMBOLS=y
@@ -547,13 +555,13 @@ CONFIG_LATENCYTOP=y
CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
CONFIG_BLK_DEV_IO_TRACE=y
# CONFIG_KPROBE_EVENT is not set
+CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_RBTREE_TEST=m
CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
-# CONFIG_STRICT_DEVMEM is not set
CONFIG_S390_PTDUMP=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
@@ -597,8 +605,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=y
-CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
@@ -610,7 +616,7 @@ CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 7b73bf353345..ba0f2a58b8cd 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -1,8 +1,7 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -14,17 +13,17 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_NUMA_BALANCING=y
# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CHECKPOINT_RESTORE=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_SCHED_AUTOGROUP=y
@@ -53,7 +52,6 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_DEFAULT_DEADLINE=y
CONFIG_LIVEPATCH=y
-CONFIG_MARCH_Z196=y
CONFIG_TUNE_ZEC12=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
@@ -62,6 +60,14 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_ZSWAP=y
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PCI=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
@@ -447,6 +453,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
+# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
@@ -530,6 +537,8 @@ CONFIG_NLS_UTF8=m
CONFIG_DLM=m
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FRAME_WARN=1024
CONFIG_UNUSED_SYMBOLS=y
@@ -546,11 +555,12 @@ CONFIG_FTRACE_SYSCALLS=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
-# CONFIG_STRICT_DEVMEM is not set
CONFIG_S390_PTDUMP=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
@@ -594,8 +604,6 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_ZLIB=y
-CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
@@ -607,7 +615,7 @@ CONFIG_CRYPTO_SHA512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_ASYMMETRIC_KEY_TYPE=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index 1719843a55a2..4366a3e3e754 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -1,5 +1,5 @@
# CONFIG_SWAP is not set
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -7,7 +7,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_DEFAULT_DEADLINE=y
-CONFIG_MARCH_Z196=y
CONFIG_TUNE_ZEC12=y
# CONFIG_COMPAT is not set
CONFIG_NR_CPUS=2
@@ -64,7 +63,6 @@ CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_RCU_CPU_STALL_TIMEOUT=60
# CONFIG_FTRACE is not set
-# CONFIG_STRICT_DEVMEM is not set
# CONFIG_PFAULT is not set
# CONFIG_S390_HYPFS_FS is not set
# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index e24f2af4c73b..3f571ea89509 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,8 +1,8 @@
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
+CONFIG_USELIB=y
CONFIG_AUDIT=y
-CONFIG_NO_HZ=y
+CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_TASKSTATS=y
CONFIG_TASK_DELAY_ACCT=y
@@ -11,19 +11,19 @@ CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_PIDS=y
-CONFIG_CGROUP_DEVICE=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
-CONFIG_CGROUP_HUGETLB=y
-CONFIG_CGROUP_PERF=y
+CONFIG_BLK_CGROUP=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
-CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_CHECKPOINT_RESTORE=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_BLK_DEV_INITRD=y
@@ -44,7 +44,6 @@ CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_DEFAULT_DEADLINE=y
CONFIG_LIVEPATCH=y
-CONFIG_MARCH_Z196=y
CONFIG_NR_CPUS=256
CONFIG_NUMA=y
CONFIG_HZ_100=y
@@ -52,6 +51,14 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CLEANCACHE=y
+CONFIG_FRONTSWAP=y
+CONFIG_CMA=y
+CONFIG_ZSWAP=y
+CONFIG_ZBUD=m
+CONFIG_ZSMALLOC=m
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_CRASH_DUMP=y
CONFIG_BINFMT_MISC=m
CONFIG_HIBERNATION=y
@@ -61,7 +68,6 @@ CONFIG_UNIX=y
CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
-# CONFIG_INET_LRO is not set
CONFIG_L2TP=m
CONFIG_L2TP_DEBUGFS=m
CONFIG_VLAN_8021Q=y
@@ -144,6 +150,9 @@ CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_HUGETLBFS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
CONFIG_UNUSED_SYMBOLS=y
CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
@@ -158,20 +167,21 @@ CONFIG_LOCK_STAT=y
CONFIG_DEBUG_LOCKDEP=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_PI_LIST=y
CONFIG_DEBUG_SG=y
CONFIG_DEBUG_NOTIFIERS=y
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_RCU_TRACE=y
CONFIG_LATENCYTOP=y
CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
-CONFIG_TRACER_SNAPSHOT=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_KPROBES_SANITY_TEST=y
-# CONFIG_STRICT_DEVMEM is not set
CONFIG_S390_PTDUMP=y
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_AUTHENC=m
@@ -212,8 +222,6 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
-CONFIG_CRYPTO_ZLIB=m
-CONFIG_CRYPTO_LZO=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_ANSI_CPRNG=m
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index c3e4099b60a5..87035fa58bbe 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -224,13 +224,13 @@ arch_initcall(service_level_perf_register);
static int __perf_callchain_kernel(void *data, unsigned long address)
{
- struct perf_callchain_entry *entry = data;
+ struct perf_callchain_entry_ctx *entry = data;
perf_callchain_store(entry, address);
return 0;
}
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
if (user_mode(regs))
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 7a3144017301..19288c1b36d3 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -250,6 +250,7 @@ static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
report_user_fault(regs, SIGSEGV, 1);
si.si_signo = SIGSEGV;
+ si.si_errno = 0;
si.si_code = si_code;
si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK);
force_sig_info(SIGSEGV, &si, current);
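si here is an on-stack siginfo, and the signal frame copied to userspace includes every field, so an uninitialized si_errno leaks kernel stack contents. Zeroing the one missing field is the minimal fix; a broader-brush alternative (illustrative, not what the patch does) is to clear the whole struct before filling it in:

    siginfo_t si;

    memset(&si, 0, sizeof(si)); /* no uninitialized bytes reach userspace */
    si.si_signo = SIGSEGV;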
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
index f010c93a88b1..fda605dbc1b4 100644
--- a/arch/s390/net/bpf_jit.h
+++ b/arch/s390/net/bpf_jit.h
@@ -37,7 +37,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
* | | |
* +---------------+ |
* | 8 byte skbp | |
- * R15+170 -> +---------------+ |
+ * R15+176 -> +---------------+ |
* | 8 byte hlen | |
* R15+168 -> +---------------+ |
* | 4 byte align | |
@@ -58,7 +58,7 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
#define STK_OFF (STK_SPACE - STK_160_UNUSED)
#define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */
#define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */
-#define STK_OFF_SKBP 170 /* Offset of SKB pointer on stack */
+#define STK_OFF_SKBP 176 /* Offset of SKB pointer on stack */
#define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */
#define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */
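The move from 170 to 176 follows directly from the diagram: the 8-byte hlen slot at R15+168 spans up through R15+175, so a pointer slot starting at 170 both overlapped it and sat on a non-8-byte-aligned address (170 % 8 == 2); 176 is the first aligned slot above hlen. A compile-time guard one might add (hypothetical, not part of the patch):

    BUILD_BUG_ON(STK_OFF_SKBP % 8); /* 8-byte STG target must be aligned */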
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 9133b0ec000b..bee281f3163d 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -45,7 +45,7 @@ struct bpf_jit {
int labels[1]; /* Labels for local jumps */
};
-#define BPF_SIZE_MAX 0x7ffff /* Max size for program (20 bit signed displ) */
+#define BPF_SIZE_MAX 0xffff /* Max size for program (16 bit branches) */
#define SEEN_SKB 1 /* skb access */
#define SEEN_MEM 2 /* use mem[] for temporary storage */
@@ -450,7 +450,7 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
emit_load_skb_data_hlen(jit);
if (jit->seen & SEEN_SKB_CHANGE)
/* stg %b1,ST_OFF_SKBP(%r0,%r15) */
- EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0, REG_15,
+ EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_1, REG_0, REG_15,
STK_OFF_SKBP);
}
diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index 6df826ee7316..c4c47ea9fa94 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -55,7 +55,6 @@ $(addprefix $(obj)/,$(lib1funcs-y)): $(obj)/%: $(lib1funcs-dir)/% FORCE
$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(lib1funcs-obj) FORCE
$(call if_changed,ld)
- @:
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
diff --git a/arch/sh/boot/romimage/Makefile b/arch/sh/boot/romimage/Makefile
index 2216ee57f251..43c41191de5d 100644
--- a/arch/sh/boot/romimage/Makefile
+++ b/arch/sh/boot/romimage/Makefile
@@ -17,7 +17,6 @@ LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(load-y) -e romstart \
$(obj)/vmlinux: $(obj)/head.o $(obj-y) $(obj)/piggy.o FORCE
$(call if_changed,ld)
- @:
OBJCOPYFLAGS += -j .empty_zero_page
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c
index cc80b614b5fa..fa2c0cd23eaa 100644
--- a/arch/sh/kernel/perf_callchain.c
+++ b/arch/sh/kernel/perf_callchain.c
@@ -21,7 +21,7 @@ static int callchain_stack(void *data, char *name)
static void callchain_address(void *data, unsigned long addr, int reliable)
{
- struct perf_callchain_entry *entry = data;
+ struct perf_callchain_entry_ctx *entry = data;
if (reliable)
perf_callchain_store(entry, addr);
@@ -33,7 +33,7 @@ static const struct stacktrace_ops callchain_ops = {
};
void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
perf_callchain_store(entry, regs->pc);
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index a4b8b5aed21c..710f3278d448 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1711,7 +1711,7 @@ static int __init init_hw_perf_events(void)
}
pure_initcall(init_hw_perf_events);
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
unsigned long ksp, fp;
@@ -1756,7 +1756,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
}
}
#endif
- } while (entry->nr < sysctl_perf_event_max_stack);
+ } while (entry->nr < entry->max_stack);
}
static inline int
@@ -1769,7 +1769,7 @@ valid_user_frame(const void __user *fp, unsigned long size)
return (__range_not_ok(fp, size, TASK_SIZE) == 0);
}
-static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+static void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
unsigned long ufp;
@@ -1790,10 +1790,10 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
pc = sf.callers_pc;
ufp = (unsigned long)sf.fp + STACK_BIAS;
perf_callchain_store(entry, pc);
- } while (entry->nr < sysctl_perf_event_max_stack);
+ } while (entry->nr < entry->max_stack);
}
-static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+static void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
unsigned long ufp;
@@ -1822,11 +1822,11 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
ufp = (unsigned long)sf.fp;
}
perf_callchain_store(entry, pc);
- } while (entry->nr < sysctl_perf_event_max_stack);
+ } while (entry->nr < entry->max_stack);
}
void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
u64 saved_fault_address = current_thread_info()->fault_address;
u8 saved_fault_code = get_thread_fault_code();
diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c
index 8767060d70fb..6394c1ccb68e 100644
--- a/arch/tile/kernel/perf_event.c
+++ b/arch/tile/kernel/perf_event.c
@@ -941,7 +941,7 @@ arch_initcall(init_hw_perf_events);
/*
* Tile specific backtracing code for perf_events.
*/
-static inline void perf_callchain(struct perf_callchain_entry *entry,
+static inline void perf_callchain(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
struct KBacktraceIterator kbt;
@@ -992,13 +992,13 @@ static inline void perf_callchain(struct perf_callchain_entry *entry,
}
}
-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
perf_callchain(entry, regs);
}
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
perf_callchain(entry, regs);
diff --git a/arch/um/include/shared/registers.h b/arch/um/include/shared/registers.h
index f5b76355ad71..a74449b5b0e3 100644
--- a/arch/um/include/shared/registers.h
+++ b/arch/um/include/shared/registers.h
@@ -9,6 +9,8 @@
#include <sysdep/ptrace.h>
#include <sysdep/archsetjmp.h>
+extern int save_i387_registers(int pid, unsigned long *fp_regs);
+extern int restore_i387_registers(int pid, unsigned long *fp_regs);
extern int save_fp_registers(int pid, unsigned long *fp_regs);
extern int restore_fp_registers(int pid, unsigned long *fp_regs);
extern int save_fpx_registers(int pid, unsigned long *fp_regs);
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 0b04711f1f18..034b42c7ab40 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -398,6 +398,6 @@ int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
int cpu = current_thread_info()->cpu;
- return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
+ return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
}
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index 7801666514ed..8acaf4e384c0 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -29,23 +29,29 @@ void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
{
- struct uml_pt_regs r;
+ struct uml_pt_regs *r;
int save_errno = errno;
- r.is_user = 0;
+ r = malloc(sizeof(struct uml_pt_regs));
+ if (!r)
+ panic("out of memory");
+
+ r->is_user = 0;
if (sig == SIGSEGV) {
/* For segfaults, we want the data from the sigcontext. */
- get_regs_from_mc(&r, mc);
- GET_FAULTINFO_FROM_MC(r.faultinfo, mc);
+ get_regs_from_mc(r, mc);
+ GET_FAULTINFO_FROM_MC(r->faultinfo, mc);
}
/* enable signals if sig isn't IRQ signal */
if ((sig != SIGIO) && (sig != SIGWINCH) && (sig != SIGALRM))
unblock_signals();
- (*sig_info[sig])(sig, si, &r);
+ (*sig_info[sig])(sig, si, r);
errno = save_errno;
+
+ free(r);
}
/*
@@ -83,11 +89,17 @@ void sig_handler(int sig, struct siginfo *si, mcontext_t *mc)
static void timer_real_alarm_handler(mcontext_t *mc)
{
- struct uml_pt_regs regs;
+ struct uml_pt_regs *regs;
+
+ regs = malloc(sizeof(struct uml_pt_regs));
+ if (!regs)
+ panic("out of memory");
if (mc != NULL)
- get_regs_from_mc(&regs, mc);
- timer_handler(SIGALRM, NULL, &regs);
+ get_regs_from_mc(regs, mc);
+ timer_handler(SIGALRM, NULL, regs);
+
+ free(regs);
}
void timer_alarm_handler(int sig, struct siginfo *unused_si, mcontext_t *mc)
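Both handlers above trade an on-stack struct uml_pt_regs for a heap allocation. The likely motive (an inference; the hunk does not say): signal handlers can run on a small stack, and the register snapshot grows considerably once extended FPU state is carried, so a large automatic variable risks overflow. The shape of the conversion:

    struct uml_pt_regs *r = malloc(sizeof(*r)); /* was: struct uml_pt_regs r; */

    if (!r)
            panic("out of memory");
    /* ... handler body passes r where it used to pass &r ... */
    free(r);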
diff --git a/arch/unicore32/boot/Makefile b/arch/unicore32/boot/Makefile
index ec7fb70b412b..828855007b29 100644
--- a/arch/unicore32/boot/Makefile
+++ b/arch/unicore32/boot/Makefile
@@ -31,7 +31,7 @@ $(obj)/uImage: $(obj)/zImage FORCE
$(call if_changed,uimage)
@echo ' Image $@ is ready'
-PHONY += initrd FORCE
+PHONY += initrd
initrd:
@test "$(INITRD)" != "" || \
(echo You must specify INITRD; exit -1)
diff --git a/arch/unicore32/boot/compressed/Makefile b/arch/unicore32/boot/compressed/Makefile
index 96494fb646f7..9aecdd3ddc48 100644
--- a/arch/unicore32/boot/compressed/Makefile
+++ b/arch/unicore32/boot/compressed/Makefile
@@ -54,7 +54,6 @@ LDFLAGS_vmlinux += -T
$(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/head.o $(obj)/piggy.o \
$(obj)/misc.o FORCE
$(call if_changed,ld)
- @:
# We now have a PIC decompressor implementation. Decompressors running
# from RAM should not define ZTEXTADDR. Decompressors running directly
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index cfdd8c3f8af2..f1356889204e 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -87,7 +87,6 @@ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
$(obj)/vmlinux: $(vmlinux-objs-y) FORCE
$(call if_changed,ld)
- @:
OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
$(obj)/vmlinux.bin: vmlinux FORCE
diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
index 98df1fa8825c..027aec4a74df 100644
--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
@@ -8,16 +8,15 @@
#include <linux/linkage.h>
#include "calling.h"
#include <asm/asm.h>
-#include <asm/frame.h>
/* rdi: arg1 ... normal C conventions. rax is saved/restored. */
.macro THUNK name, func, put_ret_addr_in_rdi=0
.globl \name
.type \name, @function
\name:
- FRAME_BEGIN
+ pushq %rbp
+ movq %rsp, %rbp
- /* this one pushes 9 elems, the next one would be %rIP */
pushq %rdi
pushq %rsi
pushq %rdx
@@ -29,8 +28,8 @@
pushq %r11
.if \put_ret_addr_in_rdi
- /* 9*8(%rsp) is return addr on stack */
- movq 9*8(%rsp), %rdi
+ /* 8(%rbp) is return addr on stack */
+ movq 8(%rbp), %rdi
.endif
call \func
@@ -65,7 +64,7 @@ restore:
popq %rdx
popq %rsi
popq %rdi
- FRAME_END
+ popq %rbp
ret
_ASM_NOKPROBE(restore)
#endif
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index 6874da5f67fc..253b72eaade6 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -193,10 +193,10 @@ vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%)
$(MODLIB)/vdso: FORCE
@mkdir -p $(MODLIB)/vdso
-$(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso FORCE
+$(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso
$(call cmd,vdso_install)
PHONY += vdso_install $(vdso_img_insttargets)
-vdso_install: $(vdso_img_insttargets) FORCE
+vdso_install: $(vdso_img_insttargets)
clean-files := vdso32.so vdso32.so.dbg vdso64* vdso-image-*.c vdsox32.so*
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 73a75aa5a66d..33787ee817f0 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2202,7 +2202,7 @@ static int backtrace_stack(void *data, char *name)
static int backtrace_address(void *data, unsigned long addr, int reliable)
{
- struct perf_callchain_entry *entry = data;
+ struct perf_callchain_entry_ctx *entry = data;
return perf_callchain_store(entry, addr);
}
@@ -2214,7 +2214,7 @@ static const struct stacktrace_ops backtrace_ops = {
};
void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* TODO: We don't support guest os callchain now */
@@ -2268,7 +2268,7 @@ static unsigned long get_segment_base(unsigned int segment)
#include <asm/compat.h>
static inline int
-perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
{
/* 32-bit process in 64-bit kernel. */
unsigned long ss_base, cs_base;
@@ -2283,7 +2283,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
fp = compat_ptr(ss_base + regs->bp);
pagefault_disable();
- while (entry->nr < sysctl_perf_event_max_stack) {
+ while (entry->nr < entry->max_stack) {
unsigned long bytes;
frame.next_frame = 0;
frame.return_address = 0;
@@ -2309,14 +2309,14 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
}
#else
static inline int
-perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
{
return 0;
}
#endif
void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
struct stack_frame frame;
const void __user *fp;
@@ -2343,7 +2343,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
return;
pagefault_disable();
- while (entry->nr < sysctl_perf_event_max_stack) {
+ while (entry->nr < entry->max_stack) {
unsigned long bytes;
frame.next_frame = NULL;
frame.return_address = 0;
diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c
index 0a5ede187d9c..eb0533558c2b 100644
--- a/arch/x86/events/intel/p4.c
+++ b/arch/x86/events/intel/p4.c
@@ -826,7 +826,7 @@ static int p4_hw_config(struct perf_event *event)
* Clear bits we reserve to be managed by kernel itself
* and never allowed from a user space
*/
- event->attr.config &= P4_CONFIG_MASK;
+ event->attr.config &= P4_CONFIG_MASK;
rc = p4_validate_raw_event(event);
if (rc)
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 16c178916412..fce74062d981 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -891,7 +891,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
return -ENODEV;
pkg = topology_phys_to_logical_pkg(phys_id);
- if (WARN_ON_ONCE(pkg < 0))
+ if (pkg < 0)
return -EINVAL;
if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index f5e737ff0022..cb26f18d43af 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -116,12 +116,12 @@ static struct linux_binfmt aout_format = {
.min_coredump = PAGE_SIZE
};
-static unsigned long set_brk(unsigned long start, unsigned long end)
+static int set_brk(unsigned long start, unsigned long end)
{
start = PAGE_ALIGN(start);
end = PAGE_ALIGN(end);
if (end <= start)
- return start;
+ return 0;
return vm_brk(start, end - start);
}
@@ -321,7 +321,7 @@ static int load_aout_binary(struct linux_binprm *bprm)
error = vm_brk(text_addr & PAGE_MASK, map_size);
- if (error != (text_addr & PAGE_MASK))
+ if (error)
return error;
error = read_code(bprm->file, text_addr, 32,
@@ -350,7 +350,7 @@ static int load_aout_binary(struct linux_binprm *bprm)
if (!bprm->file->f_op->mmap || (fd_offset & ~PAGE_MASK) != 0) {
error = vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
- if (IS_ERR_VALUE(error))
+ if (error)
return error;
read_code(bprm->file, N_TXTADDR(ex), fd_offset,
@@ -378,7 +378,7 @@ static int load_aout_binary(struct linux_binprm *bprm)
beyond_if:
error = set_brk(current->mm->start_brk, current->mm->brk);
- if (IS_ERR_VALUE(error))
+ if (error)
return error;
set_binfmt(&aout_format);
@@ -441,7 +441,7 @@ static int load_aout_library(struct file *file)
}
#endif
retval = vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
- if (IS_ERR_VALUE(retval))
+ if (retval)
goto out;
read_code(file, start_addr, N_TXTOFF(ex),
@@ -461,9 +461,8 @@ static int load_aout_library(struct file *file)
len = PAGE_ALIGN(ex.a_text + ex.a_data);
bss = ex.a_text + ex.a_data + ex.a_bss;
if (bss > len) {
- error = vm_brk(start_addr + len, bss - len);
- retval = error;
- if (error != start_addr + len)
+ retval = vm_brk(start_addr + len, bss - len);
+ if (retval)
goto out;
}
retval = 0;
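Every hunk in this file adapts to the same contract change: vm_brk() now returns 0 on success or a negative errno instead of the mapped address, so the old IS_ERR_VALUE() and address-comparison checks reduce to a plain truth test:

    int error = vm_brk(start, len); /* 0 on success, -errno on failure */

    if (error)
            return error;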
diff --git a/arch/x86/include/asm/bugs.h b/arch/x86/include/asm/bugs.h
index 08abf639075f..5490bbaf71d5 100644
--- a/arch/x86/include/asm/bugs.h
+++ b/arch/x86/include/asm/bugs.h
@@ -1,8 +1,16 @@
#ifndef _ASM_X86_BUGS_H
#define _ASM_X86_BUGS_H
+#include <asm/processor.h>
+
extern void check_bugs(void);
+#if defined(CONFIG_CPU_SUP_INTEL)
+void check_mpx_erratum(struct cpuinfo_x86 *c);
+#else
+static inline void check_mpx_erratum(struct cpuinfo_x86 *c) {}
+#endif
+
#if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32)
int ppro_with_ram_bug(void);
#else
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 25ebb54905e0..483fb547e3c0 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -64,9 +64,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
(((bit)>>5)==11 && (1UL<<((bit)&31) & REQUIRED_MASK11)) || \
(((bit)>>5)==12 && (1UL<<((bit)&31) & REQUIRED_MASK12)) || \
(((bit)>>5)==13 && (1UL<<((bit)&31) & REQUIRED_MASK13)) || \
- (((bit)>>5)==13 && (1UL<<((bit)&31) & REQUIRED_MASK14)) || \
- (((bit)>>5)==13 && (1UL<<((bit)&31) & REQUIRED_MASK15)) || \
- (((bit)>>5)==14 && (1UL<<((bit)&31) & REQUIRED_MASK16)) )
+ (((bit)>>5)==14 && (1UL<<((bit)&31) & REQUIRED_MASK14)) || \
+ (((bit)>>5)==15 && (1UL<<((bit)&31) & REQUIRED_MASK15)) || \
+ (((bit)>>5)==16 && (1UL<<((bit)&31) & REQUIRED_MASK16)) )
#define DISABLED_MASK_BIT_SET(bit) \
( (((bit)>>5)==0 && (1UL<<((bit)&31) & DISABLED_MASK0 )) || \
@@ -83,9 +83,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
(((bit)>>5)==11 && (1UL<<((bit)&31) & DISABLED_MASK11)) || \
(((bit)>>5)==12 && (1UL<<((bit)&31) & DISABLED_MASK12)) || \
(((bit)>>5)==13 && (1UL<<((bit)&31) & DISABLED_MASK13)) || \
- (((bit)>>5)==13 && (1UL<<((bit)&31) & DISABLED_MASK14)) || \
- (((bit)>>5)==13 && (1UL<<((bit)&31) & DISABLED_MASK15)) || \
- (((bit)>>5)==14 && (1UL<<((bit)&31) & DISABLED_MASK16)) )
+ (((bit)>>5)==14 && (1UL<<((bit)&31) & DISABLED_MASK14)) || \
+ (((bit)>>5)==15 && (1UL<<((bit)&31) & DISABLED_MASK15)) || \
+ (((bit)>>5)==16 && (1UL<<((bit)&31) & DISABLED_MASK16)) )
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
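The fixed lines encode how the feature masks are indexed: bits live in 32-bit words, bit >> 5 selects the word and bit & 31 the position within it. The old macro tested word 13 three times, so a required or disabled bit in words 14-16 could never match. For instance (X86_FEATURE_PKU is just an example bit, defined as word 16, bit 3):

    unsigned int word = X86_FEATURE_PKU >> 5;  /* 16: picks REQUIRED_MASK16 */
    unsigned int pos  = X86_FEATURE_PKU & 31;  /*  3: bit inside that word */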
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index 39343be7d4f4..911e9358ceb1 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -29,11 +29,11 @@
#endif /* CONFIG_X86_64 */
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
-# define DISABLE_PKU (1<<(X86_FEATURE_PKU))
-# define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE))
-#else
# define DISABLE_PKU 0
# define DISABLE_OSPKE 0
+#else
+# define DISABLE_PKU (1<<(X86_FEATURE_PKU & 31))
+# define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE & 31))
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
/*
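Two fixes share the hunk above: the #ifdef sense was inverted (the DISABLE_* masks name features to force off, so they must be zero when CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS is enabled and populated when it is not), and the new & 31 keeps the shift within the feature's 32-bit word, matching the word-indexed scheme in cpufeature.h above:

    # define DISABLE_PKU (1<<(X86_FEATURE_PKU & 31)) /* bit 3 of word 16 */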
diff --git a/arch/x86/include/asm/intel_telemetry.h b/arch/x86/include/asm/intel_telemetry.h
index ed65fe701de5..85029b58d0cd 100644
--- a/arch/x86/include/asm/intel_telemetry.h
+++ b/arch/x86/include/asm/intel_telemetry.h
@@ -99,7 +99,7 @@ struct telemetry_core_ops {
int (*reset_events)(void);
};
-int telemetry_set_pltdata(struct telemetry_core_ops *ops,
+int telemetry_set_pltdata(const struct telemetry_core_ops *ops,
struct telemetry_plt_config *pltconfig);
int telemetry_clear_pltdata(void);
diff --git a/arch/x86/include/asm/pmc_core.h b/arch/x86/include/asm/pmc_core.h
new file mode 100644
index 000000000000..d4855f11136d
--- /dev/null
+++ b/arch/x86/include/asm/pmc_core.h
@@ -0,0 +1,27 @@
+/*
+ * Intel Core SoC Power Management Controller Header File
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
+ * Vishwanath Somayaji <vishwanath.somayaji@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _ASM_PMC_CORE_H
+#define _ASM_PMC_CORE_H
+
+/* API to read SLP_S0_RESIDENCY counter */
+int intel_pmc_slp_s0_counter_read(u32 *data);
+
+#endif /* _ASM_PMC_CORE_H */
diff --git a/arch/x86/include/uapi/asm/svm.h b/arch/x86/include/uapi/asm/svm.h
index b9e9bb2c6089..3725e145aa58 100644
--- a/arch/x86/include/uapi/asm/svm.h
+++ b/arch/x86/include/uapi/asm/svm.h
@@ -2,10 +2,12 @@
#define _UAPI__SVM_H
#define SVM_EXIT_READ_CR0 0x000
+#define SVM_EXIT_READ_CR2 0x002
#define SVM_EXIT_READ_CR3 0x003
#define SVM_EXIT_READ_CR4 0x004
#define SVM_EXIT_READ_CR8 0x008
#define SVM_EXIT_WRITE_CR0 0x010
+#define SVM_EXIT_WRITE_CR2 0x012
#define SVM_EXIT_WRITE_CR3 0x013
#define SVM_EXIT_WRITE_CR4 0x014
#define SVM_EXIT_WRITE_CR8 0x018
@@ -80,10 +82,12 @@
#define SVM_EXIT_REASONS \
{ SVM_EXIT_READ_CR0, "read_cr0" }, \
+ { SVM_EXIT_READ_CR2, "read_cr2" }, \
{ SVM_EXIT_READ_CR3, "read_cr3" }, \
{ SVM_EXIT_READ_CR4, "read_cr4" }, \
{ SVM_EXIT_READ_CR8, "read_cr8" }, \
{ SVM_EXIT_WRITE_CR0, "write_cr0" }, \
+ { SVM_EXIT_WRITE_CR2, "write_cr2" }, \
{ SVM_EXIT_WRITE_CR3, "write_cr3" }, \
{ SVM_EXIT_WRITE_CR4, "write_cr4" }, \
{ SVM_EXIT_WRITE_CR8, "write_cr8" }, \
@@ -91,26 +95,57 @@
{ SVM_EXIT_READ_DR1, "read_dr1" }, \
{ SVM_EXIT_READ_DR2, "read_dr2" }, \
{ SVM_EXIT_READ_DR3, "read_dr3" }, \
+ { SVM_EXIT_READ_DR4, "read_dr4" }, \
+ { SVM_EXIT_READ_DR5, "read_dr5" }, \
+ { SVM_EXIT_READ_DR6, "read_dr6" }, \
+ { SVM_EXIT_READ_DR7, "read_dr7" }, \
{ SVM_EXIT_WRITE_DR0, "write_dr0" }, \
{ SVM_EXIT_WRITE_DR1, "write_dr1" }, \
{ SVM_EXIT_WRITE_DR2, "write_dr2" }, \
{ SVM_EXIT_WRITE_DR3, "write_dr3" }, \
+ { SVM_EXIT_WRITE_DR4, "write_dr4" }, \
{ SVM_EXIT_WRITE_DR5, "write_dr5" }, \
+ { SVM_EXIT_WRITE_DR6, "write_dr6" }, \
{ SVM_EXIT_WRITE_DR7, "write_dr7" }, \
+ { SVM_EXIT_EXCP_BASE + DE_VECTOR, "DE excp" }, \
{ SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" }, \
{ SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" }, \
+ { SVM_EXIT_EXCP_BASE + OF_VECTOR, "OF excp" }, \
+ { SVM_EXIT_EXCP_BASE + BR_VECTOR, "BR excp" }, \
{ SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" }, \
- { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" }, \
{ SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" }, \
+ { SVM_EXIT_EXCP_BASE + DF_VECTOR, "DF excp" }, \
+ { SVM_EXIT_EXCP_BASE + TS_VECTOR, "TS excp" }, \
+ { SVM_EXIT_EXCP_BASE + NP_VECTOR, "NP excp" }, \
+ { SVM_EXIT_EXCP_BASE + SS_VECTOR, "SS excp" }, \
+ { SVM_EXIT_EXCP_BASE + GP_VECTOR, "GP excp" }, \
+ { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" }, \
+ { SVM_EXIT_EXCP_BASE + MF_VECTOR, "MF excp" }, \
{ SVM_EXIT_EXCP_BASE + AC_VECTOR, "AC excp" }, \
{ SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" }, \
+ { SVM_EXIT_EXCP_BASE + XM_VECTOR, "XF excp" }, \
{ SVM_EXIT_INTR, "interrupt" }, \
{ SVM_EXIT_NMI, "nmi" }, \
{ SVM_EXIT_SMI, "smi" }, \
{ SVM_EXIT_INIT, "init" }, \
{ SVM_EXIT_VINTR, "vintr" }, \
{ SVM_EXIT_CR0_SEL_WRITE, "cr0_sel_write" }, \
+ { SVM_EXIT_IDTR_READ, "read_idtr" }, \
+ { SVM_EXIT_GDTR_READ, "read_gdtr" }, \
+ { SVM_EXIT_LDTR_READ, "read_ldtr" }, \
+ { SVM_EXIT_TR_READ, "read_rt" }, \
+ { SVM_EXIT_IDTR_WRITE, "write_idtr" }, \
+ { SVM_EXIT_GDTR_WRITE, "write_gdtr" }, \
+ { SVM_EXIT_LDTR_WRITE, "write_ldtr" }, \
+ { SVM_EXIT_TR_WRITE, "write_rt" }, \
+ { SVM_EXIT_RDTSC, "rdtsc" }, \
+ { SVM_EXIT_RDPMC, "rdpmc" }, \
+ { SVM_EXIT_PUSHF, "pushf" }, \
+ { SVM_EXIT_POPF, "popf" }, \
{ SVM_EXIT_CPUID, "cpuid" }, \
+ { SVM_EXIT_RSM, "rsm" }, \
+ { SVM_EXIT_IRET, "iret" }, \
+ { SVM_EXIT_SWINT, "swint" }, \
{ SVM_EXIT_INVD, "invd" }, \
{ SVM_EXIT_PAUSE, "pause" }, \
{ SVM_EXIT_HLT, "hlt" }, \
@@ -119,6 +154,7 @@
{ SVM_EXIT_IOIO, "io" }, \
{ SVM_EXIT_MSR, "msr" }, \
{ SVM_EXIT_TASK_SWITCH, "task_switch" }, \
+ { SVM_EXIT_FERR_FREEZE, "ferr_freeze" }, \
{ SVM_EXIT_SHUTDOWN, "shutdown" }, \
{ SVM_EXIT_VMRUN, "vmrun" }, \
{ SVM_EXIT_VMMCALL, "hypercall" }, \
@@ -127,14 +163,16 @@
{ SVM_EXIT_STGI, "stgi" }, \
{ SVM_EXIT_CLGI, "clgi" }, \
{ SVM_EXIT_SKINIT, "skinit" }, \
+ { SVM_EXIT_RDTSCP, "rdtscp" }, \
+ { SVM_EXIT_ICEBP, "icebp" }, \
{ SVM_EXIT_WBINVD, "wbinvd" }, \
{ SVM_EXIT_MONITOR, "monitor" }, \
{ SVM_EXIT_MWAIT, "mwait" }, \
{ SVM_EXIT_XSETBV, "xsetbv" }, \
{ SVM_EXIT_NPF, "npf" }, \
- { SVM_EXIT_RSM, "rsm" }, \
{ SVM_EXIT_AVIC_INCOMPLETE_IPI, "avic_incomplete_ipi" }, \
- { SVM_EXIT_AVIC_UNACCELERATED_ACCESS, "avic_unaccelerated_access" }
+ { SVM_EXIT_AVIC_UNACCELERATED_ACCESS, "avic_unaccelerated_access" }, \
+ { SVM_EXIT_ERR, "invalid_guest_state" }
#endif /* _UAPI__SVM_H */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 6ef6ed9ccca6..0fe6953f421c 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -37,6 +37,7 @@
#include <asm/mtrr.h>
#include <linux/numa.h>
#include <asm/asm.h>
+#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/mce.h>
#include <asm/msr.h>
@@ -270,6 +271,8 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
static __init int setup_disable_smep(char *arg)
{
setup_clear_cpu_cap(X86_FEATURE_SMEP);
+ /* Check for things that depend on SMEP being enabled: */
+ check_mpx_erratum(&boot_cpu_data);
return 1;
}
__setup("nosmep", setup_disable_smep);
@@ -310,6 +313,10 @@ static bool pku_disabled;
static __always_inline void setup_pku(struct cpuinfo_x86 *c)
{
+ /* check the boot processor, plus compile options for PKU: */
+ if (!cpu_feature_enabled(X86_FEATURE_PKU))
+ return;
+ /* checks the actual processor's cpuid bits: */
if (!cpu_has(c, X86_FEATURE_PKU))
return;
if (pku_disabled)
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 8dae51fd3db1..6e2ffbebbcdb 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -25,6 +25,41 @@
#include <asm/apic.h>
#endif
+/*
+ * Just in case our CPU detection goes bad, or you have a weird system,
+ * allow a way to override the automatic disabling of MPX.
+ */
+static int forcempx;
+
+static int __init forcempx_setup(char *__unused)
+{
+ forcempx = 1;
+
+ return 1;
+}
+__setup("intel-skd-046-workaround=disable", forcempx_setup);
+
+void check_mpx_erratum(struct cpuinfo_x86 *c)
+{
+ if (forcempx)
+ return;
+ /*
+ * Turn off the MPX feature on CPUs where SMEP is not
+ * available or disabled.
+ *
+ * Works around Intel Erratum SKD046: "Branch Instructions
+ * May Initialize MPX Bound Registers Incorrectly".
+ *
+ * This might falsely disable MPX on systems without
+ * SMEP, like Atom processors without SMEP. But there
+ * is no such hardware known at the moment.
+ */
+ if (cpu_has(c, X86_FEATURE_MPX) && !cpu_has(c, X86_FEATURE_SMEP)) {
+ setup_clear_cpu_cap(X86_FEATURE_MPX);
+ pr_warn("x86/mpx: Disabling MPX since SMEP not present\n");
+ }
+}
+
static void early_init_intel(struct cpuinfo_x86 *c)
{
u64 misc_enable;
@@ -173,6 +208,8 @@ static void early_init_intel(struct cpuinfo_x86 *c)
if (edx & (1U << 28))
c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
}
+
+ check_mpx_erratum(c);
}
#ifdef CONFIG_X86_32
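The override is wired through __setup(), so the workaround is controlled from the kernel command line: booting with intel-skd-046-workaround=disable sets forcempx and makes check_mpx_erratum() return before clearing the feature, keeping MPX enabled even when SMEP is absent or disabled.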
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6b16c36f0939..6e789ca1f841 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -532,7 +532,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
switch (code) {
case ARCH_SET_GS:
- if (addr >= TASK_SIZE_OF(task))
+ if (addr >= TASK_SIZE_MAX)
return -EPERM;
cpu = get_cpu();
task->thread.gsindex = 0;
@@ -546,7 +546,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
case ARCH_SET_FS:
/* Not strictly needed for fs, but do it for symmetry
with gs */
- if (addr >= TASK_SIZE_OF(task))
+ if (addr >= TASK_SIZE_MAX)
return -EPERM;
cpu = get_cpu();
task->thread.fsindex = 0;
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index e60ef918f53d..600edd225e81 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -392,7 +392,7 @@ static int putreg(struct task_struct *child,
#ifdef CONFIG_X86_64
case offsetof(struct user_regs_struct,fs_base):
- if (value >= TASK_SIZE_OF(child))
+ if (value >= TASK_SIZE_MAX)
return -EIO;
/*
* When changing the segment base, use do_arch_prctl
@@ -406,7 +406,7 @@ static int putreg(struct task_struct *child,
/*
* Exactly the same here as the %fs handling above.
*/
- if (value >= TASK_SIZE_OF(child))
+ if (value >= TASK_SIZE_MAX)
return -EIO;
if (child->thread.gsbase != value)
return do_arch_prctl(child, ARCH_SET_GS, value);
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 6aa0f4d9eea6..9911a0620f9a 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -23,6 +23,7 @@
#include <asm/param.h>
/* CPU reference clock frequency: in KHz */
+#define FREQ_80 80000
#define FREQ_83 83200
#define FREQ_100 99840
#define FREQ_133 133200
@@ -56,6 +57,8 @@ static struct freq_desc freq_desc_tables[] = {
{ 6, 0x37, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } },
/* ANN */
{ 6, 0x5a, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_100, 0, 0, 0, 0 } },
+ /* AIRMONT */
+ { 6, 0x4c, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_166, FREQ_80, 0, 0, 0 } },
};
static int match_cpu(u8 family, u8 model)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 2214214c786b..1163e8173e5a 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -84,7 +84,7 @@ MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#define TSC_RATIO_MIN 0x0000000000000001ULL
#define TSC_RATIO_MAX 0x000000ffffffffffULL
-#define AVIC_HPA_MASK ~((0xFFFULL << 52) || 0xFFF)
+#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
/*
* 0xff is broadcast, so the max index allowed for physical APIC ID
@@ -3597,7 +3597,7 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
u32 icrl = svm->vmcb->control.exit_info_1;
u32 id = svm->vmcb->control.exit_info_2 >> 32;
- u32 index = svm->vmcb->control.exit_info_2 && 0xFF;
+ u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
struct kvm_lapic *apic = svm->vcpu.arch.apic;
trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index);
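Both kvm/svm.c fixes are the same class of typo: a logical operator where a bitwise one was intended. Since || and && collapse any nonzero operand to 1, the old code could neither build a mask nor extract a field. A standalone illustration:

    unsigned long long bad  = ~((0xFFFULL << 52) || 0xFFF); /* ~1: only bit 0 cleared */
    unsigned long long good = ~((0xFFFULL << 52) |  0xFFF); /* clears top and bottom 12 bits */
    unsigned int bad_index  = 0x1234 && 0xFF; /* 1 */
    unsigned int good_index = 0x1234 &  0xFF; /* 0x34 */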
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e605d1ed334f..fb93010beaa4 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2418,7 +2418,9 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
if (is_guest_mode(vcpu))
msr_bitmap = vmx_msr_bitmap_nested;
- else if (vcpu->arch.apic_base & X2APIC_ENABLE) {
+ else if (cpu_has_secondary_exec_ctrls() &&
+ (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
+ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
if (is_long_mode(vcpu))
msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
else
@@ -4787,6 +4789,19 @@ static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
+ if (cpu_has_secondary_exec_ctrls()) {
+ if (kvm_vcpu_apicv_active(vcpu))
+ vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
+ SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
+ else
+ vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
+ SECONDARY_EXEC_APIC_REGISTER_VIRT |
+ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
+ }
+
+ if (cpu_has_vmx_msr_bitmap())
+ vmx_set_msr_bitmap(vcpu);
}
static u32 vmx_exec_control(struct vcpu_vmx *vmx)
@@ -6333,23 +6348,20 @@ static __init int hardware_setup(void)
set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
- if (enable_apicv) {
- for (msr = 0x800; msr <= 0x8ff; msr++)
- vmx_disable_intercept_msr_read_x2apic(msr);
-
- /* According SDM, in x2apic mode, the whole id reg is used.
- * But in KVM, it only use the highest eight bits. Need to
- * intercept it */
- vmx_enable_intercept_msr_read_x2apic(0x802);
- /* TMCCT */
- vmx_enable_intercept_msr_read_x2apic(0x839);
- /* TPR */
- vmx_disable_intercept_msr_write_x2apic(0x808);
- /* EOI */
- vmx_disable_intercept_msr_write_x2apic(0x80b);
- /* SELF-IPI */
- vmx_disable_intercept_msr_write_x2apic(0x83f);
- }
+ for (msr = 0x800; msr <= 0x8ff; msr++)
+ vmx_disable_intercept_msr_read_x2apic(msr);
+
+ /* According SDM, in x2apic mode, the whole id reg is used. But in
+ * KVM, it only use the highest eight bits. Need to intercept it */
+ vmx_enable_intercept_msr_read_x2apic(0x802);
+ /* TMCCT */
+ vmx_enable_intercept_msr_read_x2apic(0x839);
+ /* TPR */
+ vmx_disable_intercept_msr_write_x2apic(0x808);
+ /* EOI */
+ vmx_disable_intercept_msr_write_x2apic(0x80b);
+ /* SELF-IPI */
+ vmx_disable_intercept_msr_write_x2apic(0x83f);
if (enable_ept) {
kvm_mmu_set_mask_ptes(0ull,
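vmcs_set_bits()/vmcs_clear_bits() used in the refresh path above are read-modify-write helpers over a 32-bit VMCS field; roughly (a sketch of the pattern, not quoted from the file):

    static void vmcs_set_bits(unsigned long field, u32 mask)
    {
            vmcs_write32(field, vmcs_read32(field) | mask);
    }

    static void vmcs_clear_bits(unsigned long field, u32 mask)
    {
            vmcs_write32(field, vmcs_read32(field) & ~mask);
    }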
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 5ce1ed02f7e8..7d1fa7cd2374 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -292,7 +292,7 @@ void vmalloc_sync_all(void)
return;
for (address = VMALLOC_START & PMD_MASK;
- address >= TASK_SIZE && address < FIXADDR_TOP;
+ address >= TASK_SIZE_MAX && address < FIXADDR_TOP;
address += PMD_SIZE) {
struct page *page;
@@ -854,8 +854,13 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
return;
}
#endif
- /* Kernel addresses are always protection faults: */
- if (address >= TASK_SIZE)
+
+ /*
+ * To avoid leaking information about the kernel page table
+ * layout, pretend that user-mode accesses to kernel addresses
+ * are always protection faults.
+ */
+ if (address >= TASK_SIZE_MAX)
error_code |= PF_PROT;
if (likely(show_unhandled_signals))
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
index 92723aeae0f9..cd95075944ab 100644
--- a/arch/x86/platform/efi/efi_stub_64.S
+++ b/arch/x86/platform/efi/efi_stub_64.S
@@ -11,7 +11,6 @@
#include <asm/msr.h>
#include <asm/processor-flags.h>
#include <asm/page_types.h>
-#include <asm/frame.h>
#define SAVE_XMM \
mov %rsp, %rax; \
@@ -40,10 +39,10 @@
mov (%rsp), %rsp
ENTRY(efi_call)
- FRAME_BEGIN
+ pushq %rbp
+ movq %rsp, %rbp
SAVE_XMM
- mov (%rsp), %rax
- mov 8(%rax), %rax
+ mov 16(%rbp), %rax
subq $48, %rsp
mov %r9, 32(%rsp)
mov %rax, 40(%rsp)
@@ -53,6 +52,6 @@ ENTRY(efi_call)
call *%rdi
addq $48, %rsp
RESTORE_XMM
- FRAME_END
+ popq %rbp
ret
ENDPROC(efi_call)
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 92e3e1d84c1d..12734a96df47 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -26,7 +26,5 @@ quiet_cmd_bin2c = BIN2C $@
$(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE
$(call if_changed,bin2c)
- @:
-
obj-$(CONFIG_KEXEC_FILE) += kexec-purgatory.o
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index b95964610ea7..c556c5ae8de5 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -59,7 +59,6 @@ OBJCOPYFLAGS_realmode.bin := -O binary
targets += realmode.bin
$(obj)/realmode.bin: $(obj)/realmode.elf $(obj)/realmode.relocs FORCE
$(call if_changed,objcopy)
- @:
quiet_cmd_relocs = RELOCS $@
cmd_relocs = arch/x86/tools/relocs --realmode $< > $@
diff --git a/arch/x86/um/os-Linux/registers.c b/arch/x86/um/os-Linux/registers.c
index 41bfe84e11ab..00f54a91bb4b 100644
--- a/arch/x86/um/os-Linux/registers.c
+++ b/arch/x86/um/os-Linux/registers.c
@@ -11,21 +11,56 @@
#endif
#include <longjmp.h>
#include <sysdep/ptrace_user.h>
+#include <sys/uio.h>
+#include <asm/sigcontext.h>
+#include <linux/elf.h>
-int save_fp_registers(int pid, unsigned long *fp_regs)
+int have_xstate_support;
+
+int save_i387_registers(int pid, unsigned long *fp_regs)
{
if (ptrace(PTRACE_GETFPREGS, pid, 0, fp_regs) < 0)
return -errno;
return 0;
}
-int restore_fp_registers(int pid, unsigned long *fp_regs)
+int save_fp_registers(int pid, unsigned long *fp_regs)
+{
+ struct iovec iov;
+
+ if (have_xstate_support) {
+ iov.iov_base = fp_regs;
+ iov.iov_len = sizeof(struct _xstate);
+ if (ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov) < 0)
+ return -errno;
+ return 0;
+ } else {
+ return save_i387_registers(pid, fp_regs);
+ }
+}
+
+int restore_i387_registers(int pid, unsigned long *fp_regs)
{
if (ptrace(PTRACE_SETFPREGS, pid, 0, fp_regs) < 0)
return -errno;
return 0;
}
+int restore_fp_registers(int pid, unsigned long *fp_regs)
+{
+ struct iovec iov;
+
+ if (have_xstate_support) {
+ iov.iov_base = fp_regs;
+ iov.iov_len = sizeof(struct _xstate);
+ if (ptrace(PTRACE_SETREGSET, pid, NT_X86_XSTATE, &iov) < 0)
+ return -errno;
+ return 0;
+ } else {
+ return restore_i387_registers(pid, fp_regs);
+ }
+}
+
#ifdef __i386__
int have_fpx_regs = 1;
int save_fpx_registers(int pid, unsigned long *fp_regs)
@@ -85,6 +120,16 @@ int put_fp_registers(int pid, unsigned long *regs)
return restore_fp_registers(pid, regs);
}
+void arch_init_registers(int pid)
+{
+ struct _xstate fp_regs;
+ struct iovec iov;
+
+ iov.iov_base = &fp_regs;
+ iov.iov_len = sizeof(struct _xstate);
+ if (ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov) == 0)
+ have_xstate_support = 1;
+}
#endif
unsigned long get_thread_reg(int reg, jmp_buf *buf)
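The registers.c changes above split the old entry points into i387-only helpers and xstate-aware wrappers keyed off have_xstate_support, which arch_init_registers() probes once per pid. A stand-alone sketch of the same probe from an ordinary ptrace tracer (glibc assumed; the child must already be ptrace-stopped, and the buffer size is a loose upper bound, not the kernel's struct _xstate):

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <elf.h>	/* NT_X86_XSTATE */

/* Returns nonzero when the kernel/CPU expose the xstate regset,
 * mirroring what arch_init_registers() does in the hunk above. */
static int probe_xstate(pid_t pid)
{
	unsigned char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };

	return ptrace(PTRACE_GETREGSET, pid, NT_X86_XSTATE, &iov) == 0;
}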
diff --git a/arch/x86/um/ptrace_32.c b/arch/x86/um/ptrace_32.c
index 47c78d5e5c32..ebd4dd6ef73b 100644
--- a/arch/x86/um/ptrace_32.c
+++ b/arch/x86/um/ptrace_32.c
@@ -194,7 +194,8 @@ static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *c
int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
struct user_i387_struct fpregs;
- err = save_fp_registers(userspace_pid[cpu], (unsigned long *) &fpregs);
+ err = save_i387_registers(userspace_pid[cpu],
+ (unsigned long *) &fpregs);
if (err)
return err;
@@ -214,7 +215,7 @@ static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *c
if (n > 0)
return -EFAULT;
- return restore_fp_registers(userspace_pid[cpu],
+ return restore_i387_registers(userspace_pid[cpu],
(unsigned long *) &fpregs);
}
diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c
index a629694ee750..faab418876ce 100644
--- a/arch/x86/um/ptrace_64.c
+++ b/arch/x86/um/ptrace_64.c
@@ -222,14 +222,14 @@ int is_syscall(unsigned long addr)
static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
{
int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
- long fpregs[HOST_FP_SIZE];
+ struct user_i387_struct fpregs;
- BUG_ON(sizeof(*buf) != sizeof(fpregs));
- err = save_fp_registers(userspace_pid[cpu], fpregs);
+ err = save_i387_registers(userspace_pid[cpu],
+ (unsigned long *) &fpregs);
if (err)
return err;
- n = copy_to_user(buf, fpregs, sizeof(fpregs));
+ n = copy_to_user(buf, &fpregs, sizeof(fpregs));
if (n > 0)
return -EFAULT;
@@ -239,14 +239,14 @@ static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *c
static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
{
int n, cpu = ((struct thread_info *) child->stack)->cpu;
- long fpregs[HOST_FP_SIZE];
+ struct user_i387_struct fpregs;
- BUG_ON(sizeof(*buf) != sizeof(fpregs));
- n = copy_from_user(fpregs, buf, sizeof(fpregs));
+ n = copy_from_user(&fpregs, buf, sizeof(fpregs));
if (n > 0)
return -EFAULT;
- return restore_fp_registers(userspace_pid[cpu], fpregs);
+ return restore_i387_registers(userspace_pid[cpu],
+ (unsigned long *) &fpregs);
}
long subarch_ptrace(struct task_struct *child, long request,
diff --git a/arch/x86/um/shared/sysdep/ptrace_64.h b/arch/x86/um/shared/sysdep/ptrace_64.h
index 919789f1071e..0dc223aa1c2d 100644
--- a/arch/x86/um/shared/sysdep/ptrace_64.h
+++ b/arch/x86/um/shared/sysdep/ptrace_64.h
@@ -57,8 +57,6 @@
#define UPT_SYSCALL_ARG5(r) UPT_R8(r)
#define UPT_SYSCALL_ARG6(r) UPT_R9(r)
-static inline void arch_init_registers(int pid)
-{
-}
+extern void arch_init_registers(int pid);
#endif
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index 14fcd01ed992..49e503697022 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -225,26 +225,16 @@ static int copy_sc_from_user(struct pt_regs *regs,
} else
#endif
{
- struct user_i387_struct fp;
-
- err = copy_from_user(&fp, (void *)sc.fpstate,
- sizeof(struct user_i387_struct));
+ err = copy_from_user(regs->regs.fp, (void *)sc.fpstate,
+ sizeof(struct _xstate));
if (err)
return 1;
-
- err = restore_fp_registers(pid, (unsigned long *) &fp);
- if (err < 0) {
- printk(KERN_ERR "copy_sc_from_user - "
- "restore_fp_registers failed, errno = %d\n",
- -err);
- return 1;
- }
}
return 0;
}
static int copy_sc_to_user(struct sigcontext __user *to,
- struct _fpstate __user *to_fp, struct pt_regs *regs,
+ struct _xstate __user *to_fp, struct pt_regs *regs,
unsigned long mask)
{
struct sigcontext sc;
@@ -310,25 +300,22 @@ static int copy_sc_to_user(struct sigcontext __user *to,
return 1;
}
- err = convert_fxsr_to_user(to_fp, &fpx);
+ err = convert_fxsr_to_user(&to_fp->fpstate, &fpx);
if (err)
return 1;
- err |= __put_user(fpx.swd, &to_fp->status);
- err |= __put_user(X86_FXSR_MAGIC, &to_fp->magic);
+ err |= __put_user(fpx.swd, &to_fp->fpstate.status);
+ err |= __put_user(X86_FXSR_MAGIC, &to_fp->fpstate.magic);
if (err)
return 1;
- if (copy_to_user(&to_fp->_fxsr_env[0], &fpx,
+ if (copy_to_user(&to_fp->fpstate._fxsr_env[0], &fpx,
sizeof(struct user_fxsr_struct)))
return 1;
} else
#endif
{
- struct user_i387_struct fp;
-
- err = save_fp_registers(pid, (unsigned long *) &fp);
- if (copy_to_user(to_fp, &fp, sizeof(struct user_i387_struct)))
+ if (copy_to_user(to_fp, regs->regs.fp, sizeof(struct _xstate)))
return 1;
}
@@ -337,7 +324,7 @@ static int copy_sc_to_user(struct sigcontext __user *to,
#ifdef CONFIG_X86_32
static int copy_ucontext_to_user(struct ucontext __user *uc,
- struct _fpstate __user *fp, sigset_t *set,
+ struct _xstate __user *fp, sigset_t *set,
unsigned long sp)
{
int err = 0;
@@ -353,7 +340,7 @@ struct sigframe
char __user *pretcode;
int sig;
struct sigcontext sc;
- struct _fpstate fpstate;
+ struct _xstate fpstate;
unsigned long extramask[_NSIG_WORDS-1];
char retcode[8];
};
@@ -366,7 +353,7 @@ struct rt_sigframe
void __user *puc;
struct siginfo info;
struct ucontext uc;
- struct _fpstate fpstate;
+ struct _xstate fpstate;
char retcode[8];
};
@@ -495,7 +482,7 @@ struct rt_sigframe
char __user *pretcode;
struct ucontext uc;
struct siginfo info;
- struct _fpstate fpstate;
+ struct _xstate fpstate;
};
int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
diff --git a/arch/x86/um/user-offsets.c b/arch/x86/um/user-offsets.c
index 470564bbd08e..cb3c22370cf5 100644
--- a/arch/x86/um/user-offsets.c
+++ b/arch/x86/um/user-offsets.c
@@ -50,7 +50,7 @@ void foo(void)
DEFINE(HOST_GS, GS);
DEFINE(HOST_ORIG_AX, ORIG_EAX);
#else
- DEFINE(HOST_FP_SIZE, sizeof(struct _fpstate) / sizeof(unsigned long));
+ DEFINE(HOST_FP_SIZE, sizeof(struct _xstate) / sizeof(unsigned long));
DEFINE_LONGS(HOST_BX, RBX);
DEFINE_LONGS(HOST_CX, RCX);
DEFINE_LONGS(HOST_DI, RDI);
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
index a6b00b3af429..ef90479e0397 100644
--- a/arch/xtensa/kernel/perf_event.c
+++ b/arch/xtensa/kernel/perf_event.c
@@ -323,23 +323,23 @@ static void xtensa_pmu_read(struct perf_event *event)
static int callchain_trace(struct stackframe *frame, void *data)
{
- struct perf_callchain_entry *entry = data;
+ struct perf_callchain_entry_ctx *entry = data;
perf_callchain_store(entry, frame->pc);
return 0;
}
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
- xtensa_backtrace_kernel(regs, sysctl_perf_event_max_stack,
+ xtensa_backtrace_kernel(regs, entry->max_stack,
callchain_trace, NULL, entry);
}
-void perf_callchain_user(struct perf_callchain_entry *entry,
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
- xtensa_backtrace_user(regs, sysctl_perf_event_max_stack,
+ xtensa_backtrace_user(regs, entry->max_stack,
callchain_trace, entry);
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 7df9c9263b21..29cbc1b5fbdb 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2020,7 +2020,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
if (!q->queue_ctx)
- return ERR_PTR(-ENOMEM);
+ goto err_exit;
q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
GFP_KERNEL, set->numa_node);
@@ -2084,6 +2084,8 @@ err_map:
kfree(q->queue_hw_ctx);
err_percpu:
free_percpu(q->queue_ctx);
+err_exit:
+ q->mq_ops = NULL;
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(blk_mq_init_allocated_queue);
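The blk-mq hunks above are one fix: the early return freed nothing (nothing had been allocated yet), but it also skipped resetting q->mq_ops, so a later blk_cleanup_queue() could mistake the half-built queue for a live blk-mq queue. The shape of the unwind idiom, as a runnable toy with hypothetical names:

#include <stdlib.h>

struct ctx { int *a, *b; };

static struct ctx *ctx_create(void)
{
	struct ctx *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	c->a = malloc(64);
	if (!c->a)
		goto err_exit;		/* nothing else to undo yet */
	c->b = malloc(64);
	if (!c->b)
		goto err_a;
	return c;
err_a:
	free(c->a);
err_exit:
	c->a = NULL;	/* mirror of q->mq_ops = NULL: mark as torn down */
	free(c);
	return NULL;
}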
diff --git a/block/ioctl.c b/block/ioctl.c
index 698c7933d582..ed2397f8de9d 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -4,7 +4,6 @@
#include <linux/gfp.h>
#include <linux/blkpg.h>
#include <linux/hdreg.h>
-#include <linux/badblocks.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/blktrace_api.h>
diff --git a/crypto/asymmetric_keys/Kconfig b/crypto/asymmetric_keys/Kconfig
index e28e912000a7..331f6baf2df8 100644
--- a/crypto/asymmetric_keys/Kconfig
+++ b/crypto/asymmetric_keys/Kconfig
@@ -13,6 +13,7 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
tristate "Asymmetric public-key crypto algorithm subtype"
select MPILIB
select CRYPTO_HASH_INFO
+ select CRYPTO_AKCIPHER
help
This option provides support for asymmetric public key type handling.
If signature generation and/or verification are to be used,
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index 15e4604efba7..1f4128487dd4 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -265,7 +265,7 @@ static int acpi_aml_write_kern(const char *buf, int len)
char *p;
ret = acpi_aml_lock_write(crc, ACPI_AML_OUT_KERN);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
/* sync tail before inserting logs */
smp_mb();
@@ -286,7 +286,7 @@ static int acpi_aml_readb_kern(void)
char *p;
ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
/* sync head before removing cmds */
smp_rmb();
@@ -330,7 +330,7 @@ again:
goto again;
break;
}
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
break;
size += ret;
count -= ret;
@@ -373,7 +373,7 @@ again:
if (ret == 0)
goto again;
}
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
break;
*(msg + size) = (char)ret;
size++;
@@ -526,7 +526,7 @@ static int acpi_aml_open(struct inode *inode, struct file *file)
}
acpi_aml_io.users++;
err_lock:
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
if (acpi_aml_active_reader == file)
acpi_aml_active_reader = NULL;
}
@@ -587,7 +587,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
char *p;
ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
/* sync head before removing logs */
smp_rmb();
@@ -602,7 +602,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
ret = n;
out:
- acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !IS_ERR_VALUE(ret));
+ acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !ret);
return ret;
}
@@ -634,7 +634,7 @@ again:
goto again;
}
}
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
if (!acpi_aml_running())
ret = 0;
break;
@@ -657,7 +657,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
char *p;
ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
/* sync tail before inserting cmds */
smp_mb();
@@ -672,7 +672,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
ret = n;
out:
- acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !IS_ERR_VALUE(ret));
+ acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !ret);
return n;
}
@@ -704,7 +704,7 @@ again:
goto again;
}
}
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
if (!acpi_aml_running())
ret = 0;
break;
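acpi_dbg.c is the first of several files in this series converting IS_ERR_VALUE(ret) on a plain int to ret < 0. IS_ERR_VALUE() is defined for unsigned long values carrying an errno in the top page of the address space; funneling other types through it relies on implicit conversions that can silently defeat the check. A host-buildable demonstration (LP64 assumed; MAX_ERRNO and the macro shape copied from the kernel, simplified):

#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	int ret = -22;			/* -EINVAL in a signed int */
	unsigned int uret = -22;	/* same bit pattern, unsigned */

	printf("%d\n", !!IS_ERR_VALUE(ret));	/* 1: sign-extension preserves it */
	printf("%d\n", !!IS_ERR_VALUE(uret));	/* 0: zero-extension hides the error */
	return 0;
}

Plain ret < 0 has no such trap and states the intent directly.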
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index b719ab3090bb..ab234791a0ba 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -1316,7 +1316,7 @@ static int __init acpi_battery_init(void)
static void __exit acpi_battery_exit(void)
{
- async_synchronize_cookie(async_cookie);
+ async_synchronize_cookie(async_cookie + 1);
acpi_bus_unregister_driver(&acpi_battery_driver);
#ifdef CONFIG_ACPI_PROCFS_POWER
acpi_unlock_battery_dir(acpi_battery_dir);
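The battery.c "+ 1" is deliberate: async_synchronize_cookie() waits for all asynchronous calls scheduled before the given cookie, exclusive, so waiting for the init call itself requires its own cookie plus one. A sketch of the convention, assuming only the <linux/async.h> semantics:

#include <linux/async.h>
#include <linux/module.h>

static async_cookie_t init_cookie;

static void my_init_async(void *data, async_cookie_t cookie)
{
	/* slow probe work runs here */
}

static int __init my_driver_init(void)
{
	init_cookie = async_schedule(my_init_async, NULL);
	return 0;
}

static void __exit my_driver_exit(void)
{
	/* exclusive upper bound, so +1 includes my_init_async() */
	async_synchronize_cookie(init_cookie + 1);
}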
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index cd2c3d6d40e0..993fd31394c8 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -319,6 +319,7 @@ int acpi_device_fix_up_power(struct acpi_device *device)
return ret;
}
+EXPORT_SYMBOL_GPL(acpi_device_fix_up_power);
int acpi_device_update_power(struct acpi_device *device, int *state_p)
{
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 8638d575b2b9..aafb8cc03523 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -197,7 +197,7 @@ static void highbank_set_em_messages(struct device *dev,
for (i = 0; i < SGPIO_PINS; i++) {
err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i);
- if (IS_ERR_VALUE(err))
+ if (err < 0)
return;
pdata->sgpio_gpio[i] = err;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c81667d4bb60..e44944f4be77 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1267,14 +1267,15 @@ int dpm_suspend_late(pm_message_t state)
error = device_suspend_late(dev);
mutex_lock(&dpm_list_mtx);
+ if (!list_empty(&dev->power.entry))
+ list_move(&dev->power.entry, &dpm_late_early_list);
+
if (error) {
pm_dev_err(dev, state, " late", error);
dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;
}
- if (!list_empty(&dev->power.entry))
- list_move(&dev->power.entry, &dpm_late_early_list);
put_device(dev);
if (async_error)
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 51a071e32221..c04bd9bc39fd 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -381,7 +381,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
#ifdef CONFIG_BLK_DEV_RAM_DAX
static long brd_direct_access(struct block_device *bdev, sector_t sector,
- void __pmem **kaddr, pfn_t *pfn)
+ void __pmem **kaddr, pfn_t *pfn, long size)
{
struct brd_device *brd = bdev->bd_disk->private_data;
struct page *page;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 0ede6d7e2568..81666a56415e 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -350,12 +350,12 @@ struct rbd_device {
struct rbd_spec *spec;
struct rbd_options *opts;
- char *header_name;
+ struct ceph_object_id header_oid;
+ struct ceph_object_locator header_oloc;
struct ceph_file_layout layout;
- struct ceph_osd_event *watch_event;
- struct rbd_obj_request *watch_request;
+ struct ceph_osd_linger_request *watch_handle;
struct rbd_spec *parent_spec;
u64 parent_overlap;
@@ -1596,12 +1596,6 @@ static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
return __rbd_obj_request_wait(obj_request, 0);
}
-static int rbd_obj_request_wait_timeout(struct rbd_obj_request *obj_request,
- unsigned long timeout)
-{
- return __rbd_obj_request_wait(obj_request, timeout);
-}
-
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
@@ -1751,12 +1745,6 @@ static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
complete_all(&obj_request->completion);
}
-static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
-{
- dout("%s: obj %p\n", __func__, obj_request);
- obj_request_done_set(obj_request);
-}
-
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request = NULL;
@@ -1828,13 +1816,12 @@ static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
obj_request_done_set(obj_request);
}
-static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
- struct ceph_msg *msg)
+static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
struct rbd_obj_request *obj_request = osd_req->r_priv;
u16 opcode;
- dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
+ dout("%s: osd_req %p\n", __func__, osd_req);
rbd_assert(osd_req == obj_request->osd_req);
if (obj_request_img_data_test(obj_request)) {
rbd_assert(obj_request->img_request);
@@ -1878,10 +1865,6 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
case CEPH_OSD_OP_CALL:
rbd_osd_call_callback(obj_request);
break;
- case CEPH_OSD_OP_NOTIFY_ACK:
- case CEPH_OSD_OP_WATCH:
- rbd_osd_trivial_callback(obj_request);
- break;
default:
rbd_warn(NULL, "%s: unsupported op %hu",
obj_request->object_name, (unsigned short) opcode);
@@ -1896,27 +1879,17 @@ static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request = obj_request->img_request;
struct ceph_osd_request *osd_req = obj_request->osd_req;
- u64 snap_id;
- rbd_assert(osd_req != NULL);
-
- snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
- ceph_osdc_build_request(osd_req, obj_request->offset,
- NULL, snap_id, NULL);
+ if (img_request)
+ osd_req->r_snapid = img_request->snap_id;
}
static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
- struct rbd_img_request *img_request = obj_request->img_request;
struct ceph_osd_request *osd_req = obj_request->osd_req;
- struct ceph_snap_context *snapc;
- struct timespec mtime = CURRENT_TIME;
- rbd_assert(osd_req != NULL);
-
- snapc = img_request ? img_request->snapc : NULL;
- ceph_osdc_build_request(osd_req, obj_request->offset,
- snapc, CEPH_NOSNAP, &mtime);
+ osd_req->r_mtime = CURRENT_TIME;
+ osd_req->r_data_offset = obj_request->offset;
}
/*
@@ -1954,7 +1927,7 @@ static struct ceph_osd_request *rbd_osd_req_create(
osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
GFP_NOIO);
if (!osd_req)
- return NULL; /* ENOMEM */
+ goto fail;
if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
@@ -1965,9 +1938,18 @@ static struct ceph_osd_request *rbd_osd_req_create(
osd_req->r_priv = obj_request;
osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
- ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
+ if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
+ obj_request->object_name))
+ goto fail;
+
+ if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
+ goto fail;
return osd_req;
+
+fail:
+ ceph_osdc_put_request(osd_req);
+ return NULL;
}
/*
@@ -2003,16 +1985,25 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
false, GFP_NOIO);
if (!osd_req)
- return NULL; /* ENOMEM */
+ goto fail;
osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
osd_req->r_callback = rbd_osd_req_callback;
osd_req->r_priv = obj_request;
osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
- ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
+ if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
+ obj_request->object_name))
+ goto fail;
+
+ if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
+ goto fail;
return osd_req;
+
+fail:
+ ceph_osdc_put_request(osd_req);
+ return NULL;
}
@@ -2973,17 +2964,20 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
struct rbd_obj_request *obj_request;
struct rbd_obj_request *next_obj_request;
+ int ret = 0;
dout("%s: img %p\n", __func__, img_request);
- for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
- int ret;
+ rbd_img_request_get(img_request);
+ for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
ret = rbd_img_obj_request_submit(obj_request);
if (ret)
- return ret;
+ goto out_put_ireq;
}
- return 0;
+out_put_ireq:
+ rbd_img_request_put(img_request);
+ return ret;
}
static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
@@ -3090,45 +3084,18 @@ out_err:
obj_request_done_set(obj_request);
}
-static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
-{
- struct rbd_obj_request *obj_request;
- struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
- int ret;
-
- obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
- OBJ_REQUEST_NODATA);
- if (!obj_request)
- return -ENOMEM;
-
- ret = -ENOMEM;
- obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
- obj_request);
- if (!obj_request->osd_req)
- goto out;
-
- osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
- notify_id, 0, 0);
- rbd_osd_req_format_read(obj_request);
+static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev);
+static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev);
- ret = rbd_obj_request_submit(osdc, obj_request);
- if (ret)
- goto out;
- ret = rbd_obj_request_wait(obj_request);
-out:
- rbd_obj_request_put(obj_request);
-
- return ret;
-}
-
-static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
+static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
+ u64 notifier_id, void *data, size_t data_len)
{
- struct rbd_device *rbd_dev = (struct rbd_device *)data;
+ struct rbd_device *rbd_dev = arg;
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
int ret;
- dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
- rbd_dev->header_name, (unsigned long long)notify_id,
- (unsigned int)opcode);
+ dout("%s rbd_dev %p cookie %llu notify_id %llu\n", __func__, rbd_dev,
+ cookie, notify_id);
/*
* Until adequate refresh error handling is in place, there is
@@ -3140,63 +3107,31 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
if (ret)
rbd_warn(rbd_dev, "refresh failed: %d", ret);
- ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
+ ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
+ &rbd_dev->header_oloc, notify_id, cookie,
+ NULL, 0);
if (ret)
rbd_warn(rbd_dev, "notify_ack ret %d", ret);
}
-/*
- * Send a (un)watch request and wait for the ack. Return a request
- * with a ref held on success or error.
- */
-static struct rbd_obj_request *rbd_obj_watch_request_helper(
- struct rbd_device *rbd_dev,
- bool watch)
+static void rbd_watch_errcb(void *arg, u64 cookie, int err)
{
- struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
- struct ceph_options *opts = osdc->client->options;
- struct rbd_obj_request *obj_request;
+ struct rbd_device *rbd_dev = arg;
int ret;
- obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
- OBJ_REQUEST_NODATA);
- if (!obj_request)
- return ERR_PTR(-ENOMEM);
-
- obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
- obj_request);
- if (!obj_request->osd_req) {
- ret = -ENOMEM;
- goto out;
- }
-
- osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
- rbd_dev->watch_event->cookie, 0, watch);
- rbd_osd_req_format_write(obj_request);
+ rbd_warn(rbd_dev, "encountered watch error: %d", err);
- if (watch)
- ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
-
- ret = rbd_obj_request_submit(osdc, obj_request);
- if (ret)
- goto out;
+ __rbd_dev_header_unwatch_sync(rbd_dev);
- ret = rbd_obj_request_wait_timeout(obj_request, opts->mount_timeout);
- if (ret)
- goto out;
-
- ret = obj_request->result;
+ ret = rbd_dev_header_watch_sync(rbd_dev);
if (ret) {
- if (watch)
- rbd_obj_request_end(obj_request);
- goto out;
+ rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
+ return;
}
- return obj_request;
-
-out:
- rbd_obj_request_put(obj_request);
- return ERR_PTR(ret);
+ ret = rbd_dev_refresh(rbd_dev);
+ if (ret)
+ rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);

}
/*
@@ -3205,35 +3140,33 @@ out:
static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
- struct rbd_obj_request *obj_request;
- int ret;
+ struct ceph_osd_linger_request *handle;
- rbd_assert(!rbd_dev->watch_event);
- rbd_assert(!rbd_dev->watch_request);
+ rbd_assert(!rbd_dev->watch_handle);
- ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
- &rbd_dev->watch_event);
- if (ret < 0)
- return ret;
+ handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
+ &rbd_dev->header_oloc, rbd_watch_cb,
+ rbd_watch_errcb, rbd_dev);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
- obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
- if (IS_ERR(obj_request)) {
- ceph_osdc_cancel_event(rbd_dev->watch_event);
- rbd_dev->watch_event = NULL;
- return PTR_ERR(obj_request);
- }
+ rbd_dev->watch_handle = handle;
+ return 0;
+}
- /*
- * A watch request is set to linger, so the underlying osd
- * request won't go away until we unregister it. We retain
- * a pointer to the object request during that time (in
- * rbd_dev->watch_request), so we'll keep a reference to it.
- * We'll drop that reference after we've unregistered it in
- * rbd_dev_header_unwatch_sync().
- */
- rbd_dev->watch_request = obj_request;
+static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
+{
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ int ret;
- return 0;
+ if (!rbd_dev->watch_handle)
+ return;
+
+ ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
+ if (ret)
+ rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
+
+ rbd_dev->watch_handle = NULL;
}
/*
@@ -3241,24 +3174,7 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
*/
static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
{
- struct rbd_obj_request *obj_request;
-
- rbd_assert(rbd_dev->watch_event);
- rbd_assert(rbd_dev->watch_request);
-
- rbd_obj_request_end(rbd_dev->watch_request);
- rbd_obj_request_put(rbd_dev->watch_request);
- rbd_dev->watch_request = NULL;
-
- obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
- if (!IS_ERR(obj_request))
- rbd_obj_request_put(obj_request);
- else
- rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
- PTR_ERR(obj_request));
-
- ceph_osdc_cancel_event(rbd_dev->watch_event);
- rbd_dev->watch_event = NULL;
+ __rbd_dev_header_unwatch_sync(rbd_dev);
dout("%s flushing notifies\n", __func__);
ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
@@ -3591,7 +3507,7 @@ static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
if (!ondisk)
return -ENOMEM;
- ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_oid.name,
0, size, ondisk);
if (ret < 0)
goto out;
@@ -4033,6 +3949,8 @@ static void rbd_dev_release(struct device *dev)
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
bool need_put = !!rbd_dev->opts;
+ ceph_oid_destroy(&rbd_dev->header_oid);
+
rbd_put_client(rbd_dev->rbd_client);
rbd_spec_put(rbd_dev->spec);
kfree(rbd_dev->opts);
@@ -4063,6 +3981,9 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
INIT_LIST_HEAD(&rbd_dev->node);
init_rwsem(&rbd_dev->header_rwsem);
+ ceph_oid_init(&rbd_dev->header_oid);
+ ceph_oloc_init(&rbd_dev->header_oloc);
+
rbd_dev->dev.bus = &rbd_bus_type;
rbd_dev->dev.type = &rbd_device_type;
rbd_dev->dev.parent = &rbd_root_dev;
@@ -4111,7 +4032,7 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
__le64 size;
} __attribute__ ((packed)) size_buf = { 0 };
- ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
"rbd", "get_size",
&snapid, sizeof (snapid),
&size_buf, sizeof (size_buf));
@@ -4151,7 +4072,7 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
if (!reply_buf)
return -ENOMEM;
- ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
"rbd", "get_object_prefix", NULL, 0,
reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
@@ -4186,7 +4107,7 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
u64 unsup;
int ret;
- ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
"rbd", "get_features",
&snapid, sizeof (snapid),
&features_buf, sizeof (features_buf));
@@ -4248,7 +4169,7 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
}
snapid = cpu_to_le64(rbd_dev->spec->snap_id);
- ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
"rbd", "get_parent",
&snapid, sizeof (snapid),
reply_buf, size);
@@ -4351,7 +4272,7 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
u64 stripe_count;
int ret;
- ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
"rbd", "get_stripe_unit_count", NULL, 0,
(char *)&striping_info_buf, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
@@ -4599,7 +4520,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
if (!reply_buf)
return -ENOMEM;
- ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
"rbd", "get_snapcontext", NULL, 0,
reply_buf, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
@@ -4664,7 +4585,7 @@ static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
return ERR_PTR(-ENOMEM);
snapid = cpu_to_le64(snap_id);
- ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
"rbd", "get_snapshot_name",
&snapid, sizeof (snapid),
reply_buf, size);
@@ -4975,13 +4896,13 @@ static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
again:
ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
if (ret == -ENOENT && tries++ < 1) {
- ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
- &newest_epoch);
+ ret = ceph_monc_get_version(&rbdc->client->monc, "osdmap",
+ &newest_epoch);
if (ret < 0)
return ret;
if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
- ceph_monc_request_next_osdmap(&rbdc->client->monc);
+ ceph_osdc_maybe_request_map(&rbdc->client->osdc);
(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
newest_epoch,
opts->mount_timeout);
@@ -5260,35 +5181,26 @@ err_out_unlock:
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
struct rbd_spec *spec = rbd_dev->spec;
- size_t size;
+ int ret;
/* Record the header object name for this rbd image. */
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+ rbd_dev->header_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
if (rbd_dev->image_format == 1)
- size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
+ ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
+ spec->image_name, RBD_SUFFIX);
else
- size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
-
- rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
- if (!rbd_dev->header_name)
- return -ENOMEM;
+ ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
+ RBD_HEADER_PREFIX, spec->image_id);
- if (rbd_dev->image_format == 1)
- sprintf(rbd_dev->header_name, "%s%s",
- spec->image_name, RBD_SUFFIX);
- else
- sprintf(rbd_dev->header_name, "%s%s",
- RBD_HEADER_PREFIX, spec->image_id);
- return 0;
+ return ret;
}
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
rbd_dev_unprobe(rbd_dev);
- kfree(rbd_dev->header_name);
- rbd_dev->header_name = NULL;
rbd_dev->image_format = 0;
kfree(rbd_dev->spec->image_id);
rbd_dev->spec->image_id = NULL;
@@ -5327,7 +5239,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
pr_info("image %s/%s does not exist\n",
rbd_dev->spec->pool_name,
rbd_dev->spec->image_name);
- goto out_header_name;
+ goto err_out_format;
}
}
@@ -5373,7 +5285,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
goto err_out_probe;
dout("discovered format %u image, header name is %s\n",
- rbd_dev->image_format, rbd_dev->header_name);
+ rbd_dev->image_format, rbd_dev->header_oid.name);
return 0;
err_out_probe:
@@ -5381,9 +5293,6 @@ err_out_probe:
err_out_watch:
if (!depth)
rbd_dev_header_unwatch_sync(rbd_dev);
-out_header_name:
- kfree(rbd_dev->header_name);
- rbd_dev->header_name = NULL;
err_out_format:
rbd_dev->image_format = 0;
kfree(rbd_dev->spec->image_id);
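Taken together, the rbd hunks above retire the event/obj-request watch machinery in favor of libceph's linger API: the header object is now named by a ceph_object_id built with ceph_oid_aprintf() (and freed with ceph_oid_destroy()), and the watch itself is a single handle. Condensed, with names exactly as they appear in this diff:

	struct ceph_osd_linger_request *handle;

	handle = ceph_osdc_watch(osdc, &oid, &oloc, notify_cb, error_cb, priv);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	/* notify_cb() acks via ceph_osdc_notify_ack(); error_cb() may
	 * recover by unwatching and re-watching, as rbd_watch_errcb()
	 * does above */
	ceph_osdc_unwatch(osdc, handle);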
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index b8551813ec43..456cf586d2c2 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -1221,7 +1221,7 @@ static int tegra210_pll_fixed_mdiv_cfg(struct clk_hw *hw,
p = rate >= params->vco_min ? 1 : -EINVAL;
}
- if (IS_ERR_VALUE(p))
+ if (p < 0)
return -EINVAL;
cfg->m = tegra_pll_get_fixed_mdiv(hw, input_rate);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 035513b012ee..36bc11a106aa 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -78,9 +78,14 @@ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_start_governor(struct cpufreq_policy *policy);
-static inline int cpufreq_exit_governor(struct cpufreq_policy *policy)
+static inline void cpufreq_exit_governor(struct cpufreq_policy *policy)
{
- return cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+ (void)cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
+}
+
+static inline void cpufreq_stop_governor(struct cpufreq_policy *policy)
+{
+ (void)cpufreq_governor(policy, CPUFREQ_GOV_STOP);
}
/**
@@ -1026,13 +1031,8 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
return 0;
down_write(&policy->rwsem);
- if (has_target()) {
- ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
- if (ret) {
- pr_err("%s: Failed to stop governor\n", __func__);
- goto unlock;
- }
- }
+ if (has_target())
+ cpufreq_stop_governor(policy);
cpumask_set_cpu(cpu, policy->cpus);
@@ -1041,8 +1041,6 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cp
if (ret)
pr_err("%s: Failed to start governor\n", __func__);
}
-
-unlock:
up_write(&policy->rwsem);
return ret;
}
@@ -1354,11 +1352,8 @@ static void cpufreq_offline(unsigned int cpu)
}
down_write(&policy->rwsem);
- if (has_target()) {
- ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
- if (ret)
- pr_err("%s: Failed to stop governor\n", __func__);
- }
+ if (has_target())
+ cpufreq_stop_governor(policy);
cpumask_clear_cpu(cpu, policy->cpus);
@@ -1387,12 +1382,8 @@ static void cpufreq_offline(unsigned int cpu)
if (cpufreq_driver->stop_cpu)
cpufreq_driver->stop_cpu(policy);
- /* If cpu is last user of policy, free policy */
- if (has_target()) {
- ret = cpufreq_exit_governor(policy);
- if (ret)
- pr_err("%s: Failed to exit governor\n", __func__);
- }
+ if (has_target())
+ cpufreq_exit_governor(policy);
/*
* Perform the ->exit() even during light-weight tear-down,
@@ -1626,7 +1617,6 @@ EXPORT_SYMBOL(cpufreq_generic_suspend);
void cpufreq_suspend(void)
{
struct cpufreq_policy *policy;
- int ret;
if (!cpufreq_driver)
return;
@@ -1639,14 +1629,8 @@ void cpufreq_suspend(void)
for_each_active_policy(policy) {
if (has_target()) {
down_write(&policy->rwsem);
- ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ cpufreq_stop_governor(policy);
up_write(&policy->rwsem);
-
- if (ret) {
- pr_err("%s: Failed to stop governor for policy: %p\n",
- __func__, policy);
- continue;
- }
}
if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
@@ -2049,16 +2033,15 @@ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
ret = policy->governor->governor(policy, event);
- if (!ret) {
- if (event == CPUFREQ_GOV_POLICY_INIT)
+ if (event == CPUFREQ_GOV_POLICY_INIT) {
+ if (ret)
+ module_put(policy->governor->owner);
+ else
policy->governor->initialized++;
- else if (event == CPUFREQ_GOV_POLICY_EXIT)
- policy->governor->initialized--;
- }
-
- if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
- ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
+ } else if (event == CPUFREQ_GOV_POLICY_EXIT) {
+ policy->governor->initialized--;
module_put(policy->governor->owner);
+ }
return ret;
}
@@ -2221,20 +2204,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
old_gov = policy->governor;
/* end old governor */
if (old_gov) {
- ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
- if (ret) {
- /* This can happen due to race with other operations */
- pr_debug("%s: Failed to Stop Governor: %s (%d)\n",
- __func__, old_gov->name, ret);
- return ret;
- }
-
- ret = cpufreq_exit_governor(policy);
- if (ret) {
- pr_err("%s: Failed to Exit Governor: %s (%d)\n",
- __func__, old_gov->name, ret);
- return ret;
- }
+ cpufreq_stop_governor(policy);
+ cpufreq_exit_governor(policy);
}
/* start new governor */
@@ -2495,10 +2466,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
register_hotcpu_notifier(&cpufreq_cpu_notifier);
pr_debug("driver %s up and running\n", driver_data->name);
-
-out:
- put_online_cpus();
- return ret;
+ goto out;
err_if_unreg:
subsys_interface_unregister(&cpufreq_interface);
@@ -2508,7 +2476,9 @@ err_null_driver:
write_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- goto out;
+out:
+ put_online_cpus();
+ return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
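Two related simplifications meet in the cpufreq hunks above: stop/exit results were only ever logged, so the wrappers become void, and cpufreq_governor() now releases the governor module reference on every GOV_POLICY_EXIT rather than only on a successful one, since callers can no longer react to an EXIT failure anyway. The invariant, sketched with the field names from this diff:

	/* Sketch: the module reference taken before INIT lives exactly
	 * from a successful INIT to the matching EXIT. */
	if (event == CPUFREQ_GOV_POLICY_INIT) {
		if (ret)
			module_put(policy->governor->owner);	/* failed INIT */
		else
			policy->governor->initialized++;
	} else if (event == CPUFREQ_GOV_POLICY_EXIT) {
		policy->governor->initialized--;
		module_put(policy->governor->owner);		/* always */
	}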
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b76a98dd9988..3a9c4325d6e2 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1461,12 +1461,11 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
intel_pstate_clear_update_util_hook(policy->cpu);
cpu = all_cpu_data[0];
- if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate) {
- if (policy->max < policy->cpuinfo.max_freq &&
- policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
- pr_debug("policy->max > max non turbo frequency\n");
- policy->max = policy->cpuinfo.max_freq;
- }
+ if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
+ policy->max < policy->cpuinfo.max_freq &&
+ policy->max > cpu->pstate.max_pstate * cpu->pstate.scaling) {
+ pr_debug("policy->max > max non turbo frequency\n");
+ policy->max = policy->cpuinfo.max_freq;
}
if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c
index 6f602c7a71bd..643f43179df1 100644
--- a/drivers/cpufreq/mt8173-cpufreq.c
+++ b/drivers/cpufreq/mt8173-cpufreq.c
@@ -307,17 +307,24 @@ static int mtk_cpufreq_set_target(struct cpufreq_policy *policy,
return 0;
}
+#define DYNAMIC_POWER "dynamic-power-coefficient"
+
static void mtk_cpufreq_ready(struct cpufreq_policy *policy)
{
struct mtk_cpu_dvfs_info *info = policy->driver_data;
struct device_node *np = of_node_get(info->cpu_dev->of_node);
+ u32 capacitance = 0;
if (WARN_ON(!np))
return;
if (of_find_property(np, "#cooling-cells", NULL)) {
- info->cdev = of_cpufreq_cooling_register(np,
- policy->related_cpus);
+ of_property_read_u32(np, DYNAMIC_POWER, &capacitance);
+
+ info->cdev = of_cpufreq_power_cooling_register(np,
+ policy->related_cpus,
+ capacitance,
+ NULL);
if (IS_ERR(info->cdev)) {
dev_err(info->cpu_dev,
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index cead9bec4843..376e63ca94e8 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -54,7 +54,7 @@ static int omap_target(struct cpufreq_policy *policy, unsigned int index)
freq = new_freq * 1000;
ret = clk_round_rate(policy->clk, freq);
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
dev_warn(mpu_dev,
"CPUfreq: Cannot find matching frequency for %lu\n",
freq);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 2b8e6ce62e81..a4d0059e232c 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -214,7 +214,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
tick_broadcast_exit();
}
- if (!cpuidle_state_is_coupled(drv, entered_state))
+ if (!cpuidle_state_is_coupled(drv, index))
local_irq_enable();
/*
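The cpuidle change is a genuine bug fix, not a rename: ->enter() may fail and return a negative error code, and that value was being used as a state index in cpuidle_state_is_coupled(). Whether a state is coupled is a property of the requested index, which is always valid:

	entered_state = target_state->enter(dev, drv, index);
	/* entered_state may be negative here; never index with it */
	if (!cpuidle_state_is_coupled(drv, index))
		local_irq_enable();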
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 44d30b45f3cc..5ad5f3009ae0 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -402,7 +402,7 @@ int caam_get_era(void)
ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
of_node_put(caam_node);
- return IS_ERR_VALUE(ret) ? -ENOTSUPP : prop;
+ return ret ? -ENOTSUPP : prop;
}
EXPORT_SYMBOL(caam_get_era);
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 52c7395cb8d8..0d0d4529ee36 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -122,6 +122,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
unsigned int unit;
+ u32 unit_size;
int ret;
if (!ctx->u.aes.key_len)
@@ -133,11 +134,17 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
if (!req->info)
return -EINVAL;
- for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++)
- if (!(req->nbytes & (unit_size_map[unit].size - 1)))
- break;
+ unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
+ if (req->nbytes <= unit_size_map[0].size) {
+ for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
+ if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
+ unit_size = unit_size_map[unit].value;
+ break;
+ }
+ }
+ }
- if ((unit_size_map[unit].value == CCP_XTS_AES_UNIT_SIZE__LAST) ||
+ if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
(ctx->u.aes.key_len != AES_KEYSIZE_128)) {
/* Use the fallback to process the request for any
* unsupported unit sizes or key sizes
@@ -158,7 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
: CCP_AES_ACTION_DECRYPT;
- rctx->cmd.u.xts.unit_size = unit_size_map[unit].value;
+ rctx->cmd.u.xts.unit_size = unit_size;
rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
rctx->cmd.u.xts.iv = &rctx->iv_sg;
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index 6eefaa2fe58f..63464e86f2b1 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -1986,7 +1986,7 @@ err_algs:
&dd->pdata->algs_info[i].algs_list[j]);
err_pm:
pm_runtime_disable(dev);
- if (dd->polling_mode)
+ if (!dd->polling_mode)
dma_release_channel(dd->dma_lch);
data_err:
dev_err(dev, "initialization failed.\n");
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index e0df233dde92..57aa227bfadb 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -461,25 +461,25 @@ generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
/* Source burst */
ret = convert_burst(sconfig->src_maxburst);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
/* Destination burst */
ret = convert_burst(sconfig->dst_maxburst);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
/* Source bus width */
ret = convert_buswidth(sconfig->src_addr_width);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
/* Destination bus width */
ret = convert_buswidth(sconfig->dst_addr_width);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
@@ -518,25 +518,25 @@ generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
/* Source burst */
ret = convert_burst(sconfig->src_maxburst);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
/* Destination burst */
ret = convert_burst(sconfig->dst_maxburst);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
/* Source bus width */
ret = convert_buswidth(sconfig->src_addr_width);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
/* Destination bus width */
ret = convert_buswidth(sconfig->dst_addr_width);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index d39014daeef9..fc5f197906ac 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -29,7 +29,6 @@
#include <mach/hardware.h>
#include <mach/platform.h>
-#include <mach/irqs.h>
#define LPC32XX_GPIO_P3_INP_STATE _GPREG(0x000)
#define LPC32XX_GPIO_P3_OUTP_SET _GPREG(0x004)
@@ -371,61 +370,16 @@ static int lpc32xx_gpio_request(struct gpio_chip *chip, unsigned pin)
static int lpc32xx_gpio_to_irq_p01(struct gpio_chip *chip, unsigned offset)
{
- return IRQ_LPC32XX_P0_P1_IRQ;
+ return -ENXIO;
}
-static const char lpc32xx_gpio_to_irq_gpio_p3_table[] = {
- IRQ_LPC32XX_GPIO_00,
- IRQ_LPC32XX_GPIO_01,
- IRQ_LPC32XX_GPIO_02,
- IRQ_LPC32XX_GPIO_03,
- IRQ_LPC32XX_GPIO_04,
- IRQ_LPC32XX_GPIO_05,
-};
-
static int lpc32xx_gpio_to_irq_gpio_p3(struct gpio_chip *chip, unsigned offset)
{
- if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpio_p3_table))
- return lpc32xx_gpio_to_irq_gpio_p3_table[offset];
return -ENXIO;
}
-static const char lpc32xx_gpio_to_irq_gpi_p3_table[] = {
- IRQ_LPC32XX_GPI_00,
- IRQ_LPC32XX_GPI_01,
- IRQ_LPC32XX_GPI_02,
- IRQ_LPC32XX_GPI_03,
- IRQ_LPC32XX_GPI_04,
- IRQ_LPC32XX_GPI_05,
- IRQ_LPC32XX_GPI_06,
- IRQ_LPC32XX_GPI_07,
- IRQ_LPC32XX_GPI_08,
- IRQ_LPC32XX_GPI_09,
- -ENXIO, /* 10 */
- -ENXIO, /* 11 */
- -ENXIO, /* 12 */
- -ENXIO, /* 13 */
- -ENXIO, /* 14 */
- -ENXIO, /* 15 */
- -ENXIO, /* 16 */
- -ENXIO, /* 17 */
- -ENXIO, /* 18 */
- IRQ_LPC32XX_GPI_19,
- -ENXIO, /* 20 */
- -ENXIO, /* 21 */
- -ENXIO, /* 22 */
- -ENXIO, /* 23 */
- -ENXIO, /* 24 */
- -ENXIO, /* 25 */
- -ENXIO, /* 26 */
- -ENXIO, /* 27 */
- IRQ_LPC32XX_GPI_28,
-};
-
static int lpc32xx_gpio_to_irq_gpi_p3(struct gpio_chip *chip, unsigned offset)
{
- if (offset < ARRAY_SIZE(lpc32xx_gpio_to_irq_gpi_p3_table))
- return lpc32xx_gpio_to_irq_gpi_p3_table[offset];
return -ENXIO;
}
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index 08897dc11915..1a33a19d95b9 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -393,7 +393,7 @@ static int xlp_gpio_probe(struct platform_device *pdev)
irq_base = irq_alloc_descs(-1, 0, gc->ngpio, 0);
else
irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0);
- if (IS_ERR_VALUE(irq_base)) {
+ if (irq_base < 0) {
dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n");
return irq_base;
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index d407f904a31c..24f60d28f0c0 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -20,6 +20,7 @@
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
+#include <linux/compat.h>
#include <uapi/linux/gpio.h>
#include "gpiolib.h"
@@ -316,7 +317,7 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct gpio_device *gdev = filp->private_data;
struct gpio_chip *chip = gdev->chip;
- int __user *ip = (int __user *)arg;
+ void __user *ip = (void __user *)arg;
/* We fail any subsequent ioctl():s when the chip is gone */
if (!chip)
@@ -388,6 +389,14 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -EINVAL;
}
+#ifdef CONFIG_COMPAT
+static long gpio_ioctl_compat(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return gpio_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
/**
* gpio_chrdev_open() - open the chardev for ioctl operations
* @inode: inode for this chardev
@@ -431,7 +440,9 @@ static const struct file_operations gpio_fileops = {
.owner = THIS_MODULE,
.llseek = noop_llseek,
.unlocked_ioctl = gpio_ioctl,
- .compat_ioctl = gpio_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = gpio_ioctl_compat,
+#endif
};
static void gpiodevice_release(struct device *dev)
@@ -618,6 +629,8 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
goto err_free_label;
}
+ spin_unlock_irqrestore(&gpio_lock, flags);
+
for (i = 0; i < chip->ngpio; i++) {
struct gpio_desc *desc = &gdev->descs[i];
@@ -649,8 +662,6 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data)
}
}
- spin_unlock_irqrestore(&gpio_lock, flags);
-
#ifdef CONFIG_PINCTRL
INIT_LIST_HEAD(&gdev->pin_ranges);
#endif
@@ -1356,10 +1367,13 @@ done:
/*
* This descriptor validation needs to be inserted verbatim into each
* function taking a descriptor, so we need to use a preprocessor
- * macro to avoid endless duplication.
+ * macro to avoid endless duplication. If the desc is NULL it is an
+ * optional GPIO and calls should just bail out.
*/
#define VALIDATE_DESC(desc) do { \
- if (!desc || !desc->gdev) { \
+ if (!desc) \
+ return 0; \
+ if (!desc->gdev) { \
pr_warn("%s: invalid GPIO\n", __func__); \
return -EINVAL; \
} \
@@ -1370,7 +1384,9 @@ done:
} } while (0)
#define VALIDATE_DESC_VOID(desc) do { \
- if (!desc || !desc->gdev) { \
+ if (!desc) \
+ return; \
+ if (!desc->gdev) { \
pr_warn("%s: invalid GPIO\n", __func__); \
return; \
} \
@@ -2066,17 +2082,30 @@ EXPORT_SYMBOL_GPL(gpiod_to_irq);
*/
int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset)
{
- if (offset >= chip->ngpio)
- return -EINVAL;
+ struct gpio_desc *desc;
+
+ desc = gpiochip_get_desc(chip, offset);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ /* Flush direction if something changed behind our back */
+ if (chip->get_direction) {
+ int dir = chip->get_direction(chip, offset);
+
+ if (dir)
+ clear_bit(FLAG_IS_OUT, &desc->flags);
+ else
+ set_bit(FLAG_IS_OUT, &desc->flags);
+ }
- if (test_bit(FLAG_IS_OUT, &chip->gpiodev->descs[offset].flags)) {
+ if (test_bit(FLAG_IS_OUT, &desc->flags)) {
chip_err(chip,
"%s: tried to flag a GPIO set as output for IRQ\n",
__func__);
return -EIO;
}
- set_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags);
+ set_bit(FLAG_USED_AS_IRQ, &desc->flags);
return 0;
}
EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq);
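With VALIDATE_DESC() succeeding on a NULL descriptor, optional GPIOs compose cleanly: the *_optional getters return NULL when the line is absent, and every gpiod_* accessor on NULL is now a quiet no-op instead of a warning. A hedged consumer-side sketch (device and line names hypothetical):

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int my_probe(struct device *dev)
{
	struct gpio_desc *reset;

	/* NULL, not an error, when the firmware omits the line */
	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	gpiod_set_value(reset, 1);	/* silently ignored if reset == NULL */
	gpiod_set_value(reset, 0);
	return 0;
}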
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 2bd3e5aa43c6..be43afb08c69 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -23,7 +23,7 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
- drm_kms_helper_common.o
+ drm_kms_helper_common.o drm_dp_dual_mode_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index ca77ec10147c..e503e3d6d920 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -2,6 +2,7 @@ menu "ACP (Audio CoProcessor) Configuration"
config DRM_AMD_ACP
bool "Enable AMD Audio CoProcessor IP support"
+ depends on DRM_AMDGPU
select MFD_CORE
select PM_GENERIC_DOMAINS if PM
help
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2a009c398dcb..992f00b65be4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -602,6 +602,8 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_sync *sync);
int amdgpu_sync_init(void);
void amdgpu_sync_fini(void);
+int amdgpu_fence_slab_init(void);
+void amdgpu_fence_slab_fini(void);
/*
* GART structures, functions & helpers
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 60a0c9ac11b2..cb07da41152b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -194,12 +194,12 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
bpc = 8;
DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n",
connector->name, bpc);
- } else if (bpc > 8) {
- /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
- DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
- connector->name);
- bpc = 8;
}
+ } else if (bpc > 8) {
+ /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
+ DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
+ connector->name);
+ bpc = 8;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 1dab5f2b725b..f888c015f76c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -50,9 +50,11 @@
* KMS wrapper.
* - 3.0.0 - initial driver
* - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP)
+ * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same
+ * at the end of IBs.
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 1
+#define KMS_DRIVER_MINOR 2
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -279,14 +281,26 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x98E4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_STONEY|AMD_IS_APU},
/* Polaris11 */
{0x1002, 0x67E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
- {0x1002, 0x67E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+ {0x1002, 0x67E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
{0x1002, 0x67E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
- {0x1002, 0x67E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
{0x1002, 0x67EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+ {0x1002, 0x67EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
{0x1002, 0x67FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+ {0x1002, 0x67E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+ {0x1002, 0x67E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+ {0x1002, 0x67E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
/* Polaris10 */
{0x1002, 0x67C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0x1002, 0x67DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0, 0, 0}
};
@@ -563,9 +577,12 @@ static struct pci_driver amdgpu_kms_pci_driver = {
.driver.pm = &amdgpu_pm_ops,
};
+
+
static int __init amdgpu_init(void)
{
amdgpu_sync_init();
+ amdgpu_fence_slab_init();
if (vgacon_text_force()) {
DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
return -EINVAL;
@@ -576,7 +593,6 @@ static int __init amdgpu_init(void)
driver->driver_features |= DRIVER_MODESET;
driver->num_ioctls = amdgpu_max_kms_ioctl;
amdgpu_register_atpx_handler();
-
/* let modprobe override vga console setting */
return drm_pci_init(driver, pdriver);
}
@@ -587,6 +603,7 @@ static void __exit amdgpu_exit(void)
drm_pci_exit(driver, pdriver);
amdgpu_unregister_atpx_handler();
amdgpu_sync_fini();
+ amdgpu_fence_slab_fini();
}
module_init(amdgpu_init);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index ba9c04283d01..d1558768cfb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -55,8 +55,21 @@ struct amdgpu_fence {
};
static struct kmem_cache *amdgpu_fence_slab;
-static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);
+int amdgpu_fence_slab_init(void)
+{
+ amdgpu_fence_slab = kmem_cache_create(
+ "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!amdgpu_fence_slab)
+ return -ENOMEM;
+ return 0;
+}
+
+void amdgpu_fence_slab_fini(void)
+{
+ kmem_cache_destroy(amdgpu_fence_slab);
+}
/*
* Cast helper
*/
@@ -396,13 +409,6 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
*/
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
- if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
- amdgpu_fence_slab = kmem_cache_create(
- "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!amdgpu_fence_slab)
- return -ENOMEM;
- }
if (amdgpu_debugfs_fence_init(adev))
dev_err(adev->dev, "fence debugfs file creation failed\n");
@@ -437,13 +443,10 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
amd_sched_fini(&ring->sched);
del_timer_sync(&ring->fence_drv.fallback_timer);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
- fence_put(ring->fence_drv.fences[i]);
+ fence_put(ring->fence_drv.fences[j]);
kfree(ring->fence_drv.fences);
ring->fence_drv.initialized = false;
}
-
- if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
- kmem_cache_destroy(amdgpu_fence_slab);
}
/**
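Moving the fence slab into amdgpu_init()/amdgpu_exit() (see the amdgpu_drv.c hunks above) replaces the racy per-device atomic refcount with the ordinary module-lifetime pattern. A generic sketch of that pattern, assuming only the standard slab API:

#include <linux/module.h>
#include <linux/slab.h>

struct my_obj { u64 seqno; };

static struct kmem_cache *my_slab;

static int __init my_mod_init(void)
{
	my_slab = kmem_cache_create("my_objs", sizeof(struct my_obj),
				    0, SLAB_HWCACHE_ALIGN, NULL);
	return my_slab ? 0 : -ENOMEM;
}

static void __exit my_mod_exit(void)
{
	kmem_cache_destroy(my_slab);	/* all objects freed by now */
}

module_init(my_mod_init);
module_exit(my_mod_exit);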
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ea708cb94862..9f36ed30ba11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -53,6 +53,18 @@
/* Special value that no flush is necessary */
#define AMDGPU_VM_NO_FLUSH (~0ll)
+/* Local structure. Encapsulates some VM table update parameters to reduce
+ * the number of function parameters
+ */
+struct amdgpu_vm_update_params {
+ /* address where to copy page table entries from */
+ uint64_t src;
+ /* DMA addresses to use for mapping */
+ dma_addr_t *pages_addr;
+ /* indirect buffer to fill with commands */
+ struct amdgpu_ib *ib;
+};
+
/**
* amdgpu_vm_num_pde - return the number of page directory entries
*
@@ -389,9 +401,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
* amdgpu_vm_update_pages - helper to call the right asic function
*
* @adev: amdgpu_device pointer
- * @src: address where to copy page table entries from
- * @pages_addr: DMA addresses to use for mapping
- * @ib: indirect buffer to fill with commands
+ * @vm_update_params: see amdgpu_vm_update_params definition
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
@@ -402,29 +412,29 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
* to setup the page table using the DMA.
*/
static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
- uint64_t src,
- dma_addr_t *pages_addr,
- struct amdgpu_ib *ib,
+ struct amdgpu_vm_update_params
+ *vm_update_params,
uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr,
uint32_t flags)
{
trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
- if (src) {
- src += (addr >> 12) * 8;
- amdgpu_vm_copy_pte(adev, ib, pe, src, count);
+ if (vm_update_params->src) {
+ amdgpu_vm_copy_pte(adev, vm_update_params->ib,
+ pe, (vm_update_params->src + (addr >> 12) * 8), count);
- } else if (pages_addr) {
- amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
- count, incr, flags);
+ } else if (vm_update_params->pages_addr) {
+ amdgpu_vm_write_pte(adev, vm_update_params->ib,
+ vm_update_params->pages_addr,
+ pe, addr, count, incr, flags);
} else if (count < 3) {
- amdgpu_vm_write_pte(adev, ib, NULL, pe, addr,
+ amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr,
count, incr, flags);
} else {
- amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
+ amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr,
count, incr, flags);
}
}
@@ -444,10 +454,12 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_ring *ring;
struct fence *fence = NULL;
struct amdgpu_job *job;
+ struct amdgpu_vm_update_params vm_update_params;
unsigned entries;
uint64_t addr;
int r;
+ memset(&vm_update_params, 0, sizeof(vm_update_params));
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
r = reservation_object_reserve_shared(bo->tbo.resv);
@@ -465,7 +477,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (r)
goto error;
- amdgpu_vm_update_pages(adev, 0, NULL, &job->ibs[0], addr, 0, entries,
+ vm_update_params.ib = &job->ibs[0];
+ amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries,
0, 0);
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
@@ -538,11 +551,12 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
uint64_t last_pde = ~0, last_pt = ~0;
unsigned count = 0, pt_idx, ndw;
struct amdgpu_job *job;
- struct amdgpu_ib *ib;
+ struct amdgpu_vm_update_params vm_update_params;
struct fence *fence = NULL;
int r;
+ memset(&vm_update_params, 0, sizeof(vm_update_params));
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
/* padding, etc. */
@@ -555,7 +569,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
if (r)
return r;
- ib = &job->ibs[0];
+ vm_update_params.ib = &job->ibs[0];
/* walk over the address space and update the page directory */
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -575,7 +589,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
((last_pt + incr * count) != pt)) {
if (count) {
- amdgpu_vm_update_pages(adev, 0, NULL, ib,
+ amdgpu_vm_update_pages(adev, &vm_update_params,
last_pde, last_pt,
count, incr,
AMDGPU_PTE_VALID);
@@ -590,14 +604,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
}
if (count)
- amdgpu_vm_update_pages(adev, 0, NULL, ib, last_pde, last_pt,
- count, incr, AMDGPU_PTE_VALID);
+ amdgpu_vm_update_pages(adev, &vm_update_params,
+ last_pde, last_pt,
+ count, incr, AMDGPU_PTE_VALID);
- if (ib->length_dw != 0) {
- amdgpu_ring_pad_ib(ring, ib);
+ if (vm_update_params.ib->length_dw != 0) {
+ amdgpu_ring_pad_ib(ring, vm_update_params.ib);
amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
AMDGPU_FENCE_OWNER_VM);
- WARN_ON(ib->length_dw > ndw);
+ WARN_ON(vm_update_params.ib->length_dw > ndw);
r = amdgpu_job_submit(job, ring, &vm->entity,
AMDGPU_FENCE_OWNER_VM, &fence);
if (r)
@@ -623,18 +638,15 @@ error_free:
* amdgpu_vm_frag_ptes - add fragment information to PTEs
*
* @adev: amdgpu_device pointer
- * @src: address where to copy page table entries from
- * @pages_addr: DMA addresses to use for mapping
- * @ib: IB for the update
+ * @vm_update_params: see amdgpu_vm_update_params definition
* @pe_start: first PTE to handle
* @pe_end: last PTE to handle
* @addr: addr those PTEs should point to
* @flags: hw mapping flags
*/
static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
- uint64_t src,
- dma_addr_t *pages_addr,
- struct amdgpu_ib *ib,
+ struct amdgpu_vm_update_params
+ *vm_update_params,
uint64_t pe_start, uint64_t pe_end,
uint64_t addr, uint32_t flags)
{
@@ -671,11 +683,11 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
return;
/* system pages are not contiguous */
- if (src || pages_addr || !(flags & AMDGPU_PTE_VALID) ||
- (frag_start >= frag_end)) {
+ if (vm_update_params->src || vm_update_params->pages_addr ||
+ !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
count = (pe_end - pe_start) / 8;
- amdgpu_vm_update_pages(adev, src, pages_addr, ib, pe_start,
+ amdgpu_vm_update_pages(adev, vm_update_params, pe_start,
addr, count, AMDGPU_GPU_PAGE_SIZE,
flags);
return;
@@ -684,21 +696,21 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
/* handle the 4K area at the beginning */
if (pe_start != frag_start) {
count = (frag_start - pe_start) / 8;
- amdgpu_vm_update_pages(adev, 0, NULL, ib, pe_start, addr,
+ amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr,
count, AMDGPU_GPU_PAGE_SIZE, flags);
addr += AMDGPU_GPU_PAGE_SIZE * count;
}
/* handle the area in the middle */
count = (frag_end - frag_start) / 8;
- amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_start, addr, count,
+ amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count,
AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
/* handle the 4K area at the end */
if (frag_end != pe_end) {
addr += AMDGPU_GPU_PAGE_SIZE * count;
count = (pe_end - frag_end) / 8;
- amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_end, addr,
+ amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr,
count, AMDGPU_GPU_PAGE_SIZE, flags);
}
}
@@ -707,8 +719,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
* amdgpu_vm_update_ptes - make sure that page tables are valid
*
* @adev: amdgpu_device pointer
- * @src: address where to copy page table entries from
- * @pages_addr: DMA addresses to use for mapping
+ * @vm_update_params: see amdgpu_vm_update_params definition
* @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
@@ -718,10 +729,9 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
* Update the page tables in the range @start - @end.
*/
static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
- uint64_t src,
- dma_addr_t *pages_addr,
+ struct amdgpu_vm_update_params
+ *vm_update_params,
struct amdgpu_vm *vm,
- struct amdgpu_ib *ib,
uint64_t start, uint64_t end,
uint64_t dst, uint32_t flags)
{
@@ -747,7 +757,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
if (last_pe_end != pe_start) {
- amdgpu_vm_frag_ptes(adev, src, pages_addr, ib,
+ amdgpu_vm_frag_ptes(adev, vm_update_params,
last_pe_start, last_pe_end,
last_dst, flags);
@@ -762,7 +772,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
dst += nptes * AMDGPU_GPU_PAGE_SIZE;
}
- amdgpu_vm_frag_ptes(adev, src, pages_addr, ib, last_pe_start,
+ amdgpu_vm_frag_ptes(adev, vm_update_params, last_pe_start,
last_pe_end, last_dst, flags);
}
@@ -794,11 +804,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
void *owner = AMDGPU_FENCE_OWNER_VM;
unsigned nptes, ncmds, ndw;
struct amdgpu_job *job;
- struct amdgpu_ib *ib;
+ struct amdgpu_vm_update_params vm_update_params;
struct fence *f = NULL;
int r;
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+ memset(&vm_update_params, 0, sizeof(vm_update_params));
+ vm_update_params.src = src;
+ vm_update_params.pages_addr = pages_addr;
/* sync to everything on unmapping */
if (!(flags & AMDGPU_PTE_VALID))
@@ -815,11 +828,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
/* padding, etc. */
ndw = 64;
- if (src) {
+ if (vm_update_params.src) {
/* only copy commands needed */
ndw += ncmds * 7;
- } else if (pages_addr) {
+ } else if (vm_update_params.pages_addr) {
/* header for write data commands */
ndw += ncmds * 4;
@@ -838,7 +851,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
return r;
- ib = &job->ibs[0];
+ vm_update_params.ib = &job->ibs[0];
r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
owner);
@@ -849,11 +862,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- amdgpu_vm_update_ptes(adev, src, pages_addr, vm, ib, start,
+ amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
last + 1, addr, flags);
- amdgpu_ring_pad_ib(ring, ib);
- WARN_ON(ib->length_dw > ndw);
+ amdgpu_ring_pad_ib(ring, vm_update_params.ib);
+ WARN_ON(vm_update_params.ib->length_dw > ndw);
r = amdgpu_job_submit(job, ring, &vm->entity,
AMDGPU_FENCE_OWNER_VM, &f);
if (r)
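
The amdgpu_vm.c changes above are a parameter-object refactor: the
(src, pages_addr, ib) triple that used to ride through every call chain
now travels in one struct. A caller sketch matching the pattern used in
the patch (hypothetical surrounding code):

	struct amdgpu_vm_update_params params;

	/* zeroing gives the common defaults: src == 0, pages_addr == NULL */
	memset(&params, 0, sizeof(params));
	params.src = src;		/* GPU address to copy PTEs from, may be 0 */
	params.pages_addr = pages_addr;	/* DMA addresses for mapping, may be NULL */
	params.ib = &job->ibs[0];	/* indirect buffer that receives commands */

	amdgpu_vm_update_ptes(adev, &params, vm, start, last + 1, addr, flags);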
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 845c21b1b2ee..be3d6f79a864 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -103,7 +103,6 @@ static void cik_ih_disable_interrupts(struct amdgpu_device *adev)
*/
static int cik_ih_irq_init(struct amdgpu_device *adev)
{
- int ret = 0;
int rb_bufsz;
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
u64 wptr_off;
@@ -156,7 +155,7 @@ static int cik_ih_irq_init(struct amdgpu_device *adev)
/* enable irqs */
cik_ih_enable_interrupts(adev);
- return ret;
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index fa4449e126e6..933e425a8154 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1579,7 +1579,6 @@ static int cz_dpm_update_sclk_limit(struct amdgpu_device *adev)
static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev)
{
- int ret = 0;
struct cz_power_info *pi = cz_get_pi(adev);
if (pi->caps_sclk_ds) {
@@ -1588,20 +1587,19 @@ static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev)
CZ_MIN_DEEP_SLEEP_SCLK);
}
- return ret;
+ return 0;
}
/* ?? without dal support, is this still needed in setpowerstate list*/
static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev)
{
- int ret = 0;
struct cz_power_info *pi = cz_get_pi(adev);
cz_send_msg_to_smc_with_parameter(adev,
PPSMC_MSG_SetWatermarkFrequency,
pi->sclk_dpm.soft_max_clk);
- return ret;
+ return 0;
}
static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev)
@@ -1636,7 +1634,6 @@ static void cz_dpm_nbdpm_lm_pstate_enable(struct amdgpu_device *adev,
static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
{
- int ret = 0;
struct cz_power_info *pi = cz_get_pi(adev);
struct cz_ps *ps = &pi->requested_ps;
@@ -1647,21 +1644,19 @@ static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
cz_dpm_nbdpm_lm_pstate_enable(adev, true);
}
- return ret;
+ return 0;
}
/* with dpm enabled */
static int cz_dpm_set_power_state(struct amdgpu_device *adev)
{
- int ret = 0;
-
cz_dpm_update_sclk_limit(adev);
cz_dpm_set_deep_sleep_sclk_threshold(adev);
cz_dpm_set_watermark_threshold(adev);
cz_dpm_enable_nbdpm(adev);
cz_dpm_update_low_memory_pstate(adev);
- return ret;
+ return 0;
}
static void cz_dpm_post_set_power_state(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 863cb16f6126..3d23a70b6432 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -103,7 +103,6 @@ static void cz_ih_disable_interrupts(struct amdgpu_device *adev)
*/
static int cz_ih_irq_init(struct amdgpu_device *adev)
{
- int ret = 0;
int rb_bufsz;
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
u64 wptr_off;
@@ -157,7 +156,7 @@ static int cz_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
cz_ih_enable_interrupts(adev);
- return ret;
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index c11b6007af80..af26ec0bc59d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -137,7 +137,7 @@ static const u32 polaris11_golden_settings_a11[] =
mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
mmFBC_DEBUG1, 0xffffffff, 0x00000008,
- mmFBC_MISC, 0x9f313fff, 0x14300008,
+ mmFBC_MISC, 0x9f313fff, 0x14302008,
mmHDMI_CONTROL, 0x313f031f, 0x00000011,
};
@@ -145,7 +145,7 @@ static const u32 polaris10_golden_settings_a11[] =
{
mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
- mmFBC_MISC, 0x9f313fff, 0x14300008,
+ mmFBC_MISC, 0x9f313fff, 0x14302008,
mmHDMI_CONTROL, 0x313f031f, 0x00000011,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 92647fbf5b8b..f19bab68fd83 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -267,10 +267,13 @@ static const u32 tonga_mgcg_cgcg_init[] =
static const u32 golden_settings_polaris11_a11[] =
{
+ mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+ mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
+ mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
mmSQ_CONFIG, 0x07f80000, 0x07180000,
@@ -284,8 +287,6 @@ static const u32 golden_settings_polaris11_a11[] =
static const u32 polaris11_golden_common_all[] =
{
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
- mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
- mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
@@ -296,6 +297,7 @@ static const u32 polaris11_golden_common_all[] =
static const u32 golden_settings_polaris10_a11[] =
{
mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
+ mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -5725,6 +5727,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
EOP_TC_ACTION_EN |
+ EOP_TC_WB_ACTION_EN |
EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
EVENT_INDEX(5)));
amdgpu_ring_write(ring, addr & 0xfffffffc);
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 39bfc52d0b42..3b8906ce3511 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -103,7 +103,6 @@ static void iceland_ih_disable_interrupts(struct amdgpu_device *adev)
*/
static int iceland_ih_irq_init(struct amdgpu_device *adev)
{
- int ret = 0;
int rb_bufsz;
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
u64 wptr_off;
@@ -157,7 +156,7 @@ static int iceland_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
iceland_ih_enable_interrupts(adev);
- return ret;
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index b45f54714574..a789a863d677 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2252,7 +2252,7 @@ static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
if (pi->caps_stable_p_state) {
stable_p_state_sclk = (max_limits->sclk * 75) / 100;
- for (i = table->count - 1; i >= 0; i++) {
+ for (i = table->count - 1; i >= 0; i--) {
if (stable_p_state_sclk >= table->entries[i].clk) {
stable_p_state_sclk = table->entries[i].clk;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 063f08a9957a..31d99b0010f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -109,10 +109,12 @@ static const u32 fiji_mgcg_cgcg_init[] =
static const u32 golden_settings_polaris11_a11[] =
{
mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
+ mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
+ mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index f036af937fbc..c92055805a45 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -99,7 +99,6 @@ static void tonga_ih_disable_interrupts(struct amdgpu_device *adev)
*/
static int tonga_ih_irq_init(struct amdgpu_device *adev)
{
- int ret = 0;
int rb_bufsz;
u32 interrupt_cntl, ih_rb_cntl, ih_doorbell_rtpr;
u64 wptr_off;
@@ -165,7 +164,7 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
tonga_ih_enable_interrupts(adev);
- return ret;
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
index c94f9faa220a..24a16e49b571 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -3573,46 +3573,11 @@ static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr)
return 0;
}
-static void fiji_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- struct phm_clock_voltage_dependency_table *table =
- table_info->vddc_dep_on_dal_pwrl;
- struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
- enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
- uint32_t req_vddc = 0, req_volt, i;
-
- if (!table && !(dal_power_level >= PP_DAL_POWERLEVEL_ULTRALOW &&
- dal_power_level <= PP_DAL_POWERLEVEL_PERFORMANCE))
- return;
-
- for (i= 0; i < table->count; i++) {
- if (dal_power_level == table->entries[i].clk) {
- req_vddc = table->entries[i].v;
- break;
- }
- }
-
- vddc_table = table_info->vdd_dep_on_sclk;
- for (i= 0; i < vddc_table->count; i++) {
- if (req_vddc <= vddc_table->entries[i].vddc) {
- req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE)
- << VDDC_SHIFT;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_VddC_Request, req_volt);
- return;
- }
- }
- printk(KERN_ERR "DAL requested level can not"
- " found a available voltage in VDDC DPM Table \n");
-}
-
static int fiji_upload_dpmlevel_enable_mask(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- fiji_apply_dal_min_voltage_request(hwmgr);
+ phm_apply_dal_min_voltage_request(hwmgr);
if (!data->sclk_dpm_key_disabled) {
if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
@@ -4349,7 +4314,7 @@ static int fiji_populate_and_upload_sclk_mclk_dpm_levels(
if (data->need_update_smu7_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
- result = fiji_populate_all_memory_levels(hwmgr);
+ result = fiji_populate_all_graphic_levels(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
return result);
@@ -5109,11 +5074,11 @@ static int fiji_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
if (!data->soft_pp_table) {
- data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+ data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
+ hwmgr->soft_pp_table_size,
+ GFP_KERNEL);
if (!data->soft_pp_table)
return -ENOMEM;
- memcpy(data->soft_pp_table, hwmgr->soft_pp_table,
- hwmgr->soft_pp_table_size);
}
*table = (char *)&data->soft_pp_table;
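
The soft_pp_table hunk above (repeated for Polaris and Tonga further
down) folds an allocate-then-copy pair into a single kmemdup() call; the
error handling is unchanged. Side by side, as a sketch:

	/* before: two steps, and kzalloc()'s zeroing is immediately overwritten */
	data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
	if (!data->soft_pp_table)
		return -ENOMEM;
	memcpy(data->soft_pp_table, hwmgr->soft_pp_table,
	       hwmgr->soft_pp_table_size);

	/* after: one call that allocates and copies */
	data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
				      hwmgr->soft_pp_table_size, GFP_KERNEL);
	if (!data->soft_pp_table)
		return -ENOMEM;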
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 7d69ed635bc2..1c48917da3cf 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -30,6 +30,9 @@
#include "pppcielanes.h"
#include "pp_debug.h"
#include "ppatomctrl.h"
+#include "ppsmc.h"
+
+#define VOLTAGE_SCALE 4
extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
@@ -566,3 +569,38 @@ uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
return level;
}
+
+void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ struct phm_clock_voltage_dependency_table *table =
+ table_info->vddc_dep_on_dal_pwrl;
+ struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
+ enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
+ uint32_t req_vddc = 0, req_volt, i;
+
+ if (!table || table->count <= 0
+ || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
+ || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
+ return;
+
+ for (i = 0; i < table->count; i++) {
+ if (dal_power_level == table->entries[i].clk) {
+ req_vddc = table->entries[i].v;
+ break;
+ }
+ }
+
+ vddc_table = table_info->vdd_dep_on_sclk;
+ for (i = 0; i < vddc_table->count; i++) {
+ if (req_vddc <= vddc_table->entries[i].vddc) {
+ req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_VddC_Request, req_volt);
+ return;
+ }
+ }
+ printk(KERN_ERR "DAL requested level can not"
+ " found a available voltage in VDDC DPM Table \n");
+}
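
Besides deduplicating the Fiji and Polaris copies, the consolidated
helper above repairs the early-return guard (it also drops the extra
VDDC_SHIFT scaling). The deleted versions bailed out only when the table
pointer was NULL *and* the power level was out of range, so a NULL table
with an in-range level was still dereferenced. The guard change, reduced:

	/* old: both conditions must hold, so !table alone does not return */
	if (!table && !(dal_power_level >= PP_DAL_POWERLEVEL_ULTRALOW &&
			dal_power_level <= PP_DAL_POWERLEVEL_PERFORMANCE))
		return;

	/* new: any invalid input returns early */
	if (!table || table->count <= 0
	    || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
	    || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
		return;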
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
index 93768fa1dcdc..aa6be033f21b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
@@ -189,41 +189,6 @@ int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
return decode_pcie_lane_width(link_width);
}
-void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- struct phm_clock_voltage_dependency_table *table =
- table_info->vddc_dep_on_dal_pwrl;
- struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
- enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
- uint32_t req_vddc = 0, req_volt, i;
-
- if (!table && !(dal_power_level >= PP_DAL_POWERLEVEL_ULTRALOW &&
- dal_power_level <= PP_DAL_POWERLEVEL_PERFORMANCE))
- return;
-
- for (i = 0; i < table->count; i++) {
- if (dal_power_level == table->entries[i].clk) {
- req_vddc = table->entries[i].v;
- break;
- }
- }
-
- vddc_table = table_info->vdd_dep_on_sclk;
- for (i = 0; i < vddc_table->count; i++) {
- if (req_vddc <= vddc_table->entries[i].vddc) {
- req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE)
- << VDDC_SHIFT;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_VddC_Request, req_volt);
- return;
- }
- }
- printk(KERN_ERR "DAL requested level can not"
- " found a available voltage in VDDC DPM Table \n");
-}
-
/**
* Enable voltage control
*
@@ -2091,7 +2056,7 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
"Failed to populate Clock Stretcher Data Table!",
return result);
}
-
+ table->CurrSclkPllRange = 0xff;
table->GraphicsVoltageChangeEnable = 1;
table->GraphicsThermThrottleEnable = 1;
table->GraphicsInterval = 1;
@@ -2184,6 +2149,7 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
@@ -4760,11 +4726,11 @@ static int polaris10_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
if (!data->soft_pp_table) {
- data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+ data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
+ hwmgr->soft_pp_table_size,
+ GFP_KERNEL);
if (!data->soft_pp_table)
return -ENOMEM;
- memcpy(data->soft_pp_table, hwmgr->soft_pp_table,
- hwmgr->soft_pp_table_size);
}
*table = (char *)&data->soft_pp_table;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index 1faad92b50d3..16fed487973b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -5331,7 +5331,7 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
(data->need_update_smu7_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
PP_ASSERT_WITH_CODE(
- true == tonga_is_dpm_running(hwmgr),
+ 0 == tonga_is_dpm_running(hwmgr),
"Trying to freeze SCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(
@@ -5344,7 +5344,7 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->mclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table &
DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(true == tonga_is_dpm_running(hwmgr),
+ PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
"Trying to freeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(
@@ -5445,7 +5445,7 @@ static int tonga_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr
}
if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
- result = tonga_populate_all_memory_levels(hwmgr);
+ result = tonga_populate_all_graphic_levels(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
return result);
@@ -5647,7 +5647,7 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
(data->need_update_smu7_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(true == tonga_is_dpm_running(hwmgr),
+ PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
"Trying to Unfreeze SCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(
@@ -5661,7 +5661,7 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
PP_ASSERT_WITH_CODE(
- true == tonga_is_dpm_running(hwmgr),
+ 0 == tonga_is_dpm_running(hwmgr),
"Trying to Unfreeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(
@@ -6056,11 +6056,11 @@ static int tonga_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
if (!data->soft_pp_table) {
- data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+ data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
+ hwmgr->soft_pp_table_size,
+ GFP_KERNEL);
if (!data->soft_pp_table)
return -ENOMEM;
- memcpy(data->soft_pp_table, hwmgr->soft_pp_table,
- hwmgr->soft_pp_table_size);
}
*table = (char *)&data->soft_pp_table;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index fd4ce7aaeee9..28f571449495 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -673,7 +673,7 @@ extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_volta
extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr);
extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
-
+extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
index da18f44fd1c8..87c023e518ab 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
@@ -639,7 +639,7 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
cz_smu->driver_buffer_length = 0;
- for (i = 0; i < sizeof(firmware_list)/sizeof(*firmware_list); i++) {
+ for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
firmware_type = cz_translate_firmware_enum_to_arg(smumgr,
firmware_list[i]);
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
new file mode 100644
index 000000000000..a7b2a751f6fe
--- /dev/null
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <drm/drm_dp_dual_mode_helper.h>
+#include <drm/drmP.h>
+
+/**
+ * DOC: dp dual mode helpers
+ *
+ * Helper functions to deal with DP dual mode (aka. DP++) adaptors.
+ *
+ * Type 1:
+ * Adaptor registers (if any) and the sink DDC bus may be accessed via I2C.
+ *
+ * Type 2:
+ * Adaptor registers and sink DDC bus can be accessed either via I2C or
+ * I2C-over-AUX. Source devices may choose to implement either of these
+ * access methods.
+ */
+
+#define DP_DUAL_MODE_SLAVE_ADDRESS 0x40
+
+/**
+ * drm_dp_dual_mode_read - Read from the DP dual mode adaptor register(s)
+ * @adapter: I2C adapter for the DDC bus
+ * @offset: register offset
+ * @buffer: buffer for return data
+ * @size: size of the buffer
+ *
+ * Reads @size bytes from the DP dual mode adaptor registers
+ * starting at @offset.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure
+ */
+ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
+ u8 offset, void *buffer, size_t size)
+{
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DP_DUAL_MODE_SLAVE_ADDRESS,
+ .flags = 0,
+ .len = 1,
+ .buf = &offset,
+ },
+ {
+ .addr = DP_DUAL_MODE_SLAVE_ADDRESS,
+ .flags = I2C_M_RD,
+ .len = size,
+ .buf = buffer,
+ },
+ };
+ int ret;
+
+ ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ return ret;
+ if (ret != ARRAY_SIZE(msgs))
+ return -EPROTO;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dual_mode_read);
+
+/**
+ * drm_dp_dual_mode_write - Write to the DP dual mode adaptor register(s)
+ * @adapter: I2C adapter for the DDC bus
+ * @offset: register offset
+ * @buffer: buffer for write data
+ * @size: size of the buffer
+ *
+ * Writes @size bytes to the DP dual mode adaptor registers
+ * starting at @offset.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure
+ */
+ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
+ u8 offset, const void *buffer, size_t size)
+{
+ struct i2c_msg msg = {
+ .addr = DP_DUAL_MODE_SLAVE_ADDRESS,
+ .flags = 0,
+ .len = 1 + size,
+ .buf = NULL,
+ };
+ void *data;
+ int ret;
+
+ data = kmalloc(msg.len, GFP_TEMPORARY);
+ if (!data)
+ return -ENOMEM;
+
+ msg.buf = data;
+
+ memcpy(data, &offset, 1);
+ memcpy(data + 1, buffer, size);
+
+ ret = i2c_transfer(adapter, &msg, 1);
+
+ kfree(data);
+
+ if (ret < 0)
+ return ret;
+ if (ret != 1)
+ return -EPROTO;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dual_mode_write);
+
+static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
+{
+ static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] =
+ "DP-HDMI ADAPTOR\x04";
+
+ return memcmp(hdmi_id, dp_dual_mode_hdmi_id,
+ sizeof(dp_dual_mode_hdmi_id)) == 0;
+}
+
+static bool is_type2_adaptor(uint8_t adaptor_id)
+{
+ return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
+ DP_DUAL_MODE_REV_TYPE2);
+}
+
+/**
+ * drm_dp_dual_mode_detect - Identify the DP dual mode adaptor
+ * @adapter: I2C adapter for the DDC bus
+ *
+ * Attempt to identify the type of the DP dual mode adaptor used.
+ *
+ * Note that when the answer is @DRM_DP_DUAL_MODE_UNKNOWN it's not
+ * certain whether we're dealing with a native HDMI port or
+ * a type 1 DVI dual mode adaptor. The driver will have to use
+ * some other hardware/driver specific mechanism to make that
+ * distinction.
+ *
+ * Returns:
+ * The type of the DP dual mode adaptor used
+ */
+enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
+{
+ char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = {};
+ uint8_t adaptor_id = 0x00;
+ ssize_t ret;
+
+ /*
+	 * Let's see if the adaptor is there by reading the
+	 * HDMI ID registers.
+	 *
+	 * Note that type 1 DVI adaptors are not required to implement
+ * any registers, and that presents a problem for detection.
+ * If the i2c transfer is nacked, we may or may not be dealing
+ * with a type 1 DVI adaptor. Some other mechanism of detecting
+ * the presence of the adaptor is required. One way would be
+	 * to check the state of the CONFIG1 pin. Another method would
+ * simply require the driver to know whether the port is a DP++
+ * port or a native HDMI port. Both of these methods are entirely
+ * hardware/driver specific so we can't deal with them here.
+ */
+ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID,
+ hdmi_id, sizeof(hdmi_id));
+ if (ret)
+ return DRM_DP_DUAL_MODE_UNKNOWN;
+
+ /*
+ * Sigh. Some (maybe all?) type 1 adaptors are broken and ack
+ * the offset but ignore it, and instead they just always return
+ * data from the start of the HDMI ID buffer. So for a broken
+ * type 1 HDMI adaptor a single byte read will always give us
+ * 0x44, and for a type 1 DVI adaptor it should give 0x00
+ * (assuming it implements any registers). Fortunately neither
+ * of those values will match the type 2 signature of the
+ * DP_DUAL_MODE_ADAPTOR_ID register so we can proceed with
+ * the type 2 adaptor detection safely even in the presence
+ * of broken type 1 adaptors.
+ */
+ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
+ &adaptor_id, sizeof(adaptor_id));
+ if (ret == 0) {
+ if (is_type2_adaptor(adaptor_id)) {
+ if (is_hdmi_adaptor(hdmi_id))
+ return DRM_DP_DUAL_MODE_TYPE2_HDMI;
+ else
+ return DRM_DP_DUAL_MODE_TYPE2_DVI;
+ }
+ }
+
+ if (is_hdmi_adaptor(hdmi_id))
+ return DRM_DP_DUAL_MODE_TYPE1_HDMI;
+ else
+ return DRM_DP_DUAL_MODE_TYPE1_DVI;
+}
+EXPORT_SYMBOL(drm_dp_dual_mode_detect);
+
+/**
+ * drm_dp_dual_mode_max_tmds_clock - Max TMDS clock for DP dual mode adaptor
+ * @type: DP dual mode adaptor type
+ * @adapter: I2C adapter for the DDC bus
+ *
+ * Determine the max TMDS clock the adaptor supports based on the
+ * type of the dual mode adaptor and the DP_DUAL_MODE_MAX_TMDS_CLOCK
+ * register (on type2 adaptors). As some type 1 adaptors have
+ * problems with registers (see comments in drm_dp_dual_mode_detect())
+ * we don't read the register on those, instead we simply assume
+ * a 165 MHz limit based on the specification.
+ *
+ * Returns:
+ * Maximum supported TMDS clock rate for the DP dual mode adaptor in kHz.
+ */
+int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type,
+ struct i2c_adapter *adapter)
+{
+ uint8_t max_tmds_clock;
+ ssize_t ret;
+
+ /* native HDMI so no limit */
+ if (type == DRM_DP_DUAL_MODE_NONE)
+ return 0;
+
+ /*
+	 * Type 1 adaptors are limited to 165 MHz
+	 * Type 2 adaptors can tell us their limit
+ */
+ if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
+ return 165000;
+
+ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_MAX_TMDS_CLOCK,
+ &max_tmds_clock, sizeof(max_tmds_clock));
+ if (ret || max_tmds_clock == 0x00 || max_tmds_clock == 0xff) {
+ DRM_DEBUG_KMS("Failed to query max TMDS clock\n");
+ return 165000;
+ }
+
+ return max_tmds_clock * 5000 / 2;
+}
+EXPORT_SYMBOL(drm_dp_dual_mode_max_tmds_clock);
+
+/**
+ * drm_dp_dual_mode_get_tmds_output - Get the state of the TMDS output buffers in the DP dual mode adaptor
+ * @type: DP dual mode adaptor type
+ * @adapter: I2C adapter for the DDC bus
+ * @enabled: current state of the TMDS output buffers
+ *
+ * Get the state of the TMDS output buffers in the adaptor. For
+ * type2 adaptors this is queried from the DP_DUAL_MODE_TMDS_OEN
+ * register. As some type 1 adaptors have problems with registers
+ * (see comments in drm_dp_dual_mode_detect()) we don't read the
+ * register on those, instead we simply assume that the buffers
+ * are always enabled.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure
+ */
+int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type,
+ struct i2c_adapter *adapter,
+ bool *enabled)
+{
+ uint8_t tmds_oen;
+ ssize_t ret;
+
+ if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) {
+ *enabled = true;
+ return 0;
+ }
+
+ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
+ &tmds_oen, sizeof(tmds_oen));
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to query state of TMDS output buffers\n");
+ return ret;
+ }
+
+ *enabled = !(tmds_oen & DP_DUAL_MODE_TMDS_DISABLE);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dual_mode_get_tmds_output);
+
+/**
+ * drm_dp_dual_mode_set_tmds_output - Enable/disable TMDS output buffers in the DP dual mode adaptor
+ * @type: DP dual mode adaptor type
+ * @adapter: I2C adapter for the DDC bus
+ * @enable: enable (as opposed to disable) the TMDS output buffers
+ *
+ * Set the state of the TMDS output buffers in the adaptor. For
+ * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. As
+ * some type 1 adaptors have problems with registers (see comments
+ * in drm_dp_dual_mode_detect()) we avoid touching the register,
+ * making this function a no-op on type 1 adaptors.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure
+ */
+int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
+ struct i2c_adapter *adapter, bool enable)
+{
+ uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
+ ssize_t ret;
+
+ if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
+ return 0;
+
+ ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
+ &tmds_oen, sizeof(tmds_oen));
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n",
+ enable ? "enable" : "disable");
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
+
+/**
+ * drm_dp_get_dual_mode_type_name - Get the name of the DP dual mode adaptor type as a string
+ * @type: DP dual mode adaptor type
+ *
+ * Returns:
+ * String representation of the DP dual mode adaptor type
+ */
+const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type)
+{
+ switch (type) {
+ case DRM_DP_DUAL_MODE_NONE:
+ return "none";
+ case DRM_DP_DUAL_MODE_TYPE1_DVI:
+ return "type 1 DVI";
+ case DRM_DP_DUAL_MODE_TYPE1_HDMI:
+ return "type 1 HDMI";
+ case DRM_DP_DUAL_MODE_TYPE2_DVI:
+ return "type 2 DVI";
+ case DRM_DP_DUAL_MODE_TYPE2_HDMI:
+ return "type 2 HDMI";
+ default:
+ WARN_ON(type != DRM_DP_DUAL_MODE_UNKNOWN);
+ return "unknown";
+ }
+}
+EXPORT_SYMBOL(drm_dp_get_dual_mode_type_name);
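
A minimal caller sketch for the new helpers, assuming an i2c_adapter for
the port's DDC bus is already in hand (the i915 wiring follows below):

	static void example_probe_dp_dual_mode(struct i2c_adapter *ddc)
	{
		enum drm_dp_dual_mode_type type;
		int max_tmds_clock;

		type = drm_dp_dual_mode_detect(ddc);
		if (type == DRM_DP_DUAL_MODE_UNKNOWN)
			return; /* native HDMI or a registerless type 1 DVI adaptor */

		/* 0 means no adaptor limit, otherwise a limit in kHz */
		max_tmds_clock = drm_dp_dual_mode_max_tmds_clock(type, ddc);

		DRM_DEBUG_KMS("%s adaptor, max TMDS clock %d kHz\n",
			      drm_dp_get_dual_mode_type_name(type),
			      max_tmds_clock);

		/* enable the TMDS output buffers before driving the port */
		drm_dp_dual_mode_set_tmds_output(type, ddc, true);
	}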
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 15615fb9bde6..b3198fcd0536 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1183,6 +1183,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (ret)
return ret;
+ ret = i915_ggtt_enable_hw(dev);
+ if (ret) {
+ DRM_ERROR("failed to enable GGTT\n");
+ goto out_ggtt;
+ }
+
/* WARNING: Apparently we must kick fbdev drivers before vgacon,
* otherwise the vga fbdev driver falls over. */
ret = i915_kick_out_firmware_fb(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index d37c0a671eed..f313b4d8344f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -734,9 +734,14 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
static int i915_drm_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
disable_rpm_wakeref_asserts(dev_priv);
+ ret = i915_ggtt_enable_hw(dev);
+ if (ret)
+ DRM_ERROR("failed to re-enable GGTT\n");
+
intel_csr_ucode_resume(dev_priv);
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b87ca4fae20a..5faacc6e548d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3482,6 +3482,7 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
enum port port);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9b99490e8367..aad26851cee3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1456,7 +1456,10 @@ i915_wait_request(struct drm_i915_gem_request *req)
if (ret)
return ret;
- __i915_gem_request_retire__upto(req);
+	/* If the GPU hung, we want to keep the requests to find the guilty one. */
+ if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error))
+ __i915_gem_request_retire__upto(req);
+
return 0;
}
@@ -1513,7 +1516,8 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
else if (obj->last_write_req == req)
i915_gem_object_retire__write(obj);
- __i915_gem_request_retire__upto(req);
+ if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error))
+ __i915_gem_request_retire__upto(req);
}
/* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -4860,9 +4864,6 @@ i915_gem_init_hw(struct drm_device *dev)
struct intel_engine_cs *engine;
int ret, j;
- if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
- return -EIO;
-
/* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 0d666b3f7e9b..92acdff9dad3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3236,6 +3236,14 @@ out_gtt_cleanup:
return ret;
}
+int i915_ggtt_enable_hw(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
+ return -EIO;
+
+ return 0;
+}
+
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index d7dd3d8a8758..0008543d55f6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -514,6 +514,7 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
}
int i915_ggtt_init_hw(struct drm_device *dev);
+int i915_ggtt_enable_hw(struct drm_device *dev);
void i915_gem_init_ggtt(struct drm_device *dev);
void i915_ggtt_cleanup_hw(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index e72dd9a8d6bf..b235b6e88ead 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1578,6 +1578,42 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
return false;
}
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port)
+{
+ static const struct {
+ u16 dp, hdmi;
+ } port_mapping[] = {
+ /*
+ * Buggy VBTs may declare DP ports as having
+ * HDMI type dvo_port :( So let's check both.
+ */
+ [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
+ [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
+ [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
+ [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+ };
+ int i;
+
+ if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
+ return false;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return false;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ const union child_device_config *p_child =
+ &dev_priv->vbt.child_dev[i];
+
+ if ((p_child->common.dvo_port == port_mapping[port].dp ||
+ p_child->common.dvo_port == port_mapping[port].hdmi) &&
+ (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) ==
+ (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
+ return true;
+ }
+
+ return false;
+}
+
/**
* intel_bios_is_dsi_present - is DSI present in VBT
* @dev_priv: i915 device instance
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 3fac04602a25..01e523df363b 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1601,6 +1601,12 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
+ if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
+ }
+
intel_prepare_ddi_buffer(intel_encoder);
if (type == INTEL_OUTPUT_EDP) {
@@ -1667,6 +1673,12 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
DPLL_CTRL2_DDI_CLK_OFF(port)));
else if (INTEL_INFO(dev)->gen < 9)
I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+
+ if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
+ }
}
static void intel_enable_ddi(struct intel_encoder *intel_encoder)
@@ -2180,8 +2192,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
pipe_config->has_infoframe = true;
- break;
+ /* fall through */
case TRANS_DDI_MODE_SELECT_DVI:
+ pipe_config->lane_count = 4;
+ break;
case TRANS_DDI_MODE_SELECT_FDI:
break;
case TRANS_DDI_MODE_SELECT_DP_SST:
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 46f9be3ad5a2..2113f401f0ba 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12005,6 +12005,9 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
return ret;
}
+ } else if (dev_priv->display.compute_intermediate_wm) {
+ if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
+ pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk;
}
if (INTEL_INFO(dev)->gen >= 9) {
@@ -15990,6 +15993,9 @@ retry:
state->acquire_ctx = &ctx;
+ /* ignore any reset values/BIOS leftovers in the WM registers */
+ to_intel_atomic_state(state)->skip_intermediate_wm = true;
+
for_each_crtc_in_state(state, crtc, crtc_state, i) {
/*
* Force recalculation even if we restore
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 639bf0209c15..3ac705936b04 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -1702,9 +1702,9 @@ static const struct intel_dpll_mgr hsw_pll_mgr = {
static const struct dpll_info skl_plls[] = {
{ "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON },
- { "DPPL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 },
- { "DPPL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 },
- { "DPPL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 },
+ { "DPLL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 },
+ { "DPLL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 },
+ { "DPLL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 },
{ NULL, -1, NULL, },
};
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 5da29a02b9e3..a28b4aac1e02 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -33,6 +33,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_dp_dual_mode_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_atomic.h>
@@ -753,6 +754,10 @@ struct cxsr_latency {
struct intel_hdmi {
i915_reg_t hdmi_reg;
int ddc_bus;
+ struct {
+ enum drm_dp_dual_mode_type type;
+ int max_tmds_clock;
+ } dp_dual_mode;
bool limited_color_range;
bool color_range_auto;
bool has_hdmi_sink;
@@ -1401,6 +1406,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
+void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
/* intel_lvds.c */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 2b22bb9bb86f..366ad6c67ce4 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -46,6 +46,22 @@ static const struct {
},
};
+/* return pixels in terms of txbyteclkhs */
+static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
+ u16 burst_mode_ratio)
+{
+ return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
+ 8 * 100), lane_count);
+}
+
+/* return pixels equivalent to txbyteclkhs */
+static u16 pixels_from_txbyteclkhs(u16 clk_hs, int bpp, int lane_count,
+ u16 burst_mode_ratio)
+{
+ return DIV_ROUND_UP((clk_hs * lane_count * 8 * 100),
+ (bpp * burst_mode_ratio));
+}
+
enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
{
/* It just so happens the VBT matches register contents. */
@@ -780,10 +796,19 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode_sw;
+ struct intel_crtc *intel_crtc;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ unsigned int lane_count = intel_dsi->lane_count;
unsigned int bpp, fmt;
enum port port;
- u16 vfp, vsync, vbp;
+ u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
+ u16 hfp_sw, hsync_sw, hbp_sw;
+ u16 crtc_htotal_sw, crtc_hsync_start_sw, crtc_hsync_end_sw,
+ crtc_hblank_start_sw, crtc_hblank_end_sw;
+
+ intel_crtc = to_intel_crtc(encoder->base.crtc);
+ adjusted_mode_sw = &intel_crtc->config->base.adjusted_mode;
/*
* At least one port is active, as encoder->get_config is called only if
@@ -808,26 +833,118 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
adjusted_mode->crtc_vtotal =
I915_READ(BXT_MIPI_TRANS_VTOTAL(port));
+ hactive = adjusted_mode->crtc_hdisplay;
+ hfp = I915_READ(MIPI_HFP_COUNT(port));
+
/*
- * TODO: Retrieve hfp, hsync and hbp. Adjust them for dual link and
- * calculate hsync_start, hsync_end, htotal and hblank_end
+	 * Meaningful only for video mode non-burst with sync pulse;
+	 * can be zero for non-burst sync events and burst modes
*/
+ hsync = I915_READ(MIPI_HSYNC_PADDING_COUNT(port));
+ hbp = I915_READ(MIPI_HBP_COUNT(port));
+
+	/* horizontal values are in terms of the high speed byte clock */
+ hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hsync = pixels_from_txbyteclkhs(hsync, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hbp = pixels_from_txbyteclkhs(hbp, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+
+ if (intel_dsi->dual_link) {
+ hfp *= 2;
+ hsync *= 2;
+ hbp *= 2;
+ }
/* vertical values are in terms of lines */
vfp = I915_READ(MIPI_VFP_COUNT(port));
vsync = I915_READ(MIPI_VSYNC_PADDING_COUNT(port));
vbp = I915_READ(MIPI_VBP_COUNT(port));
+ adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp;
+ adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay;
+ adjusted_mode->crtc_hsync_end = hsync + adjusted_mode->crtc_hsync_start;
adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay;
+ adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_htotal;
- adjusted_mode->crtc_vsync_start =
- vfp + adjusted_mode->crtc_vdisplay;
- adjusted_mode->crtc_vsync_end =
- vsync + adjusted_mode->crtc_vsync_start;
+ adjusted_mode->crtc_vsync_start = vfp + adjusted_mode->crtc_vdisplay;
+ adjusted_mode->crtc_vsync_end = vsync + adjusted_mode->crtc_vsync_start;
adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay;
adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal;
-}
+ /*
+	 * On BXT DSI some horizontal timings are programmed in txbyteclkhs
+	 * rather than pixels, so retrieving them accumulates DIV_ROUND_UP
+	 * errors in the pixels <=> txbyteclkhs conversions. Here we take the
+	 * given (SW state) adjusted_mode, compute the value that would be
+	 * programmed to the port, and convert it back to a horizontal timing
+	 * in pixels. That is the expected value including the roundup errors;
+	 * if it matches the value retrieved from the port, the (HW state)
+	 * adjusted_mode's horizontal timings are corrected to match the SW
+	 * state, nullifying the errors.
+ */
+ /* Calculating the value programmed to the Port register */
+ hfp_sw = adjusted_mode_sw->crtc_hsync_start -
+ adjusted_mode_sw->crtc_hdisplay;
+ hsync_sw = adjusted_mode_sw->crtc_hsync_end -
+ adjusted_mode_sw->crtc_hsync_start;
+ hbp_sw = adjusted_mode_sw->crtc_htotal -
+ adjusted_mode_sw->crtc_hsync_end;
+
+ if (intel_dsi->dual_link) {
+ hfp_sw /= 2;
+ hsync_sw /= 2;
+ hbp_sw /= 2;
+ }
+
+ hfp_sw = txbyteclkhs(hfp_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hsync_sw = txbyteclkhs(hsync_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hbp_sw = txbyteclkhs(hbp_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+
+ /* Reverse calculating the adjusted mode parameters from port reg vals*/
+ hfp_sw = pixels_from_txbyteclkhs(hfp_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hsync_sw = pixels_from_txbyteclkhs(hsync_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hbp_sw = pixels_from_txbyteclkhs(hbp_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+
+ if (intel_dsi->dual_link) {
+ hfp_sw *= 2;
+ hsync_sw *= 2;
+ hbp_sw *= 2;
+ }
+
+ crtc_htotal_sw = adjusted_mode_sw->crtc_hdisplay + hfp_sw +
+ hsync_sw + hbp_sw;
+ crtc_hsync_start_sw = hfp_sw + adjusted_mode_sw->crtc_hdisplay;
+ crtc_hsync_end_sw = hsync_sw + crtc_hsync_start_sw;
+ crtc_hblank_start_sw = adjusted_mode_sw->crtc_hdisplay;
+ crtc_hblank_end_sw = crtc_htotal_sw;
+
+ if (adjusted_mode->crtc_htotal == crtc_htotal_sw)
+ adjusted_mode->crtc_htotal = adjusted_mode_sw->crtc_htotal;
+
+ if (adjusted_mode->crtc_hsync_start == crtc_hsync_start_sw)
+ adjusted_mode->crtc_hsync_start =
+ adjusted_mode_sw->crtc_hsync_start;
+
+ if (adjusted_mode->crtc_hsync_end == crtc_hsync_end_sw)
+ adjusted_mode->crtc_hsync_end =
+ adjusted_mode_sw->crtc_hsync_end;
+
+ if (adjusted_mode->crtc_hblank_start == crtc_hblank_start_sw)
+ adjusted_mode->crtc_hblank_start =
+ adjusted_mode_sw->crtc_hblank_start;
+
+ if (adjusted_mode->crtc_hblank_end == crtc_hblank_end_sw)
+ adjusted_mode->crtc_hblank_end =
+ adjusted_mode_sw->crtc_hblank_end;
+}
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
@@ -891,14 +1008,6 @@ static u16 txclkesc(u32 divider, unsigned int us)
}
}
-/* return pixels in terms of txbyteclkhs */
-static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
- u16 burst_mode_ratio)
-{
- return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
- 8 * 100), lane_count);
-}
-
static void set_dsi_timings(struct drm_encoder *encoder,
const struct drm_display_mode *adjusted_mode)
{
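
To see why the comparison above nullifies the rounding, take bpp = 24,
lane_count = 4, burst_mode_ratio = 100 (illustrative values): an hfp of
99 pixels programs txbyteclkhs(99) = DIV_ROUND_UP(DIV_ROUND_UP(99 * 24 *
100, 800), 4) = DIV_ROUND_UP(297, 4) = 75, and reading back gives
pixels_from_txbyteclkhs(75) = DIV_ROUND_UP(75 * 4 * 800, 2400) = 100.
Recomputing the same lossy round trip from the SW state yields the same
100, so the match lets get_config substitute the exact SW timing. A
standalone sketch of the round trip (not part of the patch):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	static unsigned txbyteclkhs(unsigned px, int bpp, int lanes, int ratio)
	{
		return DIV_ROUND_UP(DIV_ROUND_UP(px * bpp * ratio, 8 * 100), lanes);
	}

	static unsigned px_from_txbyteclkhs(unsigned clk, int bpp, int lanes, int ratio)
	{
		return DIV_ROUND_UP(clk * lanes * 8 * 100, bpp * ratio);
	}

	int main(void)
	{
		unsigned hfp_sw = 99;				/* SW state, in pixels */
		unsigned reg = txbyteclkhs(hfp_sw, 24, 4, 100);	/* programmed: 75 */
		unsigned hfp_hw = px_from_txbyteclkhs(reg, 24, 4, 100); /* reads 100 */
		unsigned expected = px_from_txbyteclkhs(
			txbyteclkhs(hfp_sw, 24, 4, 100), 24, 4, 100);

		/* hw matches the expected lossy value, so the SW timing (99) is used */
		printf("sw=%u reg=%u hw=%u match=%d\n",
		       hfp_sw, reg, hfp_hw, hfp_hw == expected);
		return 0;
	}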
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2cdab73046f8..2c3bd9c2573e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -836,6 +836,22 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
+void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
+ struct i2c_adapter *adapter =
+ intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
+
+ if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI)
+ return;
+
+ DRM_DEBUG_KMS("%s DP dual mode adaptor TMDS output\n",
+ enable ? "Enabling" : "Disabling");
+
+ drm_dp_dual_mode_set_tmds_output(hdmi->dp_dual_mode.type,
+ adapter, enable);
+}
+
static void intel_hdmi_prepare(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
@@ -845,6 +861,8 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
u32 hdmi_val;
+ intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
+
hdmi_val = SDVO_ENCODING_HDMI;
if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
hdmi_val |= HDMI_COLOR_RANGE_16_235;
@@ -953,6 +971,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
dotclock /= pipe_config->pixel_multiplier;
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+
+ pipe_config->lane_count = 4;
}
static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
@@ -1140,6 +1160,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
}
intel_hdmi->set_infoframes(&encoder->base, false, NULL);
+
+ intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
static void g4x_disable_hdmi(struct intel_encoder *encoder)
@@ -1165,27 +1187,42 @@ static void pch_post_disable_hdmi(struct intel_encoder *encoder)
intel_disable_hdmi(encoder);
}
-static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
+static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = intel_hdmi_to_dev(hdmi);
-
- if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
+ if (IS_G4X(dev_priv))
return 165000;
- else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
+ else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
return 300000;
else
return 225000;
}
+static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
+ bool respect_downstream_limits)
+{
+ struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+ int max_tmds_clock = intel_hdmi_source_max_tmds_clock(to_i915(dev));
+
+ if (respect_downstream_limits) {
+ if (hdmi->dp_dual_mode.max_tmds_clock)
+ max_tmds_clock = min(max_tmds_clock,
+ hdmi->dp_dual_mode.max_tmds_clock);
+ if (!hdmi->has_hdmi_sink)
+ max_tmds_clock = min(max_tmds_clock, 165000);
+ }
+
+ return max_tmds_clock;
+}
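
A quick standalone check of the new clamping order in hdmi_port_clock_limit(): the source limit is reduced first by the DP dual mode adaptor's reported TMDS limit, then by the 165 MHz DVI cap. A sketch with hypothetical inputs:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

/* Mirrors hdmi_port_clock_limit() above; all inputs are hypothetical. */
static int port_clock_limit(int source_max, int adaptor_max,
			    int has_hdmi_sink, int respect_downstream)
{
	int max = source_max;

	if (respect_downstream) {
		if (adaptor_max)	/* DP dual mode adaptor limit */
			max = min_int(max, adaptor_max);
		if (!has_hdmi_sink)	/* DVI sinks top out at 165 MHz */
			max = min_int(max, 165000);
	}
	return max;
}

int main(void)
{
	printf("%d\n", port_clock_limit(300000, 165000, 1, 1)); /* 165000 */
	printf("%d\n", port_clock_limit(300000, 0, 0, 1));      /* 165000 */
	printf("%d\n", port_clock_limit(300000, 0, 1, 0));      /* 300000 */
	return 0;
}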
+
static enum drm_mode_status
hdmi_port_clock_valid(struct intel_hdmi *hdmi,
- int clock, bool respect_dvi_limit)
+ int clock, bool respect_downstream_limits)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
if (clock < 25000)
return MODE_CLOCK_LOW;
- if (clock > hdmi_port_clock_limit(hdmi, respect_dvi_limit))
+ if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits))
return MODE_CLOCK_HIGH;
/* BXT DPLL can't generate 223-240 MHz */
@@ -1309,7 +1346,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
* within limits.
*/
if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
- hdmi_port_clock_valid(intel_hdmi, clock_12bpc, false) == MODE_OK &&
+ hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true) == MODE_OK &&
hdmi_12bpc_possible(pipe_config)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
@@ -1337,6 +1374,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
/* Set user selected PAR to incoming mode's member */
adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
+ pipe_config->lane_count = 4;
+
return true;
}
@@ -1349,10 +1388,57 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
intel_hdmi->has_audio = false;
intel_hdmi->rgb_quant_range_selectable = false;
+ intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE;
+ intel_hdmi->dp_dual_mode.max_tmds_clock = 0;
+
kfree(to_intel_connector(connector)->detect_edid);
to_intel_connector(connector)->detect_edid = NULL;
}
+static void
+intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
+ enum port port = hdmi_to_dig_port(hdmi)->port;
+ struct i2c_adapter *adapter =
+ intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
+ enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(adapter);
+
+ /*
+ * Type 1 DVI adaptors are not required to implement any
+ * registers, so we can't always detect their presence.
+ * Ideally we should be able to check the state of the
+ * CONFIG1 pin, but no such luck on our hardware.
+ *
+ * The only method left to us is to check the VBT to see
+ * if the port is a dual mode capable DP port. But let's
+ * only do that when we successfully read the EDID, to avoid
+ * confusing log messages about DP dual mode adaptors when
+ * there's nothing connected to the port.
+ */
+ if (type == DRM_DP_DUAL_MODE_UNKNOWN) {
+ if (has_edid &&
+ intel_bios_is_port_dp_dual_mode(dev_priv, port)) {
+ DRM_DEBUG_KMS("Assuming DP dual mode adaptor presence based on VBT\n");
+ type = DRM_DP_DUAL_MODE_TYPE1_DVI;
+ } else {
+ type = DRM_DP_DUAL_MODE_NONE;
+ }
+ }
+
+ if (type == DRM_DP_DUAL_MODE_NONE)
+ return;
+
+ hdmi->dp_dual_mode.type = type;
+ hdmi->dp_dual_mode.max_tmds_clock =
+ drm_dp_dual_mode_max_tmds_clock(type, adapter);
+
+ DRM_DEBUG_KMS("DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
+ drm_dp_get_dual_mode_type_name(type),
+ hdmi->dp_dual_mode.max_tmds_clock);
+}
+
static bool
intel_hdmi_set_edid(struct drm_connector *connector, bool force)
{
@@ -1368,6 +1454,8 @@ intel_hdmi_set_edid(struct drm_connector *connector, bool force)
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
+ intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
+
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 6179b591ee84..42eac37de047 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -721,48 +721,6 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
return ret;
}
-static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
- int bytes)
-{
- struct intel_ringbuffer *ringbuf = req->ringbuf;
- struct intel_engine_cs *engine = req->engine;
- struct drm_i915_gem_request *target;
- unsigned space;
- int ret;
-
- if (intel_ring_space(ringbuf) >= bytes)
- return 0;
-
- /* The whole point of reserving space is to not wait! */
- WARN_ON(ringbuf->reserved_in_use);
-
- list_for_each_entry(target, &engine->request_list, list) {
- /*
- * The request queue is per-engine, so can contain requests
- * from multiple ringbuffers. Here, we must ignore any that
- * aren't from the ringbuffer we're considering.
- */
- if (target->ringbuf != ringbuf)
- continue;
-
- /* Would completion of this request free enough space? */
- space = __intel_ring_space(target->postfix, ringbuf->tail,
- ringbuf->size);
- if (space >= bytes)
- break;
- }
-
- if (WARN_ON(&target->list == &engine->request_list))
- return -ENOSPC;
-
- ret = i915_wait_request(target);
- if (ret)
- return ret;
-
- ringbuf->space = space;
- return 0;
-}
-
/*
* intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
* @request: Request to advance the logical ringbuffer of.
@@ -814,92 +772,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
return 0;
}
-static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
-{
- uint32_t __iomem *virt;
- int rem = ringbuf->size - ringbuf->tail;
-
- virt = ringbuf->virtual_start + ringbuf->tail;
- rem /= 4;
- while (rem--)
- iowrite32(MI_NOOP, virt++);
-
- ringbuf->tail = 0;
- intel_ring_update_space(ringbuf);
-}
-
-static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
-{
- struct intel_ringbuffer *ringbuf = req->ringbuf;
- int remain_usable = ringbuf->effective_size - ringbuf->tail;
- int remain_actual = ringbuf->size - ringbuf->tail;
- int ret, total_bytes, wait_bytes = 0;
- bool need_wrap = false;
-
- if (ringbuf->reserved_in_use)
- total_bytes = bytes;
- else
- total_bytes = bytes + ringbuf->reserved_size;
-
- if (unlikely(bytes > remain_usable)) {
- /*
- * Not enough space for the basic request. So need to flush
- * out the remainder and then wait for base + reserved.
- */
- wait_bytes = remain_actual + total_bytes;
- need_wrap = true;
- } else {
- if (unlikely(total_bytes > remain_usable)) {
- /*
- * The base request will fit but the reserved space
- * falls off the end. So don't need an immediate wrap
- * and only need to effectively wait for the reserved
- * size space from the start of ringbuffer.
- */
- wait_bytes = remain_actual + ringbuf->reserved_size;
- } else if (total_bytes > ringbuf->space) {
- /* No wrapping required, just waiting. */
- wait_bytes = total_bytes;
- }
- }
-
- if (wait_bytes) {
- ret = logical_ring_wait_for_space(req, wait_bytes);
- if (unlikely(ret))
- return ret;
-
- if (need_wrap)
- __wrap_ring_buffer(ringbuf);
- }
-
- return 0;
-}
-
-/**
- * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
- *
- * @req: The request to start some new work for
- * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
- *
- * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
- * be wrapped, or wait a bit for the tail to be updated). This function takes care of that
- * and also preallocates a request (every workload submission is still mediated through
- * requests, same as it did with legacy ringbuffer submission).
- *
- * Return: non-zero if the ringbuffer is not ready to be written to.
- */
-int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
-{
- int ret;
-
- ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
- if (ret)
- return ret;
-
- req->ringbuf->space -= num_dwords * sizeof(uint32_t);
- return 0;
-}
-
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
{
/*
@@ -912,7 +784,7 @@ int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
*/
intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
- return intel_logical_ring_begin(request, 0);
+ return intel_ring_begin(request, 0);
}
/**
@@ -982,7 +854,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
if (engine == &dev_priv->engine[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
- ret = intel_logical_ring_begin(params->request, 4);
+ ret = intel_ring_begin(params->request, 4);
if (ret)
return ret;
@@ -1178,7 +1050,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
if (ret)
return ret;
- ret = intel_logical_ring_begin(req, w->count * 2 + 2);
+ ret = intel_ring_begin(req, w->count * 2 + 2);
if (ret)
return ret;
@@ -1669,7 +1541,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
int i, ret;
- ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2);
+ ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
if (ret)
return ret;
@@ -1716,7 +1588,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
}
- ret = intel_logical_ring_begin(req, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
@@ -1778,7 +1650,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
uint32_t cmd;
int ret;
- ret = intel_logical_ring_begin(request, 4);
+ ret = intel_ring_begin(request, 4);
if (ret)
return ret;
@@ -1846,7 +1718,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
vf_flush_wa = true;
}
- ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
+ ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6);
if (ret)
return ret;
@@ -1920,7 +1792,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
struct intel_ringbuffer *ringbuf = request->ringbuf;
int ret;
- ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+ ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
if (ret)
return ret;
@@ -1944,7 +1816,7 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
struct intel_ringbuffer *ringbuf = request->ringbuf;
int ret;
- ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
+ ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 461f1ef9b5c1..60a7385bc531 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -63,7 +63,6 @@ int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
void intel_logical_ring_stop(struct intel_engine_cs *engine);
void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
int intel_logical_rings_init(struct drm_device *dev);
-int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
/**
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 23b8545ad6b0..6ba4bf7f2a89 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -239,11 +239,9 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
- ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
- if (ret) {
- DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+ ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+ if (ret)
return ret;
- }
intel_logical_ring_emit(ringbuf,
MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
@@ -305,11 +303,9 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
- ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
- if (ret) {
- DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+ ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+ if (ret)
return ret;
- }
intel_logical_ring_emit(ringbuf,
MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 4b60005cda37..a7ef45da0a9e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3904,6 +3904,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+ memset(active, 0, sizeof(*active));
+
active->pipe_enabled = intel_crtc->active;
if (active->pipe_enabled) {
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index c3abae4bc596..a788d1e9589b 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -280,7 +280,10 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
* with the 5 or 6 idle patterns.
*/
uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- uint32_t val = 0x0;
+ uint32_t val = EDP_PSR_ENABLE;
+
+ val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
+ val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
if (IS_HASWELL(dev))
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
@@ -288,14 +291,50 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
if (dev_priv->psr.link_standby)
val |= EDP_PSR_LINK_STANDBY;
- I915_WRITE(EDP_PSR_CTL, val |
- max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
- idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
- EDP_PSR_ENABLE);
+ if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
+ val |= EDP_PSR_TP1_TIME_2500us;
+ else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
+ val |= EDP_PSR_TP1_TIME_500us;
+ else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
+ val |= EDP_PSR_TP1_TIME_100us;
+ else
+ val |= EDP_PSR_TP1_TIME_0us;
+
+ if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
+ val |= EDP_PSR_TP2_TP3_TIME_2500us;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
+ val |= EDP_PSR_TP2_TP3_TIME_500us;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
+ val |= EDP_PSR_TP2_TP3_TIME_100us;
+ else
+ val |= EDP_PSR_TP2_TP3_TIME_0us;
+
+ if (intel_dp_source_supports_hbr2(intel_dp) &&
+ drm_dp_tps3_supported(intel_dp->dpcd))
+ val |= EDP_PSR_TP1_TP3_SEL;
+ else
+ val |= EDP_PSR_TP1_TP2_SEL;
+
+ I915_WRITE(EDP_PSR_CTL, val);
+
+ if (!dev_priv->psr.psr2_support)
+ return;
+
+ /* FIXME: selective update is probably totally broken because it doesn't
+ * mesh at all with our frontbuffer tracking. And the hw alone isn't
+ * good enough. */
+ val = EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
+
+ if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
+ val |= EDP_PSR2_TP2_TIME_2500;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
+ val |= EDP_PSR2_TP2_TIME_500;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
+ val |= EDP_PSR2_TP2_TIME_100;
+ else
+ val |= EDP_PSR2_TP2_TIME_50;
- if (dev_priv->psr.psr2_support)
- I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE |
- EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100);
+ I915_WRITE(EDP_PSR2_CTL, val);
}
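
The TP1 and TP2/TP3 wakeup programming above selects the smallest register step that still covers the VBT-provided wakeup time. A sketch of just that threshold ladder (the units are assumed to follow the VBT field the driver reads):

#include <stdio.h>

/* Returns the wakeup time in microseconds that would be programmed,
 * mirroring the threshold ladder in hsw_psr_enable_source() above. */
static int tp_time_us(int vbt_wakeup_time)
{
	if (vbt_wakeup_time > 5)
		return 2500;
	else if (vbt_wakeup_time > 1)
		return 500;
	else if (vbt_wakeup_time > 0)
		return 100;
	return 0;
}

int main(void)
{
	int v;

	for (v = 0; v <= 6; v++)
		printf("vbt=%d -> %dus\n", v, tp_time_us(v));
	return 0;
}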
static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 245386e20c52..04402bb9d26b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -53,12 +53,6 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
ringbuf->tail, ringbuf->size);
}
-int intel_ring_space(struct intel_ringbuffer *ringbuf)
-{
- intel_ring_update_space(ringbuf);
- return ringbuf->space;
-}
-
bool intel_engine_stopped(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->dev->dev_private;
@@ -1309,7 +1303,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
intel_ring_emit(signaller, seqno);
intel_ring_emit(signaller, 0);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->id));
+ MI_SEMAPHORE_TARGET(waiter->hw_id));
intel_ring_emit(signaller, 0);
}
@@ -1349,7 +1343,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
intel_ring_emit(signaller, seqno);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->id));
+ MI_SEMAPHORE_TARGET(waiter->hw_id));
intel_ring_emit(signaller, 0);
}
@@ -1573,6 +1567,8 @@ pc_render_add_request(struct drm_i915_gem_request *req)
static void
gen6_seqno_barrier(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
* ACTHD) before reading the status page.
@@ -1584,9 +1580,13 @@ gen6_seqno_barrier(struct intel_engine_cs *engine)
* the write time to land, but that would incur a delay after every
* batch i.e. much more frequent than a delay when waiting for the
* interrupt (with the same net latency).
+ *
+ * Also note that to prevent whole machine hangs on gen7, we have to
+ * take the spinlock to guard against concurrent cacheline access.
*/
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ spin_lock_irq(&dev_priv->uncore.lock);
POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
+ spin_unlock_irq(&dev_priv->uncore.lock);
}
static u32
@@ -2312,51 +2312,6 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
engine->dev = NULL;
}
-static int ring_wait_for_space(struct intel_engine_cs *engine, int n)
-{
- struct intel_ringbuffer *ringbuf = engine->buffer;
- struct drm_i915_gem_request *request;
- unsigned space;
- int ret;
-
- if (intel_ring_space(ringbuf) >= n)
- return 0;
-
- /* The whole point of reserving space is to not wait! */
- WARN_ON(ringbuf->reserved_in_use);
-
- list_for_each_entry(request, &engine->request_list, list) {
- space = __intel_ring_space(request->postfix, ringbuf->tail,
- ringbuf->size);
- if (space >= n)
- break;
- }
-
- if (WARN_ON(&request->list == &engine->request_list))
- return -ENOSPC;
-
- ret = i915_wait_request(request);
- if (ret)
- return ret;
-
- ringbuf->space = space;
- return 0;
-}
-
-static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
-{
- uint32_t __iomem *virt;
- int rem = ringbuf->size - ringbuf->tail;
-
- virt = ringbuf->virtual_start + ringbuf->tail;
- rem /= 4;
- while (rem--)
- iowrite32(MI_NOOP, virt++);
-
- ringbuf->tail = 0;
- intel_ring_update_space(ringbuf);
-}
-
int intel_engine_idle(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *req;
@@ -2398,63 +2353,82 @@ int intel_ring_reserve_space(struct drm_i915_gem_request *request)
void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
{
- WARN_ON(ringbuf->reserved_size);
- WARN_ON(ringbuf->reserved_in_use);
-
+ GEM_BUG_ON(ringbuf->reserved_size);
ringbuf->reserved_size = size;
}
void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
{
- WARN_ON(ringbuf->reserved_in_use);
-
+ GEM_BUG_ON(!ringbuf->reserved_size);
ringbuf->reserved_size = 0;
- ringbuf->reserved_in_use = false;
}
void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
{
- WARN_ON(ringbuf->reserved_in_use);
-
- ringbuf->reserved_in_use = true;
- ringbuf->reserved_tail = ringbuf->tail;
+ GEM_BUG_ON(!ringbuf->reserved_size);
+ ringbuf->reserved_size = 0;
}
void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
{
- WARN_ON(!ringbuf->reserved_in_use);
- if (ringbuf->tail > ringbuf->reserved_tail) {
- WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
- "request reserved size too small: %d vs %d!\n",
- ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
- } else {
+ GEM_BUG_ON(ringbuf->reserved_size);
+}
+
+static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
+{
+ struct intel_ringbuffer *ringbuf = req->ringbuf;
+ struct intel_engine_cs *engine = req->engine;
+ struct drm_i915_gem_request *target;
+
+ intel_ring_update_space(ringbuf);
+ if (ringbuf->space >= bytes)
+ return 0;
+
+ /*
+ * Space is reserved in the ringbuffer for finalising the request,
+ * as that cannot be allowed to fail. During request finalisation,
+ * reserved_space is set to 0 to stop the overallocation and the
+ * assumption is that then we never need to wait (which has the
+ * risk of failing with EINTR).
+ *
+ * See also i915_gem_request_alloc() and i915_add_request().
+ */
+ GEM_BUG_ON(!ringbuf->reserved_size);
+
+ list_for_each_entry(target, &engine->request_list, list) {
+ unsigned space;
+
/*
- * The ring was wrapped while the reserved space was in use.
- * That means that some unknown amount of the ring tail was
- * no-op filled and skipped. Thus simply adding the ring size
- * to the tail and doing the above space check will not work.
- * Rather than attempt to track how much tail was skipped,
- * it is much simpler to say that also skipping the sanity
- * check every once in a while is not a big issue.
+ * The request queue is per-engine, so can contain requests
+ * from multiple ringbuffers. Here, we must ignore any that
+ * aren't from the ringbuffer we're considering.
*/
+ if (target->ringbuf != ringbuf)
+ continue;
+
+ /* Would completion of this request free enough space? */
+ space = __intel_ring_space(target->postfix, ringbuf->tail,
+ ringbuf->size);
+ if (space >= bytes)
+ break;
}
- ringbuf->reserved_size = 0;
- ringbuf->reserved_in_use = false;
+ if (WARN_ON(&target->list == &engine->request_list))
+ return -ENOSPC;
+
+ return i915_wait_request(target);
}
-static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes)
+int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
- struct intel_ringbuffer *ringbuf = engine->buffer;
- int remain_usable = ringbuf->effective_size - ringbuf->tail;
+ struct intel_ringbuffer *ringbuf = req->ringbuf;
int remain_actual = ringbuf->size - ringbuf->tail;
- int ret, total_bytes, wait_bytes = 0;
+ int remain_usable = ringbuf->effective_size - ringbuf->tail;
+ int bytes = num_dwords * sizeof(u32);
+ int total_bytes, wait_bytes;
bool need_wrap = false;
- if (ringbuf->reserved_in_use)
- total_bytes = bytes;
- else
- total_bytes = bytes + ringbuf->reserved_size;
+ total_bytes = bytes + ringbuf->reserved_size;
if (unlikely(bytes > remain_usable)) {
/*
@@ -2463,44 +2437,42 @@ static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes)
*/
wait_bytes = remain_actual + total_bytes;
need_wrap = true;
+ } else if (unlikely(total_bytes > remain_usable)) {
+ /*
+ * The base request will fit but the reserved space
+ * falls off the end. So we don't need an immediate wrap
+ * and only need to effectively wait for the reserved
+ * size space from the start of ringbuffer.
+ */
+ wait_bytes = remain_actual + ringbuf->reserved_size;
} else {
- if (unlikely(total_bytes > remain_usable)) {
- /*
- * The base request will fit but the reserved space
- * falls off the end. So don't need an immediate wrap
- * and only need to effectively wait for the reserved
- * size space from the start of ringbuffer.
- */
- wait_bytes = remain_actual + ringbuf->reserved_size;
- } else if (total_bytes > ringbuf->space) {
- /* No wrapping required, just waiting. */
- wait_bytes = total_bytes;
- }
+ /* No wrapping required, just waiting. */
+ wait_bytes = total_bytes;
}
- if (wait_bytes) {
- ret = ring_wait_for_space(engine, wait_bytes);
+ if (wait_bytes > ringbuf->space) {
+ int ret = wait_for_space(req, wait_bytes);
if (unlikely(ret))
return ret;
- if (need_wrap)
- __wrap_ring_buffer(ringbuf);
+ intel_ring_update_space(ringbuf);
+ if (unlikely(ringbuf->space < wait_bytes))
+ return -EAGAIN;
}
- return 0;
-}
+ if (unlikely(need_wrap)) {
+ GEM_BUG_ON(remain_actual > ringbuf->space);
+ GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size);
-int intel_ring_begin(struct drm_i915_gem_request *req,
- int num_dwords)
-{
- struct intel_engine_cs *engine = req->engine;
- int ret;
-
- ret = __intel_ring_prepare(engine, num_dwords * sizeof(uint32_t));
- if (ret)
- return ret;
+ /* Fill the tail with MI_NOOP */
+ memset(ringbuf->virtual_start + ringbuf->tail,
+ 0, remain_actual);
+ ringbuf->tail = 0;
+ ringbuf->space -= remain_actual;
+ }
- engine->buffer->space -= num_dwords * sizeof(uint32_t);
+ ringbuf->space -= bytes;
+ GEM_BUG_ON(ringbuf->space < 0);
return 0;
}
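
The reworked intel_ring_begin() distinguishes three cases when deciding how many bytes of space to wait for. A sketch of just that computation, with the ringbuffer fields reduced to plain integers and hypothetical sizes:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the wait_bytes/need_wrap logic in intel_ring_begin() above;
 * sizes and tail are stand-ins for the ringbuffer fields. */
static int compute_wait(int size, int effective_size, int tail,
			int reserved_size, int bytes, bool *need_wrap)
{
	int remain_actual = size - tail;
	int remain_usable = effective_size - tail;
	int total_bytes = bytes + reserved_size;

	*need_wrap = false;
	if (bytes > remain_usable) {
		/* Request itself doesn't fit: flush the tail and wrap. */
		*need_wrap = true;
		return remain_actual + total_bytes;
	} else if (total_bytes > remain_usable) {
		/* Request fits, but the reserved space falls off the end. */
		return remain_actual + reserved_size;
	}
	/* No wrap required; just wait for total_bytes of space. */
	return total_bytes;
}

int main(void)
{
	bool wrap;
	int w = compute_wait(4096, 4000, 3900, 64, 200, &wrap);

	printf("wait=%d wrap=%d\n", w, wrap);
	return 0;
}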
@@ -2772,6 +2744,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
engine->name = "render ring";
engine->id = RCS;
engine->exec_id = I915_EXEC_RENDER;
+ engine->hw_id = 0;
engine->mmio_base = RENDER_RING_BASE;
if (INTEL_INFO(dev)->gen >= 8) {
@@ -2923,6 +2896,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
engine->name = "bsd ring";
engine->id = VCS;
engine->exec_id = I915_EXEC_BSD;
+ engine->hw_id = 1;
engine->write_tail = ring_write_tail;
if (INTEL_INFO(dev)->gen >= 6) {
@@ -3001,6 +2975,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
engine->name = "bsd2 ring";
engine->id = VCS2;
engine->exec_id = I915_EXEC_BSD;
+ engine->hw_id = 4;
engine->write_tail = ring_write_tail;
engine->mmio_base = GEN8_BSD2_RING_BASE;
@@ -3033,6 +3008,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
engine->name = "blitter ring";
engine->id = BCS;
engine->exec_id = I915_EXEC_BLT;
+ engine->hw_id = 2;
engine->mmio_base = BLT_RING_BASE;
engine->write_tail = ring_write_tail;
@@ -3092,6 +3068,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
engine->name = "video enhancement ring";
engine->id = VECS;
engine->exec_id = I915_EXEC_VEBOX;
+ engine->hw_id = 3;
engine->mmio_base = VEBOX_RING_BASE;
engine->write_tail = ring_write_tail;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2ade194bbea9..ff126485d398 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -108,8 +108,6 @@ struct intel_ringbuffer {
int size;
int effective_size;
int reserved_size;
- int reserved_tail;
- bool reserved_in_use;
/** We track the position of the requests in the ring buffer, and
* when each is retired we increment last_retired_head as the GPU
@@ -156,7 +154,8 @@ struct intel_engine_cs {
#define I915_NUM_ENGINES 5
#define _VCS(n) (VCS + (n))
unsigned int exec_id;
- unsigned int guc_id;
+ unsigned int hw_id;
+ unsigned int guc_id; /* XXX same as hw_id? */
u32 mmio_base;
struct drm_device *dev;
struct intel_ringbuffer *buffer;
@@ -459,7 +458,6 @@ static inline void intel_ring_advance(struct intel_engine_cs *engine)
}
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
-int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_engine_stopped(struct intel_engine_cs *engine);
int __must_check intel_engine_idle(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index 9ff1e960d617..c15051de8023 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -740,6 +740,7 @@ struct bdb_psr {
#define DEVICE_TYPE_INT_TV 0x1009
#define DEVICE_TYPE_HDMI 0x60D2
#define DEVICE_TYPE_DP 0x68C6
+#define DEVICE_TYPE_DP_DUAL_MODE 0x60D6
#define DEVICE_TYPE_eDP 0x78C6
#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
@@ -774,6 +775,17 @@ struct bdb_psr {
DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
DEVICE_TYPE_ANALOG_OUTPUT)
+#define DEVICE_TYPE_DP_DUAL_MODE_BITS \
+ (DEVICE_TYPE_INTERNAL_CONNECTOR | \
+ DEVICE_TYPE_MIPI_OUTPUT | \
+ DEVICE_TYPE_COMPOSITE_OUTPUT | \
+ DEVICE_TYPE_LVDS_SINGALING | \
+ DEVICE_TYPE_TMDS_DVI_SIGNALING | \
+ DEVICE_TYPE_VIDEO_SIGNALING | \
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
+ DEVICE_TYPE_DIGITAL_OUTPUT | \
+ DEVICE_TYPE_ANALOG_OUTPUT)
+
/* define the DVO port for HDMI output type */
#define DVO_B 1
#define DVO_C 2
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 1080019e7b17..1f14b602882b 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -25,6 +25,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_of.h>
+#include <video/imx-ipu-v3.h>
#include "imx-drm.h"
@@ -437,6 +438,13 @@ static int compare_of(struct device *dev, void *data)
{
struct device_node *np = data;
+ /* Special case for DI, dev->of_node may not be set yet */
+ if (strcmp(dev->driver->name, "imx-ipuv3-crtc") == 0) {
+ struct ipu_client_platformdata *pdata = dev->platform_data;
+
+ return pdata->of_node == np;
+ }
+
/* Special case for LDB, one device for two channels */
if (of_node_cmp(np->name, "lvds-channel") == 0) {
np = of_get_parent(np);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index dee8e8b3523b..b2c30b8d9816 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -473,7 +473,7 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc,
&ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs,
- ipu_crtc->dev->of_node);
+ pdata->of_node);
if (ret) {
dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret);
goto err_put_resources;
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index d0240743a17c..a7e978677937 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2164,7 +2164,7 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
if (pi->caps_stable_p_state) {
stable_p_state_sclk = (max_limits->sclk * 75) / 100;
- for (i = table->count - 1; i >= 0; i++) {
+ for (i = table->count - 1; i >= 0; i--) {
if (stable_p_state_sclk >= table->entries[i].clk) {
stable_p_state_sclk = table->entries[i].clk;
break;
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 32c7986b63ab..6bf4ce466d20 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -437,7 +437,7 @@ static int vtg_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
} else {
vtg->irq = platform_get_irq(pdev, 0);
- if (IS_ERR_VALUE(vtg->irq)) {
+ if (vtg->irq < 0) {
DRM_ERROR("Failed to get VTG interrupt\n");
return vtg->irq;
}
@@ -447,7 +447,7 @@ static int vtg_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq,
vtg_irq_thread, IRQF_ONESHOT,
dev_name(dev), vtg);
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
DRM_ERROR("Failed to register VTG interrupt\n");
return ret;
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 7716f42f8aab..6b8c5b3bf588 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -342,7 +342,7 @@ static int tfp410_probe(struct platform_device *pdev)
tfp410_mod->gpio = of_get_named_gpio_flags(node, "powerdn-gpio",
0, NULL);
- if (IS_ERR_VALUE(tfp410_mod->gpio)) {
+ if (tfp410_mod->gpio < 0) {
dev_warn(&pdev->dev, "No power down GPIO\n");
} else {
ret = gpio_request(tfp410_mod->gpio, "DVI_PDn");
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index 498b37e39058..e1e31e9e67cd 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -85,7 +85,7 @@ static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
err = devm_request_irq(host->dev, host->intr_syncpt_irq,
syncpt_thresh_isr, IRQF_SHARED,
"host1x_syncpt", host);
- if (IS_ERR_VALUE(err)) {
+ if (err < 0) {
WARN_ON(1);
return err;
}
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index abb98c77bad2..99dcacf05b99 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -997,7 +997,7 @@ struct ipu_platform_reg {
};
/* These must be in the order of the corresponding device tree port nodes */
-static const struct ipu_platform_reg client_reg[] = {
+static struct ipu_platform_reg client_reg[] = {
{
.pdata = {
.csi = 0,
@@ -1048,7 +1048,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
mutex_unlock(&ipu_client_id_mutex);
for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
- const struct ipu_platform_reg *reg = &client_reg[i];
+ struct ipu_platform_reg *reg = &client_reg[i];
struct platform_device *pdev;
struct device_node *of_node;
@@ -1070,6 +1070,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
pdev->dev.parent = dev;
+ reg->pdata.of_node = of_node;
ret = platform_device_add_data(pdev, &reg->pdata,
sizeof(reg->pdata));
if (!ret)
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 952fe692d764..24e395c5907d 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -58,7 +58,7 @@ static const u8 REG_TEMP_MAX[4] = { 0x34, 0x30, 0x31, 0x32 };
*/
static int apd = -1;
module_param(apd, bint, 0);
-MODULE_PARM_DESC(init, "Set to zero to disable anti-parallel diode mode");
+MODULE_PARM_DESC(apd, "Set to zero to disable anti-parallel diode mode");
struct temperature {
s8 degrees;
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 0addc84ba948..69166ab3151d 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -77,7 +77,6 @@ static const u8 LM75_REG_TEMP[3] = {
struct lm75_data {
struct i2c_client *client;
struct device *hwmon_dev;
- struct thermal_zone_device *tz;
struct mutex update_lock;
u8 orig_conf;
u8 resolution; /* In bits, between 9 and 12 */
@@ -306,11 +305,9 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (IS_ERR(data->hwmon_dev))
return PTR_ERR(data->hwmon_dev);
- data->tz = thermal_zone_of_sensor_register(data->hwmon_dev, 0,
- data->hwmon_dev,
- &lm75_of_thermal_ops);
- if (IS_ERR(data->tz))
- data->tz = NULL;
+ devm_thermal_zone_of_sensor_register(data->hwmon_dev, 0,
+ data->hwmon_dev,
+ &lm75_of_thermal_ops);
dev_info(dev, "%s: sensor '%s'\n",
dev_name(data->hwmon_dev), client->name);
@@ -322,7 +319,6 @@ static int lm75_remove(struct i2c_client *client)
{
struct lm75_data *data = i2c_get_clientdata(client);
- thermal_zone_of_sensor_unregister(data->hwmon_dev, data->tz);
hwmon_device_unregister(data->hwmon_dev);
lm75_write_value(client, LM75_REG_CONF, data->orig_conf);
return 0;
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index faa6e8dfbaaf..8ef7b713cb1a 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -259,7 +259,6 @@ struct ntc_data {
struct device *dev;
int n_comp;
char name[PLATFORM_NAME_SIZE];
- struct thermal_zone_device *tz;
};
#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO)
@@ -579,6 +578,7 @@ static const struct thermal_zone_of_device_ops ntc_of_thermal_ops = {
static int ntc_thermistor_probe(struct platform_device *pdev)
{
+ struct thermal_zone_device *tz;
const struct of_device_id *of_id =
of_match_device(of_match_ptr(ntc_match), &pdev->dev);
const struct platform_device_id *pdev_id;
@@ -677,12 +677,10 @@ static int ntc_thermistor_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Thermistor type: %s successfully probed.\n",
pdev_id->name);
- data->tz = thermal_zone_of_sensor_register(data->dev, 0, data->dev,
- &ntc_of_thermal_ops);
- if (IS_ERR(data->tz)) {
+ tz = devm_thermal_zone_of_sensor_register(data->dev, 0, data->dev,
+ &ntc_of_thermal_ops);
+ if (IS_ERR(tz))
dev_dbg(&pdev->dev, "Failed to register to thermal fw.\n");
- data->tz = NULL;
- }
return 0;
err_after_sysfs:
@@ -700,8 +698,6 @@ static int ntc_thermistor_remove(struct platform_device *pdev)
sysfs_remove_group(&data->dev->kobj, &ntc_attr_group);
ntc_iio_channel_release(pdata);
- thermal_zone_of_sensor_unregister(data->dev, data->tz);
-
return 0;
}
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 912b449c8303..25b44e68926d 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -31,10 +31,8 @@ struct sensor_data {
};
struct scpi_thermal_zone {
- struct list_head list;
int sensor_id;
struct scpi_sensors *scpi_sensors;
- struct thermal_zone_device *tzd;
};
struct scpi_sensors {
@@ -92,20 +90,6 @@ scpi_show_label(struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%s\n", sensor->info.name);
}
-static void
-unregister_thermal_zones(struct platform_device *pdev,
- struct scpi_sensors *scpi_sensors)
-{
- struct list_head *pos;
-
- list_for_each(pos, &scpi_sensors->thermal_zones) {
- struct scpi_thermal_zone *zone;
-
- zone = list_entry(pos, struct scpi_thermal_zone, list);
- thermal_zone_of_sensor_unregister(&pdev->dev, zone->tzd);
- }
-}
-
static struct thermal_zone_of_device_ops scpi_sensor_ops = {
.get_temp = scpi_read_temp,
};
@@ -118,7 +102,7 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
struct scpi_ops *scpi_ops;
struct device *hwdev, *dev = &pdev->dev;
struct scpi_sensors *scpi_sensors;
- int ret, idx;
+ int idx, ret;
scpi_ops = get_scpi_ops();
if (!scpi_ops)
@@ -232,48 +216,35 @@ static int scpi_hwmon_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&scpi_sensors->thermal_zones);
for (i = 0; i < nr_sensors; i++) {
struct sensor_data *sensor = &scpi_sensors->data[i];
+ struct thermal_zone_device *z;
struct scpi_thermal_zone *zone;
if (sensor->info.class != TEMPERATURE)
continue;
zone = devm_kzalloc(dev, sizeof(*zone), GFP_KERNEL);
- if (!zone) {
- ret = -ENOMEM;
- goto unregister_tzd;
- }
+ if (!zone)
+ return -ENOMEM;
zone->sensor_id = i;
zone->scpi_sensors = scpi_sensors;
- zone->tzd = thermal_zone_of_sensor_register(dev,
- sensor->info.sensor_id, zone, &scpi_sensor_ops);
+ z = devm_thermal_zone_of_sensor_register(dev,
+ sensor->info.sensor_id,
+ zone,
+ &scpi_sensor_ops);
/*
* The call to thermal_zone_of_sensor_register returns
* an error for sensors that are not associated with
* any thermal zones or if the thermal subsystem is
* not configured.
*/
- if (IS_ERR(zone->tzd)) {
+ if (IS_ERR(z)) {
devm_kfree(dev, zone);
continue;
}
- list_add(&zone->list, &scpi_sensors->thermal_zones);
}
return 0;
-
-unregister_tzd:
- unregister_thermal_zones(pdev, scpi_sensors);
- return ret;
-}
-
-static int scpi_hwmon_remove(struct platform_device *pdev)
-{
- struct scpi_sensors *scpi_sensors = platform_get_drvdata(pdev);
-
- unregister_thermal_zones(pdev, scpi_sensors);
-
- return 0;
}
static const struct of_device_id scpi_of_match[] = {
@@ -288,7 +259,6 @@ static struct platform_driver scpi_hwmon_platdrv = {
.of_match_table = scpi_of_match,
},
.probe = scpi_hwmon_probe,
- .remove = scpi_hwmon_remove,
};
module_platform_driver(scpi_hwmon_platdrv);
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 5289aa0980a8..f1e96fd7f445 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -53,7 +53,6 @@
struct tmp102 {
struct i2c_client *client;
struct device *hwmon_dev;
- struct thermal_zone_device *tz;
struct mutex lock;
u16 config_orig;
unsigned long last_update;
@@ -232,10 +231,8 @@ static int tmp102_probe(struct i2c_client *client,
goto fail_restore_config;
}
tmp102->hwmon_dev = hwmon_dev;
- tmp102->tz = thermal_zone_of_sensor_register(hwmon_dev, 0, hwmon_dev,
- &tmp102_of_thermal_ops);
- if (IS_ERR(tmp102->tz))
- tmp102->tz = NULL;
+ devm_thermal_zone_of_sensor_register(hwmon_dev, 0, hwmon_dev,
+ &tmp102_of_thermal_ops);
dev_info(dev, "initialized\n");
@@ -251,7 +248,6 @@ static int tmp102_remove(struct i2c_client *client)
{
struct tmp102 *tmp102 = i2c_get_clientdata(client);
- thermal_zone_of_sensor_unregister(tmp102->hwmon_dev, tmp102->tz);
hwmon_device_unregister(tmp102->hwmon_dev);
/* Stop monitoring if device was stopped originally */
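
The four hwmon conversions above (lm75, ntc_thermistor, scpi-hwmon, tmp102) all follow the same shape: drop the stored thermal_zone_device pointer and the unregister call in remove(), and let devres handle teardown. A minimal sketch of the pattern for a hypothetical sensor driver (ops and names invented for illustration; registration failure stays non-fatal, as in the drivers above):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/thermal.h>

static int demo_get_temp(void *data, int *temp)
{
	*temp = 25000;	/* placeholder reading, millidegrees Celsius */
	return 0;
}

static const struct thermal_zone_of_device_ops demo_thermal_ops = {
	.get_temp = demo_get_temp,
};

static int demo_probe(struct device *dev)
{
	struct thermal_zone_device *tz;

	/* devm_ variant: nothing to store, nothing to unregister in
	 * remove(); devres drops the zone when the device goes away. */
	tz = devm_thermal_zone_of_sensor_register(dev, 0, dev,
						  &demo_thermal_ops);
	if (IS_ERR(tz))
		dev_dbg(dev, "failed to register thermal zone\n");

	return 0;
}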
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 2dd40ddf04de..f167021b8c21 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -965,7 +965,7 @@ config I2C_XILINX
config I2C_XLR
tristate "Netlogic XLR and Sigma Designs I2C support"
- depends on CPU_XLR || ARCH_TANGOX
+ depends on CPU_XLR || ARCH_TANGO
help
This driver enables support for the on-chip I2C interface of
the Netlogic XLR/XLS MIPS processors and Sigma Designs SOCs.
@@ -985,6 +985,7 @@ config I2C_XLP9XX
config I2C_RCAR
tristate "Renesas R-Car I2C Controller"
+ depends on HAS_DMA
depends on ARCH_RENESAS || COMPILE_TEST
select I2C_SLAVE
help
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 921d32bfcda8..f23372669f77 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -1013,7 +1013,7 @@ static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
error:
if (ret != -EPROBE_DEFER)
- dev_info(dev->dev, "can't use DMA, error %d\n", ret);
+ dev_info(dev->dev, "can't get DMA channel, continue without DMA support\n");
if (dma->chan_rx)
dma_release_channel(dma->chan_rx);
if (dma->chan_tx)
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 9aca1b4e2d8d..52407f3c9e1c 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -623,7 +623,7 @@ static struct dma_chan *rcar_i2c_request_dma_chan(struct device *dev,
char *chan_name = dir == DMA_MEM_TO_DEV ? "tx" : "rx";
int ret;
- chan = dma_request_slave_channel_reason(dev, chan_name);
+ chan = dma_request_chan(dev, chan_name);
if (IS_ERR(chan)) {
ret = PTR_ERR(chan);
dev_dbg(dev, "request_channel failed for %s (%d)\n",
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 0b1108d3c2f3..6ecfd76270f2 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -22,6 +22,7 @@
/* The I2C_RDWR ioctl code is written by Kolja Waschk <waschk@telos.de> */
+#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/i2c-dev.h>
@@ -47,9 +48,10 @@ struct i2c_dev {
struct list_head list;
struct i2c_adapter *adap;
struct device *dev;
+ struct cdev cdev;
};
-#define I2C_MINORS 256
+#define I2C_MINORS MINORMASK
static LIST_HEAD(i2c_dev_list);
static DEFINE_SPINLOCK(i2c_dev_list_lock);
@@ -89,7 +91,7 @@ static struct i2c_dev *get_free_i2c_dev(struct i2c_adapter *adap)
return i2c_dev;
}
-static void return_i2c_dev(struct i2c_dev *i2c_dev)
+static void put_i2c_dev(struct i2c_dev *i2c_dev)
{
spin_lock(&i2c_dev_list_lock);
list_del(&i2c_dev->list);
@@ -552,6 +554,12 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
if (IS_ERR(i2c_dev))
return PTR_ERR(i2c_dev);
+ cdev_init(&i2c_dev->cdev, &i2cdev_fops);
+ i2c_dev->cdev.owner = THIS_MODULE;
+ res = cdev_add(&i2c_dev->cdev, MKDEV(I2C_MAJOR, adap->nr), 1);
+ if (res)
+ goto error_cdev;
+
/* register this i2c device with the driver core */
i2c_dev->dev = device_create(i2c_dev_class, &adap->dev,
MKDEV(I2C_MAJOR, adap->nr), NULL,
@@ -565,7 +573,9 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
adap->name, adap->nr);
return 0;
error:
- return_i2c_dev(i2c_dev);
+ cdev_del(&i2c_dev->cdev);
+error_cdev:
+ put_i2c_dev(i2c_dev);
return res;
}
@@ -582,7 +592,8 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
if (!i2c_dev) /* attach_adapter must have failed */
return 0;
- return_i2c_dev(i2c_dev);
+ cdev_del(&i2c_dev->cdev);
+ put_i2c_dev(i2c_dev);
device_destroy(i2c_dev_class, MKDEV(I2C_MAJOR, adap->nr));
pr_debug("i2c-dev: adapter [%s] unregistered\n", adap->name);
@@ -620,7 +631,7 @@ static int __init i2c_dev_init(void)
printk(KERN_INFO "i2c /dev entries driver\n");
- res = register_chrdev(I2C_MAJOR, "i2c", &i2cdev_fops);
+ res = register_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS, "i2c");
if (res)
goto out;
@@ -644,7 +655,7 @@ static int __init i2c_dev_init(void)
out_unreg_class:
class_destroy(i2c_dev_class);
out_unreg_chrdev:
- unregister_chrdev(I2C_MAJOR, "i2c");
+ unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS);
out:
printk(KERN_ERR "%s: Driver Initialisation failed\n", __FILE__);
return res;
@@ -655,7 +666,7 @@ static void __exit i2c_dev_exit(void)
bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier);
i2c_for_each_dev(NULL, i2cdev_detach_adapter);
class_destroy(i2c_dev_class);
- unregister_chrdev(I2C_MAJOR, "i2c");
+ unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS);
}
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
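
The i2c-dev change above replaces the single register_chrdev() call with an explicitly reserved region plus one cdev per adapter. The general pattern, sketched for a hypothetical driver (major number, names, and fops are placeholders, not from this patch):

#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/module.h>

#define DEMO_MAJOR	89		/* hypothetical, like I2C_MAJOR above */
#define DEMO_MINORS	MINORMASK

static const struct file_operations demo_fops;	/* fill in as needed */
static struct cdev demo_cdev;

static int __init demo_init(void)
{
	int res;

	/* Reserve the whole minor range once... */
	res = register_chrdev_region(MKDEV(DEMO_MAJOR, 0), DEMO_MINORS,
				     "demo");
	if (res)
		return res;

	/* ...then add one cdev per device (here: minor 0 only). */
	cdev_init(&demo_cdev, &demo_fops);
	demo_cdev.owner = THIS_MODULE;
	res = cdev_add(&demo_cdev, MKDEV(DEMO_MAJOR, 0), 1);
	if (res)
		unregister_chrdev_region(MKDEV(DEMO_MAJOR, 0), DEMO_MINORS);
	return res;
}

static void __exit demo_exit(void)
{
	cdev_del(&demo_cdev);
	unregister_chrdev_region(MKDEV(DEMO_MAJOR, 0), DEMO_MINORS);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");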
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 6425c0e5d18a..2137adfbd8c3 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -85,4 +85,6 @@ source "drivers/infiniband/ulp/isert/Kconfig"
source "drivers/infiniband/sw/rdmavt/Kconfig"
+source "drivers/infiniband/hw/hfi1/Kconfig"
+
endif # INFINIBAND
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index 26987d9d7e1c..edaae9f9853c 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -1,8 +1,7 @@
infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_cm.o
user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o
-obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
- ib_cm.o iw_cm.o ib_addr.o \
+obj-$(CONFIG_INFINIBAND) += ib_core.o ib_cm.o iw_cm.o \
$(infiniband-y)
obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
@@ -10,14 +9,11 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
device.o fmr_pool.o cache.o netlink.o \
- roce_gid_mgmt.o mr_pool.o
+ roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \
+ multicast.o mad.o smi.o agent.o mad_rmpp.o
ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o
-ib_mad-y := mad.o smi.o agent.o mad_rmpp.o
-
-ib_sa-y := sa_query.o multicast.o
-
ib_cm-y := cm.o
iw_cm-y := iwcm.o iwpm_util.o iwpm_msg.o
@@ -28,8 +24,6 @@ rdma_cm-$(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS) += cma_configfs.o
rdma_ucm-y := ucma.o
-ib_addr-y := addr.o
-
ib_umad-y := user_mad.o
ib_ucm-y := ucm.o
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 337353d86cfa..1374541a4528 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -46,10 +46,10 @@
#include <net/ip6_route.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>
+#include <rdma/rdma_netlink.h>
+#include <net/netlink.h>
-MODULE_AUTHOR("Sean Hefty");
-MODULE_DESCRIPTION("IB Address Translation");
-MODULE_LICENSE("Dual BSD/GPL");
+#include "core_priv.h"
struct addr_req {
struct list_head list;
@@ -62,8 +62,11 @@ struct addr_req {
struct rdma_dev_addr *addr, void *context);
unsigned long timeout;
int status;
+ u32 seq;
};
+static atomic_t ib_nl_addr_request_seq = ATOMIC_INIT(0);
+
static void process_req(struct work_struct *work);
static DEFINE_MUTEX(lock);
@@ -71,6 +74,126 @@ static LIST_HEAD(req_list);
static DECLARE_DELAYED_WORK(work, process_req);
static struct workqueue_struct *addr_wq;
+static const struct nla_policy ib_nl_addr_policy[LS_NLA_TYPE_MAX] = {
+ [LS_NLA_TYPE_DGID] = {.type = NLA_BINARY,
+ .len = sizeof(struct rdma_nla_ls_gid)},
+};
+
+static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
+{
+ struct nlattr *tb[LS_NLA_TYPE_MAX] = {};
+ int ret;
+
+ if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
+ return false;
+
+ ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+ nlmsg_len(nlh), ib_nl_addr_policy);
+ if (ret)
+ return false;
+
+ return true;
+}
+
+static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh)
+{
+ const struct nlattr *head, *curr;
+ union ib_gid gid;
+ struct addr_req *req;
+ int len, rem;
+ int found = 0;
+
+ head = (const struct nlattr *)nlmsg_data(nlh);
+ len = nlmsg_len(nlh);
+
+ nla_for_each_attr(curr, head, len, rem) {
+ if (curr->nla_type == LS_NLA_TYPE_DGID)
+ memcpy(&gid, nla_data(curr), nla_len(curr));
+ }
+
+ mutex_lock(&lock);
+ list_for_each_entry(req, &req_list, list) {
+ if (nlh->nlmsg_seq != req->seq)
+ continue;
+ /* We set the DGID part, the rest was set earlier */
+ rdma_addr_set_dgid(req->addr, &gid);
+ req->status = 0;
+ found = 1;
+ break;
+ }
+ mutex_unlock(&lock);
+
+ if (!found)
+ pr_info("Couldn't find request waiting for DGID: %pI6\n",
+ &gid);
+}
+
+int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
+
+ if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
+ !(NETLINK_CB(skb).sk) ||
+ !netlink_capable(skb, CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (ib_nl_is_good_ip_resp(nlh))
+ ib_nl_process_good_ip_rsep(nlh);
+
+ return skb->len;
+}
+
+static int ib_nl_ip_send_msg(struct rdma_dev_addr *dev_addr,
+ const void *daddr,
+ u32 seq, u16 family)
+{
+ struct sk_buff *skb = NULL;
+ struct nlmsghdr *nlh;
+ struct rdma_ls_ip_resolve_header *header;
+ void *data;
+ size_t size;
+ int attrtype;
+ int len;
+
+ if (family == AF_INET) {
+ size = sizeof(struct in_addr);
+ attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV4;
+ } else {
+ size = sizeof(struct in6_addr);
+ attrtype = RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_IPV6;
+ }
+
+ len = nla_total_size(sizeof(size));
+ len += NLMSG_ALIGN(sizeof(*header));
+
+ skb = nlmsg_new(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ data = ibnl_put_msg(skb, &nlh, seq, 0, RDMA_NL_LS,
+ RDMA_NL_LS_OP_IP_RESOLVE, NLM_F_REQUEST);
+ if (!data) {
+ nlmsg_free(skb);
+ return -ENODATA;
+ }
+
+ /* Construct the family header first */
+ header = (struct rdma_ls_ip_resolve_header *)
+ skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
+ header->ifindex = dev_addr->bound_dev_if;
+ nla_put(skb, attrtype, size, daddr);
+
+ /* Repair the nlmsg header length */
+ nlmsg_end(skb, nlh);
+ ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, GFP_KERNEL);
+
+ /* Make the request retry, so that by the time the response arrives
+ * from userspace we will have something to work with.
+ */
+ return -ENODATA;
+}
+
int rdma_addr_size(struct sockaddr *addr)
{
switch (addr->sa_family) {
@@ -199,6 +322,17 @@ static void queue_req(struct addr_req *req)
mutex_unlock(&lock);
}
+static int ib_nl_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
+ const void *daddr, u32 seq, u16 family)
+{
+ if (ibnl_chk_listeners(RDMA_NL_GROUP_LS))
+ return -EADDRNOTAVAIL;
+
+ /* We fill in what we can, the response will fill the rest */
+ rdma_copy_addr(dev_addr, dst->dev, NULL);
+ return ib_nl_ip_send_msg(dev_addr, daddr, seq, family);
+}
+
static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
const void *daddr)
{
@@ -223,6 +357,39 @@ static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
return ret;
}
+static bool has_gateway(struct dst_entry *dst, sa_family_t family)
+{
+ struct rtable *rt;
+ struct rt6_info *rt6;
+
+ if (family == AF_INET) {
+ rt = container_of(dst, struct rtable, dst);
+ return rt->rt_uses_gateway;
+ }
+
+ rt6 = container_of(dst, struct rt6_info, dst);
+ return rt6->rt6i_flags & RTF_GATEWAY;
+}
+
+static int fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
+ const struct sockaddr *dst_in, u32 seq)
+{
+ const struct sockaddr_in *dst_in4 =
+ (const struct sockaddr_in *)dst_in;
+ const struct sockaddr_in6 *dst_in6 =
+ (const struct sockaddr_in6 *)dst_in;
+ const void *daddr = (dst_in->sa_family == AF_INET) ?
+ (const void *)&dst_in4->sin_addr.s_addr :
+ (const void *)&dst_in6->sin6_addr;
+ sa_family_t family = dst_in->sa_family;
+
+ /* Gateway + ARPHRD_INFINIBAND -> IB router */
+ if (has_gateway(dst, family) && dst->dev->type == ARPHRD_INFINIBAND)
+ return ib_nl_fetch_ha(dst, dev_addr, daddr, seq, family);
+ else
+ return dst_fetch_ha(dst, dev_addr, daddr);
+}
+
static int addr4_resolve(struct sockaddr_in *src_in,
const struct sockaddr_in *dst_in,
struct rdma_dev_addr *addr,
@@ -246,10 +413,11 @@ static int addr4_resolve(struct sockaddr_in *src_in,
src_in->sin_family = AF_INET;
src_in->sin_addr.s_addr = fl4.saddr;
- /* If there's a gateway, we're definitely in RoCE v2 (as RoCE v1 isn't
- * routable) and we could set the network type accordingly.
+ /* If there's a gateway and the device type is not ARPHRD_INFINIBAND,
+ * we're definitely in RoCE v2 (as RoCE v1 isn't routable), so set
+ * the network type accordingly.
*/
- if (rt->rt_uses_gateway)
+ if (rt->rt_uses_gateway && rt->dst.dev->type != ARPHRD_INFINIBAND)
addr->network = RDMA_NETWORK_IPV4;
addr->hoplimit = ip4_dst_hoplimit(&rt->dst);
@@ -291,10 +459,12 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
src_in->sin6_addr = fl6.saddr;
}
- /* If there's a gateway, we're definitely in RoCE v2 (as RoCE v1 isn't
- * routable) and we could set the network type accordingly.
+ /* If there's a gateway and the device type is not ARPHRD_INFINIBAND,
+ * we're definitely in RoCE v2 (as RoCE v1 isn't routable), so set
+ * the network type accordingly.
*/
- if (rt->rt6i_flags & RTF_GATEWAY)
+ if (rt->rt6i_flags & RTF_GATEWAY &&
+ ip6_dst_idev(dst)->dev->type != ARPHRD_INFINIBAND)
addr->network = RDMA_NETWORK_IPV6;
addr->hoplimit = ip6_dst_hoplimit(dst);
@@ -317,7 +487,8 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
static int addr_resolve_neigh(struct dst_entry *dst,
const struct sockaddr *dst_in,
- struct rdma_dev_addr *addr)
+ struct rdma_dev_addr *addr,
+ u32 seq)
{
if (dst->dev->flags & IFF_LOOPBACK) {
int ret;
@@ -331,17 +502,8 @@ static int addr_resolve_neigh(struct dst_entry *dst,
}
/* If the device doesn't do ARP internally */
- if (!(dst->dev->flags & IFF_NOARP)) {
- const struct sockaddr_in *dst_in4 =
- (const struct sockaddr_in *)dst_in;
- const struct sockaddr_in6 *dst_in6 =
- (const struct sockaddr_in6 *)dst_in;
-
- return dst_fetch_ha(dst, addr,
- dst_in->sa_family == AF_INET ?
- (const void *)&dst_in4->sin_addr.s_addr :
- (const void *)&dst_in6->sin6_addr);
- }
+ if (!(dst->dev->flags & IFF_NOARP))
+ return fetch_ha(dst, addr, dst_in, seq);
return rdma_copy_addr(addr, dst->dev, NULL);
}
@@ -349,7 +511,8 @@ static int addr_resolve_neigh(struct dst_entry *dst,
static int addr_resolve(struct sockaddr *src_in,
const struct sockaddr *dst_in,
struct rdma_dev_addr *addr,
- bool resolve_neigh)
+ bool resolve_neigh,
+ u32 seq)
{
struct net_device *ndev;
struct dst_entry *dst;
@@ -366,7 +529,7 @@ static int addr_resolve(struct sockaddr *src_in,
return ret;
if (resolve_neigh)
- ret = addr_resolve_neigh(&rt->dst, dst_in, addr);
+ ret = addr_resolve_neigh(&rt->dst, dst_in, addr, seq);
ndev = rt->dst.dev;
dev_hold(ndev);
@@ -383,7 +546,7 @@ static int addr_resolve(struct sockaddr *src_in,
return ret;
if (resolve_neigh)
- ret = addr_resolve_neigh(dst, dst_in, addr);
+ ret = addr_resolve_neigh(dst, dst_in, addr, seq);
ndev = dst->dev;
dev_hold(ndev);
@@ -412,7 +575,7 @@ static void process_req(struct work_struct *work)
src_in = (struct sockaddr *) &req->src_addr;
dst_in = (struct sockaddr *) &req->dst_addr;
req->status = addr_resolve(src_in, dst_in, req->addr,
- true);
+ true, req->seq);
if (req->status && time_after_eq(jiffies, req->timeout))
req->status = -ETIMEDOUT;
else if (req->status == -ENODATA)
@@ -471,8 +634,9 @@ int rdma_resolve_ip(struct rdma_addr_client *client,
req->context = context;
req->client = client;
atomic_inc(&client->refcount);
+ req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);
- req->status = addr_resolve(src_in, dst_in, addr, true);
+ req->status = addr_resolve(src_in, dst_in, addr, true, req->seq);
switch (req->status) {
case 0:
req->timeout = jiffies;
@@ -510,7 +674,7 @@ int rdma_resolve_ip_route(struct sockaddr *src_addr,
src_in->sa_family = dst_addr->sa_family;
}
- return addr_resolve(src_in, dst_addr, addr, false);
+ return addr_resolve(src_in, dst_addr, addr, false, 0);
}
EXPORT_SYMBOL(rdma_resolve_ip_route);
@@ -634,7 +798,7 @@ static struct notifier_block nb = {
.notifier_call = netevent_callback
};
-static int __init addr_init(void)
+int addr_init(void)
{
addr_wq = create_singlethread_workqueue("ib_addr");
if (!addr_wq)
@@ -642,15 +806,13 @@ static int __init addr_init(void)
register_netevent_notifier(&nb);
rdma_addr_register_client(&self);
+
return 0;
}
-static void __exit addr_cleanup(void)
+void addr_cleanup(void)
{
rdma_addr_unregister_client(&self);
unregister_netevent_notifier(&nb);
destroy_workqueue(addr_wq);
}
-
-module_init(addr_init);
-module_exit(addr_cleanup);
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index eab32215756b..19d499dcab76 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -137,4 +137,20 @@ static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
return _upper == upper;
}
+int addr_init(void);
+void addr_cleanup(void);
+
+int ib_mad_init(void);
+void ib_mad_cleanup(void);
+
+int ib_sa_init(void);
+void ib_sa_cleanup(void);
+
+int ib_nl_handle_resolve_resp(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int ib_nl_handle_set_timeout(struct sk_buff *skb,
+ struct netlink_callback *cb);
+int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
+ struct netlink_callback *cb);
+
#endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 10979844026a..5516fb070344 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -955,6 +955,29 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);
+static struct ibnl_client_cbs ibnl_ls_cb_table[] = {
+ [RDMA_NL_LS_OP_RESOLVE] = {
+ .dump = ib_nl_handle_resolve_resp,
+ .module = THIS_MODULE },
+ [RDMA_NL_LS_OP_SET_TIMEOUT] = {
+ .dump = ib_nl_handle_set_timeout,
+ .module = THIS_MODULE },
+ [RDMA_NL_LS_OP_IP_RESOLVE] = {
+ .dump = ib_nl_handle_ip_res_resp,
+ .module = THIS_MODULE },
+};
+
+static int ib_add_ibnl_clients(void)
+{
+ return ibnl_add_client(RDMA_NL_LS, ARRAY_SIZE(ibnl_ls_cb_table),
+ ibnl_ls_cb_table);
+}
+
+static void ib_remove_ibnl_clients(void)
+{
+ ibnl_remove_client(RDMA_NL_LS);
+}
+
static int __init ib_core_init(void)
{
int ret;
@@ -983,10 +1006,41 @@ static int __init ib_core_init(void)
goto err_sysfs;
}
+ ret = addr_init();
+ if (ret) {
+		pr_warn("Couldn't init IB address resolution\n");
+ goto err_ibnl;
+ }
+
+ ret = ib_mad_init();
+ if (ret) {
+ pr_warn("Couldn't init IB MAD\n");
+ goto err_addr;
+ }
+
+ ret = ib_sa_init();
+ if (ret) {
+ pr_warn("Couldn't init SA\n");
+ goto err_mad;
+ }
+
+	ret = ib_add_ibnl_clients();
+	if (ret) {
+		pr_warn("Couldn't register ibnl clients\n");
+		goto err_sa;
+	}
+
ib_cache_setup();
return 0;
+err_sa:
+ ib_sa_cleanup();
+err_mad:
+ ib_mad_cleanup();
+err_addr:
+ addr_cleanup();
+err_ibnl:
+ ibnl_cleanup();
err_sysfs:
class_unregister(&ib_class);
err_comp:
@@ -999,6 +1053,10 @@ err:
static void __exit ib_core_cleanup(void)
{
ib_cache_cleanup();
+ ib_remove_ibnl_clients();
+ ib_sa_cleanup();
+ ib_mad_cleanup();
+ addr_cleanup();
ibnl_cleanup();
class_unregister(&ib_class);
destroy_workqueue(ib_comp_wq);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 9fa5bf33f5a3..82fb511112da 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -47,11 +47,7 @@
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("kernel IB MAD API");
-MODULE_AUTHOR("Hal Rosenstock");
-MODULE_AUTHOR("Sean Hefty");
+#include "core_priv.h"
static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
@@ -3316,7 +3312,7 @@ static struct ib_client mad_client = {
.remove = ib_mad_remove_device
};
-static int __init ib_mad_init_module(void)
+int ib_mad_init(void)
{
mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
@@ -3334,10 +3330,7 @@ static int __init ib_mad_init_module(void)
return 0;
}
-static void __exit ib_mad_cleanup_module(void)
+void ib_mad_cleanup(void)
{
ib_unregister_client(&mad_client);
}
-
-module_init(ib_mad_init_module);
-module_exit(ib_mad_cleanup_module);
diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c
index 250937cb9a1a..a83ec28a147b 100644
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -93,6 +93,18 @@ enum {
struct mcast_member;
+/*
+ * There are 4 types of join states:
+ * FullMember, NonMember, SendOnlyNonMember, SendOnlyFullMember.
+ */
+enum {
+ FULLMEMBER_JOIN,
+ NONMEMBER_JOIN,
+	SENDONLY_NONMEMBER_JOIN,
+ SENDONLY_FULLMEMBER_JOIN,
+ NUM_JOIN_MEMBERSHIP_TYPES,
+};
+
struct mcast_group {
struct ib_sa_mcmember_rec rec;
struct rb_node node;
@@ -102,7 +114,7 @@ struct mcast_group {
struct list_head pending_list;
struct list_head active_list;
struct mcast_member *last_join;
- int members[3];
+ int members[NUM_JOIN_MEMBERSHIP_TYPES];
atomic_t refcount;
enum mcast_group_state state;
struct ib_sa_query *query;
@@ -220,8 +232,9 @@ static void queue_join(struct mcast_member *member)
}
/*
- * A multicast group has three types of members: full member, non member, and
- * send only member. We need to keep track of the number of members of each
+ * A multicast group has four types of members: full member, non member,
+ * sendonly non member, and sendonly full member.
+ * We need to keep track of the number of members of each
 * type based on their join state. Adjust the number of members that belong to
* the specified join states.
*/
@@ -229,7 +242,7 @@ static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
int i;
- for (i = 0; i < 3; i++, join_state >>= 1)
+ for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++, join_state >>= 1)
if (join_state & 0x1)
group->members[i] += inc;
}
@@ -245,7 +258,7 @@ static u8 get_leave_state(struct mcast_group *group)
u8 leave_state = 0;
int i;
- for (i = 0; i < 3; i++)
+ for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++)
if (!group->members[i])
leave_state |= (0x1 << i);
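The JoinState handling above treats the field as a bitmask, one bit per membership type in enum order, so a single request can name several memberships at once. A small illustration; the helper name is invented for this note:

/* Illustrative helper, not part of the patch: test one membership bit.
 * Bit 0 = FullMember, bit 1 = NonMember, bit 2 = SendOnlyNonMember,
 * bit 3 = SendOnlyFullMember, matching the enum above. */
static bool joins_as(u8 join_state, int type)
{
	return join_state & BIT(type);
}

For example, joins_as(0x8, SENDONLY_FULLMEMBER_JOIN) is true for a send-only full-member join, and adjust_membership() would bump only members[SENDONLY_FULLMEMBER_JOIN] for it.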
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 3ebd108bcc5f..e95538650dc6 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -53,10 +53,6 @@
#include "sa.h"
#include "core_priv.h"
-MODULE_AUTHOR("Roland Dreier");
-MODULE_DESCRIPTION("InfiniBand subnet administration query support");
-MODULE_LICENSE("Dual BSD/GPL");
-
#define IB_SA_LOCAL_SVC_TIMEOUT_MIN 100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT 2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX 200000
@@ -119,6 +115,12 @@ struct ib_sa_guidinfo_query {
struct ib_sa_query sa_query;
};
+struct ib_sa_classport_info_query {
+ void (*callback)(int, struct ib_class_port_info *, void *);
+ void *context;
+ struct ib_sa_query sa_query;
+};
+
struct ib_sa_mcmember_query {
void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
void *context;
@@ -392,6 +394,82 @@ static const struct ib_field service_rec_table[] = {
.size_bits = 2*64 },
};
+#define CLASSPORTINFO_REC_FIELD(field) \
+ .struct_offset_bytes = offsetof(struct ib_class_port_info, field), \
+ .struct_size_bytes = sizeof((struct ib_class_port_info *)0)->field, \
+ .field_name = "ib_class_port_info:" #field
+
+static const struct ib_field classport_info_rec_table[] = {
+ { CLASSPORTINFO_REC_FIELD(base_version),
+ .offset_words = 0,
+ .offset_bits = 0,
+ .size_bits = 8 },
+ { CLASSPORTINFO_REC_FIELD(class_version),
+ .offset_words = 0,
+ .offset_bits = 8,
+ .size_bits = 8 },
+ { CLASSPORTINFO_REC_FIELD(capability_mask),
+ .offset_words = 0,
+ .offset_bits = 16,
+ .size_bits = 16 },
+ { CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
+ .offset_words = 1,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { CLASSPORTINFO_REC_FIELD(redirect_gid),
+ .offset_words = 2,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
+ .offset_words = 6,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { CLASSPORTINFO_REC_FIELD(redirect_lid),
+ .offset_words = 7,
+ .offset_bits = 0,
+ .size_bits = 16 },
+ { CLASSPORTINFO_REC_FIELD(redirect_pkey),
+ .offset_words = 7,
+ .offset_bits = 16,
+ .size_bits = 16 },
+
+ { CLASSPORTINFO_REC_FIELD(redirect_qp),
+ .offset_words = 8,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { CLASSPORTINFO_REC_FIELD(redirect_qkey),
+ .offset_words = 9,
+ .offset_bits = 0,
+ .size_bits = 32 },
+
+ { CLASSPORTINFO_REC_FIELD(trap_gid),
+ .offset_words = 10,
+ .offset_bits = 0,
+ .size_bits = 128 },
+ { CLASSPORTINFO_REC_FIELD(trap_tcslfl),
+ .offset_words = 14,
+ .offset_bits = 0,
+ .size_bits = 32 },
+
+ { CLASSPORTINFO_REC_FIELD(trap_lid),
+ .offset_words = 15,
+ .offset_bits = 0,
+ .size_bits = 16 },
+ { CLASSPORTINFO_REC_FIELD(trap_pkey),
+ .offset_words = 15,
+ .offset_bits = 16,
+ .size_bits = 16 },
+
+ { CLASSPORTINFO_REC_FIELD(trap_hlqp),
+ .offset_words = 16,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { CLASSPORTINFO_REC_FIELD(trap_qkey),
+ .offset_words = 17,
+ .offset_bits = 0,
+ .size_bits = 32 },
+};
+
#define GUIDINFO_REC_FIELD(field) \
.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \
.struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \
@@ -705,8 +783,8 @@ static void ib_nl_request_timeout(struct work_struct *work)
spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}
-static int ib_nl_handle_set_timeout(struct sk_buff *skb,
- struct netlink_callback *cb)
+int ib_nl_handle_set_timeout(struct sk_buff *skb,
+ struct netlink_callback *cb)
{
const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
int timeout, delta, abs_delta;
@@ -782,8 +860,8 @@ static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
return 1;
}
-static int ib_nl_handle_resolve_resp(struct sk_buff *skb,
- struct netlink_callback *cb)
+int ib_nl_handle_resolve_resp(struct sk_buff *skb,
+ struct netlink_callback *cb)
{
const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
unsigned long flags;
@@ -838,15 +916,6 @@ resp_out:
return skb->len;
}
-static struct ibnl_client_cbs ib_sa_cb_table[] = {
- [RDMA_NL_LS_OP_RESOLVE] = {
- .dump = ib_nl_handle_resolve_resp,
- .module = THIS_MODULE },
- [RDMA_NL_LS_OP_SET_TIMEOUT] = {
- .dump = ib_nl_handle_set_timeout,
- .module = THIS_MODULE },
-};
-
static void free_sm_ah(struct kref *kref)
{
struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);
@@ -1645,6 +1714,97 @@ err1:
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
+/* Query the SA ClassPortInfo attribute (GET only) */
+static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
+ int status,
+ struct ib_sa_mad *mad)
+{
+ struct ib_sa_classport_info_query *query =
+ container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
+
+ if (mad) {
+ struct ib_class_port_info rec;
+
+ ib_unpack(classport_info_rec_table,
+ ARRAY_SIZE(classport_info_rec_table),
+ mad->data, &rec);
+ query->callback(status, &rec, query->context);
+ } else {
+ query->callback(status, NULL, query->context);
+ }
+}
+
+static void ib_sa_portclass_info_rec_release(struct ib_sa_query *sa_query)
+{
+ kfree(container_of(sa_query, struct ib_sa_classport_info_query,
+ sa_query));
+}
+
+int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
+ struct ib_device *device, u8 port_num,
+ int timeout_ms, gfp_t gfp_mask,
+ void (*callback)(int status,
+ struct ib_class_port_info *resp,
+ void *context),
+ void *context,
+ struct ib_sa_query **sa_query)
+{
+ struct ib_sa_classport_info_query *query;
+ struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
+ struct ib_sa_port *port;
+ struct ib_mad_agent *agent;
+ struct ib_sa_mad *mad;
+ int ret;
+
+ if (!sa_dev)
+ return -ENODEV;
+
+ port = &sa_dev->port[port_num - sa_dev->start_port];
+ agent = port->agent;
+
+ query = kzalloc(sizeof(*query), gfp_mask);
+ if (!query)
+ return -ENOMEM;
+
+ query->sa_query.port = port;
+ ret = alloc_mad(&query->sa_query, gfp_mask);
+ if (ret)
+ goto err1;
+
+ ib_sa_client_get(client);
+ query->sa_query.client = client;
+ query->callback = callback;
+ query->context = context;
+
+ mad = query->sa_query.mad_buf->mad;
+ init_mad(mad, agent);
+
+	query->sa_query.callback = callback ?
+		ib_sa_classport_info_rec_callback : NULL;
+ query->sa_query.release = ib_sa_portclass_info_rec_release;
+ /* support GET only */
+ mad->mad_hdr.method = IB_MGMT_METHOD_GET;
+ mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
+ mad->sa_hdr.comp_mask = 0;
+ *sa_query = &query->sa_query;
+
+ ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
+ if (ret < 0)
+ goto err2;
+
+ return ret;
+
+err2:
+ *sa_query = NULL;
+ ib_sa_client_put(query->sa_query.client);
+ free_mad(&query->sa_query);
+
+err1:
+ kfree(query);
+ return ret;
+}
+EXPORT_SYMBOL(ib_sa_classport_info_rec_query);
+
static void send_handler(struct ib_mad_agent *agent,
struct ib_mad_send_wc *mad_send_wc)
{
@@ -1794,7 +1954,7 @@ static void ib_sa_remove_one(struct ib_device *device, void *client_data)
kfree(sa_dev);
}
-static int __init ib_sa_init(void)
+int ib_sa_init(void)
{
int ret;
@@ -1820,17 +1980,10 @@ static int __init ib_sa_init(void)
goto err3;
}
- if (ibnl_add_client(RDMA_NL_LS, ARRAY_SIZE(ib_sa_cb_table),
- ib_sa_cb_table)) {
- pr_err("Failed to add netlink callback\n");
- ret = -EINVAL;
- goto err4;
- }
INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);
return 0;
-err4:
- destroy_workqueue(ib_nl_wq);
+
err3:
mcast_cleanup();
err2:
@@ -1839,9 +1992,8 @@ err1:
return ret;
}
-static void __exit ib_sa_cleanup(void)
+void ib_sa_cleanup(void)
{
- ibnl_remove_client(RDMA_NL_LS);
cancel_delayed_work(&ib_nl_timed_work);
flush_workqueue(ib_nl_wq);
destroy_workqueue(ib_nl_wq);
@@ -1849,6 +2001,3 @@ static void __exit ib_sa_cleanup(void)
ib_unregister_client(&sa_client);
idr_destroy(&query_idr);
}
-
-module_init(ib_sa_init);
-module_exit(ib_sa_cleanup);
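ib_sa_classport_info_rec_query() is newly exported API, and nothing in this excerpt shows a caller. A minimal usage sketch under the signature above; every name here is invented for illustration:

static void example_classport_cb(int status, struct ib_class_port_info *rec,
				 void *context)
{
	if (status || !rec)
		return;
	pr_info("SA ClassPortInfo: base %u class %u\n",
		rec->base_version, rec->class_version);
}

static int example_query_classport(struct ib_sa_client *client,
				   struct ib_device *device, u8 port_num)
{
	struct ib_sa_query *query;
	int ret;

	ret = ib_sa_classport_info_rec_query(client, device, port_num,
					     1000 /* ms */, GFP_KERNEL,
					     example_classport_cb, NULL,
					     &query);
	return ret < 0 ? ret : 0;	/* >= 0 is a query id */
}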
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 14606afbfaa8..5e573bb18660 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -56,8 +56,10 @@ struct ib_port {
struct gid_attr_group *gid_attr_group;
struct attribute_group gid_group;
struct attribute_group pkey_group;
- u8 port_num;
struct attribute_group *pma_table;
+ struct attribute_group *hw_stats_ag;
+ struct rdma_hw_stats *hw_stats;
+ u8 port_num;
};
struct port_attribute {
@@ -80,6 +82,18 @@ struct port_table_attribute {
__be16 attr_id;
};
+struct hw_stats_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj,
+ struct attribute *attr, char *buf);
+ ssize_t (*store)(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf,
+ size_t count);
+ int index;
+ u8 port_num;
+};
+
static ssize_t port_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
@@ -733,6 +747,212 @@ static struct attribute_group *get_counter_table(struct ib_device *dev,
return &pma_group;
}
+static int update_hw_stats(struct ib_device *dev, struct rdma_hw_stats *stats,
+ u8 port_num, int index)
+{
+ int ret;
+
+ if (time_is_after_eq_jiffies(stats->timestamp + stats->lifespan))
+ return 0;
+ ret = dev->get_hw_stats(dev, stats, port_num, index);
+ if (ret < 0)
+ return ret;
+ if (ret == stats->num_counters)
+ stats->timestamp = jiffies;
+
+ return 0;
+}
+
+static ssize_t print_hw_stat(struct rdma_hw_stats *stats, int index, char *buf)
+{
+ return sprintf(buf, "%llu\n", stats->value[index]);
+}
+
+static ssize_t show_hw_stats(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct ib_device *dev;
+ struct ib_port *port;
+ struct hw_stats_attribute *hsa;
+ struct rdma_hw_stats *stats;
+ int ret;
+
+ hsa = container_of(attr, struct hw_stats_attribute, attr);
+ if (!hsa->port_num) {
+ dev = container_of((struct device *)kobj,
+ struct ib_device, dev);
+ stats = dev->hw_stats;
+ } else {
+ port = container_of(kobj, struct ib_port, kobj);
+ dev = port->ibdev;
+ stats = port->hw_stats;
+ }
+ ret = update_hw_stats(dev, stats, hsa->port_num, hsa->index);
+ if (ret)
+ return ret;
+ return print_hw_stat(stats, hsa->index, buf);
+}
+
+static ssize_t show_stats_lifespan(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct hw_stats_attribute *hsa;
+ int msecs;
+
+ hsa = container_of(attr, struct hw_stats_attribute, attr);
+ if (!hsa->port_num) {
+ struct ib_device *dev = container_of((struct device *)kobj,
+ struct ib_device, dev);
+ msecs = jiffies_to_msecs(dev->hw_stats->lifespan);
+ } else {
+ struct ib_port *p = container_of(kobj, struct ib_port, kobj);
+ msecs = jiffies_to_msecs(p->hw_stats->lifespan);
+ }
+ return sprintf(buf, "%d\n", msecs);
+}
+
+static ssize_t set_stats_lifespan(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hw_stats_attribute *hsa;
+ int msecs;
+	unsigned long lifespan;
+ int ret;
+
+ ret = kstrtoint(buf, 10, &msecs);
+ if (ret)
+ return ret;
+ if (msecs < 0 || msecs > 10000)
+ return -EINVAL;
+	lifespan = msecs_to_jiffies(msecs);
+ hsa = container_of(attr, struct hw_stats_attribute, attr);
+ if (!hsa->port_num) {
+ struct ib_device *dev = container_of((struct device *)kobj,
+ struct ib_device, dev);
+		dev->hw_stats->lifespan = lifespan;
+ } else {
+ struct ib_port *p = container_of(kobj, struct ib_port, kobj);
+		p->hw_stats->lifespan = lifespan;
+ }
+ return count;
+}
+
+static void free_hsag(struct kobject *kobj, struct attribute_group *attr_group)
+{
+ struct attribute **attr;
+
+ sysfs_remove_group(kobj, attr_group);
+
+ for (attr = attr_group->attrs; *attr; attr++)
+ kfree(*attr);
+ kfree(attr_group);
+}
+
+static struct attribute *alloc_hsa(int index, u8 port_num, const char *name)
+{
+ struct hw_stats_attribute *hsa;
+
+ hsa = kmalloc(sizeof(*hsa), GFP_KERNEL);
+ if (!hsa)
+ return NULL;
+
+ hsa->attr.name = (char *)name;
+ hsa->attr.mode = S_IRUGO;
+ hsa->show = show_hw_stats;
+ hsa->store = NULL;
+ hsa->index = index;
+ hsa->port_num = port_num;
+
+ return &hsa->attr;
+}
+
+static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
+{
+ struct hw_stats_attribute *hsa;
+
+ hsa = kmalloc(sizeof(*hsa), GFP_KERNEL);
+ if (!hsa)
+ return NULL;
+
+ hsa->attr.name = name;
+ hsa->attr.mode = S_IWUSR | S_IRUGO;
+ hsa->show = show_stats_lifespan;
+ hsa->store = set_stats_lifespan;
+ hsa->index = 0;
+ hsa->port_num = port_num;
+
+ return &hsa->attr;
+}
+
+static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
+ u8 port_num)
+{
+ struct attribute_group *hsag = NULL;
+ struct rdma_hw_stats *stats;
+ int i = 0, ret;
+
+ stats = device->alloc_hw_stats(device, port_num);
+
+ if (!stats)
+ return;
+
+ if (!stats->names || stats->num_counters <= 0)
+ goto err;
+
+	hsag = kzalloc(sizeof(*hsag) +
+		       /* 1 extra for the lifespan config entry */
+		       sizeof(void *) * (stats->num_counters + 1),
+		       GFP_KERNEL);
+	if (!hsag)
+		goto err;
+
+ ret = device->get_hw_stats(device, stats, port_num,
+ stats->num_counters);
+ if (ret != stats->num_counters)
+ goto err;
+
+ stats->timestamp = jiffies;
+
+ hsag->name = "hw_counters";
+ hsag->attrs = (void *)hsag + sizeof(*hsag);
+
+ for (i = 0; i < stats->num_counters; i++) {
+ hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]);
+ if (!hsag->attrs[i])
+ goto err;
+ }
+
+ /* treat an error here as non-fatal */
+ hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num);
+
+ if (port) {
+ struct kobject *kobj = &port->kobj;
+ ret = sysfs_create_group(kobj, hsag);
+ if (ret)
+ goto err;
+ port->hw_stats_ag = hsag;
+ port->hw_stats = stats;
+ } else {
+ struct kobject *kobj = &device->dev.kobj;
+ ret = sysfs_create_group(kobj, hsag);
+ if (ret)
+ goto err;
+ device->hw_stats_ag = hsag;
+ device->hw_stats = stats;
+ }
+
+ return;
+
+err:
+	kfree(stats);
+	if (hsag) {
+		for (; i >= 0; i--)
+			kfree(hsag->attrs[i]);
+		kfree(hsag);
+	}
+}
+
static int add_port(struct ib_device *device, int port_num,
int (*port_callback)(struct ib_device *,
u8, struct kobject *))
@@ -835,6 +1055,14 @@ static int add_port(struct ib_device *device, int port_num,
goto err_remove_pkey;
}
+ /*
+ * If port == 0, it means we have only one port and the parent
+ * device, not this port device, should be the holder of the
+ * hw_counters
+ */
+ if (device->alloc_hw_stats && port_num)
+ setup_hw_stats(device, p, port_num);
+
list_add_tail(&p->kobj.entry, &device->port_list);
kobject_uevent(&p->kobj, KOBJ_ADD);
@@ -972,120 +1200,6 @@ static struct device_attribute *ib_class_attributes[] = {
&dev_attr_node_desc
};
-/* Show a given an attribute in the statistics group */
-static ssize_t show_protocol_stat(const struct device *device,
- struct device_attribute *attr, char *buf,
- unsigned offset)
-{
- struct ib_device *dev = container_of(device, struct ib_device, dev);
- union rdma_protocol_stats stats;
- ssize_t ret;
-
- ret = dev->get_protocol_stats(dev, &stats);
- if (ret)
- return ret;
-
- return sprintf(buf, "%llu\n",
- (unsigned long long) ((u64 *) &stats)[offset]);
-}
-
-/* generate a read-only iwarp statistics attribute */
-#define IW_STATS_ENTRY(name) \
-static ssize_t show_##name(struct device *device, \
- struct device_attribute *attr, char *buf) \
-{ \
- return show_protocol_stat(device, attr, buf, \
- offsetof(struct iw_protocol_stats, name) / \
- sizeof (u64)); \
-} \
-static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
-
-IW_STATS_ENTRY(ipInReceives);
-IW_STATS_ENTRY(ipInHdrErrors);
-IW_STATS_ENTRY(ipInTooBigErrors);
-IW_STATS_ENTRY(ipInNoRoutes);
-IW_STATS_ENTRY(ipInAddrErrors);
-IW_STATS_ENTRY(ipInUnknownProtos);
-IW_STATS_ENTRY(ipInTruncatedPkts);
-IW_STATS_ENTRY(ipInDiscards);
-IW_STATS_ENTRY(ipInDelivers);
-IW_STATS_ENTRY(ipOutForwDatagrams);
-IW_STATS_ENTRY(ipOutRequests);
-IW_STATS_ENTRY(ipOutDiscards);
-IW_STATS_ENTRY(ipOutNoRoutes);
-IW_STATS_ENTRY(ipReasmTimeout);
-IW_STATS_ENTRY(ipReasmReqds);
-IW_STATS_ENTRY(ipReasmOKs);
-IW_STATS_ENTRY(ipReasmFails);
-IW_STATS_ENTRY(ipFragOKs);
-IW_STATS_ENTRY(ipFragFails);
-IW_STATS_ENTRY(ipFragCreates);
-IW_STATS_ENTRY(ipInMcastPkts);
-IW_STATS_ENTRY(ipOutMcastPkts);
-IW_STATS_ENTRY(ipInBcastPkts);
-IW_STATS_ENTRY(ipOutBcastPkts);
-IW_STATS_ENTRY(tcpRtoAlgorithm);
-IW_STATS_ENTRY(tcpRtoMin);
-IW_STATS_ENTRY(tcpRtoMax);
-IW_STATS_ENTRY(tcpMaxConn);
-IW_STATS_ENTRY(tcpActiveOpens);
-IW_STATS_ENTRY(tcpPassiveOpens);
-IW_STATS_ENTRY(tcpAttemptFails);
-IW_STATS_ENTRY(tcpEstabResets);
-IW_STATS_ENTRY(tcpCurrEstab);
-IW_STATS_ENTRY(tcpInSegs);
-IW_STATS_ENTRY(tcpOutSegs);
-IW_STATS_ENTRY(tcpRetransSegs);
-IW_STATS_ENTRY(tcpInErrs);
-IW_STATS_ENTRY(tcpOutRsts);
-
-static struct attribute *iw_proto_stats_attrs[] = {
- &dev_attr_ipInReceives.attr,
- &dev_attr_ipInHdrErrors.attr,
- &dev_attr_ipInTooBigErrors.attr,
- &dev_attr_ipInNoRoutes.attr,
- &dev_attr_ipInAddrErrors.attr,
- &dev_attr_ipInUnknownProtos.attr,
- &dev_attr_ipInTruncatedPkts.attr,
- &dev_attr_ipInDiscards.attr,
- &dev_attr_ipInDelivers.attr,
- &dev_attr_ipOutForwDatagrams.attr,
- &dev_attr_ipOutRequests.attr,
- &dev_attr_ipOutDiscards.attr,
- &dev_attr_ipOutNoRoutes.attr,
- &dev_attr_ipReasmTimeout.attr,
- &dev_attr_ipReasmReqds.attr,
- &dev_attr_ipReasmOKs.attr,
- &dev_attr_ipReasmFails.attr,
- &dev_attr_ipFragOKs.attr,
- &dev_attr_ipFragFails.attr,
- &dev_attr_ipFragCreates.attr,
- &dev_attr_ipInMcastPkts.attr,
- &dev_attr_ipOutMcastPkts.attr,
- &dev_attr_ipInBcastPkts.attr,
- &dev_attr_ipOutBcastPkts.attr,
- &dev_attr_tcpRtoAlgorithm.attr,
- &dev_attr_tcpRtoMin.attr,
- &dev_attr_tcpRtoMax.attr,
- &dev_attr_tcpMaxConn.attr,
- &dev_attr_tcpActiveOpens.attr,
- &dev_attr_tcpPassiveOpens.attr,
- &dev_attr_tcpAttemptFails.attr,
- &dev_attr_tcpEstabResets.attr,
- &dev_attr_tcpCurrEstab.attr,
- &dev_attr_tcpInSegs.attr,
- &dev_attr_tcpOutSegs.attr,
- &dev_attr_tcpRetransSegs.attr,
- &dev_attr_tcpInErrs.attr,
- &dev_attr_tcpOutRsts.attr,
- NULL
-};
-
-static struct attribute_group iw_stats_group = {
- .name = "proto_stats",
- .attrs = iw_proto_stats_attrs,
-};
-
static void free_port_list_attributes(struct ib_device *device)
{
struct kobject *p, *t;
@@ -1093,6 +1207,10 @@ static void free_port_list_attributes(struct ib_device *device)
list_for_each_entry_safe(p, t, &device->port_list, entry) {
struct ib_port *port = container_of(p, struct ib_port, kobj);
list_del(&p->entry);
+ if (port->hw_stats) {
+ kfree(port->hw_stats);
+ free_hsag(&port->kobj, port->hw_stats_ag);
+ }
sysfs_remove_group(p, port->pma_table);
sysfs_remove_group(p, &port->pkey_group);
sysfs_remove_group(p, &port->gid_group);
@@ -1149,11 +1267,8 @@ int ib_device_register_sysfs(struct ib_device *device,
}
}
- if (device->node_type == RDMA_NODE_RNIC && device->get_protocol_stats) {
- ret = sysfs_create_group(&class_dev->kobj, &iw_stats_group);
- if (ret)
- goto err_put;
- }
+ if (device->alloc_hw_stats)
+ setup_hw_stats(device, NULL, 0);
return 0;
@@ -1169,15 +1284,18 @@ err:
void ib_device_unregister_sysfs(struct ib_device *device)
{
- /* Hold kobject until ib_dealloc_device() */
- struct kobject *kobj_dev = kobject_get(&device->dev.kobj);
int i;
- if (device->node_type == RDMA_NODE_RNIC && device->get_protocol_stats)
- sysfs_remove_group(kobj_dev, &iw_stats_group);
+ /* Hold kobject until ib_dealloc_device() */
+ kobject_get(&device->dev.kobj);
free_port_list_attributes(device);
+ if (device->hw_stats) {
+ kfree(device->hw_stats);
+ free_hsag(&device->dev.kobj, device->hw_stats_ag);
+ }
+
for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i)
device_remove_file(&device->dev, ib_class_attributes[i]);
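With this change, counters surface in sysfs under hw_counters/ -- on the device directory when alloc_hw_stats() is invoked for port 0, otherwise under each port -- with values cached for a writable lifespan of 0-10000 ms. The cxgb3 and cxgb4 conversions below are complete instances of the new contract; what follows is only a condensed sketch, with all example_* names invented:

static const char * const example_names[] = { "example_counter" };

static struct rdma_hw_stats *example_alloc_hw_stats(struct ib_device *ibdev,
						    u8 port_num)
{
	/* port_num == 0 requests device-wide stats */
	return rdma_alloc_hw_stats_struct(example_names,
					  ARRAY_SIZE(example_names),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int example_get_hw_stats(struct ib_device *ibdev,
				struct rdma_hw_stats *stats,
				u8 port_num, int index)
{
	stats->value[0] = 42;		/* read from hardware here */
	return stats->num_counters;	/* full count refreshes the cache */
}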
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index c7ad0a4c8b15..c0c7cf8af3f4 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_MLX5_INFINIBAND) += mlx5/
obj-$(CONFIG_INFINIBAND_NES) += nes/
obj-$(CONFIG_INFINIBAND_OCRDMA) += ocrdma/
obj-$(CONFIG_INFINIBAND_USNIC) += usnic/
+obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index de1c61b417d6..ada2e5009c86 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -327,7 +327,7 @@ int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
kfree(cq->sw_queue);
dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
(1UL << (cq->size_log2))
- * sizeof(struct t3_cqe), cq->queue,
+ * sizeof(struct t3_cqe) + 1, cq->queue,
dma_unmap_addr(cq, mapping));
cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
return err;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 47cb927a0dd6..bb1a839d4d6d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1218,59 +1218,119 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
iwch_dev->rdev.rnic_info.pdev->device);
}
-static int iwch_get_mib(struct ib_device *ibdev,
- union rdma_protocol_stats *stats)
+enum counters {
+ IPINRECEIVES,
+ IPINHDRERRORS,
+ IPINADDRERRORS,
+ IPINUNKNOWNPROTOS,
+ IPINDISCARDS,
+ IPINDELIVERS,
+ IPOUTREQUESTS,
+ IPOUTDISCARDS,
+ IPOUTNOROUTES,
+ IPREASMTIMEOUT,
+ IPREASMREQDS,
+ IPREASMOKS,
+ IPREASMFAILS,
+ TCPACTIVEOPENS,
+ TCPPASSIVEOPENS,
+ TCPATTEMPTFAILS,
+ TCPESTABRESETS,
+ TCPCURRESTAB,
+ TCPINSEGS,
+ TCPOUTSEGS,
+ TCPRETRANSSEGS,
+ TCPINERRS,
+ TCPOUTRSTS,
+ TCPRTOMIN,
+ TCPRTOMAX,
+ NR_COUNTERS
+};
+
+static const char * const names[] = {
+ [IPINRECEIVES] = "ipInReceives",
+ [IPINHDRERRORS] = "ipInHdrErrors",
+ [IPINADDRERRORS] = "ipInAddrErrors",
+ [IPINUNKNOWNPROTOS] = "ipInUnknownProtos",
+ [IPINDISCARDS] = "ipInDiscards",
+ [IPINDELIVERS] = "ipInDelivers",
+ [IPOUTREQUESTS] = "ipOutRequests",
+ [IPOUTDISCARDS] = "ipOutDiscards",
+ [IPOUTNOROUTES] = "ipOutNoRoutes",
+ [IPREASMTIMEOUT] = "ipReasmTimeout",
+ [IPREASMREQDS] = "ipReasmReqds",
+ [IPREASMOKS] = "ipReasmOKs",
+ [IPREASMFAILS] = "ipReasmFails",
+ [TCPACTIVEOPENS] = "tcpActiveOpens",
+ [TCPPASSIVEOPENS] = "tcpPassiveOpens",
+ [TCPATTEMPTFAILS] = "tcpAttemptFails",
+ [TCPESTABRESETS] = "tcpEstabResets",
+ [TCPCURRESTAB] = "tcpCurrEstab",
+ [TCPINSEGS] = "tcpInSegs",
+ [TCPOUTSEGS] = "tcpOutSegs",
+ [TCPRETRANSSEGS] = "tcpRetransSegs",
+ [TCPINERRS] = "tcpInErrs",
+ [TCPOUTRSTS] = "tcpOutRsts",
+ [TCPRTOMIN] = "tcpRtoMin",
+ [TCPRTOMAX] = "tcpRtoMax",
+};
+
+static struct rdma_hw_stats *iwch_alloc_stats(struct ib_device *ibdev,
+ u8 port_num)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
+
+ /* Our driver only supports device level stats */
+ if (port_num != 0)
+ return NULL;
+
+ return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats,
+ u8 port, int index)
{
struct iwch_dev *dev;
struct tp_mib_stats m;
int ret;
+ if (port != 0 || !stats)
+ return -ENOSYS;
+
PDBG("%s ibdev %p\n", __func__, ibdev);
dev = to_iwch_dev(ibdev);
ret = dev->rdev.t3cdev_p->ctl(dev->rdev.t3cdev_p, RDMA_GET_MIB, &m);
if (ret)
return -ENOSYS;
- memset(stats, 0, sizeof *stats);
- stats->iw.ipInReceives = ((u64) m.ipInReceive_hi << 32) +
- m.ipInReceive_lo;
- stats->iw.ipInHdrErrors = ((u64) m.ipInHdrErrors_hi << 32) +
- m.ipInHdrErrors_lo;
- stats->iw.ipInAddrErrors = ((u64) m.ipInAddrErrors_hi << 32) +
- m.ipInAddrErrors_lo;
- stats->iw.ipInUnknownProtos = ((u64) m.ipInUnknownProtos_hi << 32) +
- m.ipInUnknownProtos_lo;
- stats->iw.ipInDiscards = ((u64) m.ipInDiscards_hi << 32) +
- m.ipInDiscards_lo;
- stats->iw.ipInDelivers = ((u64) m.ipInDelivers_hi << 32) +
- m.ipInDelivers_lo;
- stats->iw.ipOutRequests = ((u64) m.ipOutRequests_hi << 32) +
- m.ipOutRequests_lo;
- stats->iw.ipOutDiscards = ((u64) m.ipOutDiscards_hi << 32) +
- m.ipOutDiscards_lo;
- stats->iw.ipOutNoRoutes = ((u64) m.ipOutNoRoutes_hi << 32) +
- m.ipOutNoRoutes_lo;
- stats->iw.ipReasmTimeout = (u64) m.ipReasmTimeout;
- stats->iw.ipReasmReqds = (u64) m.ipReasmReqds;
- stats->iw.ipReasmOKs = (u64) m.ipReasmOKs;
- stats->iw.ipReasmFails = (u64) m.ipReasmFails;
- stats->iw.tcpActiveOpens = (u64) m.tcpActiveOpens;
- stats->iw.tcpPassiveOpens = (u64) m.tcpPassiveOpens;
- stats->iw.tcpAttemptFails = (u64) m.tcpAttemptFails;
- stats->iw.tcpEstabResets = (u64) m.tcpEstabResets;
- stats->iw.tcpOutRsts = (u64) m.tcpOutRsts;
- stats->iw.tcpCurrEstab = (u64) m.tcpCurrEstab;
- stats->iw.tcpInSegs = ((u64) m.tcpInSegs_hi << 32) +
- m.tcpInSegs_lo;
- stats->iw.tcpOutSegs = ((u64) m.tcpOutSegs_hi << 32) +
- m.tcpOutSegs_lo;
- stats->iw.tcpRetransSegs = ((u64) m.tcpRetransSeg_hi << 32) +
- m.tcpRetransSeg_lo;
- stats->iw.tcpInErrs = ((u64) m.tcpInErrs_hi << 32) +
- m.tcpInErrs_lo;
- stats->iw.tcpRtoMin = (u64) m.tcpRtoMin;
- stats->iw.tcpRtoMax = (u64) m.tcpRtoMax;
- return 0;
+ stats->value[IPINRECEIVES] = ((u64)m.ipInReceive_hi << 32) + m.ipInReceive_lo;
+ stats->value[IPINHDRERRORS] = ((u64)m.ipInHdrErrors_hi << 32) + m.ipInHdrErrors_lo;
+ stats->value[IPINADDRERRORS] = ((u64)m.ipInAddrErrors_hi << 32) + m.ipInAddrErrors_lo;
+ stats->value[IPINUNKNOWNPROTOS] = ((u64)m.ipInUnknownProtos_hi << 32) + m.ipInUnknownProtos_lo;
+ stats->value[IPINDISCARDS] = ((u64)m.ipInDiscards_hi << 32) + m.ipInDiscards_lo;
+ stats->value[IPINDELIVERS] = ((u64)m.ipInDelivers_hi << 32) + m.ipInDelivers_lo;
+ stats->value[IPOUTREQUESTS] = ((u64)m.ipOutRequests_hi << 32) + m.ipOutRequests_lo;
+ stats->value[IPOUTDISCARDS] = ((u64)m.ipOutDiscards_hi << 32) + m.ipOutDiscards_lo;
+ stats->value[IPOUTNOROUTES] = ((u64)m.ipOutNoRoutes_hi << 32) + m.ipOutNoRoutes_lo;
+ stats->value[IPREASMTIMEOUT] = m.ipReasmTimeout;
+ stats->value[IPREASMREQDS] = m.ipReasmReqds;
+ stats->value[IPREASMOKS] = m.ipReasmOKs;
+ stats->value[IPREASMFAILS] = m.ipReasmFails;
+ stats->value[TCPACTIVEOPENS] = m.tcpActiveOpens;
+ stats->value[TCPPASSIVEOPENS] = m.tcpPassiveOpens;
+ stats->value[TCPATTEMPTFAILS] = m.tcpAttemptFails;
+ stats->value[TCPESTABRESETS] = m.tcpEstabResets;
+	stats->value[TCPCURRESTAB] = m.tcpCurrEstab;
+	stats->value[TCPINSEGS] = ((u64)m.tcpInSegs_hi << 32) + m.tcpInSegs_lo;
+	stats->value[TCPOUTSEGS] = ((u64)m.tcpOutSegs_hi << 32) + m.tcpOutSegs_lo;
+	stats->value[TCPRETRANSSEGS] = ((u64)m.tcpRetransSeg_hi << 32) + m.tcpRetransSeg_lo;
+	stats->value[TCPINERRS] = ((u64)m.tcpInErrs_hi << 32) + m.tcpInErrs_lo;
+	stats->value[TCPOUTRSTS] = m.tcpOutRsts;
+ stats->value[TCPRTOMIN] = m.tcpRtoMin;
+ stats->value[TCPRTOMAX] = m.tcpRtoMax;
+
+ return stats->num_counters;
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
@@ -1373,7 +1433,8 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.req_notify_cq = iwch_arm_cq;
dev->ibdev.post_send = iwch_post_send;
dev->ibdev.post_recv = iwch_post_receive;
- dev->ibdev.get_protocol_stats = iwch_get_mib;
+ dev->ibdev.alloc_hw_stats = iwch_alloc_stats;
+ dev->ibdev.get_hw_stats = iwch_get_mib;
dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION;
dev->ibdev.get_port_immutable = iwch_port_immutable;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 7574f394fdac..dd8a86b726d2 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -446,20 +446,59 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
c4iw_dev->rdev.lldi.pdev->device);
}
+enum counters {
+ IP4INSEGS,
+ IP4OUTSEGS,
+ IP4RETRANSSEGS,
+ IP4OUTRSTS,
+ IP6INSEGS,
+ IP6OUTSEGS,
+ IP6RETRANSSEGS,
+ IP6OUTRSTS,
+ NR_COUNTERS
+};
+
+static const char * const names[] = {
+ [IP4INSEGS] = "ip4InSegs",
+ [IP4OUTSEGS] = "ip4OutSegs",
+ [IP4RETRANSSEGS] = "ip4RetransSegs",
+ [IP4OUTRSTS] = "ip4OutRsts",
+ [IP6INSEGS] = "ip6InSegs",
+ [IP6OUTSEGS] = "ip6OutSegs",
+ [IP6RETRANSSEGS] = "ip6RetransSegs",
+ [IP6OUTRSTS] = "ip6OutRsts"
+};
+
+static struct rdma_hw_stats *c4iw_alloc_stats(struct ib_device *ibdev,
+ u8 port_num)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
+
+ if (port_num != 0)
+ return NULL;
+
+ return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
static int c4iw_get_mib(struct ib_device *ibdev,
- union rdma_protocol_stats *stats)
+ struct rdma_hw_stats *stats,
+ u8 port, int index)
{
struct tp_tcp_stats v4, v6;
struct c4iw_dev *c4iw_dev = to_c4iw_dev(ibdev);
cxgb4_get_tcp_stats(c4iw_dev->rdev.lldi.pdev, &v4, &v6);
- memset(stats, 0, sizeof *stats);
- stats->iw.tcpInSegs = v4.tcp_in_segs + v6.tcp_in_segs;
- stats->iw.tcpOutSegs = v4.tcp_out_segs + v6.tcp_out_segs;
- stats->iw.tcpRetransSegs = v4.tcp_retrans_segs + v6.tcp_retrans_segs;
- stats->iw.tcpOutRsts = v4.tcp_out_rsts + v6.tcp_out_rsts;
-
- return 0;
+ stats->value[IP4INSEGS] = v4.tcp_in_segs;
+ stats->value[IP4OUTSEGS] = v4.tcp_out_segs;
+ stats->value[IP4RETRANSSEGS] = v4.tcp_retrans_segs;
+ stats->value[IP4OUTRSTS] = v4.tcp_out_rsts;
+ stats->value[IP6INSEGS] = v6.tcp_in_segs;
+ stats->value[IP6OUTSEGS] = v6.tcp_out_segs;
+ stats->value[IP6RETRANSSEGS] = v6.tcp_retrans_segs;
+ stats->value[IP6OUTRSTS] = v6.tcp_out_rsts;
+
+ return stats->num_counters;
}
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
@@ -562,7 +601,8 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.req_notify_cq = c4iw_arm_cq;
dev->ibdev.post_send = c4iw_post_send;
dev->ibdev.post_recv = c4iw_post_receive;
- dev->ibdev.get_protocol_stats = c4iw_get_mib;
+ dev->ibdev.alloc_hw_stats = c4iw_alloc_stats;
+ dev->ibdev.get_hw_stats = c4iw_get_mib;
dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
dev->ibdev.get_port_immutable = c4iw_port_immutable;
dev->ibdev.drain_sq = c4iw_drain_sq;
diff --git a/drivers/staging/rdma/hfi1/Kconfig b/drivers/infiniband/hw/hfi1/Kconfig
index a925fb0db706..a925fb0db706 100644
--- a/drivers/staging/rdma/hfi1/Kconfig
+++ b/drivers/infiniband/hw/hfi1/Kconfig
diff --git a/drivers/staging/rdma/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile
index 8dc59382ee96..9b5382c94b0c 100644
--- a/drivers/staging/rdma/hfi1/Makefile
+++ b/drivers/infiniband/hw/hfi1/Makefile
@@ -7,7 +7,7 @@
#
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o
-hfi1-y := affinity.o chip.o device.o diag.o driver.o efivar.o \
+hfi1-y := affinity.o chip.o device.o driver.o efivar.o \
eprom.o file_ops.o firmware.o \
init.o intr.o mad.o mmu_rb.o pcie.o pio.o pio_copy.o platform.o \
qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o twsi.o \
diff --git a/drivers/staging/rdma/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index 6e7050ab9e16..6e7050ab9e16 100644
--- a/drivers/staging/rdma/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
diff --git a/drivers/staging/rdma/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h
index 20f52fe74091..20f52fe74091 100644
--- a/drivers/staging/rdma/hfi1/affinity.h
+++ b/drivers/infiniband/hw/hfi1/affinity.h
diff --git a/drivers/staging/rdma/hfi1/aspm.h b/drivers/infiniband/hw/hfi1/aspm.h
index 0d58fe3b49b5..0d58fe3b49b5 100644
--- a/drivers/staging/rdma/hfi1/aspm.h
+++ b/drivers/infiniband/hw/hfi1/aspm.h
diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index dcae8e723f98..3b876da745a1 100644
--- a/drivers/staging/rdma/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1037,6 +1037,7 @@ static void dc_shutdown(struct hfi1_devdata *);
static void dc_start(struct hfi1_devdata *);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
unsigned int *np);
+static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd);
/*
* Error interrupt table entry. This is used as input to the interrupt
@@ -6105,7 +6106,7 @@ int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
}
/* this access is valid only when the link is up */
- if ((ppd->host_link_state & HLS_UP) == 0) {
+ if (ppd->host_link_state & HLS_DOWN) {
dd_dev_info(dd, "%s: link state %s not up\n",
__func__, link_state_name(ppd->host_link_state));
ret = -EBUSY;
@@ -6961,6 +6962,8 @@ void handle_link_down(struct work_struct *work)
}
reset_neighbor_info(ppd);
+ if (ppd->mgmt_allowed)
+ remove_full_mgmt_pkey(ppd);
/* disable the port */
clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
@@ -7069,6 +7072,12 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
}
+static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd)
+{
+ ppd->pkeys[2] = 0;
+ (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+}
+
/*
* Convert the given link width to the OPA link width bitmask.
*/
@@ -7429,7 +7438,7 @@ void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
retry:
mutex_lock(&ppd->hls_lock);
/* only apply if the link is up */
- if (!(ppd->host_link_state & HLS_UP)) {
+ if (ppd->host_link_state & HLS_DOWN) {
/* still going up..wait and retry */
if (ppd->host_link_state & HLS_GOING_UP) {
if (++tries < 1000) {
@@ -9212,9 +9221,6 @@ void reset_qsfp(struct hfi1_pportdata *ppd)
/* Reset the QSFP */
mask = (u64)QSFP_HFI0_RESET_N;
- qsfp_mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE);
- qsfp_mask |= mask;
- write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_OE : ASIC_QSFP1_OE, qsfp_mask);
qsfp_mask = read_csr(dd,
dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
@@ -9252,6 +9258,12 @@ static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
__func__);
+ /*
+ * The remaining alarms/warnings don't matter if the link is down.
+ */
+ if (ppd->host_link_state & HLS_DOWN)
+ return 0;
+
if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
(qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
@@ -9346,9 +9358,8 @@ void qsfp_event(struct work_struct *work)
return;
/*
- * Turn DC back on after cables has been
- * re-inserted. Up until now, the DC has been in
- * reset to save power.
+ * Turn DC back on after cable has been re-inserted. Up until
+ * now, the DC has been in reset to save power.
*/
dc_start(dd);
@@ -9480,7 +9491,15 @@ int bringup_serdes(struct hfi1_pportdata *ppd)
return ret;
}
- /* tune the SERDES to a ballpark setting for
+ get_port_type(ppd);
+ if (ppd->port_type == PORT_TYPE_QSFP) {
+ set_qsfp_int_n(ppd, 0);
+ wait_for_qsfp_init(ppd);
+ set_qsfp_int_n(ppd, 1);
+ }
+
+ /*
+ * Tune the SerDes to a ballpark setting for
* optimal signal and bit error rate
* Needs to be done before starting the link
*/
@@ -10074,7 +10093,7 @@ u32 driver_physical_state(struct hfi1_pportdata *ppd)
*/
u32 driver_logical_state(struct hfi1_pportdata *ppd)
{
- if (ppd->host_link_state && !(ppd->host_link_state & HLS_UP))
+ if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
return IB_PORT_DOWN;
switch (ppd->host_link_state & HLS_UP) {
@@ -14578,7 +14597,7 @@ u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
(reason), (ret))
/*
- * Initialize the Avago Thermal sensor.
+ * Initialize the thermal sensor.
*
* After initialization, enable polling of thermal sensor through
* SBus interface. In order for this to work, the SBus Master
diff --git a/drivers/staging/rdma/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 1948706fff1a..66a327978739 100644
--- a/drivers/staging/rdma/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -398,6 +398,12 @@
/* Lane ID for general configuration registers */
#define GENERAL_CONFIG 4
+/* LINK_TUNING_PARAMETERS fields */
+#define TUNING_METHOD_SHIFT 24
+
+/* LINK_OPTIMIZATION_SETTINGS fields */
+#define ENABLE_EXT_DEV_CONFIG_SHIFT 24
+
/* LOAD_DATA 8051 command shifts and fields */
#define LOAD_DATA_FIELD_ID_SHIFT 40
#define LOAD_DATA_FIELD_ID_MASK 0xfull
diff --git a/drivers/staging/rdma/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
index 8744de6667c2..8744de6667c2 100644
--- a/drivers/staging/rdma/hfi1/chip_registers.h
+++ b/drivers/infiniband/hw/hfi1/chip_registers.h
diff --git a/drivers/staging/rdma/hfi1/common.h b/drivers/infiniband/hw/hfi1/common.h
index e9b6bb322025..fcc9c217a97a 100644
--- a/drivers/staging/rdma/hfi1/common.h
+++ b/drivers/infiniband/hw/hfi1/common.h
@@ -178,7 +178,8 @@
HFI1_CAP_PKEY_CHECK | \
HFI1_CAP_NO_INTEGRITY)
-#define HFI1_USER_SWVERSION ((HFI1_USER_SWMAJOR << 16) | HFI1_USER_SWMINOR)
+#define HFI1_USER_SWVERSION ((HFI1_USER_SWMAJOR << HFI1_SWMAJOR_SHIFT) | \
+ HFI1_USER_SWMINOR)
#ifndef HFI1_KERN_TYPE
#define HFI1_KERN_TYPE 0
@@ -349,6 +350,8 @@ struct hfi1_message_header {
#define HFI1_BECN_MASK 1
#define HFI1_BECN_SMASK BIT(HFI1_BECN_SHIFT)
+#define HFI1_PSM_IOC_BASE_SEQ 0x0
+
static inline __u64 rhf_to_cpu(const __le32 *rbuf)
{
return __le64_to_cpu(*((__le64 *)rbuf));
diff --git a/drivers/staging/rdma/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index dbab9d9cc288..dbab9d9cc288 100644
--- a/drivers/staging/rdma/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
diff --git a/drivers/staging/rdma/hfi1/debugfs.h b/drivers/infiniband/hw/hfi1/debugfs.h
index b6fb6814f1b8..b6fb6814f1b8 100644
--- a/drivers/staging/rdma/hfi1/debugfs.h
+++ b/drivers/infiniband/hw/hfi1/debugfs.h
diff --git a/drivers/staging/rdma/hfi1/device.c b/drivers/infiniband/hw/hfi1/device.c
index c05c39da83b1..bf64b5a7bfd7 100644
--- a/drivers/staging/rdma/hfi1/device.c
+++ b/drivers/infiniband/hw/hfi1/device.c
@@ -60,7 +60,8 @@ static dev_t hfi1_dev;
int hfi1_cdev_init(int minor, const char *name,
const struct file_operations *fops,
struct cdev *cdev, struct device **devp,
- bool user_accessible)
+ bool user_accessible,
+ struct kobject *parent)
{
const dev_t dev = MKDEV(MAJOR(hfi1_dev), minor);
struct device *device = NULL;
@@ -68,6 +69,7 @@ int hfi1_cdev_init(int minor, const char *name,
cdev_init(cdev, fops);
cdev->owner = THIS_MODULE;
+ cdev->kobj.parent = parent;
kobject_set_name(&cdev->kobj, name);
ret = cdev_add(cdev, dev, 1);
@@ -82,13 +84,13 @@ int hfi1_cdev_init(int minor, const char *name,
else
device = device_create(class, NULL, dev, NULL, "%s", name);
- if (!IS_ERR(device))
- goto done;
- ret = PTR_ERR(device);
- device = NULL;
- pr_err("Could not create device for minor %d, %s (err %d)\n",
- minor, name, -ret);
- cdev_del(cdev);
+ if (IS_ERR(device)) {
+ ret = PTR_ERR(device);
+ device = NULL;
+ pr_err("Could not create device for minor %d, %s (err %d)\n",
+ minor, name, -ret);
+ cdev_del(cdev);
+ }
done:
*devp = device;
return ret;
diff --git a/drivers/staging/rdma/hfi1/device.h b/drivers/infiniband/hw/hfi1/device.h
index 5bb3e83cf2da..c3ec19cb0ac9 100644
--- a/drivers/staging/rdma/hfi1/device.h
+++ b/drivers/infiniband/hw/hfi1/device.h
@@ -50,7 +50,8 @@
int hfi1_cdev_init(int minor, const char *name,
const struct file_operations *fops,
struct cdev *cdev, struct device **devp,
- bool user_accessible);
+ bool user_accessible,
+ struct kobject *parent);
void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp);
const char *class_name(void);
int __init dev_init(void);
diff --git a/drivers/staging/rdma/hfi1/dma.c b/drivers/infiniband/hw/hfi1/dma.c
index 7e8dab892848..7e8dab892848 100644
--- a/drivers/staging/rdma/hfi1/dma.c
+++ b/drivers/infiniband/hw/hfi1/dma.c
diff --git a/drivers/staging/rdma/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index 700c6fa3a633..c75b0ae688f8 100644
--- a/drivers/staging/rdma/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -1161,7 +1161,7 @@ int hfi1_set_lid(struct hfi1_pportdata *ppd, u32 lid, u8 lmc)
ppd->lmc = lmc;
hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_LIDLMC, 0);
- dd_dev_info(dd, "IB%u:%u got a lid: 0x%x\n", dd->unit, ppd->port, lid);
+ dd_dev_info(dd, "port %u: got a lid: 0x%x\n", ppd->port, lid);
return 0;
}
diff --git a/drivers/staging/rdma/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c
index 106349fc1fb9..106349fc1fb9 100644
--- a/drivers/staging/rdma/hfi1/efivar.c
+++ b/drivers/infiniband/hw/hfi1/efivar.c
diff --git a/drivers/staging/rdma/hfi1/efivar.h b/drivers/infiniband/hw/hfi1/efivar.h
index 94e9e70de568..94e9e70de568 100644
--- a/drivers/staging/rdma/hfi1/efivar.h
+++ b/drivers/infiniband/hw/hfi1/efivar.h
diff --git a/drivers/infiniband/hw/hfi1/eprom.c b/drivers/infiniband/hw/hfi1/eprom.c
new file mode 100644
index 000000000000..36b77943cbfd
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/eprom.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright(c) 2015, 2016 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/delay.h>
+#include "hfi.h"
+#include "common.h"
+#include "eprom.h"
+
+#define CMD_SHIFT 24
+#define CMD_RELEASE_POWERDOWN_NOID ((0xab << CMD_SHIFT))
+
+/* controller interface speeds */
+#define EP_SPEED_FULL 0x2 /* full speed */
+
+/*
+ * How long to wait for the EPROM to become available, in ms.
+ * The spec'd 32 Mb EPROM takes around 40s to erase, then write.
+ * Double it for safety.
+ */
+#define EPROM_TIMEOUT 80000 /* ms */
+
+/*
+ * Initialize the EPROM handler.
+ */
+int eprom_init(struct hfi1_devdata *dd)
+{
+ int ret = 0;
+
+ /* only the discrete chip has an EPROM */
+ if (dd->pcidev->device != PCI_DEVICE_ID_INTEL0)
+ return 0;
+
+ /*
+ * It is OK if both HFIs reset the EPROM as long as they don't
+ * do it at the same time.
+ */
+ ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
+ if (ret) {
+ dd_dev_err(dd,
+ "%s: unable to acquire EPROM resource, no EPROM support\n",
+ __func__);
+ goto done_asic;
+ }
+
+ /* reset EPROM to be sure it is in a good state */
+
+ /* set reset */
+ write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_EP_RESET_SMASK);
+ /* clear reset, set speed */
+ write_csr(dd, ASIC_EEP_CTL_STAT,
+ EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT);
+
+ /* wake the device with command "release powerdown NoID" */
+ write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID);
+
+ dd->eprom_available = true;
+ release_chip_resource(dd, CR_EPROM);
+done_asic:
+ return ret;
+}
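eprom_init() is called from the hfi1 device-init path, outside this excerpt; its only lasting effect is setting dd->eprom_available. A hypothetical call site, for orientation only:

/* Hypothetical caller; the actual wiring is elsewhere in hfi1 init. */
static void example_init_eprom(struct hfi1_devdata *dd)
{
	/* Failure just means no EPROM support; the device still works. */
	if (eprom_init(dd))
		dd_dev_info(dd, "continuing without EPROM support\n");
}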
diff --git a/drivers/staging/rdma/hfi1/eprom.h b/drivers/infiniband/hw/hfi1/eprom.h
index d41f0b1afb15..d41f0b1afb15 100644
--- a/drivers/staging/rdma/hfi1/eprom.h
+++ b/drivers/infiniband/hw/hfi1/eprom.h
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index c1c5bf82addb..7a5b0e676cc7 100644
--- a/drivers/staging/rdma/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -72,8 +72,6 @@
*/
static int hfi1_file_open(struct inode *, struct file *);
static int hfi1_file_close(struct inode *, struct file *);
-static ssize_t hfi1_file_write(struct file *, const char __user *,
- size_t, loff_t *);
static ssize_t hfi1_write_iter(struct kiocb *, struct iov_iter *);
static unsigned int hfi1_poll(struct file *, struct poll_table_struct *);
static int hfi1_file_mmap(struct file *, struct vm_area_struct *);
@@ -86,8 +84,7 @@ static int get_ctxt_info(struct file *, void __user *, __u32);
static int get_base_info(struct file *, void __user *, __u32);
static int setup_ctxt(struct file *);
static int setup_subctxt(struct hfi1_ctxtdata *);
-static int get_user_context(struct file *, struct hfi1_user_info *,
- int, unsigned);
+static int get_user_context(struct file *, struct hfi1_user_info *, int);
static int find_shared_ctxt(struct file *, const struct hfi1_user_info *);
static int allocate_ctxt(struct file *, struct hfi1_devdata *,
struct hfi1_user_info *);
@@ -97,13 +94,15 @@ static int user_event_ack(struct hfi1_ctxtdata *, int, unsigned long);
static int set_ctxt_pkey(struct hfi1_ctxtdata *, unsigned, u16);
static int manage_rcvq(struct hfi1_ctxtdata *, unsigned, int);
static int vma_fault(struct vm_area_struct *, struct vm_fault *);
+static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
+ unsigned long arg);
static const struct file_operations hfi1_file_ops = {
.owner = THIS_MODULE,
- .write = hfi1_file_write,
.write_iter = hfi1_write_iter,
.open = hfi1_file_open,
.release = hfi1_file_close,
+ .unlocked_ioctl = hfi1_file_ioctl,
.poll = hfi1_poll,
.mmap = hfi1_file_mmap,
.llseek = noop_llseek,
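The HFI1_IOCTL_* codes dispatched by the new unlocked_ioctl handler come from the hfi1 uapi header, which is not part of this hunk. A representative definition, purely illustrative -- the real magic number and command indices may differ:

/* Illustrative only: not the actual hfi1 uapi encoding. */
#define EXAMPLE_HFI1_IOCTL_MAGIC	'H'
#define EXAMPLE_IOCTL_ASSIGN_CTXT \
	_IOWR(EXAMPLE_HFI1_IOCTL_MAGIC, 0x01, struct hfi1_user_info)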
@@ -169,6 +168,13 @@ static inline int is_valid_mmap(u64 token)
static int hfi1_file_open(struct inode *inode, struct file *fp)
{
+ struct hfi1_devdata *dd = container_of(inode->i_cdev,
+ struct hfi1_devdata,
+ user_cdev);
+
+ /* Just take a ref now. Not all opens result in a context assign */
+ kobject_get(&dd->kobj);
+
/* The real work is performed later in assign_ctxt() */
fp->private_data = kzalloc(sizeof(struct hfi1_filedata), GFP_KERNEL);
if (fp->private_data) /* no cpu affinity by default */
@@ -176,127 +182,59 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
return fp->private_data ? 0 : -ENOMEM;
}
-static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
- size_t count, loff_t *offset)
+static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
+ unsigned long arg)
{
- const struct hfi1_cmd __user *ucmd;
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct hfi1_cmd cmd;
struct hfi1_user_info uinfo;
struct hfi1_tid_info tinfo;
+ int ret = 0;
unsigned long addr;
- ssize_t consumed = 0, copy = 0, ret = 0;
- void *dest = NULL;
- __u64 user_val = 0;
- int uctxt_required = 1;
- int must_be_root = 0;
-
- /* FIXME: This interface cannot continue out of staging */
- if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
- return -EACCES;
-
- if (count < sizeof(cmd)) {
- ret = -EINVAL;
- goto bail;
- }
-
- ucmd = (const struct hfi1_cmd __user *)data;
- if (copy_from_user(&cmd, ucmd, sizeof(cmd))) {
- ret = -EFAULT;
- goto bail;
- }
-
- consumed = sizeof(cmd);
-
- switch (cmd.type) {
- case HFI1_CMD_ASSIGN_CTXT:
- uctxt_required = 0; /* assigned user context not required */
- copy = sizeof(uinfo);
- dest = &uinfo;
- break;
- case HFI1_CMD_SDMA_STATUS_UPD:
- case HFI1_CMD_CREDIT_UPD:
- copy = 0;
- break;
- case HFI1_CMD_TID_UPDATE:
- case HFI1_CMD_TID_FREE:
- case HFI1_CMD_TID_INVAL_READ:
- copy = sizeof(tinfo);
- dest = &tinfo;
- break;
- case HFI1_CMD_USER_INFO:
- case HFI1_CMD_RECV_CTRL:
- case HFI1_CMD_POLL_TYPE:
- case HFI1_CMD_ACK_EVENT:
- case HFI1_CMD_CTXT_INFO:
- case HFI1_CMD_SET_PKEY:
- case HFI1_CMD_CTXT_RESET:
- copy = 0;
- user_val = cmd.addr;
- break;
- case HFI1_CMD_EP_INFO:
- case HFI1_CMD_EP_ERASE_CHIP:
- case HFI1_CMD_EP_ERASE_RANGE:
- case HFI1_CMD_EP_READ_RANGE:
- case HFI1_CMD_EP_WRITE_RANGE:
- uctxt_required = 0; /* assigned user context not required */
- must_be_root = 1; /* validate user */
- copy = 0;
- break;
- default:
- ret = -EINVAL;
- goto bail;
- }
-
- /* If the command comes with user data, copy it. */
- if (copy) {
- if (copy_from_user(dest, (void __user *)cmd.addr, copy)) {
- ret = -EFAULT;
- goto bail;
- }
- consumed += copy;
- }
-
- /*
- * Make sure there is a uctxt when needed.
- */
- if (uctxt_required && !uctxt) {
- ret = -EINVAL;
- goto bail;
- }
+ int uval = 0;
+ unsigned long ul_uval = 0;
+ u16 uval16 = 0;
+
+ hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
+ if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
+ cmd != HFI1_IOCTL_GET_VERS &&
+ !uctxt)
+ return -EINVAL;
- /* only root can do these operations */
- if (must_be_root && !capable(CAP_SYS_ADMIN)) {
- ret = -EPERM;
- goto bail;
- }
+ switch (cmd) {
+ case HFI1_IOCTL_ASSIGN_CTXT:
+ if (copy_from_user(&uinfo,
+ (struct hfi1_user_info __user *)arg,
+ sizeof(uinfo)))
+ return -EFAULT;
- switch (cmd.type) {
- case HFI1_CMD_ASSIGN_CTXT:
ret = assign_ctxt(fp, &uinfo);
if (ret < 0)
- goto bail;
- ret = setup_ctxt(fp);
+ return ret;
+		ret = setup_ctxt(fp);
if (ret)
- goto bail;
+ return ret;
ret = user_init(fp);
break;
- case HFI1_CMD_CTXT_INFO:
- ret = get_ctxt_info(fp, (void __user *)(unsigned long)
- user_val, cmd.len);
- break;
- case HFI1_CMD_USER_INFO:
- ret = get_base_info(fp, (void __user *)(unsigned long)
- user_val, cmd.len);
+ case HFI1_IOCTL_CTXT_INFO:
+ ret = get_ctxt_info(fp, (void __user *)(unsigned long)arg,
+ sizeof(struct hfi1_ctxt_info));
break;
- case HFI1_CMD_SDMA_STATUS_UPD:
+ case HFI1_IOCTL_USER_INFO:
+ ret = get_base_info(fp, (void __user *)(unsigned long)arg,
+ sizeof(struct hfi1_base_info));
break;
- case HFI1_CMD_CREDIT_UPD:
+ case HFI1_IOCTL_CREDIT_UPD:
if (uctxt && uctxt->sc)
sc_return_credits(uctxt->sc);
break;
- case HFI1_CMD_TID_UPDATE:
+
+ case HFI1_IOCTL_TID_UPDATE:
+ if (copy_from_user(&tinfo,
+				   (struct hfi1_tid_info __user *)arg,
+ sizeof(tinfo)))
+ return -EFAULT;
+
ret = hfi1_user_exp_rcv_setup(fp, &tinfo);
if (!ret) {
/*
@@ -305,57 +243,82 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
* These fields are adjacent in the structure so
* we can copy them at the same time.
*/
- addr = (unsigned long)cmd.addr +
- offsetof(struct hfi1_tid_info, tidcnt);
+ addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
sizeof(tinfo.tidcnt) +
sizeof(tinfo.length)))
ret = -EFAULT;
}
break;
- case HFI1_CMD_TID_INVAL_READ:
- ret = hfi1_user_exp_rcv_invalid(fp, &tinfo);
+
+ case HFI1_IOCTL_TID_FREE:
+ if (copy_from_user(&tinfo,
+			   (struct hfi1_tid_info __user *)arg,
+ sizeof(tinfo)))
+ return -EFAULT;
+
+ ret = hfi1_user_exp_rcv_clear(fp, &tinfo);
if (ret)
break;
- addr = (unsigned long)cmd.addr +
- offsetof(struct hfi1_tid_info, tidcnt);
+ addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
sizeof(tinfo.tidcnt)))
ret = -EFAULT;
break;
- case HFI1_CMD_TID_FREE:
- ret = hfi1_user_exp_rcv_clear(fp, &tinfo);
+
+ case HFI1_IOCTL_TID_INVAL_READ:
+ if (copy_from_user(&tinfo,
+			   (struct hfi1_tid_info __user *)arg,
+ sizeof(tinfo)))
+ return -EFAULT;
+
+ ret = hfi1_user_exp_rcv_invalid(fp, &tinfo);
if (ret)
break;
- addr = (unsigned long)cmd.addr +
- offsetof(struct hfi1_tid_info, tidcnt);
+ addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
sizeof(tinfo.tidcnt)))
ret = -EFAULT;
break;
- case HFI1_CMD_RECV_CTRL:
- ret = manage_rcvq(uctxt, fd->subctxt, (int)user_val);
+
+ case HFI1_IOCTL_RECV_CTRL:
+ ret = get_user(uval, (int __user *)arg);
+ if (ret != 0)
+ return -EFAULT;
+ ret = manage_rcvq(uctxt, fd->subctxt, uval);
break;
- case HFI1_CMD_POLL_TYPE:
- uctxt->poll_type = (typeof(uctxt->poll_type))user_val;
+
+ case HFI1_IOCTL_POLL_TYPE:
+ ret = get_user(uval, (int __user *)arg);
+ if (ret != 0)
+ return -EFAULT;
+ uctxt->poll_type = (typeof(uctxt->poll_type))uval;
break;
- case HFI1_CMD_ACK_EVENT:
- ret = user_event_ack(uctxt, fd->subctxt, user_val);
+
+ case HFI1_IOCTL_ACK_EVENT:
+ ret = get_user(ul_uval, (unsigned long __user *)arg);
+ if (ret != 0)
+ return -EFAULT;
+ ret = user_event_ack(uctxt, fd->subctxt, ul_uval);
break;
- case HFI1_CMD_SET_PKEY:
+
+ case HFI1_IOCTL_SET_PKEY:
+ ret = get_user(uval16, (u16 __user *)arg);
+ if (ret != 0)
+ return -EFAULT;
if (HFI1_CAP_IS_USET(PKEY_CHECK))
- ret = set_ctxt_pkey(uctxt, fd->subctxt, user_val);
+ ret = set_ctxt_pkey(uctxt, fd->subctxt, uval16);
else
- ret = -EPERM;
+ return -EPERM;
break;
- case HFI1_CMD_CTXT_RESET: {
+
+ case HFI1_IOCTL_CTXT_RESET: {
struct send_context *sc;
struct hfi1_devdata *dd;
- if (!uctxt || !uctxt->dd || !uctxt->sc) {
- ret = -EINVAL;
- break;
- }
+ if (!uctxt || !uctxt->dd || !uctxt->sc)
+ return -EINVAL;
+
/*
* There is no protection here. User level has to
* guarantee that no one will be writing to the send
@@ -373,10 +336,9 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
wait_event_interruptible_timeout(
sc->halt_wait, (sc->flags & SCF_HALTED),
msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
- if (!(sc->flags & SCF_HALTED)) {
- ret = -ENOLCK;
- break;
- }
+ if (!(sc->flags & SCF_HALTED))
+ return -ENOLCK;
+
/*
* If the send context was halted due to a Freeze,
* wait until the device has been "unfrozen" before
@@ -387,18 +349,16 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
dd->event_queue,
!(ACCESS_ONCE(dd->flags) & HFI1_FROZEN),
msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
- if (dd->flags & HFI1_FROZEN) {
- ret = -ENOLCK;
- break;
- }
- if (dd->flags & HFI1_FORCED_FREEZE) {
+ if (dd->flags & HFI1_FROZEN)
+ return -ENOLCK;
+
+ if (dd->flags & HFI1_FORCED_FREEZE)
/*
* Don't allow context reset if we are into
* forced freeze
*/
- ret = -ENODEV;
- break;
- }
+ return -ENODEV;
+
sc_disable(sc);
ret = sc_enable(sc);
hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB,
@@ -410,18 +370,17 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
sc_return_credits(sc);
break;
}
- case HFI1_CMD_EP_INFO:
- case HFI1_CMD_EP_ERASE_CHIP:
- case HFI1_CMD_EP_ERASE_RANGE:
- case HFI1_CMD_EP_READ_RANGE:
- case HFI1_CMD_EP_WRITE_RANGE:
- ret = handle_eprom_command(fp, &cmd);
+
+ case HFI1_IOCTL_GET_VERS:
+ uval = HFI1_USER_SWVERSION;
+ if (put_user(uval, (int __user *)arg))
+ return -EFAULT;
break;
+
+ default:
+ return -EINVAL;
}
- if (ret >= 0)
- ret = consumed;
-bail:
return ret;
}
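
The rewrite above replaces the staged write()-based command multiplexer with a conventional unlocked-ioctl dispatcher: the command selector becomes the ioctl request code, payloads travel through arg, and the old consumed/copy/dest bookkeeping disappears along with the ib_safe_file_access() workaround. As a rough illustration, a userspace caller might now look like the sketch below; the HFI1_IOCTL_* request encodings come from the hfi1 UAPI header, and the header path and /dev/hfi1_0 node name are assumptions, not quoted from this patch.

/* Hedged userspace sketch; request macros assumed from the UAPI header. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <rdma/hfi/hfi1_user.h>	/* assumed location of HFI1_IOCTL_* */

int main(void)
{
	int vers, fd = open("/dev/hfi1_0", O_RDWR);	/* node name assumed */

	if (fd < 0)
		return 1;
	/* Formerly a write() of a struct hfi1_cmd with type/addr fields. */
	if (ioctl(fd, HFI1_IOCTL_GET_VERS, &vers) == 0)
		printf("user SW version %d.%d\n", vers >> 16, vers & 0xffff);
	close(fd);
	return 0;
}
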
@@ -738,7 +697,9 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
{
struct hfi1_filedata *fdata = fp->private_data;
struct hfi1_ctxtdata *uctxt = fdata->uctxt;
- struct hfi1_devdata *dd;
+ struct hfi1_devdata *dd = container_of(inode->i_cdev,
+ struct hfi1_devdata,
+ user_cdev);
unsigned long flags, *ev;
fp->private_data = NULL;
@@ -747,7 +708,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
goto done;
hfi1_cdbg(PROC, "freeing ctxt %u:%u", uctxt->ctxt, fdata->subctxt);
- dd = uctxt->dd;
mutex_lock(&hfi1_mutex);
flush_wc();
@@ -813,6 +773,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
mutex_unlock(&hfi1_mutex);
hfi1_free_ctxtdata(dd, uctxt);
done:
+ kobject_put(&dd->kobj);
kfree(fdata);
return 0;
}
@@ -836,7 +797,7 @@ static u64 kvirt_to_phys(void *addr)
static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
{
int i_minor, ret = 0;
- unsigned swmajor, swminor, alg = HFI1_ALG_ACROSS;
+ unsigned int swmajor, swminor;
swmajor = uinfo->userversion >> 16;
if (swmajor != HFI1_USER_SWMAJOR) {
@@ -846,9 +807,6 @@ static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
swminor = uinfo->userversion & 0xffff;
- if (uinfo->hfi1_alg < HFI1_ALG_COUNT)
- alg = uinfo->hfi1_alg;
-
mutex_lock(&hfi1_mutex);
/* First, lets check if we need to setup a shared context? */
if (uinfo->subctxt_cnt) {
@@ -868,7 +826,7 @@ static int assign_ctxt(struct file *fp, struct hfi1_user_info *uinfo)
*/
if (!ret) {
i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
- ret = get_user_context(fp, uinfo, i_minor - 1, alg);
+ ret = get_user_context(fp, uinfo, i_minor);
}
done_unlock:
mutex_unlock(&hfi1_mutex);
@@ -876,71 +834,26 @@ done:
return ret;
}
-/* return true if the device available for general use */
-static int usable_device(struct hfi1_devdata *dd)
-{
- struct hfi1_pportdata *ppd = dd->pport;
-
- return driver_lstate(ppd) == IB_PORT_ACTIVE;
-}
-
static int get_user_context(struct file *fp, struct hfi1_user_info *uinfo,
- int devno, unsigned alg)
+ int devno)
{
struct hfi1_devdata *dd = NULL;
- int ret = 0, devmax, npresent, nup, dev;
+ int devmax, npresent, nup;
devmax = hfi1_count_units(&npresent, &nup);
- if (!npresent) {
- ret = -ENXIO;
- goto done;
- }
- if (!nup) {
- ret = -ENETDOWN;
- goto done;
- }
- if (devno >= 0) {
- dd = hfi1_lookup(devno);
- if (!dd)
- ret = -ENODEV;
- else if (!dd->freectxts)
- ret = -EBUSY;
- } else {
- struct hfi1_devdata *pdd;
-
- if (alg == HFI1_ALG_ACROSS) {
- unsigned free = 0U;
-
- for (dev = 0; dev < devmax; dev++) {
- pdd = hfi1_lookup(dev);
- if (!pdd)
- continue;
- if (!usable_device(pdd))
- continue;
- if (pdd->freectxts &&
- pdd->freectxts > free) {
- dd = pdd;
- free = pdd->freectxts;
- }
- }
- } else {
- for (dev = 0; dev < devmax; dev++) {
- pdd = hfi1_lookup(dev);
- if (!pdd)
- continue;
- if (!usable_device(pdd))
- continue;
- if (pdd->freectxts) {
- dd = pdd;
- break;
- }
- }
- }
- if (!dd)
- ret = -EBUSY;
- }
-done:
- return ret ? ret : allocate_ctxt(fp, dd, uinfo);
+ if (!npresent)
+ return -ENXIO;
+
+ if (!nup)
+ return -ENETDOWN;
+
+ dd = hfi1_lookup(devno);
+ if (!dd)
+ return -ENODEV;
+ else if (!dd->freectxts)
+ return -EBUSY;
+
+ return allocate_ctxt(fp, dd, uinfo);
}
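
With the wildcard cdev and the ACROSS/WITHIN placement algorithms removed, unit selection now lives entirely in userspace: each HFI exposes only its own per-unit node, and get_user_context() maps the opened minor straight to a device. A hypothetical opener that probes units in order; the /dev/hfi1_<N> naming follows the "%s_%d" snprintf in user_add() below and assumes class_name() resolves to "hfi1":

#include <stdio.h>
#include <fcntl.h>

/* Hypothetical helper; the device node naming is an assumption. */
static int open_any_hfi1(int max_units)
{
	char name[32];
	int i, fd;

	for (i = 0; i < max_units; i++) {
		snprintf(name, sizeof(name), "/dev/hfi1_%d", i);
		fd = open(name, O_RDWR);
		if (fd >= 0)
			return fd;	/* minor maps directly to unit i */
	}
	return -1;
}
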
static int find_shared_ctxt(struct file *fp,
@@ -1546,170 +1459,10 @@ done:
return ret;
}
-static int ui_open(struct inode *inode, struct file *filp)
-{
- struct hfi1_devdata *dd;
-
- dd = container_of(inode->i_cdev, struct hfi1_devdata, ui_cdev);
- filp->private_data = dd; /* for other methods */
- return 0;
-}
-
-static int ui_release(struct inode *inode, struct file *filp)
-{
- /* nothing to do */
- return 0;
-}
-
-static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
-{
- struct hfi1_devdata *dd = filp->private_data;
-
- return fixed_size_llseek(filp, offset, whence,
- (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE);
-}
-
-/* NOTE: assumes unsigned long is 8 bytes */
-static ssize_t ui_read(struct file *filp, char __user *buf, size_t count,
- loff_t *f_pos)
-{
- struct hfi1_devdata *dd = filp->private_data;
- void __iomem *base = dd->kregbase;
- unsigned long total, csr_off,
- barlen = (dd->kregend - dd->kregbase);
- u64 data;
-
- /* only read 8 byte quantities */
- if ((count % 8) != 0)
- return -EINVAL;
- /* offset must be 8-byte aligned */
- if ((*f_pos % 8) != 0)
- return -EINVAL;
- /* destination buffer must be 8-byte aligned */
- if ((unsigned long)buf % 8 != 0)
- return -EINVAL;
- /* must be in range */
- if (*f_pos + count > (barlen + DC8051_DATA_MEM_SIZE))
- return -EINVAL;
- /* only set the base if we are not starting past the BAR */
- if (*f_pos < barlen)
- base += *f_pos;
- csr_off = *f_pos;
- for (total = 0; total < count; total += 8, csr_off += 8) {
- /* accessing LCB CSRs requires more checks */
- if (is_lcb_offset(csr_off)) {
- if (read_lcb_csr(dd, csr_off, (u64 *)&data))
- break; /* failed */
- }
- /*
- * Cannot read ASIC GPIO/QSFP* clear and force CSRs without a
- * false parity error. Avoid the whole issue by not reading
- * them. These registers are defined as having a read value
- * of 0.
- */
- else if (csr_off == ASIC_GPIO_CLEAR ||
- csr_off == ASIC_GPIO_FORCE ||
- csr_off == ASIC_QSFP1_CLEAR ||
- csr_off == ASIC_QSFP1_FORCE ||
- csr_off == ASIC_QSFP2_CLEAR ||
- csr_off == ASIC_QSFP2_FORCE)
- data = 0;
- else if (csr_off >= barlen) {
- /*
- * read_8051_data can read more than just 8 bytes at
- * a time. However, folding this into the loop and
- * handling the reads in 8 byte increments allows us
- * to smoothly transition from chip memory to 8051
- * memory.
- */
- if (read_8051_data(dd,
- (u32)(csr_off - barlen),
- sizeof(data), &data))
- break; /* failed */
- } else
- data = readq(base + total);
- if (put_user(data, (unsigned long __user *)(buf + total)))
- break;
- }
- *f_pos += total;
- return total;
-}
-
-/* NOTE: assumes unsigned long is 8 bytes */
-static ssize_t ui_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *f_pos)
-{
- struct hfi1_devdata *dd = filp->private_data;
- void __iomem *base;
- unsigned long total, data, csr_off;
- int in_lcb;
-
- /* only write 8 byte quantities */
- if ((count % 8) != 0)
- return -EINVAL;
- /* offset must be 8-byte aligned */
- if ((*f_pos % 8) != 0)
- return -EINVAL;
- /* source buffer must be 8-byte aligned */
- if ((unsigned long)buf % 8 != 0)
- return -EINVAL;
- /* must be in range */
- if (*f_pos + count > dd->kregend - dd->kregbase)
- return -EINVAL;
-
- base = (void __iomem *)dd->kregbase + *f_pos;
- csr_off = *f_pos;
- in_lcb = 0;
- for (total = 0; total < count; total += 8, csr_off += 8) {
- if (get_user(data, (unsigned long __user *)(buf + total)))
- break;
- /* accessing LCB CSRs requires a special procedure */
- if (is_lcb_offset(csr_off)) {
- if (!in_lcb) {
- int ret = acquire_lcb_access(dd, 1);
-
- if (ret)
- break;
- in_lcb = 1;
- }
- } else {
- if (in_lcb) {
- release_lcb_access(dd, 1);
- in_lcb = 0;
- }
- }
- writeq(data, base + total);
- }
- if (in_lcb)
- release_lcb_access(dd, 1);
- *f_pos += total;
- return total;
-}
-
-static const struct file_operations ui_file_ops = {
- .owner = THIS_MODULE,
- .llseek = ui_lseek,
- .read = ui_read,
- .write = ui_write,
- .open = ui_open,
- .release = ui_release,
-};
-
-#define UI_OFFSET 192 /* device minor offset for UI devices */
-static int create_ui = 1;
-
-static struct cdev wildcard_cdev;
-static struct device *wildcard_device;
-
-static atomic_t user_count = ATOMIC_INIT(0);
-
static void user_remove(struct hfi1_devdata *dd)
{
- if (atomic_dec_return(&user_count) == 0)
- hfi1_cdev_cleanup(&wildcard_cdev, &wildcard_device);
hfi1_cdev_cleanup(&dd->user_cdev, &dd->user_device);
- hfi1_cdev_cleanup(&dd->ui_cdev, &dd->ui_device);
}
static int user_add(struct hfi1_devdata *dd)
@@ -1717,34 +1470,13 @@ static int user_add(struct hfi1_devdata *dd)
char name[10];
int ret;
- if (atomic_inc_return(&user_count) == 1) {
- ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops,
- &wildcard_cdev, &wildcard_device,
- true);
- if (ret)
- goto done;
- }
-
snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
- ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops,
+ ret = hfi1_cdev_init(dd->unit, name, &hfi1_file_ops,
&dd->user_cdev, &dd->user_device,
- true);
+ true, &dd->kobj);
if (ret)
- goto done;
+ user_remove(dd);
- if (create_ui) {
- snprintf(name, sizeof(name),
- "%s_ui%d", class_name(), dd->unit);
- ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops,
- &dd->ui_cdev, &dd->ui_device,
- false);
- if (ret)
- goto done;
- }
-
- return 0;
-done:
- user_remove(dd);
return ret;
}
@@ -1753,13 +1485,7 @@ done:
*/
int hfi1_device_create(struct hfi1_devdata *dd)
{
- int r, ret;
-
- r = user_add(dd);
- ret = hfi1_diag_add(dd);
- if (r && !ret)
- ret = r;
- return ret;
+ return user_add(dd);
}
/*
@@ -1769,5 +1495,4 @@ int hfi1_device_create(struct hfi1_devdata *dd)
void hfi1_device_remove(struct hfi1_devdata *dd)
{
user_remove(dd);
- hfi1_diag_remove(dd);
}
diff --git a/drivers/staging/rdma/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
index ed680fda611d..ed680fda611d 100644
--- a/drivers/staging/rdma/hfi1/firmware.c
+++ b/drivers/infiniband/hw/hfi1/firmware.c
diff --git a/drivers/staging/rdma/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 7b78d56de7f5..4417a0fd3ef9 100644
--- a/drivers/staging/rdma/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -453,6 +453,7 @@ struct rvt_sge_state;
#define HLS_LINK_COOLDOWN BIT(__HLS_LINK_COOLDOWN_BP)
#define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)
+#define HLS_DOWN ~(HLS_UP)
/* use this MTU size if none other is given */
#define HFI1_DEFAULT_ACTIVE_MTU 10240
@@ -1168,6 +1169,7 @@ struct hfi1_devdata {
atomic_t aspm_disabled_cnt;
struct hfi1_affinity *affinity;
+ struct kobject kobj;
};
/* 8051 firmware version helper */
@@ -1882,9 +1884,8 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
get_unit_name((dd)->unit), ##__VA_ARGS__)
#define hfi1_dev_porterr(dd, port, fmt, ...) \
- dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
- get_unit_name((dd)->unit), (dd)->unit, (port), \
- ##__VA_ARGS__)
+ dev_err(&(dd)->pcidev->dev, "%s: port %u: " fmt, \
+ get_unit_name((dd)->unit), (port), ##__VA_ARGS__)
/*
* this is used for formatting hw error messages...
diff --git a/drivers/staging/rdma/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 502b7cf4647d..5cc492e5776d 100644
--- a/drivers/staging/rdma/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -732,12 +732,12 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
lastfail = hfi1_create_rcvhdrq(dd, rcd);
if (!lastfail)
lastfail = hfi1_setup_eagerbufs(rcd);
- if (lastfail)
+ if (lastfail) {
dd_dev_err(dd,
"failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
+ ret = lastfail;
+ }
}
- if (lastfail)
- ret = lastfail;
/* Allocate enough memory for user event notification. */
len = PAGE_ALIGN(dd->chip_rcv_contexts * HFI1_MAX_SHARED_CTXTS *
@@ -989,8 +989,10 @@ static void release_asic_data(struct hfi1_devdata *dd)
dd->asic_data = NULL;
}
-void hfi1_free_devdata(struct hfi1_devdata *dd)
+static void __hfi1_free_devdata(struct kobject *kobj)
{
+ struct hfi1_devdata *dd =
+ container_of(kobj, struct hfi1_devdata, kobj);
unsigned long flags;
spin_lock_irqsave(&hfi1_devs_lock, flags);
@@ -1007,6 +1009,15 @@ void hfi1_free_devdata(struct hfi1_devdata *dd)
rvt_dealloc_device(&dd->verbs_dev.rdi);
}
+static struct kobj_type hfi1_devdata_type = {
+ .release = __hfi1_free_devdata,
+};
+
+void hfi1_free_devdata(struct hfi1_devdata *dd)
+{
+ kobject_put(&dd->kobj);
+}
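
Taken together with the hfi1_file_close() and user_add() hunks above, this converts the devdata lifetime to plain kobject reference counting: the structure starts with a refcount of one in hfi1_alloc_devdata(), hfi1_free_devdata() becomes a kobject_put(), and the real teardown runs only from the ktype release once every holder has dropped its reference. A minimal, self-contained sketch of the same pattern with generic names:

#include <linux/kobject.h>
#include <linux/slab.h>

struct widget {
	struct kobject kobj;
	/* ... device state ... */
};

static void widget_release(struct kobject *kobj)
{
	/* Runs only when the last reference is dropped. */
	kfree(container_of(kobj, struct widget, kobj));
}

static struct kobj_type widget_ktype = {
	.release = widget_release,
};

static struct widget *widget_alloc(void)
{
	struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (w)
		kobject_init(&w->kobj, &widget_ktype);	/* refcount = 1 */
	return w;
}
/* Each holder: kobject_get(&w->kobj); each release: kobject_put(&w->kobj). */
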
+
/*
* Allocate our primary per-unit data structure. Must be done via verbs
* allocator, because the verbs cleanup process both does cleanup and
@@ -1102,6 +1113,7 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
&pdev->dev,
"Could not alloc cpulist info, cpu affinity might be wrong\n");
}
+ kobject_init(&dd->kobj, &hfi1_devdata_type);
return dd;
bail:
@@ -1300,7 +1312,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd)
spin_lock(&ppd->cc_state_lock);
cc_state = get_cc_state(ppd);
- rcu_assign_pointer(ppd->cc_state, NULL);
+ RCU_INIT_POINTER(ppd->cc_state, NULL);
spin_unlock(&ppd->cc_state_lock);
if (cc_state)
diff --git a/drivers/staging/rdma/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c
index 65348d16ab2f..65348d16ab2f 100644
--- a/drivers/staging/rdma/hfi1/intr.c
+++ b/drivers/infiniband/hw/hfi1/intr.c
diff --git a/drivers/staging/rdma/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h
index 2ec6ef38d389..2ec6ef38d389 100644
--- a/drivers/staging/rdma/hfi1/iowait.h
+++ b/drivers/infiniband/hw/hfi1/iowait.h
diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index ed58cf21e790..219029576ba0 100644
--- a/drivers/staging/rdma/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -1403,6 +1403,12 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
if (key == okey)
continue;
/*
+	 * Don't update pkeys[2] if MgmtAllowed is not set for this
+	 * HFI port and the neighbor is a switch.
+ */
+ if (i == 2 && !ppd->mgmt_allowed && ppd->neighbor_type == 1)
+ continue;
+ /*
* The SM gives us the complete PKey table. We have
* to ensure that we put the PKeys in the matching
* slots.
@@ -3363,6 +3369,50 @@ static int __subn_get_opa_cong_setting(struct opa_smp *smp, u32 am,
return reply((struct ib_mad_hdr *)smp);
}
+/*
+ * Apply congestion control information stored in the ppd to the
+ * active structure.
+ */
+static void apply_cc_state(struct hfi1_pportdata *ppd)
+{
+ struct cc_state *old_cc_state, *new_cc_state;
+
+ new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
+ if (!new_cc_state)
+ return;
+
+ /*
+ * Hold the lock for updating *and* to prevent ppd information
+ * from changing during the update.
+ */
+ spin_lock(&ppd->cc_state_lock);
+
+ old_cc_state = get_cc_state(ppd);
+ if (!old_cc_state) {
+ /* never active, or shutting down */
+ spin_unlock(&ppd->cc_state_lock);
+ kfree(new_cc_state);
+ return;
+ }
+
+ *new_cc_state = *old_cc_state;
+
+ new_cc_state->cct.ccti_limit = ppd->total_cct_entry - 1;
+ memcpy(new_cc_state->cct.entries, ppd->ccti_entries,
+ ppd->total_cct_entry * sizeof(struct ib_cc_table_entry));
+
+ new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
+ new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
+ memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
+ OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));
+
+ rcu_assign_pointer(ppd->cc_state, new_cc_state);
+
+ spin_unlock(&ppd->cc_state_lock);
+
+ call_rcu(&old_cc_state->rcu, cc_state_reclaim);
+}
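
apply_cc_state() is a textbook RCU copy-update publisher: allocate a new state, copy the old one under the lock, modify it, publish with rcu_assign_pointer(), and reclaim the old copy after a grace period via call_rcu(). The payoff is on the read side, which needs no lock at all. A hypothetical reader, assuming get_cc_state() is an rcu_dereference() wrapper around ppd->cc_state:

/* Hypothetical reader; assumes get_cc_state() wraps rcu_dereference(). */
static u16 read_ccti_limit(struct hfi1_pportdata *ppd)
{
	struct cc_state *cc;
	u16 limit = 0;

	rcu_read_lock();
	cc = get_cc_state(ppd);
	if (cc)
		limit = cc->cct.ccti_limit;	/* stable until unlock */
	rcu_read_unlock();
	return limit;
}
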
+
static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
struct ib_device *ibdev, u8 port,
u32 *resp_len)
@@ -3374,6 +3424,11 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
struct opa_congestion_setting_entry_shadow *entries;
int i;
+ /*
+ * Save details from packet into the ppd. Hold the cc_state_lock so
+ * our information is consistent with anyone trying to apply the state.
+ */
+ spin_lock(&ppd->cc_state_lock);
ppd->cc_sl_control_map = be32_to_cpu(p->control_map);
entries = ppd->congestion_entries;
@@ -3384,6 +3439,10 @@ static int __subn_set_opa_cong_setting(struct opa_smp *smp, u32 am, u8 *data,
p->entries[i].trigger_threshold;
entries[i].ccti_min = p->entries[i].ccti_min;
}
+ spin_unlock(&ppd->cc_state_lock);
+
+ /* now apply the information */
+ apply_cc_state(ppd);
return __subn_get_opa_cong_setting(smp, am, data, ibdev, port,
resp_len);
@@ -3526,7 +3585,6 @@ static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
int i, j;
u32 sentry, eentry;
u16 ccti_limit;
- struct cc_state *old_cc_state, *new_cc_state;
/* sanity check n_blocks, start_block */
if (n_blocks == 0 ||
@@ -3546,45 +3604,20 @@ static int __subn_set_opa_cc_table(struct opa_smp *smp, u32 am, u8 *data,
return reply((struct ib_mad_hdr *)smp);
}
- new_cc_state = kzalloc(sizeof(*new_cc_state), GFP_KERNEL);
- if (!new_cc_state)
- goto getit;
-
+ /*
+ * Save details from packet into the ppd. Hold the cc_state_lock so
+ * our information is consistent with anyone trying to apply the state.
+ */
spin_lock(&ppd->cc_state_lock);
-
- old_cc_state = get_cc_state(ppd);
-
- if (!old_cc_state) {
- spin_unlock(&ppd->cc_state_lock);
- kfree(new_cc_state);
- return reply((struct ib_mad_hdr *)smp);
- }
-
- *new_cc_state = *old_cc_state;
-
- new_cc_state->cct.ccti_limit = ccti_limit;
-
- entries = ppd->ccti_entries;
ppd->total_cct_entry = ccti_limit + 1;
-
+ entries = ppd->ccti_entries;
for (j = 0, i = sentry; i < eentry; j++, i++)
entries[i].entry = be16_to_cpu(p->ccti_entries[j].entry);
-
- memcpy(new_cc_state->cct.entries, entries,
- eentry * sizeof(struct ib_cc_table_entry));
-
- new_cc_state->cong_setting.port_control = IB_CC_CCS_PC_SL_BASED;
- new_cc_state->cong_setting.control_map = ppd->cc_sl_control_map;
- memcpy(new_cc_state->cong_setting.entries, ppd->congestion_entries,
- OPA_MAX_SLS * sizeof(struct opa_congestion_setting_entry));
-
- rcu_assign_pointer(ppd->cc_state, new_cc_state);
-
spin_unlock(&ppd->cc_state_lock);
- call_rcu(&old_cc_state->rcu, cc_state_reclaim);
+ /* now apply the information */
+ apply_cc_state(ppd);
-getit:
return __subn_get_opa_cc_table(smp, am, data, ibdev, port, resp_len);
}
diff --git a/drivers/staging/rdma/hfi1/mad.h b/drivers/infiniband/hw/hfi1/mad.h
index 55ee08675333..55ee08675333 100644
--- a/drivers/staging/rdma/hfi1/mad.h
+++ b/drivers/infiniband/hw/hfi1/mad.h
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
index 2b0e91d3093d..b7a80aa1ae30 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.c
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
@@ -45,6 +45,7 @@
*
*/
#include <linux/list.h>
+#include <linux/rculist.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree_generic.h>
@@ -97,7 +98,6 @@ static unsigned long mmu_node_last(struct mmu_rb_node *node)
int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops)
{
struct mmu_rb_handler *handlr;
- unsigned long flags;
if (!ops->invalidate)
return -EINVAL;
@@ -111,9 +111,9 @@ int hfi1_mmu_rb_register(struct rb_root *root, struct mmu_rb_ops *ops)
INIT_HLIST_NODE(&handlr->mn.hlist);
spin_lock_init(&handlr->lock);
handlr->mn.ops = &mn_opts;
- spin_lock_irqsave(&mmu_rb_lock, flags);
- list_add_tail(&handlr->list, &mmu_rb_handlers);
- spin_unlock_irqrestore(&mmu_rb_lock, flags);
+ spin_lock(&mmu_rb_lock);
+ list_add_tail_rcu(&handlr->list, &mmu_rb_handlers);
+ spin_unlock(&mmu_rb_lock);
return mmu_notifier_register(&handlr->mn, current->mm);
}
@@ -130,9 +130,10 @@ void hfi1_mmu_rb_unregister(struct rb_root *root)
if (current->mm)
mmu_notifier_unregister(&handler->mn, current->mm);
- spin_lock_irqsave(&mmu_rb_lock, flags);
- list_del(&handler->list);
- spin_unlock_irqrestore(&mmu_rb_lock, flags);
+ spin_lock(&mmu_rb_lock);
+ list_del_rcu(&handler->list);
+ spin_unlock(&mmu_rb_lock);
+ synchronize_rcu();
spin_lock_irqsave(&handler->lock, flags);
if (!RB_EMPTY_ROOT(root)) {
@@ -271,16 +272,15 @@ void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node)
static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root)
{
struct mmu_rb_handler *handler;
- unsigned long flags;
- spin_lock_irqsave(&mmu_rb_lock, flags);
- list_for_each_entry(handler, &mmu_rb_handlers, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(handler, &mmu_rb_handlers, list) {
if (handler->root == root)
goto unlock;
}
handler = NULL;
unlock:
- spin_unlock_irqrestore(&mmu_rb_lock, flags);
+ rcu_read_unlock();
return handler;
}
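
This hunk trades an IRQ-safe spinlock on both sides for the classic RCU-protected list: writers keep a plain spinlock and use the _rcu list primitives, lookups become lock-free, and unregister interposes synchronize_rcu() between unlinking and teardown so no reader can still be traversing the handler. A distilled, generic version of the pattern; as in find_mmu_handler(), the returned pointer relies on the caller's lifetime guarantees rather than a per-object reference:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head list;
	int key;
};

static LIST_HEAD(items);
static DEFINE_SPINLOCK(items_lock);

static void item_add(struct item *it)
{
	spin_lock(&items_lock);
	list_add_tail_rcu(&it->list, &items);
	spin_unlock(&items_lock);
}

static void item_remove(struct item *it)
{
	spin_lock(&items_lock);
	list_del_rcu(&it->list);
	spin_unlock(&items_lock);
	synchronize_rcu();	/* no reader can still see 'it' */
	kfree(it);
}

static struct item *item_find(int key)
{
	struct item *it;

	rcu_read_lock();
	list_for_each_entry_rcu(it, &items, list) {
		if (it->key == key) {
			rcu_read_unlock();
			return it;	/* lifetime guaranteed by caller */
		}
	}
	rcu_read_unlock();
	return NULL;
}
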
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/infiniband/hw/hfi1/mmu_rb.h
index 7a57b9c49d27..7a57b9c49d27 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.h
+++ b/drivers/infiniband/hw/hfi1/mmu_rb.h
diff --git a/drivers/staging/rdma/hfi1/opa_compat.h b/drivers/infiniband/hw/hfi1/opa_compat.h
index 6ef3c1cbdcd7..6ef3c1cbdcd7 100644
--- a/drivers/staging/rdma/hfi1/opa_compat.h
+++ b/drivers/infiniband/hw/hfi1/opa_compat.h
diff --git a/drivers/staging/rdma/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 0bac21e6a658..0bac21e6a658 100644
--- a/drivers/staging/rdma/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
diff --git a/drivers/staging/rdma/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index c67b9ad3fcf4..d5edb1afbb8f 100644
--- a/drivers/staging/rdma/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -1835,8 +1835,7 @@ int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
struct pio_vl_map *oldmap, *newmap;
if (!vl_scontexts) {
- /* send context 0 reserved for VL15 */
- for (i = 1; i < dd->num_send_contexts; i++)
+ for (i = 0; i < dd->num_send_contexts; i++)
if (dd->send_contexts[i].type == SC_KERNEL)
num_kernel_send_contexts++;
/* truncate divide */
diff --git a/drivers/staging/rdma/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index 53a08edb7f64..464cbd27b975 100644
--- a/drivers/staging/rdma/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -49,10 +49,10 @@
/* send context types */
#define SC_KERNEL 0
-#define SC_ACK 1
-#define SC_USER 2
-#define SC_VL15 3
-#define SC_MAX 4
+#define SC_VL15 1
+#define SC_ACK 2
+#define SC_USER   3	/* must be the last one: it may take all that is left */
+#define SC_MAX 4 /* count of send context types */
/* invalid send context index */
#define INVALID_SCI 0xff
diff --git a/drivers/staging/rdma/hfi1/pio_copy.c b/drivers/infiniband/hw/hfi1/pio_copy.c
index 8c25e1b58849..8c25e1b58849 100644
--- a/drivers/staging/rdma/hfi1/pio_copy.c
+++ b/drivers/infiniband/hw/hfi1/pio_copy.c
diff --git a/drivers/staging/rdma/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
index 8fe8a205b5bb..03df9322f862 100644
--- a/drivers/staging/rdma/hfi1/platform.c
+++ b/drivers/infiniband/hw/hfi1/platform.c
@@ -87,6 +87,17 @@ void free_platform_config(struct hfi1_devdata *dd)
*/
}
+void get_port_type(struct hfi1_pportdata *ppd)
+{
+ int ret;
+
+ ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
+ PORT_TABLE_PORT_TYPE, &ppd->port_type,
+ 4);
+ if (ret)
+ ppd->port_type = PORT_TYPE_UNKNOWN;
+}
+
int set_qsfp_tx(struct hfi1_pportdata *ppd, int on)
{
u8 tx_ctrl_byte = on ? 0x0 : 0xF;
@@ -529,7 +540,8 @@ static void apply_tunings(
/* Enable external device config if channel is limiting active */
read_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS,
GENERAL_CONFIG, &config_data);
- config_data |= limiting_active;
+ config_data &= ~(0xff << ENABLE_EXT_DEV_CONFIG_SHIFT);
+ config_data |= ((u32)limiting_active << ENABLE_EXT_DEV_CONFIG_SHIFT);
ret = load_8051_config(ppd->dd, LINK_OPTIMIZATION_SETTINGS,
GENERAL_CONFIG, config_data);
if (ret != HCMD_SUCCESS)
@@ -542,7 +554,8 @@ static void apply_tunings(
/* Pass tuning method to 8051 */
read_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
&config_data);
- config_data |= tuning_method;
+ config_data &= ~(0xff << TUNING_METHOD_SHIFT);
+ config_data |= ((u32)tuning_method << TUNING_METHOD_SHIFT);
ret = load_8051_config(ppd->dd, LINK_TUNING_PARAMETERS, GENERAL_CONFIG,
config_data);
if (ret != HCMD_SUCCESS)
@@ -564,8 +577,8 @@ static void apply_tunings(
ret = read_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
GENERAL_CONFIG, &config_data);
/* Clear, then set the external device config field */
- config_data &= ~(0xFF << 24);
- config_data |= (external_device_config << 24);
+ config_data &= ~(u32)0xFF;
+ config_data |= external_device_config;
ret = load_8051_config(ppd->dd, DC_HOST_COMM_SETTINGS,
GENERAL_CONFIG, config_data);
if (ret != HCMD_SUCCESS)
@@ -784,12 +797,6 @@ void tune_serdes(struct hfi1_pportdata *ppd)
return;
}
- ret = get_platform_config_field(ppd->dd, PLATFORM_CONFIG_PORT_TABLE, 0,
- PORT_TABLE_PORT_TYPE, &ppd->port_type,
- 4);
- if (ret)
- ppd->port_type = PORT_TYPE_UNKNOWN;
-
switch (ppd->port_type) {
case PORT_TYPE_DISCONNECTED:
ppd->offline_disabled_reason =
diff --git a/drivers/staging/rdma/hfi1/platform.h b/drivers/infiniband/hw/hfi1/platform.h
index 19620cf546d5..e2c21613c326 100644
--- a/drivers/staging/rdma/hfi1/platform.h
+++ b/drivers/infiniband/hw/hfi1/platform.h
@@ -298,6 +298,7 @@ enum link_tuning_encoding {
/* platform.c */
void get_platform_config(struct hfi1_devdata *dd);
void free_platform_config(struct hfi1_devdata *dd);
+void get_port_type(struct hfi1_pportdata *ppd);
int set_qsfp_tx(struct hfi1_pportdata *ppd, int on);
void tune_serdes(struct hfi1_pportdata *ppd);
diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 91eb42316df9..1a942ffba4cb 100644
--- a/drivers/staging/rdma/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -49,7 +49,6 @@
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
-#include <linux/random.h>
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
@@ -161,9 +160,6 @@ static inline int opa_mtu_enum_to_int(int mtu)
* This function is what we would push to the core layer if we wanted to be a
* "first class citizen". Instead we hide this here and rely on Verbs ULPs
* to blindly pass the MTU enum value from the PathRecord to us.
- *
- * The actual flag used to determine "8k MTU" will change and is currently
- * unknown.
*/
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
@@ -516,6 +512,7 @@ static void iowait_wakeup(struct iowait *wait, int reason)
static void iowait_sdma_drained(struct iowait *wait)
{
struct rvt_qp *qp = iowait_to_qp(wait);
+ unsigned long flags;
/*
* This happens when the send engine notes
@@ -523,12 +520,12 @@ static void iowait_sdma_drained(struct iowait *wait)
* do the flush work until that QP's
* sdma work has finished.
*/
- spin_lock(&qp->s_lock);
+ spin_lock_irqsave(&qp->s_lock, flags);
if (qp->s_flags & RVT_S_WAIT_DMA) {
qp->s_flags &= ~RVT_S_WAIT_DMA;
hfi1_schedule_send(qp);
}
- spin_unlock(&qp->s_lock);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
}
/**
diff --git a/drivers/staging/rdma/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h
index e7bc8d6cf681..e7bc8d6cf681 100644
--- a/drivers/staging/rdma/hfi1/qp.h
+++ b/drivers/infiniband/hw/hfi1/qp.h
diff --git a/drivers/staging/rdma/hfi1/qsfp.c b/drivers/infiniband/hw/hfi1/qsfp.c
index 2441669f0817..2441669f0817 100644
--- a/drivers/staging/rdma/hfi1/qsfp.c
+++ b/drivers/infiniband/hw/hfi1/qsfp.c
diff --git a/drivers/staging/rdma/hfi1/qsfp.h b/drivers/infiniband/hw/hfi1/qsfp.h
index dadc66c442b9..dadc66c442b9 100644
--- a/drivers/staging/rdma/hfi1/qsfp.h
+++ b/drivers/infiniband/hw/hfi1/qsfp.h
diff --git a/drivers/staging/rdma/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 792f15eb8efe..792f15eb8efe 100644
--- a/drivers/staging/rdma/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
diff --git a/drivers/staging/rdma/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index a659aec3c3c6..a659aec3c3c6 100644
--- a/drivers/staging/rdma/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index abb8ebc1fcac..f9befc05b349 100644
--- a/drivers/staging/rdma/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -134,6 +134,7 @@ static const char * const sdma_state_names[] = {
[sdma_state_s99_running] = "s99_Running",
};
+#ifdef CONFIG_SDMA_VERBOSITY
static const char * const sdma_event_names[] = {
[sdma_event_e00_go_hw_down] = "e00_GoHwDown",
[sdma_event_e10_go_hw_start] = "e10_GoHwStart",
@@ -150,6 +151,7 @@ static const char * const sdma_event_names[] = {
[sdma_event_e85_link_down] = "e85_LinkDown",
[sdma_event_e90_sw_halted] = "e90_SwHalted",
};
+#endif
static const struct sdma_set_state_action sdma_action_table[] = {
[sdma_state_s00_hw_down] = {
@@ -376,7 +378,7 @@ static inline void complete_tx(struct sdma_engine *sde,
sdma_txclean(sde->dd, tx);
if (complete)
(*complete)(tx, res);
- if (iowait_sdma_dec(wait) && wait)
+ if (wait && iowait_sdma_dec(wait))
iowait_drain_wakeup(wait);
}
diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 8f50c99fe711..8f50c99fe711 100644
--- a/drivers/staging/rdma/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
diff --git a/drivers/staging/rdma/hfi1/sdma_txreq.h b/drivers/infiniband/hw/hfi1/sdma_txreq.h
index bf7d777d756e..bf7d777d756e 100644
--- a/drivers/staging/rdma/hfi1/sdma_txreq.h
+++ b/drivers/infiniband/hw/hfi1/sdma_txreq.h
diff --git a/drivers/staging/rdma/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 8cd6df8634ad..91fc2aed6aed 100644
--- a/drivers/staging/rdma/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -721,8 +721,8 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
}
dd_dev_info(dd,
- "IB%u: Congestion Control Agent enabled for port %d\n",
- dd->unit, port_num);
+ "Congestion Control Agent enabled for port %d\n",
+ port_num);
return 0;
diff --git a/drivers/staging/rdma/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c
index 8b62fefcf903..79b2952c0dfb 100644
--- a/drivers/staging/rdma/hfi1/trace.c
+++ b/drivers/infiniband/hw/hfi1/trace.c
@@ -66,6 +66,7 @@ u8 ibhdr_exhdr_len(struct hfi1_ib_header *hdr)
#define RETH_PRN "reth vaddr 0x%.16llx rkey 0x%.8x dlen 0x%.8x"
#define AETH_PRN "aeth syn 0x%.2x %s msn 0x%.8x"
#define DETH_PRN "deth qkey 0x%.8x sqpn 0x%.6x"
+#define IETH_PRN "ieth rkey 0x%.8x"
#define ATOMICACKETH_PRN "origdata %lld"
#define ATOMICETH_PRN "vaddr 0x%llx rkey 0x%.8x sdata %lld cdata %lld"
@@ -166,6 +167,12 @@ const char *parse_everbs_hdrs(
be32_to_cpu(eh->ud.deth[0]),
be32_to_cpu(eh->ud.deth[1]) & RVT_QPN_MASK);
break;
+ /* ieth */
+ case OP(RC, SEND_LAST_WITH_INVALIDATE):
+ case OP(RC, SEND_ONLY_WITH_INVALIDATE):
+ trace_seq_printf(p, IETH_PRN,
+ be32_to_cpu(eh->ieth));
+ break;
}
trace_seq_putc(p, 0);
return ret;
@@ -233,3 +240,4 @@ __hfi1_trace_fn(FIRMWARE);
__hfi1_trace_fn(RCVCTRL);
__hfi1_trace_fn(TID);
__hfi1_trace_fn(MMU);
+__hfi1_trace_fn(IOCTL);
diff --git a/drivers/staging/rdma/hfi1/trace.h b/drivers/infiniband/hw/hfi1/trace.h
index 963dc948c38a..28c1d0832886 100644
--- a/drivers/staging/rdma/hfi1/trace.h
+++ b/drivers/infiniband/hw/hfi1/trace.h
@@ -74,8 +74,8 @@ __print_symbolic(etype, \
TRACE_EVENT(hfi1_rcvhdr,
TP_PROTO(struct hfi1_devdata *dd,
- u64 eflags,
u32 ctxt,
+ u64 eflags,
u32 etype,
u32 hlen,
u32 tlen,
@@ -392,6 +392,8 @@ __print_symbolic(opcode, \
ib_opcode_name(RC_ATOMIC_ACKNOWLEDGE), \
ib_opcode_name(RC_COMPARE_SWAP), \
ib_opcode_name(RC_FETCH_ADD), \
+ ib_opcode_name(RC_SEND_LAST_WITH_INVALIDATE), \
+ ib_opcode_name(RC_SEND_ONLY_WITH_INVALIDATE), \
ib_opcode_name(UC_SEND_FIRST), \
ib_opcode_name(UC_SEND_MIDDLE), \
ib_opcode_name(UC_SEND_LAST), \
@@ -1341,6 +1343,7 @@ __hfi1_trace_def(FIRMWARE);
__hfi1_trace_def(RCVCTRL);
__hfi1_trace_def(TID);
__hfi1_trace_def(MMU);
+__hfi1_trace_def(IOCTL);
#define hfi1_cdbg(which, fmt, ...) \
__hfi1_trace_##which(__func__, fmt, ##__VA_ARGS__)
diff --git a/drivers/staging/rdma/hfi1/twsi.c b/drivers/infiniband/hw/hfi1/twsi.c
index e82e52a63d35..e82e52a63d35 100644
--- a/drivers/staging/rdma/hfi1/twsi.c
+++ b/drivers/infiniband/hw/hfi1/twsi.c
diff --git a/drivers/staging/rdma/hfi1/twsi.h b/drivers/infiniband/hw/hfi1/twsi.h
index 5b8a5b5e7eae..5b8a5b5e7eae 100644
--- a/drivers/staging/rdma/hfi1/twsi.h
+++ b/drivers/infiniband/hw/hfi1/twsi.h
diff --git a/drivers/staging/rdma/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index df773d433297..df773d433297 100644
--- a/drivers/staging/rdma/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 1e503ad0bebb..1e503ad0bebb 100644
--- a/drivers/staging/rdma/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 1b640a35b3fe..1b640a35b3fe 100644
--- a/drivers/staging/rdma/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.h b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
index 9bc8d9fba87e..9bc8d9fba87e 100644
--- a/drivers/staging/rdma/hfi1/user_exp_rcv.h
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.h
diff --git a/drivers/staging/rdma/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
index 88e10b5f55f1..88e10b5f55f1 100644
--- a/drivers/staging/rdma/hfi1/user_pages.c
+++ b/drivers/infiniband/hw/hfi1/user_pages.c
diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 0014c9c0e967..29f4795f866c 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -166,6 +166,8 @@ static unsigned initial_pkt_count = 8;
#define SDMA_IOWAIT_TIMEOUT 1000 /* in milliseconds */
+struct sdma_mmu_node;
+
struct user_sdma_iovec {
struct list_head list;
struct iovec iov;
@@ -178,6 +180,7 @@ struct user_sdma_iovec {
* which we last left off.
*/
u64 offset;
+ struct sdma_mmu_node *node;
};
#define SDMA_CACHE_NODE_EVICT BIT(0)
@@ -507,6 +510,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
struct sdma_req_info info;
struct user_sdma_request *req;
u8 opcode, sc, vl;
+ int req_queued = 0;
if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
hfi1_cdbg(
@@ -703,6 +707,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
atomic_inc(&pq->n_reqs);
+ req_queued = 1;
/* Send the first N packets in the request to buy us some time */
ret = user_sdma_send_pkts(req, pcount);
if (unlikely(ret < 0 && ret != -EBUSY)) {
@@ -747,7 +752,8 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec,
return 0;
free_req:
user_sdma_free_request(req, true);
- pq_update(pq);
+ if (req_queued)
+ pq_update(pq);
set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
return ret;
}
@@ -1153,6 +1159,7 @@ retry:
}
iovec->pages = node->pages;
iovec->npages = npages;
+ iovec->node = node;
ret = hfi1_mmu_rb_insert(&req->pq->sdma_rb_root, &node->rb);
if (ret) {
@@ -1519,18 +1526,13 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
}
if (req->data_iovs) {
struct sdma_mmu_node *node;
- struct mmu_rb_node *mnode;
int i;
for (i = 0; i < req->data_iovs; i++) {
- mnode = hfi1_mmu_rb_search(
- &req->pq->sdma_rb_root,
- (unsigned long)req->iovs[i].iov.iov_base,
- req->iovs[i].iov.iov_len);
- if (!mnode || IS_ERR(mnode))
+ node = req->iovs[i].node;
+ if (!node)
continue;
- node = container_of(mnode, struct sdma_mmu_node, rb);
if (unpin)
hfi1_mmu_rb_remove(&req->pq->sdma_rb_root,
&node->rb);
diff --git a/drivers/staging/rdma/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index b9240e351161..b9240e351161 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 9cdc85fa366f..849c4b9399d4 100644
--- a/drivers/staging/rdma/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -52,7 +52,6 @@
#include <linux/utsname.h>
#include <linux/rculist.h>
#include <linux/mm.h>
-#include <linux/random.h>
#include <linux/vmalloc.h>
#include "hfi.h"
@@ -336,6 +335,8 @@ const u8 hdr_len_by_opcode[256] = {
[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = 12 + 8 + 4,
[IB_OPCODE_RC_COMPARE_SWAP] = 12 + 8 + 28,
[IB_OPCODE_RC_FETCH_ADD] = 12 + 8 + 28,
+ [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = 12 + 8 + 4,
+ [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = 12 + 8 + 4,
/* UC */
[IB_OPCODE_UC_SEND_FIRST] = 12 + 8,
[IB_OPCODE_UC_SEND_MIDDLE] = 12 + 8,
@@ -946,7 +947,6 @@ static int pio_wait(struct rvt_qp *qp,
dev->n_piowait += !!(flag & RVT_S_WAIT_PIO);
dev->n_piodrain += !!(flag & RVT_S_WAIT_PIO_DRAIN);
- dev->n_piowait++;
qp->s_flags |= flag;
was_empty = list_empty(&sc->piowait);
list_add_tail(&priv->s_iowait.list, &sc->piowait);
diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index 3ee223983b20..488356775627 100644
--- a/drivers/staging/rdma/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -152,6 +152,7 @@ union ib_ehdrs {
} at;
__be32 imm_data;
__be32 aeth;
+ __be32 ieth;
struct ib_atomic_eth atomic_eth;
} __packed;
diff --git a/drivers/staging/rdma/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
index bc95c4112c61..bc95c4112c61 100644
--- a/drivers/staging/rdma/hfi1/verbs_txreq.c
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
diff --git a/drivers/staging/rdma/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 1cf69b2fe4a5..1cf69b2fe4a5 100644
--- a/drivers/staging/rdma/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 4a740f7a0519..02a735b64208 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -2361,58 +2361,130 @@ static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+static const char * const i40iw_hw_stat_names[] = {
+	/* 32-bit names */
+ [I40IW_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
+ [I40IW_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
+ [I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
+ [I40IW_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
+ [I40IW_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
+ [I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
+ [I40IW_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
+ [I40IW_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
+ [I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
+	/* 64-bit names */
+ [I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip4InOctets",
+ [I40IW_HW_STAT_INDEX_IP4RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip4InPkts",
+ [I40IW_HW_STAT_INDEX_IP4RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip4InReasmRqd",
+ [I40IW_HW_STAT_INDEX_IP4RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip4InMcastPkts",
+ [I40IW_HW_STAT_INDEX_IP4TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip4OutOctets",
+ [I40IW_HW_STAT_INDEX_IP4TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip4OutPkts",
+ [I40IW_HW_STAT_INDEX_IP4TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip4OutSegRqd",
+ [I40IW_HW_STAT_INDEX_IP4TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip4OutMcastPkts",
+ [I40IW_HW_STAT_INDEX_IP6RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip6InOctets",
+ [I40IW_HW_STAT_INDEX_IP6RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip6InPkts",
+ [I40IW_HW_STAT_INDEX_IP6RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip6InReasmRqd",
+ [I40IW_HW_STAT_INDEX_IP6RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip6InMcastPkts",
+ [I40IW_HW_STAT_INDEX_IP6TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip6OutOctets",
+ [I40IW_HW_STAT_INDEX_IP6TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip6OutPkts",
+ [I40IW_HW_STAT_INDEX_IP6TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip6OutSegRqd",
+ [I40IW_HW_STAT_INDEX_IP6TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "ip6OutMcastPkts",
+ [I40IW_HW_STAT_INDEX_TCPRXSEGS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "tcpInSegs",
+ [I40IW_HW_STAT_INDEX_TCPTXSEG + I40IW_HW_STAT_INDEX_MAX_32] =
+ "tcpOutSegs",
+ [I40IW_HW_STAT_INDEX_RDMARXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "iwInRdmaReads",
+ [I40IW_HW_STAT_INDEX_RDMARXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "iwInRdmaSends",
+ [I40IW_HW_STAT_INDEX_RDMARXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "iwInRdmaWrites",
+ [I40IW_HW_STAT_INDEX_RDMATXRDS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "iwOutRdmaReads",
+ [I40IW_HW_STAT_INDEX_RDMATXSNDS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "iwOutRdmaSends",
+ [I40IW_HW_STAT_INDEX_RDMATXWRS + I40IW_HW_STAT_INDEX_MAX_32] =
+ "iwOutRdmaWrites",
+ [I40IW_HW_STAT_INDEX_RDMAVBND + I40IW_HW_STAT_INDEX_MAX_32] =
+ "iwRdmaBnd",
+ [I40IW_HW_STAT_INDEX_RDMAVINV + I40IW_HW_STAT_INDEX_MAX_32] =
+ "iwRdmaInv"
+};
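
One flat table covers both counter banks: the 64-bit entries are offset by I40IW_HW_STAT_INDEX_MAX_32 so that a name's position matches its slot in struct i40iw_dev_hw_stats, which i40iw_get_hw_stats() below copies wholesale into stats->value[]. That only works if both banks store their values in u64 slots, which this sketch assumes:

/* Index arithmetic sketch; assumes both stat banks are u64 arrays laid
 * out back to back in struct i40iw_dev_hw_stats.
 */
static u64 demo_ip4_in_pkts(struct rdma_hw_stats *stats)
{
	return stats->value[I40IW_HW_STAT_INDEX_IP4RXPKTS +
			    I40IW_HW_STAT_INDEX_MAX_32];
}
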
+
/**
- * i40iw_get_protocol_stats - Populates the rdma_stats structure
- * @ibdev: ib dev struct
- * @stats: iw protocol stats struct
+ * i40iw_alloc_hw_stats - Allocate a hw stats structure
+ * @ibdev: device pointer from stack
+ * @port_num: port number
*/
-static int i40iw_get_protocol_stats(struct ib_device *ibdev,
- union rdma_protocol_stats *stats)
+static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev,
+ u8 port_num)
+{
+ struct i40iw_device *iwdev = to_iwdev(ibdev);
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ int num_counters = I40IW_HW_STAT_INDEX_MAX_32 +
+ I40IW_HW_STAT_INDEX_MAX_64;
+ unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
+
+ BUILD_BUG_ON(ARRAY_SIZE(i40iw_hw_stat_names) !=
+ (I40IW_HW_STAT_INDEX_MAX_32 +
+ I40IW_HW_STAT_INDEX_MAX_64));
+
+ /*
+ * PFs get the default update lifespan, but VFs only update once
+ * per second
+ */
+ if (!dev->is_pf)
+ lifespan = 1000;
+ return rdma_alloc_hw_stats_struct(i40iw_hw_stat_names, num_counters,
+ lifespan);
+}
+
+/**
+ * i40iw_get_hw_stats - Populates the rdma_hw_stats structure
+ * @ibdev: device pointer from stack
+ * @stats: stats pointer from stack
+ * @port_num: port number
+ * @index: which hw counter the stack is requesting we update
+ */
+static int i40iw_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u8 port_num, int index)
{
struct i40iw_device *iwdev = to_iwdev(ibdev);
struct i40iw_sc_dev *dev = &iwdev->sc_dev;
struct i40iw_dev_pestat *devstat = &dev->dev_pestat;
struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats;
- struct timespec curr_time;
- static struct timespec last_rd_time = {0, 0};
unsigned long flags;
- curr_time = current_kernel_time();
- memset(stats, 0, sizeof(*stats));
-
if (dev->is_pf) {
spin_lock_irqsave(&devstat->stats_lock, flags);
devstat->ops.iw_hw_stat_read_all(devstat,
&devstat->hw_stats);
spin_unlock_irqrestore(&devstat->stats_lock, flags);
} else {
- if (((u64)curr_time.tv_sec - (u64)last_rd_time.tv_sec) > 1)
- if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
- return -ENOSYS;
+ if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats))
+ return -ENOSYS;
}
- stats->iw.ipInReceives = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] +
- hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXPKTS];
- stats->iw.ipInTruncatedPkts = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] +
- hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC];
- stats->iw.ipInDiscards = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] +
- hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD];
- stats->iw.ipOutNoRoutes = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] +
- hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE];
- stats->iw.ipReasmReqds = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] +
- hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS];
- stats->iw.ipFragCreates = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] +
- hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS];
- stats->iw.ipInMcastPkts = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] +
- hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS];
- stats->iw.ipOutMcastPkts = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] +
- hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_IP6TXMCPKTS];
- stats->iw.tcpOutSegs = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_TCPTXSEG];
- stats->iw.tcpInSegs = hw_stats->stat_value_64[I40IW_HW_STAT_INDEX_TCPRXSEGS];
- stats->iw.tcpRetransSegs = hw_stats->stat_value_32[I40IW_HW_STAT_INDEX_TCPRTXSEG];
-
- last_rd_time = curr_time;
- return 0;
+	memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats));
+
+ return stats->num_counters;
}
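
These two callbacks implement the dynamic counter contract that replaces get_protocol_stats(): alloc_hw_stats() hands the core a named counter table plus a lifespan that throttles refreshes, and get_hw_stats() repopulates values on demand, returning the number of counters written or a negative errno. A minimal sketch for a hypothetical two-counter driver; all demo_* names are invented for illustration:

static const char * const demo_stat_names[] = {
	"demoRxPkts",
	"demoTxPkts",
};

static struct rdma_hw_stats *demo_alloc_hw_stats(struct ib_device *ibdev,
						 u8 port_num)
{
	return rdma_alloc_hw_stats_struct(demo_stat_names,
					  ARRAY_SIZE(demo_stat_names),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}

static int demo_get_hw_stats(struct ib_device *ibdev,
			     struct rdma_hw_stats *stats,
			     u8 port_num, int index)
{
	/* A real driver would read device registers here. */
	stats->value[0] = 0;
	stats->value[1] = 0;
	return stats->num_counters;
}
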
/**
@@ -2551,7 +2623,8 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr;
iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr;
iwibdev->ibdev.dereg_mr = i40iw_dereg_mr;
- iwibdev->ibdev.get_protocol_stats = i40iw_get_protocol_stats;
+ iwibdev->ibdev.alloc_hw_stats = i40iw_alloc_hw_stats;
+ iwibdev->ibdev.get_hw_stats = i40iw_get_hw_stats;
iwibdev->ibdev.query_device = i40iw_query_device;
iwibdev->ibdev.create_ah = i40iw_create_ah;
iwibdev->ibdev.destroy_ah = i40iw_destroy_ah;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 82d7c4bf5970..ce4034071f9c 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -1308,21 +1308,6 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
SYM_LSB(IntMask, fldname##17IntMask)), \
.msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
-static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
- INTR_AUTO_P(SDmaInt),
- INTR_AUTO_P(SDmaProgressInt),
- INTR_AUTO_P(SDmaIdleInt),
- INTR_AUTO_P(SDmaCleanupDone),
- INTR_AUTO_C(RcvUrg),
- INTR_AUTO_P(ErrInt),
- INTR_AUTO(ErrInt), /* non-port-specific errs */
- INTR_AUTO(AssertGPIOInt),
- INTR_AUTO_P(SendDoneInt),
- INTR_AUTO(SendBufAvailInt),
- INTR_AUTO_C(RcvAvail),
- { .mask = 0, .sz = 0 }
-};
-
#define TXSYMPTOM_AUTO_P(fldname) \
{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
.msg = #fldname, .sz = sizeof(#fldname) }
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 0bd18375d7df..d2ac29861af5 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -1172,11 +1172,13 @@ static int pma_get_classportinfo(struct ib_pma_mad *pmp,
* Set the most significant bit of CM2 to indicate support for
* congestion statistics
*/
- p->reserved[0] = dd->psxmitwait_supported << 7;
+ ib_set_cpi_capmask2(p,
+ dd->psxmitwait_supported <<
+ (31 - IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE));
/*
* Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
*/
- p->resp_time_value = 18;
+ ib_set_cpi_resp_time(p, 18);
return reply((struct ib_smp *) pmp);
}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 6888f03c6d61..4f878151f81f 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -159,6 +159,7 @@ struct qib_other_headers {
} at;
__be32 imm_data;
__be32 aeth;
+ __be32 ieth;
struct ib_atomic_eth atomic_eth;
} u;
} __packed;
diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
index b1ffc8b4a6c0..6ca6fa80dd6e 100644
--- a/drivers/infiniband/sw/rdmavt/cq.c
+++ b/drivers/infiniband/sw/rdmavt/cq.c
@@ -525,6 +525,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
return PTR_ERR(task);
}
+ set_user_nice(task, MIN_NICE);
cpu = cpumask_first(cpumask_of_node(rdi->dparms.node));
kthread_bind(task, cpu);
wake_up_process(task);
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 0ff765bfd619..0f4d4500f45e 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -124,11 +124,13 @@ static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
int count)
{
int m, i = 0;
+ struct rvt_dev_info *dev = ib_to_rvt(pd->device);
mr->mapsz = 0;
m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
for (; i < m; i++) {
- mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
+ mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL,
+ dev->dparms.node);
if (!mr->map[i]) {
rvt_deinit_mregion(mr);
return -ENOMEM;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 0f12c211c385..5fa4d4d81ee0 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -397,6 +397,7 @@ static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
{
unsigned n;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags))
rvt_put_ss(&qp->s_rdma_read_sge);
@@ -431,7 +432,7 @@ static void rvt_clear_mr_refs(struct rvt_qp *qp, int clr_sends)
if (qp->ibqp.qp_type != IB_QPT_RC)
return;
- for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
+ for (n = 0; n < rvt_max_atomic(rdi); n++) {
struct rvt_ack_entry *e = &qp->s_ack_queue[n];
if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
@@ -569,7 +570,12 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
qp->s_ssn = 1;
qp->s_lsn = 0;
qp->s_mig_state = IB_MIG_MIGRATED;
- memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
+ if (qp->s_ack_queue)
+ memset(
+ qp->s_ack_queue,
+ 0,
+ rvt_max_atomic(rdi) *
+ sizeof(*qp->s_ack_queue));
qp->r_head_ack_queue = 0;
qp->s_tail_ack_queue = 0;
qp->s_num_rd_atomic = 0;
@@ -653,9 +659,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
if (gfp == GFP_NOIO)
swq = __vmalloc(
(init_attr->cap.max_send_wr + 1) * sz,
- gfp, PAGE_KERNEL);
+ gfp | __GFP_ZERO, PAGE_KERNEL);
else
- swq = vmalloc_node(
+ swq = vzalloc_node(
(init_attr->cap.max_send_wr + 1) * sz,
rdi->dparms.node);
if (!swq)
@@ -677,6 +683,16 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
goto bail_swq;
RCU_INIT_POINTER(qp->next, NULL);
+ if (init_attr->qp_type == IB_QPT_RC) {
+ qp->s_ack_queue =
+ kzalloc_node(
+ sizeof(*qp->s_ack_queue) *
+ rvt_max_atomic(rdi),
+ gfp,
+ rdi->dparms.node);
+ if (!qp->s_ack_queue)
+ goto bail_qp;
+ }
/*
* Driver needs to set up it's private QP structure and do any
@@ -704,9 +720,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
qp->r_rq.wq = __vmalloc(
sizeof(struct rvt_rwq) +
qp->r_rq.size * sz,
- gfp, PAGE_KERNEL);
+ gfp | __GFP_ZERO, PAGE_KERNEL);
else
- qp->r_rq.wq = vmalloc_node(
+ qp->r_rq.wq = vzalloc_node(
sizeof(struct rvt_rwq) +
qp->r_rq.size * sz,
rdi->dparms.node);
@@ -857,6 +873,7 @@ bail_driver_priv:
rdi->driver_f.qp_priv_free(rdi, qp);
bail_qp:
+ kfree(qp->s_ack_queue);
kfree(qp);
bail_swq:
@@ -1284,6 +1301,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp)
vfree(qp->r_rq.wq);
vfree(qp->s_wq);
rdi->driver_f.qp_priv_free(rdi, qp);
+ kfree(qp->s_ack_queue);
kfree(qp);
return 0;
}
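
s_ack_queue moves from a fixed-size array embedded in rvt_qp to a per-QP allocation sized by the device's advertised atomic/RDMA-read responder depth, so the clear and reset paths above must iterate with the same bound. The sizing helper is not shown in this hunk; its assumed shape (the authoritative definition lives in include/rdma/rdma_vt.h) is roughly:

/* Assumed shape of the helper used above; responder resources plus one
 * slack entry is the conventional sizing, but see rdma_vt.h.
 */
static inline unsigned int rvt_max_atomic(struct rvt_dev_info *rdi)
{
	return rdi->dparms.max_rdma_atomic + 1;
}
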
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index caec8e9c4666..bab7db6fa9ab 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -92,6 +92,8 @@ enum {
IPOIB_FLAG_UMCAST = 10,
IPOIB_STOP_NEIGH_GC = 11,
IPOIB_NEIGH_TBL_FLUSH = 12,
+ IPOIB_FLAG_DEV_ADDR_SET = 13,
+ IPOIB_FLAG_DEV_ADDR_CTRL = 14,
IPOIB_MAX_BACKOFF_SECONDS = 16,
@@ -392,6 +394,7 @@ struct ipoib_dev_priv {
struct ipoib_ethtool_st ethtool;
struct timer_list poll_timer;
unsigned max_send_sge;
+ bool sm_fullmember_sendonly_support;
};
struct ipoib_ah {
@@ -476,6 +479,7 @@ void ipoib_reap_ah(struct work_struct *work);
void ipoib_mark_paths_invalid(struct net_device *dev);
void ipoib_flush_paths(struct net_device *dev);
+int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv);
struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);
int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 418e5a1c8744..45c40a17d6a6 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -997,6 +997,106 @@ static inline int update_child_pkey(struct ipoib_dev_priv *priv)
return 0;
}
+/*
+ * Returns true if the device address of the ipoib interface has changed and the
+ * new address is a valid one (i.e. in the gid table); returns false otherwise.
+ */
+static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
+{
+ union ib_gid search_gid;
+ union ib_gid gid0;
+ union ib_gid *netdev_gid;
+ int err;
+ u16 index;
+ u8 port;
+ bool ret = false;
+
+ netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4);
+ if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL))
+ return false;
+
+ netif_addr_lock(priv->dev);
+
+	/* The subnet prefix may have changed; update it now so we won't have
+	 * to do it later.
+ */
+ priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix;
+ netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix;
+ search_gid.global.subnet_prefix = gid0.global.subnet_prefix;
+
+ search_gid.global.interface_id = priv->local_gid.global.interface_id;
+
+ netif_addr_unlock(priv->dev);
+
+ err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB,
+ priv->dev, &port, &index);
+
+ netif_addr_lock(priv->dev);
+
+ if (search_gid.global.interface_id !=
+ priv->local_gid.global.interface_id)
+		/* There was a change while we were looking up the gid; bail
+		 * here and let the next work sort this out.
+ */
+ goto out;
+
+ /* The next section of code needs some background:
+ * Per IB spec the port GUID can't change if the HCA is powered on.
+ * port GUID is the basis for GID at index 0 which is the basis for
+	 * the default device address of an ipoib interface.
+ *
+ * so it seems the flow should be:
+ * if user_changed_dev_addr && gid in gid tbl
+ * set bit dev_addr_set
+ * return true
+ * else
+ * return false
+ *
+	 * The issue is that there are devices that don't follow the spec:
+	 * they change the port GUID when the HCA is powered, so in order
+	 * not to break userspace applications, we need to check if the
+	 * user wanted to control the device address, and we assume that
+	 * if the user sets the device address back to be based on GID
+	 * index 0, the user no longer wishes to control it.
+ *
+ * If the user doesn't control the the device address,
+ * IPOIB_FLAG_DEV_ADDR_SET is set and ib_find_gid failed it means
+ * the port GUID has changed and GID at index 0 has changed
+ * so we need to change priv->local_gid and priv->dev->dev_addr
+ * to reflect the new GID.
+ */
+ if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
+ if (!err && port == priv->port) {
+ set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
+ if (index == 0)
+ clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL,
+ &priv->flags);
+ else
+ set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags);
+ ret = true;
+ } else {
+ ret = false;
+ }
+ } else {
+ if (!err && port == priv->port) {
+ ret = true;
+ } else {
+ if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) {
+ memcpy(&priv->local_gid, &gid0,
+ sizeof(priv->local_gid));
+ memcpy(priv->dev->dev_addr + 4, &gid0,
+ sizeof(priv->local_gid));
+ ret = true;
+ }
+ }
+ }
+
+out:
+ netif_addr_unlock(priv->dev);
+
+ return ret;
+}
+
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
enum ipoib_flush_level level,
int nesting)
@@ -1018,6 +1118,9 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
level != IPOIB_FLUSH_HEAVY) {
+ /* Make sure the dev_addr is set even if not flushing */
+ if (level == IPOIB_FLUSH_LIGHT)
+ ipoib_dev_addr_changed_valid(priv);
ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
return;
}
@@ -1029,7 +1132,8 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
update_parent_pkey(priv);
else
update_child_pkey(priv);
- }
+ } else if (level == IPOIB_FLUSH_LIGHT)
+ ipoib_dev_addr_changed_valid(priv);
ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
return;
}
@@ -1081,7 +1185,8 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
if (level >= IPOIB_FLUSH_NORMAL)
ipoib_ib_dev_up(dev);
- ipoib_mcast_restart_task(&priv->restart_task);
+ if (ipoib_dev_addr_changed_valid(priv))
+ ipoib_mcast_restart_task(&priv->restart_task);
}
}
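
The flag machinery in ipoib_dev_addr_changed_valid() reduces to a small decision table. A minimal standalone restatement (a sketch for orientation only; the function and parameter names are illustrative, not kernel identifiers, and gid_in_table stands for "ib_find_gid() succeeded on the local port"):

    /* Condensed form of the DEV_ADDR_SET / DEV_ADDR_CTRL checks above */
    static bool dev_addr_valid(bool addr_set, bool addr_ctrl,
                               bool gid_in_table)
    {
            if (!addr_set)                  /* first pass after a user change */
                    return gid_in_table;    /* also latches SET and CTRL bits */
            if (gid_in_table)               /* current address is still valid */
                    return true;
            /* GID gone from the table: auto-follow the new GID at index 0
             * only if the user never took control of the address */
            return !addr_ctrl;
    }
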
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index b940ef1c19c7..2d7c16346648 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -99,6 +99,7 @@ static struct net_device *ipoib_get_net_dev_by_params(
struct ib_device *dev, u8 port, u16 pkey,
const union ib_gid *gid, const struct sockaddr *addr,
void *client_data);
+static int ipoib_set_mac(struct net_device *dev, void *addr);
static struct ib_client ipoib_client = {
.name = "ipoib",
@@ -117,6 +118,8 @@ int ipoib_open(struct net_device *dev)
set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
+ priv->sm_fullmember_sendonly_support = false;
+
if (ipoib_ib_dev_open(dev)) {
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
return 0;
@@ -629,6 +632,77 @@ void ipoib_mark_paths_invalid(struct net_device *dev)
spin_unlock_irq(&priv->lock);
}
+struct classport_info_context {
+ struct ipoib_dev_priv *priv;
+ struct completion done;
+ struct ib_sa_query *sa_query;
+};
+
+static void classport_info_query_cb(int status, struct ib_class_port_info *rec,
+ void *context)
+{
+ struct classport_info_context *cb_ctx = context;
+ struct ipoib_dev_priv *priv;
+
+ WARN_ON(!context);
+
+ priv = cb_ctx->priv;
+
+ if (status || !rec) {
+ pr_debug("device: %s failed query classport_info status: %d\n",
+ priv->dev->name, status);
+ /* keep the default; we will retry on the next mcast_restart */
+ priv->sm_fullmember_sendonly_support = false;
+ goto out;
+ }
+
+ if (ib_get_cpi_capmask2(rec) &
+ IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT) {
+ pr_debug("device: %s enabled fullmember-sendonly for sendonly MCG\n",
+ priv->dev->name);
+ priv->sm_fullmember_sendonly_support = true;
+ } else {
+ pr_debug("device: %s disabled fullmember-sendonly for sendonly MCG\n",
+ priv->dev->name);
+ priv->sm_fullmember_sendonly_support = false;
+ }
+
+out:
+ complete(&cb_ctx->done);
+}
+
+int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv)
+{
+ struct classport_info_context *callback_context;
+ int ret;
+
+ callback_context = kmalloc(sizeof(*callback_context), GFP_KERNEL);
+ if (!callback_context)
+ return -ENOMEM;
+
+ callback_context->priv = priv;
+ init_completion(&callback_context->done);
+
+ ret = ib_sa_classport_info_rec_query(&ipoib_sa_client,
+ priv->ca, priv->port, 3000,
+ GFP_KERNEL,
+ classport_info_query_cb,
+ callback_context,
+ &callback_context->sa_query);
+ if (ret < 0) {
+ pr_info("%s failed to send ib_sa_classport_info query, ret: %d\n",
+ priv->dev->name, ret);
+ kfree(callback_context);
+ return ret;
+ }
+
+ /* wait for the callback to finish before returning */
+ wait_for_completion(&callback_context->done);
+ kfree(callback_context);
+
+ return ret;
+}
+
void ipoib_flush_paths(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -1649,6 +1723,7 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
.ndo_get_vf_config = ipoib_get_vf_config,
.ndo_get_vf_stats = ipoib_get_vf_stats,
.ndo_set_vf_guid = ipoib_set_vf_guid,
+ .ndo_set_mac_address = ipoib_set_mac,
};
static const struct net_device_ops ipoib_netdev_ops_vf = {
@@ -1771,6 +1846,70 @@ int ipoib_add_umcast_attr(struct net_device *dev)
return device_create_file(&dev->dev, &dev_attr_umcast);
}
+static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
+{
+ struct ipoib_dev_priv *child_priv;
+ struct net_device *netdev = priv->dev;
+
+ netif_addr_lock(netdev);
+
+ memcpy(&priv->local_gid.global.interface_id,
+ &gid->global.interface_id,
+ sizeof(gid->global.interface_id));
+ memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
+ clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
+
+ netif_addr_unlock(netdev);
+
+ if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+ down_read(&priv->vlan_rwsem);
+ list_for_each_entry(child_priv, &priv->child_intfs, list)
+ set_base_guid(child_priv, gid);
+ up_read(&priv->vlan_rwsem);
+ }
+}
+
+static int ipoib_check_lladdr(struct net_device *dev,
+ struct sockaddr_storage *ss)
+{
+ union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
+ int ret = 0;
+
+ netif_addr_lock(dev);
+
+ /* Make sure the QPN, reserved bytes and subnet prefix match the
+ * current lladdr; this also ensures the lladdr is unicast.
+ */
+ if (memcmp(dev->dev_addr, ss->__data,
+ 4 + sizeof(gid->global.subnet_prefix)) ||
+ gid->global.interface_id == 0)
+ ret = -EINVAL;
+
+ netif_addr_unlock(dev);
+
+ return ret;
+}
+
+static int ipoib_set_mac(struct net_device *dev, void *addr)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct sockaddr_storage *ss = addr;
+ int ret;
+
+ if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
+ return -EBUSY;
+
+ ret = ipoib_check_lladdr(dev, ss);
+ if (ret)
+ return ret;
+
+ set_base_guid(priv, (union ib_gid *)(ss->__data + 4));
+
+ queue_work(ipoib_workqueue, &priv->flush_light);
+
+ return 0;
+}
+
static ssize_t create_child(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -1894,6 +2033,7 @@ static struct net_device *ipoib_add_port(const char *format,
goto device_init_failed;
} else
memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
+ set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
result = ipoib_dev_init(priv->dev, hca, port);
if (result < 0) {
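
All of the dev_addr + 4 arithmetic in ipoib_check_lladdr(), set_base_guid() and ipoib_set_mac() assumes the 20-byte (INFINIBAND_ALEN) IPoIB link-layer address layout. A sketch of that layout (the struct is illustrative only, not a kernel definition):

    /* 20-byte IPoIB hardware address, as assumed by the code above */
    struct ipoib_lladdr_sketch {
            u8              qpn[4];         /* flags + queue pair number */
            union ib_gid    gid;            /* bytes 4..19 */
    } __packed;

    /* gid.global.subnet_prefix: bytes  4..11, owned by the fabric
     * gid.global.interface_id:  bytes 12..19, the only part that
     *                           ipoib_set_mac() lets the user change */

This is why ipoib_check_lladdr() compares the first 4 + 8 bytes (QPN plus subnet prefix) and rejects a zero interface ID.
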
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 25889311b1e9..82fbc9442608 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -64,6 +64,9 @@ struct ipoib_mcast_iter {
unsigned int send_only;
};
+/* join state that allows creating an MCG with a send-only member request */
+#define SENDONLY_FULLMEMBER_JOIN 8
+
/*
* This should be called with the priv->lock held
*/
@@ -326,12 +329,23 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
carrier_on_task);
struct ib_port_attr attr;
+ int ret;
if (ib_query_port(priv->ca, priv->port, &attr) ||
attr.state != IB_PORT_ACTIVE) {
ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
return;
}
+ /*
+ * Check whether we can create send-only MCGs with the sendonly-fullmember
+ * join state. This is done here, after the successful join to the
+ * broadcast group, because the broadcast group must always be joined
+ * first and is always re-joined if the SM changes substantially.
+ */
+ ret = ipoib_check_sm_sendonly_fullmember_support(priv);
+ if (ret < 0)
+ pr_debug("%s failed query sm support for sendonly-fullmember (ret: %d)\n",
+ priv->dev->name, ret);
/*
* Take rtnl_lock to avoid racing with ipoib_stop() and
@@ -515,22 +529,20 @@ static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
rec.hop_limit = priv->broadcast->mcmember.hop_limit;
/*
- * Send-only IB Multicast joins do not work at the core
- * IB layer yet, so we can't use them here. However,
- * we are emulating an Ethernet multicast send, which
- * does not require a multicast subscription and will
- * still send properly. The most appropriate thing to
+ * Send-only IB Multicast joins work at the core IB layer but
+ * require specific SM support.
+ * We can use such joins here only if the current SM supports the feature;
+ * if it does not, we emulate an Ethernet multicast send,
+ * which does not require a multicast subscription and will
+ * still send properly. The most appropriate thing to
* do is to create the group if it doesn't exist as that
* most closely emulates the behavior, from a user space
- * application perspecitive, of Ethernet multicast
- * operation. For now, we do a full join, maybe later
- * when the core IB layers support send only joins we
- * will use them.
+ * application perspective, of Ethernet multicast operation.
*/
-#if 0
- if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
- rec.join_state = 4;
-#endif
+ if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
+ priv->sm_fullmember_sendonly_support)
+ /* SM supports sendonly-fullmember; otherwise we fall back to full-member */
+ rec.join_state = SENDONLY_FULLMEMBER_JOIN;
}
spin_unlock_irq(&priv->lock);
@@ -570,11 +582,13 @@ void ipoib_mcast_join_task(struct work_struct *work)
return;
}
priv->local_lid = port_attr.lid;
+ netif_addr_lock(dev);
- if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid, NULL))
- ipoib_warn(priv, "ib_query_gid() failed\n");
- else
- memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
+ if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
+ netif_addr_unlock(dev);
+ return;
+ }
+ netif_addr_unlock(dev);
spin_lock_irq(&priv->lock);
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
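
SENDONLY_FULLMEMBER_JOIN above is one bit of the 4-bit MCMemberRecord JoinState field from the IBTA spec. Roughly (the macro names here are illustrative, not kernel definitions):

    /* MCMemberRecord.JoinState bits, per the IBTA specification */
    #define JOIN_STATE_FULL_MEMBER          0x1  /* the default join */
    #define JOIN_STATE_NON_MEMBER           0x2
    #define JOIN_STATE_SENDONLY_NON_MEMBER  0x4  /* the old "#if 0" value */
    #define JOIN_STATE_SENDONLY_FULL_MEMBER 0x8  /* SENDONLY_FULLMEMBER_JOIN */

SMs advertise support for the 0x8 state through the ClassPortInfo CapabilityMask2 bit that ipoib_check_sm_sendonly_fullmember_support() queries, which is why the driver falls back to a plain full-member join when the flag is unset.
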
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index b809c373e40e..1e7cbbaa15bd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -307,5 +307,8 @@ void ipoib_event(struct ib_event_handler *handler,
queue_work(ipoib_workqueue, &priv->flush_normal);
} else if (record->event == IB_EVENT_PKEY_CHANGE) {
queue_work(ipoib_workqueue, &priv->flush_heavy);
+ } else if (record->event == IB_EVENT_GID_CHANGE &&
+ !test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
+ queue_work(ipoib_workqueue, &priv->flush_light);
}
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index fca1a882de27..64a35595eab8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -68,6 +68,8 @@ int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
priv->pkey = pkey;
memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
+ memcpy(&priv->local_gid, &ppriv->local_gid, sizeof(priv->local_gid));
+ set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
priv->dev->broadcast[8] = pkey >> 8;
priv->dev->broadcast[9] = pkey & 0xff;
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 897b5a4993e8..a990c04208c9 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2596,9 +2596,19 @@ static void isert_free_conn(struct iscsi_conn *conn)
isert_put_conn(isert_conn);
}
+static void isert_get_rx_pdu(struct iscsi_conn *conn)
+{
+ struct completion comp;
+
+ init_completion(&comp);
+
+ wait_for_completion_interruptible(&comp);
+}
+
static struct iscsit_transport iser_target_transport = {
.name = "IB/iSER",
.transport_type = ISCSI_INFINIBAND,
+ .rdma_shutdown = true,
.priv_size = sizeof(struct isert_cmd),
.owner = THIS_MODULE,
.iscsit_setup_np = isert_setup_np,
@@ -2614,6 +2624,7 @@ static struct iscsit_transport iser_target_transport = {
.iscsit_queue_data_in = isert_put_datain,
.iscsit_queue_status = isert_put_response,
.iscsit_aborted_task = isert_aborted_task,
+ .iscsit_get_rx_pdu = isert_get_rx_pdu,
.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};
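
Both the ipoib classport query (which blocks until the SA callback fires) and isert_get_rx_pdu() above (which parks the rx thread on a completion that nothing ever completes, so only a signal ends the wait) lean on struct completion. Its minimal shape, as a sketch:

    #include <linux/completion.h>

    /* producer side, e.g. an asynchronous callback */
    static void producer(struct completion *done)
    {
            /* ... finish the work, then wake the waiter: */
            complete(done);
    }

    /* consumer side: issue the async request, then block */
    static void consumer(void)
    {
            struct completion done;

            init_completion(&done);
            /* ... hand &done to the producer context ... */
            wait_for_completion(&done);
            /* wait_for_completion_interruptible() additionally returns
             * early, with -ERESTARTSYS, when a signal arrives */
    }
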
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 2843f1ae75bd..e68b20cba70b 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -254,8 +254,8 @@ static void srpt_get_class_port_info(struct ib_dm_mad *mad)
memset(cif, 0, sizeof(*cif));
cif->base_version = 1;
cif->class_version = 1;
- cif->resp_time_value = 20;
+ ib_set_cpi_resp_time(cif, 20);
mad->mad_hdr.status = 0;
}
@@ -1767,14 +1767,6 @@ static void __srpt_close_all_ch(struct srpt_device *sdev)
}
}
-/**
- * srpt_shutdown_session() - Whether or not a session may be shut down.
- */
-static int srpt_shutdown_session(struct se_session *se_sess)
-{
- return 1;
-}
-
static void srpt_free_ch(struct kref *kref)
{
struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
@@ -3064,7 +3056,6 @@ static const struct target_core_fabric_ops srpt_template = {
.tpg_get_inst_index = srpt_tpg_get_inst_index,
.release_cmd = srpt_release_cmd,
.check_stop_free = srpt_check_stop_free,
- .shutdown_session = srpt_shutdown_session,
.close_session = srpt_close_session,
.sess_get_index = srpt_sess_get_index,
.sess_get_initiator_sid = NULL,
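
The switch from assigning resp_time_value directly to calling ib_set_cpi_resp_time() reflects ClassPortInfo packing RespTimeValue (5 bits) and CapabilityMask2 (27 bits) into a single 32-bit big-endian word, so neither field can be stored as a whole byte anymore. A sketch of the accessors' shape (simplified, not the verbatim kernel helpers):

    /* cap_mask2_resp_time: [ CapabilityMask2 : 27 | RespTimeValue : 5 ] */
    #define CPI_RESP_TIME_MASK      0x1f
    #define CPI_RESP_TIME_BITS      5

    static inline void set_cpi_resp_time(__be32 *field, u8 rtime)
    {
            u32 v = be32_to_cpu(*field) & ~(u32)CPI_RESP_TIME_MASK;

            *field = cpu_to_be32(v | (rtime & CPI_RESP_TIME_MASK));
    }

    static inline u32 get_cpi_capmask2(__be32 field)
    {
            return be32_to_cpu(field) >> CPI_RESP_TIME_BITS;
    }
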
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 1142a93dd90b..804dbcc37d3f 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -87,7 +87,7 @@
#define DRIVER_AUTHOR "Marko Friedemann <mfr@bmx-chemnitz.de>"
#define DRIVER_DESC "X-Box pad driver"
-#define XPAD_PKT_LEN 32
+#define XPAD_PKT_LEN 64
/* xbox d-pads should map to buttons, as is required for DDR pads
but we map them to axes when possible to simplify things */
@@ -129,6 +129,7 @@ static const struct xpad_device {
{ 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
{ 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE },
+ { 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
@@ -173,9 +174,11 @@ static const struct xpad_device {
{ 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0e6f, 0x0113, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0139, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
{ 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX },
@@ -183,6 +186,7 @@ static const struct xpad_device {
{ 0x0f0d, 0x000a, "Hori Co. DOA4 FightStick", 0, XTYPE_XBOX360 },
{ 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0f0d, 0x0067, "HORIPAD ONE", 0, XTYPE_XBOXONE },
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
@@ -199,6 +203,7 @@ static const struct xpad_device {
{ 0x162e, 0xbeef, "Joytech Neo-Se Take2", 0, XTYPE_XBOX360 },
{ 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 },
{ 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x542a, "Xbox ONE spectra", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
@@ -212,6 +217,8 @@ static const struct xpad_device {
{ 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x541a, "PowerA Xbox One Mini Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x24c6, 0x543a, "PowerA Xbox One wired controller", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x5500, "Hori XBOX 360 EX 2 with Turbo", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5501, "Hori Real Arcade Pro VX-SA", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 },
@@ -307,13 +314,16 @@ static struct usb_device_id xpad_table[] = {
{ USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */
XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
+ XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */
XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
XPAD_XBOX360_VENDOR(0x1bad), /* Harmonix Rock Band Guitar and Drums */
XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
+ XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */
XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
@@ -457,6 +467,10 @@ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *d
static void xpad360_process_packet(struct usb_xpad *xpad, struct input_dev *dev,
u16 cmd, unsigned char *data)
{
+ /* ignore anything that is not valid pad data (type byte 0x00) */
+ if (data[0] != 0x00)
+ return;
+
/* digital pad */
if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
/* dpad as buttons (left, right, up, down) */
@@ -756,6 +770,7 @@ static bool xpad_prepare_next_out_packet(struct usb_xpad *xpad)
if (packet) {
memcpy(xpad->odata, packet->data, packet->len);
xpad->irq_out->transfer_buffer_length = packet->len;
+ packet->pending = false;
return true;
}
@@ -797,7 +812,6 @@ static void xpad_irq_out(struct urb *urb)
switch (status) {
case 0:
/* success */
- xpad->out_packets[xpad->last_out_packet].pending = false;
xpad->irq_out_active = xpad_prepare_next_out_packet(xpad);
break;
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index 8d7133268745..5f9655d49a65 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -20,21 +20,40 @@
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
struct pwm_beeper {
struct input_dev *input;
struct pwm_device *pwm;
+ struct work_struct work;
unsigned long period;
};
#define HZ_TO_NANOSECONDS(x) (1000000000UL/(x))
+static void __pwm_beeper_set(struct pwm_beeper *beeper)
+{
+ unsigned long period = beeper->period;
+
+ if (period) {
+ pwm_config(beeper->pwm, period / 2, period);
+ pwm_enable(beeper->pwm);
+ } else
+ pwm_disable(beeper->pwm);
+}
+
+static void pwm_beeper_work(struct work_struct *work)
+{
+ struct pwm_beeper *beeper =
+ container_of(work, struct pwm_beeper, work);
+
+ __pwm_beeper_set(beeper);
+}
+
static int pwm_beeper_event(struct input_dev *input,
unsigned int type, unsigned int code, int value)
{
- int ret = 0;
struct pwm_beeper *beeper = input_get_drvdata(input);
- unsigned long period;
if (type != EV_SND || value < 0)
return -EINVAL;
@@ -49,22 +68,31 @@ static int pwm_beeper_event(struct input_dev *input,
return -EINVAL;
}
- if (value == 0) {
- pwm_disable(beeper->pwm);
- } else {
- period = HZ_TO_NANOSECONDS(value);
- ret = pwm_config(beeper->pwm, period / 2, period);
- if (ret)
- return ret;
- ret = pwm_enable(beeper->pwm);
- if (ret)
- return ret;
- beeper->period = period;
- }
+ if (value == 0)
+ beeper->period = 0;
+ else
+ beeper->period = HZ_TO_NANOSECONDS(value);
+
+ schedule_work(&beeper->work);
return 0;
}
+static void pwm_beeper_stop(struct pwm_beeper *beeper)
+{
+ cancel_work_sync(&beeper->work);
+
+ if (beeper->period)
+ pwm_disable(beeper->pwm);
+}
+
+static void pwm_beeper_close(struct input_dev *input)
+{
+ struct pwm_beeper *beeper = input_get_drvdata(input);
+
+ pwm_beeper_stop(beeper);
+}
+
static int pwm_beeper_probe(struct platform_device *pdev)
{
unsigned long pwm_id = (unsigned long)dev_get_platdata(&pdev->dev);
@@ -93,6 +121,8 @@ static int pwm_beeper_probe(struct platform_device *pdev)
*/
pwm_apply_args(beeper->pwm);
+ INIT_WORK(&beeper->work, pwm_beeper_work);
+
beeper->input = input_allocate_device();
if (!beeper->input) {
dev_err(&pdev->dev, "Failed to allocate input device\n");
@@ -112,6 +142,7 @@ static int pwm_beeper_probe(struct platform_device *pdev)
beeper->input->sndbit[0] = BIT(SND_TONE) | BIT(SND_BELL);
beeper->input->event = pwm_beeper_event;
+ beeper->input->close = pwm_beeper_close;
input_set_drvdata(beeper->input, beeper);
@@ -141,7 +172,6 @@ static int pwm_beeper_remove(struct platform_device *pdev)
input_unregister_device(beeper->input);
- pwm_disable(beeper->pwm);
pwm_free(beeper->pwm);
kfree(beeper);
@@ -153,8 +183,7 @@ static int __maybe_unused pwm_beeper_suspend(struct device *dev)
{
struct pwm_beeper *beeper = dev_get_drvdata(dev);
- if (beeper->period)
- pwm_disable(beeper->pwm);
+ pwm_beeper_stop(beeper);
return 0;
}
@@ -163,10 +192,8 @@ static int __maybe_unused pwm_beeper_resume(struct device *dev)
{
struct pwm_beeper *beeper = dev_get_drvdata(dev);
- if (beeper->period) {
- pwm_config(beeper->pwm, beeper->period / 2, beeper->period);
- pwm_enable(beeper->pwm);
- }
+ if (beeper->period)
+ __pwm_beeper_set(beeper);
return 0;
}
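
The reason the driver now routes everything through a work item: the input core may call the event() handler from atomic context, while pwm_config() and pwm_enable() are allowed to sleep on some PWM providers. The pattern in miniature (a sketch, with foo as a stand-in for the driver state and apply_period() as a hypothetical helper):

    struct foo {
            struct work_struct work;
            unsigned long period;           /* written from atomic context */
    };

    static void foo_work(struct work_struct *work)
    {
            struct foo *f = container_of(work, struct foo, work);

            apply_period(f->period);        /* hypothetical; may sleep here */
    }

    static void foo_request(struct foo *f, unsigned long period)
    {
            f->period = period;             /* safe even in atomic context */
            schedule_work(&f->work);
    }

INIT_WORK() wires foo_work up once at probe time, and cancel_work_sync(), as in pwm_beeper_stop(), guarantees the work is no longer running before the device is suspended or closed.
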
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index abe1a927b332..65ebbd111702 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -981,9 +981,15 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
#ifdef CONFIG_COMPAT
+
+#define UI_SET_PHYS_COMPAT _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t)
+
static long uinput_compat_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
+ if (cmd == UI_SET_PHYS_COMPAT)
+ cmd = UI_SET_PHYS;
+
return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg));
}
#endif
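
The translation is needed because _IOW() encodes sizeof(the argument type) into the ioctl command number, so a 32-bit process computes a different UI_SET_PHYS value than a 64-bit kernel does. Roughly:

    /* include/uapi/linux/uinput.h: UINPUT_IOCTL_BASE is 'U' */
    #define UI_SET_PHYS _IOW(UINPUT_IOCTL_BASE, 108, char *)

    /* 64-bit kernel:  sizeof(char *) == 8 -> one command value
     * 32-bit caller:  sizeof(char *) == 4 -> a different value, equal to
     *                 _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t)        */

Remapping UI_SET_PHYS_COMPAT to UI_SET_PHYS in the compat handler lets both flavors reach the same implementation.
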
diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c
index 485794376ee5..d07dd29d4848 100644
--- a/drivers/input/touchscreen/sun4i-ts.c
+++ b/drivers/input/touchscreen/sun4i-ts.c
@@ -115,7 +115,6 @@
struct sun4i_ts_data {
struct device *dev;
struct input_dev *input;
- struct thermal_zone_device *tz;
void __iomem *base;
unsigned int irq;
bool ignore_fifo_data;
@@ -366,10 +365,7 @@ static int sun4i_ts_probe(struct platform_device *pdev)
if (IS_ERR(hwmon))
return PTR_ERR(hwmon);
- ts->tz = thermal_zone_of_sensor_register(ts->dev, 0, ts,
- &sun4i_ts_tz_ops);
- if (IS_ERR(ts->tz))
- ts->tz = NULL;
+ devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops);
writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC);
@@ -377,7 +373,6 @@ static int sun4i_ts_probe(struct platform_device *pdev)
error = input_register_device(ts->input);
if (error) {
writel(0, ts->base + TP_INT_FIFOC);
- thermal_zone_of_sensor_unregister(ts->dev, ts->tz);
return error;
}
}
@@ -394,8 +389,6 @@ static int sun4i_ts_remove(struct platform_device *pdev)
if (ts->input)
input_unregister_device(ts->input);
- thermal_zone_of_sensor_unregister(ts->dev, ts->tz);
-
/* Deactivate all IRQs */
writel(0, ts->base + TP_INT_FIFOC);
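
The devm_ variant ties the sensor registration's lifetime to the underlying struct device, which is why the ts->tz member, the error path, and both thermal_zone_of_sensor_unregister() calls can disappear. The general shape (a sketch; the surrounding probe function is elided):

    /* managed: the core unregisters automatically on probe failure
     * or driver unbind, so remove() needs no matching call */
    devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops);

The return value is deliberately left unchecked here; the old code already treated registration failure as non-fatal by nulling ts->tz.
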
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index ebab33e77d67..94b68213c50d 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1477,7 +1477,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
- if (IS_ERR_VALUE(asid))
+ if (asid < 0)
return asid;
cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
@@ -1508,7 +1508,7 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
- if (IS_ERR_VALUE(vmid))
+ if (vmid < 0)
return vmid;
cfg->vmid = (u16)vmid;
@@ -1569,7 +1569,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
smmu_domain->pgtbl_ops = pgtbl_ops;
ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
free_io_pgtable_ops(pgtbl_ops);
return ret;
@@ -1642,7 +1642,7 @@ static void arm_smmu_detach_dev(struct device *dev)
struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);
smmu_group->ste.bypass = true;
- if (IS_ERR_VALUE(arm_smmu_install_ste_for_group(smmu_group)))
+ if (arm_smmu_install_ste_for_group(smmu_group) < 0)
dev_warn(dev, "failed to install bypass STE\n");
smmu_group->domain = NULL;
@@ -1694,7 +1694,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
smmu_group->ste.bypass = domain->type == IOMMU_DOMAIN_DMA;
ret = arm_smmu_install_ste_for_group(smmu_group);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
smmu_group->domain = NULL;
out_unlock:
@@ -2235,7 +2235,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
arm_smmu_evtq_handler,
arm_smmu_evtq_thread,
0, "arm-smmu-v3-evtq", smmu);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
dev_warn(smmu->dev, "failed to enable evtq irq\n");
}
@@ -2244,7 +2244,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
ret = devm_request_irq(smmu->dev, irq,
arm_smmu_cmdq_sync_handler, 0,
"arm-smmu-v3-cmdq-sync", smmu);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
}
@@ -2252,7 +2252,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
if (irq) {
ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
0, "arm-smmu-v3-gerror", smmu);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
dev_warn(smmu->dev, "failed to enable gerror irq\n");
}
@@ -2264,7 +2264,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
arm_smmu_priq_thread,
0, "arm-smmu-v3-priq",
smmu);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
dev_warn(smmu->dev,
"failed to enable priq irq\n");
else
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index e206ce7a4e4b..9345a3fcb706 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -950,7 +950,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
smmu->num_context_banks);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto out_unlock;
cfg->cbndx = ret;
@@ -989,7 +989,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
"arm-smmu-context-fault", domain);
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
cfg->irptndx, irq);
cfg->irptndx = INVALID_IRPTNDX;
@@ -1099,7 +1099,7 @@ static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
for (i = 0; i < cfg->num_streamids; ++i) {
int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
smmu->num_mapping_groups);
- if (IS_ERR_VALUE(idx)) {
+ if (idx < 0) {
dev_err(smmu->dev, "failed to allocate free SMR\n");
goto err_free_smrs;
}
@@ -1233,7 +1233,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
/* Ensure that the domain is finalised */
ret = arm_smmu_init_domain_context(domain, smmu);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
/*
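
The motivation for the blanket IS_ERR_VALUE() removal in both SMMU drivers: the macro is meant for unsigned long values in the top error range, and handing it a plain int only works through sign extension (and silently fails for unsigned types). A sketch:

    /* roughly, from include/linux/err.h: */
    #define MAX_ERRNO 4095
    #define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

    static void example(void)
    {
            int ret = -EINVAL;
            u32 uret = -EINVAL;

            IS_ERR_VALUE(ret);      /* true, but only via sign extension */
            IS_ERR_VALUE(uret);     /* always false on 64-bit: u32 is
                                     * zero-extended to unsigned long */
            /* hence the conversions above to the idiomatic "ret < 0" */
    }
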
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index b2bfb9594508..a644d0cec2d8 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -33,6 +33,7 @@
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
+#include <linux/cpu.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/iova.h>
@@ -390,6 +391,7 @@ struct dmar_domain {
* domain ids are 16 bit wide according
* to VT-d spec, section 9.3 */
+ bool has_iotlb_device;
struct list_head devices; /* all devices' list */
struct iova_domain iovad; /* iova's that belong to this domain */
@@ -456,27 +458,32 @@ static LIST_HEAD(dmar_rmrr_units);
static void flush_unmaps_timeout(unsigned long data);
-static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
+struct deferred_flush_entry {
+ unsigned long iova_pfn;
+ unsigned long nrpages;
+ struct dmar_domain *domain;
+ struct page *freelist;
+};
#define HIGH_WATER_MARK 250
-struct deferred_flush_tables {
+struct deferred_flush_table {
int next;
- struct iova *iova[HIGH_WATER_MARK];
- struct dmar_domain *domain[HIGH_WATER_MARK];
- struct page *freelist[HIGH_WATER_MARK];
+ struct deferred_flush_entry entries[HIGH_WATER_MARK];
+};
+
+struct deferred_flush_data {
+ spinlock_t lock;
+ int timer_on;
+ struct timer_list timer;
+ long size;
+ struct deferred_flush_table *tables;
};
-static struct deferred_flush_tables *deferred_flush;
+DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;
-static DEFINE_SPINLOCK(async_umap_flush_lock);
-static LIST_HEAD(unmaps_to_do);
-
-static int timer_on;
-static long list_size;
-
static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct dmar_domain *domain,
@@ -1458,10 +1465,35 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
return NULL;
}
+static void domain_update_iotlb(struct dmar_domain *domain)
+{
+ struct device_domain_info *info;
+ bool has_iotlb_device = false;
+
+ assert_spin_locked(&device_domain_lock);
+
+ list_for_each_entry(info, &domain->devices, link) {
+ struct pci_dev *pdev;
+
+ if (!info->dev || !dev_is_pci(info->dev))
+ continue;
+
+ pdev = to_pci_dev(info->dev);
+ if (pdev->ats_enabled) {
+ has_iotlb_device = true;
+ break;
+ }
+ }
+
+ domain->has_iotlb_device = has_iotlb_device;
+}
+
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
struct pci_dev *pdev;
+ assert_spin_locked(&device_domain_lock);
+
if (!info || !dev_is_pci(info->dev))
return;
@@ -1481,6 +1513,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
#endif
if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
info->ats_enabled = 1;
+ domain_update_iotlb(info->domain);
info->ats_qdep = pci_ats_queue_depth(pdev);
}
}
@@ -1489,6 +1522,8 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
struct pci_dev *pdev;
+ assert_spin_locked(&device_domain_lock);
+
if (!dev_is_pci(info->dev))
return;
@@ -1497,6 +1532,7 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
if (info->ats_enabled) {
pci_disable_ats(pdev);
info->ats_enabled = 0;
+ domain_update_iotlb(info->domain);
}
#ifdef CONFIG_INTEL_IOMMU_SVM
if (info->pri_enabled) {
@@ -1517,6 +1553,9 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
unsigned long flags;
struct device_domain_info *info;
+ if (!domain->has_iotlb_device)
+ return;
+
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry(info, &domain->devices, link) {
if (!info->ats_enabled)
@@ -1734,6 +1773,7 @@ static struct dmar_domain *alloc_domain(int flags)
memset(domain, 0, sizeof(*domain));
domain->nid = -1;
domain->flags = flags;
+ domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
return domain;
@@ -1918,8 +1958,12 @@ static void domain_exit(struct dmar_domain *domain)
return;
/* Flush any lazy unmaps that may reference this domain */
- if (!intel_iommu_strict)
- flush_unmaps_timeout(0);
+ if (!intel_iommu_strict) {
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ flush_unmaps_timeout(cpu);
+ }
/* Remove associated devices and clear attached or cached domains */
rcu_read_lock();
@@ -3077,7 +3121,7 @@ static int __init init_dmars(void)
bool copied_tables = false;
struct device *dev;
struct intel_iommu *iommu;
- int i, ret;
+ int i, ret, cpu;
/*
* for each drhd
@@ -3110,11 +3154,20 @@ static int __init init_dmars(void)
goto error;
}
- deferred_flush = kzalloc(g_num_of_iommus *
- sizeof(struct deferred_flush_tables), GFP_KERNEL);
- if (!deferred_flush) {
- ret = -ENOMEM;
- goto free_g_iommus;
+ for_each_possible_cpu(cpu) {
+ struct deferred_flush_data *dfd = per_cpu_ptr(&deferred_flush,
+ cpu);
+
+ dfd->tables = kzalloc(g_num_of_iommus *
+ sizeof(struct deferred_flush_table),
+ GFP_KERNEL);
+ if (!dfd->tables) {
+ ret = -ENOMEM;
+ goto free_g_iommus;
+ }
+
+ spin_lock_init(&dfd->lock);
+ setup_timer(&dfd->timer, flush_unmaps_timeout, cpu);
}
for_each_active_iommu(iommu, drhd) {
@@ -3291,19 +3344,20 @@ free_iommu:
disable_dmar_iommu(iommu);
free_dmar_iommu(iommu);
}
- kfree(deferred_flush);
free_g_iommus:
+ for_each_possible_cpu(cpu)
+ kfree(per_cpu_ptr(&deferred_flush, cpu)->tables);
kfree(g_iommus);
error:
return ret;
}
/* This takes a number of _MM_ pages, not VTD pages */
-static struct iova *intel_alloc_iova(struct device *dev,
+static unsigned long intel_alloc_iova(struct device *dev,
struct dmar_domain *domain,
unsigned long nrpages, uint64_t dma_mask)
{
- struct iova *iova = NULL;
+ unsigned long iova_pfn = 0;
/* Restrict dma_mask to the width that the iommu can handle */
dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
@@ -3316,19 +3370,19 @@ static struct iova *intel_alloc_iova(struct device *dev,
* DMA_BIT_MASK(32) and if that fails then try allocating
* from higher range
*/
- iova = alloc_iova(&domain->iovad, nrpages,
- IOVA_PFN(DMA_BIT_MASK(32)), 1);
- if (iova)
- return iova;
+ iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
+ IOVA_PFN(DMA_BIT_MASK(32)));
+ if (iova_pfn)
+ return iova_pfn;
}
- iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
- if (unlikely(!iova)) {
+ iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
+ if (unlikely(!iova_pfn)) {
pr_err("Allocating %ld-page iova for %s failed",
nrpages, dev_name(dev));
- return NULL;
+ return 0;
}
- return iova;
+ return iova_pfn;
}
static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
@@ -3426,7 +3480,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
{
struct dmar_domain *domain;
phys_addr_t start_paddr;
- struct iova *iova;
+ unsigned long iova_pfn;
int prot = 0;
int ret;
struct intel_iommu *iommu;
@@ -3444,8 +3498,8 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
iommu = domain_get_iommu(domain);
size = aligned_nrpages(paddr, size);
- iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
- if (!iova)
+ iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
+ if (!iova_pfn)
goto error;
/*
@@ -3463,7 +3517,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
* might have two guest_addr mapping to the same host paddr, but this
* is not a big problem
*/
- ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
+ ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
mm_to_dma_pfn(paddr_pfn), size, prot);
if (ret)
goto error;
@@ -3471,18 +3525,18 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
/* it's a non-present to present mapping. Only flush if caching mode */
if (cap_caching_mode(iommu->cap))
iommu_flush_iotlb_psi(iommu, domain,
- mm_to_dma_pfn(iova->pfn_lo),
+ mm_to_dma_pfn(iova_pfn),
size, 0, 1);
else
iommu_flush_write_buffer(iommu);
- start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
+ start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
start_paddr += paddr & ~PAGE_MASK;
return start_paddr;
error:
- if (iova)
- __free_iova(&domain->iovad, iova);
+ if (iova_pfn)
+ free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
dev_name(dev), size, (unsigned long long)paddr, dir);
return 0;
@@ -3497,91 +3551,120 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
dir, *dev->dma_mask);
}
-static void flush_unmaps(void)
+static void flush_unmaps(struct deferred_flush_data *flush_data)
{
int i, j;
- timer_on = 0;
+ flush_data->timer_on = 0;
/* just flush them all */
for (i = 0; i < g_num_of_iommus; i++) {
struct intel_iommu *iommu = g_iommus[i];
+ struct deferred_flush_table *flush_table =
+ &flush_data->tables[i];
if (!iommu)
continue;
- if (!deferred_flush[i].next)
+ if (!flush_table->next)
continue;
/* In caching mode, global flushes turn emulation expensive */
if (!cap_caching_mode(iommu->cap))
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH);
- for (j = 0; j < deferred_flush[i].next; j++) {
+ for (j = 0; j < flush_table->next; j++) {
unsigned long mask;
- struct iova *iova = deferred_flush[i].iova[j];
- struct dmar_domain *domain = deferred_flush[i].domain[j];
+ struct deferred_flush_entry *entry =
+ &flush_table->entries[j];
+ unsigned long iova_pfn = entry->iova_pfn;
+ unsigned long nrpages = entry->nrpages;
+ struct dmar_domain *domain = entry->domain;
+ struct page *freelist = entry->freelist;
/* On real hardware multiple invalidations are expensive */
if (cap_caching_mode(iommu->cap))
iommu_flush_iotlb_psi(iommu, domain,
- iova->pfn_lo, iova_size(iova),
- !deferred_flush[i].freelist[j], 0);
+ mm_to_dma_pfn(iova_pfn),
+ nrpages, !freelist, 0);
else {
- mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
- iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
- (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
+ mask = ilog2(nrpages);
+ iommu_flush_dev_iotlb(domain,
+ (uint64_t)iova_pfn << PAGE_SHIFT, mask);
}
- __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
- if (deferred_flush[i].freelist[j])
- dma_free_pagelist(deferred_flush[i].freelist[j]);
+ free_iova_fast(&domain->iovad, iova_pfn, nrpages);
+ if (freelist)
+ dma_free_pagelist(freelist);
}
- deferred_flush[i].next = 0;
+ flush_table->next = 0;
}
- list_size = 0;
+ flush_data->size = 0;
}
-static void flush_unmaps_timeout(unsigned long data)
+static void flush_unmaps_timeout(unsigned long cpuid)
{
+ struct deferred_flush_data *flush_data = per_cpu_ptr(&deferred_flush, cpuid);
unsigned long flags;
- spin_lock_irqsave(&async_umap_flush_lock, flags);
- flush_unmaps();
- spin_unlock_irqrestore(&async_umap_flush_lock, flags);
+ spin_lock_irqsave(&flush_data->lock, flags);
+ flush_unmaps(flush_data);
+ spin_unlock_irqrestore(&flush_data->lock, flags);
}
-static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
+static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
+ unsigned long nrpages, struct page *freelist)
{
unsigned long flags;
- int next, iommu_id;
+ int entry_id, iommu_id;
struct intel_iommu *iommu;
+ struct deferred_flush_entry *entry;
+ struct deferred_flush_data *flush_data;
+ unsigned int cpuid;
- spin_lock_irqsave(&async_umap_flush_lock, flags);
- if (list_size == HIGH_WATER_MARK)
- flush_unmaps();
+ cpuid = get_cpu();
+ flush_data = per_cpu_ptr(&deferred_flush, cpuid);
+
+ /* Flush all CPUs' entries to avoid deferring too much. If
+ * this becomes a bottleneck, we can flush just this CPU's
+ * entries and rely on the flush timer for the rest.
+ */
+ if (flush_data->size == HIGH_WATER_MARK) {
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ flush_unmaps_timeout(cpu);
+ }
+
+ spin_lock_irqsave(&flush_data->lock, flags);
iommu = domain_get_iommu(dom);
iommu_id = iommu->seq_id;
- next = deferred_flush[iommu_id].next;
- deferred_flush[iommu_id].domain[next] = dom;
- deferred_flush[iommu_id].iova[next] = iova;
- deferred_flush[iommu_id].freelist[next] = freelist;
- deferred_flush[iommu_id].next++;
+ entry_id = flush_data->tables[iommu_id].next;
+ ++(flush_data->tables[iommu_id].next);
- if (!timer_on) {
- mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
- timer_on = 1;
+ entry = &flush_data->tables[iommu_id].entries[entry_id];
+ entry->domain = dom;
+ entry->iova_pfn = iova_pfn;
+ entry->nrpages = nrpages;
+ entry->freelist = freelist;
+
+ if (!flush_data->timer_on) {
+ mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10));
+ flush_data->timer_on = 1;
}
- list_size++;
- spin_unlock_irqrestore(&async_umap_flush_lock, flags);
+ flush_data->size++;
+ spin_unlock_irqrestore(&flush_data->lock, flags);
+
+ put_cpu();
}
-static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
+static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
{
struct dmar_domain *domain;
unsigned long start_pfn, last_pfn;
- struct iova *iova;
+ unsigned long nrpages;
+ unsigned long iova_pfn;
struct intel_iommu *iommu;
struct page *freelist;
@@ -3593,13 +3676,11 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
iommu = domain_get_iommu(domain);
- iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
- if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
- (unsigned long long)dev_addr))
- return;
+ iova_pfn = IOVA_PFN(dev_addr);
- start_pfn = mm_to_dma_pfn(iova->pfn_lo);
- last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
+ nrpages = aligned_nrpages(dev_addr, size);
+ start_pfn = mm_to_dma_pfn(iova_pfn);
+ last_pfn = start_pfn + nrpages - 1;
pr_debug("Device %s unmapping: pfn %lx-%lx\n",
dev_name(dev), start_pfn, last_pfn);
@@ -3608,12 +3689,12 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
if (intel_iommu_strict) {
iommu_flush_iotlb_psi(iommu, domain, start_pfn,
- last_pfn - start_pfn + 1, !freelist, 0);
+ nrpages, !freelist, 0);
/* free iova */
- __free_iova(&domain->iovad, iova);
+ free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
dma_free_pagelist(freelist);
} else {
- add_unmap(domain, iova, freelist);
+ add_unmap(domain, iova_pfn, nrpages, freelist);
/*
* queue up the release of the unmap to save the 1/6th of the
* cpu used up by the iotlb flush operation...
@@ -3625,7 +3706,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- intel_unmap(dev, dev_addr);
+ intel_unmap(dev, dev_addr, size);
}
static void *intel_alloc_coherent(struct device *dev, size_t size,
@@ -3684,7 +3765,7 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
size = PAGE_ALIGN(size);
order = get_order(size);
- intel_unmap(dev, dma_handle);
+ intel_unmap(dev, dma_handle, size);
if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
__free_pages(page, order);
}
@@ -3693,7 +3774,16 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- intel_unmap(dev, sglist[0].dma_address);
+ dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
+ unsigned long nrpages = 0;
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nelems, i) {
+ nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
+ }
+
+ intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
}
static int intel_nontranslate_map_sg(struct device *hddev,
@@ -3717,7 +3807,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
struct dmar_domain *domain;
size_t size = 0;
int prot = 0;
- struct iova *iova = NULL;
+ unsigned long iova_pfn;
int ret;
struct scatterlist *sg;
unsigned long start_vpfn;
@@ -3736,9 +3826,9 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
for_each_sg(sglist, sg, nelems, i)
size += aligned_nrpages(sg->offset, sg->length);
- iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
+ iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
*dev->dma_mask);
- if (!iova) {
+ if (!iova_pfn) {
sglist->dma_length = 0;
return 0;
}
@@ -3753,13 +3843,13 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
prot |= DMA_PTE_WRITE;
- start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
+ start_vpfn = mm_to_dma_pfn(iova_pfn);
ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
if (unlikely(ret)) {
dma_pte_free_pagetable(domain, start_vpfn,
start_vpfn + size - 1);
- __free_iova(&domain->iovad, iova);
+ free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
return 0;
}
@@ -4498,6 +4588,46 @@ static struct notifier_block intel_iommu_memory_nb = {
.priority = 0
};
+static void free_all_cpu_cached_iovas(unsigned int cpu)
+{
+ int i;
+
+ for (i = 0; i < g_num_of_iommus; i++) {
+ struct intel_iommu *iommu = g_iommus[i];
+ struct dmar_domain *domain;
+ u16 did;
+
+ if (!iommu)
+ continue;
+
+ for (did = 0; did < 0xffff; did++) {
+ domain = get_iommu_domain(iommu, did);
+
+ if (!domain)
+ continue;
+ free_cpu_cached_iovas(cpu, &domain->iovad);
+ }
+ }
+}
+
+static int intel_iommu_cpu_notifier(struct notifier_block *nfb,
+ unsigned long action, void *v)
+{
+ unsigned int cpu = (unsigned long)v;
+
+ switch (action) {
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ free_all_cpu_cached_iovas(cpu);
+ flush_unmaps_timeout(cpu);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block intel_iommu_cpu_nb = {
+ .notifier_call = intel_iommu_cpu_notifier,
+};
static ssize_t intel_iommu_show_version(struct device *dev,
struct device_attribute *attr,
@@ -4631,7 +4761,6 @@ int __init intel_iommu_init(void)
up_write(&dmar_global_lock);
pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
- init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
swiotlb = 0;
#endif
@@ -4648,6 +4777,7 @@ int __init intel_iommu_init(void)
bus_register_notifier(&pci_bus_type, &device_nb);
if (si_domain && !hw_pass_through)
register_memory_notifier(&intel_iommu_memory_nb);
+ register_hotcpu_notifier(&intel_iommu_cpu_nb);
intel_iommu_enabled = 1;
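
The net effect of the intel-iommu changes above is that the single global deferred-flush table, timer and lock become one instance per CPU, so unmap-heavy multi-core workloads no longer serialize on async_umap_flush_lock. The core access pattern, in miniature (a sketch; the entry bookkeeping is elided):

    DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);

    static void queue_deferred_flush(void)
    {
            struct deferred_flush_data *dfd;
            unsigned long flags;
            unsigned int cpu;

            cpu = get_cpu();                /* pins us: preemption disabled */
            dfd = per_cpu_ptr(&deferred_flush, cpu);

            spin_lock_irqsave(&dfd->lock, flags);
            /* ... append the entry, arm dfd->timer if it is idle ... */
            spin_unlock_irqrestore(&dfd->lock, flags);

            put_cpu();
    }

The CPU hotplug notifier closes the loop: a dead CPU's timer can never fire again, so CPU_DEAD drains its tables and its cached IOVAs explicitly.
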
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index fa0adef32bd6..ba764a0835d3 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -20,6 +20,17 @@
#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/bitops.h>
+
+static bool iova_rcache_insert(struct iova_domain *iovad,
+ unsigned long pfn,
+ unsigned long size);
+static unsigned long iova_rcache_get(struct iova_domain *iovad,
+ unsigned long size,
+ unsigned long limit_pfn);
+static void init_iova_rcaches(struct iova_domain *iovad);
+static void free_iova_rcaches(struct iova_domain *iovad);
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
@@ -38,6 +49,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
iovad->granule = granule;
iovad->start_pfn = start_pfn;
iovad->dma_32bit_pfn = pfn_32bit;
+ init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
@@ -291,33 +303,18 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
}
EXPORT_SYMBOL_GPL(alloc_iova);
-/**
- * find_iova - find's an iova for a given pfn
- * @iovad: - iova domain in question.
- * @pfn: - page frame number
- * This function finds and returns an iova belonging to the
- * given doamin which matches the given pfn.
- */
-struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
+static struct iova *
+private_find_iova(struct iova_domain *iovad, unsigned long pfn)
{
- unsigned long flags;
- struct rb_node *node;
+ struct rb_node *node = iovad->rbroot.rb_node;
+
+ assert_spin_locked(&iovad->iova_rbtree_lock);
- /* Take the lock so that no other thread is manipulating the rbtree */
- spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
- node = iovad->rbroot.rb_node;
while (node) {
struct iova *iova = container_of(node, struct iova, node);
/* If pfn falls within iova's range, return iova */
if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
- /* We are not holding the lock while this iova
- * is referenced by the caller as the same thread
- * which called this function also calls __free_iova()
- * and it is by design that only one thread can possibly
- * reference a particular iova and hence no conflict.
- */
return iova;
}
@@ -327,9 +324,35 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
node = node->rb_right;
}
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
return NULL;
}
+
+static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
+{
+ assert_spin_locked(&iovad->iova_rbtree_lock);
+ __cached_rbnode_delete_update(iovad, iova);
+ rb_erase(&iova->node, &iovad->rbroot);
+ free_iova_mem(iova);
+}
+
+/**
+ * find_iova - finds an iova for a given pfn
+ * @iovad: - iova domain in question.
+ * @pfn: - page frame number
+ * This function finds and returns an iova belonging to the
+ * given domain which matches the given pfn.
+ */
+struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
+{
+ unsigned long flags;
+ struct iova *iova;
+
+ /* Take the lock so that no other thread is manipulating the rbtree */
+ spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+ iova = private_find_iova(iovad, pfn);
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+ return iova;
+}
EXPORT_SYMBOL_GPL(find_iova);
/**
@@ -344,10 +367,8 @@ __free_iova(struct iova_domain *iovad, struct iova *iova)
unsigned long flags;
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
- __cached_rbnode_delete_update(iovad, iova);
- rb_erase(&iova->node, &iovad->rbroot);
+ private_free_iova(iovad, iova);
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
- free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(__free_iova);
@@ -370,6 +391,63 @@ free_iova(struct iova_domain *iovad, unsigned long pfn)
EXPORT_SYMBOL_GPL(free_iova);
/**
+ * alloc_iova_fast - allocates an iova from rcache
+ * @iovad: - iova domain in question
+ * @size: - number of page frames to allocate
+ * @limit_pfn: - max limit address
+ * This function tries to satisfy an iova allocation from the rcache,
+ * and falls back to regular allocation on failure.
+ */
+unsigned long
+alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
+ unsigned long limit_pfn)
+{
+ bool flushed_rcache = false;
+ unsigned long iova_pfn;
+ struct iova *new_iova;
+
+ iova_pfn = iova_rcache_get(iovad, size, limit_pfn);
+ if (iova_pfn)
+ return iova_pfn;
+
+retry:
+ new_iova = alloc_iova(iovad, size, limit_pfn, true);
+ if (!new_iova) {
+ unsigned int cpu;
+
+ if (flushed_rcache)
+ return 0;
+
+ /* Try replenishing IOVAs by flushing rcache. */
+ flushed_rcache = true;
+ for_each_online_cpu(cpu)
+ free_cpu_cached_iovas(cpu, iovad);
+ goto retry;
+ }
+
+ return new_iova->pfn_lo;
+}
+EXPORT_SYMBOL_GPL(alloc_iova_fast);
+
+/**
+ * free_iova_fast - free iova pfn range into rcache
+ * @iovad: - iova domain in question.
+ * @pfn: - pfn that was previously allocated
+ * @size: - # of pages in range
+ * This function frees an iova range by trying to put it into the rcache,
+ * falling back to regular iova deallocation via free_iova() if this fails.
+ */
+void
+free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
+{
+ if (iova_rcache_insert(iovad, pfn, size))
+ return;
+
+ free_iova(iovad, pfn);
+}
+EXPORT_SYMBOL_GPL(free_iova_fast);
+
+/**
* put_iova_domain - destroys the iova domain
* @iovad: - iova domain in question.
* All the iova's in that domain are destroyed.
@@ -379,6 +457,7 @@ void put_iova_domain(struct iova_domain *iovad)
struct rb_node *node;
unsigned long flags;
+ free_iova_rcaches(iovad);
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
node = rb_first(&iovad->rbroot);
while (node) {
@@ -550,5 +629,295 @@ error:
return NULL;
}
+/*
+ * Magazine caches for IOVA ranges. For an introduction to magazines,
+ * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
+ * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
+ * For simplicity, we use a static magazine size and don't implement the
+ * dynamic size tuning described in the paper.
+ */
+
+#define IOVA_MAG_SIZE 128
+
+struct iova_magazine {
+ unsigned long size;
+ unsigned long pfns[IOVA_MAG_SIZE];
+};
+
+struct iova_cpu_rcache {
+ spinlock_t lock;
+ struct iova_magazine *loaded;
+ struct iova_magazine *prev;
+};
+
+static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
+{
+ return kzalloc(sizeof(struct iova_magazine), flags);
+}
+
+static void iova_magazine_free(struct iova_magazine *mag)
+{
+ kfree(mag);
+}
+
+static void
+iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
+{
+ unsigned long flags;
+ int i;
+
+ if (!mag)
+ return;
+
+ spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+
+ for (i = 0 ; i < mag->size; ++i) {
+ struct iova *iova = private_find_iova(iovad, mag->pfns[i]);
+
+ BUG_ON(!iova);
+ private_free_iova(iovad, iova);
+ }
+
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+
+ mag->size = 0;
+}
+
+static bool iova_magazine_full(struct iova_magazine *mag)
+{
+ return (mag && mag->size == IOVA_MAG_SIZE);
+}
+
+static bool iova_magazine_empty(struct iova_magazine *mag)
+{
+ return (!mag || mag->size == 0);
+}
+
+static unsigned long iova_magazine_pop(struct iova_magazine *mag,
+ unsigned long limit_pfn)
+{
+ BUG_ON(iova_magazine_empty(mag));
+
+ if (mag->pfns[mag->size - 1] >= limit_pfn)
+ return 0;
+
+ return mag->pfns[--mag->size];
+}
+
+static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
+{
+ BUG_ON(iova_magazine_full(mag));
+
+ mag->pfns[mag->size++] = pfn;
+}
+
+static void init_iova_rcaches(struct iova_domain *iovad)
+{
+ struct iova_cpu_rcache *cpu_rcache;
+ struct iova_rcache *rcache;
+ unsigned int cpu;
+ int i;
+
+ for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+ rcache = &iovad->rcaches[i];
+ spin_lock_init(&rcache->lock);
+ rcache->depot_size = 0;
+ rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
+ if (WARN_ON(!rcache->cpu_rcaches))
+ continue;
+ for_each_possible_cpu(cpu) {
+ cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
+ spin_lock_init(&cpu_rcache->lock);
+ cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
+ cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
+ }
+ }
+}
+
+/*
+ * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
+ * return true on success. Can fail if rcache is full and we can't free
+ * space, in which case the caller (free_iova_fast()) returns the IOVA
+ * range to the rbtree instead.
+ */
+static bool __iova_rcache_insert(struct iova_domain *iovad,
+ struct iova_rcache *rcache,
+ unsigned long iova_pfn)
+{
+ struct iova_magazine *mag_to_free = NULL;
+ struct iova_cpu_rcache *cpu_rcache;
+ bool can_insert = false;
+ unsigned long flags;
+
+ cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches);
+ spin_lock_irqsave(&cpu_rcache->lock, flags);
+
+ if (!iova_magazine_full(cpu_rcache->loaded)) {
+ can_insert = true;
+ } else if (!iova_magazine_full(cpu_rcache->prev)) {
+ swap(cpu_rcache->prev, cpu_rcache->loaded);
+ can_insert = true;
+ } else {
+ struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);
+
+ if (new_mag) {
+ spin_lock(&rcache->lock);
+ if (rcache->depot_size < MAX_GLOBAL_MAGS) {
+ rcache->depot[rcache->depot_size++] =
+ cpu_rcache->loaded;
+ } else {
+ mag_to_free = cpu_rcache->loaded;
+ }
+ spin_unlock(&rcache->lock);
+
+ cpu_rcache->loaded = new_mag;
+ can_insert = true;
+ }
+ }
+
+ if (can_insert)
+ iova_magazine_push(cpu_rcache->loaded, iova_pfn);
+
+ spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+
+ if (mag_to_free) {
+ iova_magazine_free_pfns(mag_to_free, iovad);
+ iova_magazine_free(mag_to_free);
+ }
+
+ return can_insert;
+}
+
+static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
+ unsigned long size)
+{
+ unsigned int log_size = order_base_2(size);
+
+ if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
+ return false;
+
+ return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
+}
+
+/*
+ * Caller wants to allocate a new IOVA range from 'rcache'. If we can
+ * satisfy the request, return a matching pfn and remove it from the
+ * 'rcache'.
+ */
+static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
+ unsigned long limit_pfn)
+{
+ struct iova_cpu_rcache *cpu_rcache;
+ unsigned long iova_pfn = 0;
+ bool has_pfn = false;
+ unsigned long flags;
+
+ cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches);
+ spin_lock_irqsave(&cpu_rcache->lock, flags);
+
+ if (!iova_magazine_empty(cpu_rcache->loaded)) {
+ has_pfn = true;
+ } else if (!iova_magazine_empty(cpu_rcache->prev)) {
+ swap(cpu_rcache->prev, cpu_rcache->loaded);
+ has_pfn = true;
+ } else {
+ spin_lock(&rcache->lock);
+ if (rcache->depot_size > 0) {
+ iova_magazine_free(cpu_rcache->loaded);
+ cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
+ has_pfn = true;
+ }
+ spin_unlock(&rcache->lock);
+ }
+
+ if (has_pfn)
+ iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
+
+ spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+
+ return iova_pfn;
+}
+
+/*
+ * Try to satisfy an IOVA allocation request from the rcache. Fail if the
+ * requested size is too big or the DMA limit we are given isn't satisfied
+ * by the top element in the magazine.
+ */
+static unsigned long iova_rcache_get(struct iova_domain *iovad,
+ unsigned long size,
+ unsigned long limit_pfn)
+{
+ unsigned int log_size = order_base_2(size);
+
+ if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
+ return 0;
+
+ return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn);
+}
+
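Taken together, iova_rcache_get() and iova_rcache_insert() form the fast path in front of the rbtree allocator. Below is a minimal sketch of how a caller plausibly wires them up; alloc_iova_from_rbtree() and free_iova_to_rbtree() are assumed placeholder names for the slow-path helpers, not functions defined by this patch.

    /* Sketch only: the *_rbtree() helpers are assumed placeholders. */
    static unsigned long alloc_iova_fast_sketch(struct iova_domain *iovad,
                                                unsigned long size,
                                                unsigned long limit_pfn)
    {
            unsigned long pfn = iova_rcache_get(iovad, size, limit_pfn);

            if (pfn)
                    return pfn;        /* hit a per-CPU magazine */
            return alloc_iova_from_rbtree(iovad, size, limit_pfn);
    }

    static void free_iova_fast_sketch(struct iova_domain *iovad,
                                      unsigned long pfn, unsigned long size)
    {
            /* Only take the rbtree lock if the magazines reject the pfn. */
            if (!iova_rcache_insert(iovad, pfn, size))
                    free_iova_to_rbtree(iovad, pfn, size);
    }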
+/*
+ * Free a cpu's rcache.
+ */
+static void free_cpu_iova_rcache(unsigned int cpu, struct iova_domain *iovad,
+ struct iova_rcache *rcache)
+{
+ struct iova_cpu_rcache *cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
+ unsigned long flags;
+
+ spin_lock_irqsave(&cpu_rcache->lock, flags);
+
+ iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
+ iova_magazine_free(cpu_rcache->loaded);
+
+ iova_magazine_free_pfns(cpu_rcache->prev, iovad);
+ iova_magazine_free(cpu_rcache->prev);
+
+ spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+}
+
+/*
+ * Free rcache data structures.
+ */
+static void free_iova_rcaches(struct iova_domain *iovad)
+{
+ struct iova_rcache *rcache;
+ unsigned long flags;
+ unsigned int cpu;
+ int i, j;
+
+ for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+ rcache = &iovad->rcaches[i];
+ for_each_possible_cpu(cpu)
+ free_cpu_iova_rcache(cpu, iovad, rcache);
+ spin_lock_irqsave(&rcache->lock, flags);
+ free_percpu(rcache->cpu_rcaches);
+ for (j = 0; j < rcache->depot_size; ++j) {
+ iova_magazine_free_pfns(rcache->depot[j], iovad);
+ iova_magazine_free(rcache->depot[j]);
+ }
+ spin_unlock_irqrestore(&rcache->lock, flags);
+ }
+}
+
+/*
+ * Free all the IOVA ranges cached by a CPU (used when the CPU is unplugged)
+ */
+void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
+{
+ struct iova_cpu_rcache *cpu_rcache;
+ struct iova_rcache *rcache;
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+ rcache = &iovad->rcaches[i];
+ cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
+ spin_lock_irqsave(&cpu_rcache->lock, flags);
+ iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
+ iova_magazine_free_pfns(cpu_rcache->prev, iovad);
+ spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+ }
+}
+
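free_cpu_cached_iovas() exists for CPU hot-unplug, when a departed CPU's magazines would otherwise pin IOVA ranges indefinitely. A hedged sketch of the expected caller, a hotplug notifier, follows; the notifier wiring and the domain reference are assumptions, not part of this hunk.

    /* Sketch: flush a dead CPU's magazines back to the rbtree. */
    static int iova_cpu_notify_sketch(struct notifier_block *nb,
                                      unsigned long action, void *v)
    {
            unsigned int cpu = (unsigned long)v;

            if ((action & ~CPU_TASKS_FROZEN) == CPU_DEAD)
                    free_cpu_cached_iovas(cpu, &assumed_iova_domain);
            return NOTIFY_OK;
    }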
MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
index eb5eb0cd414d..2223b3f15d68 100644
--- a/drivers/irqchip/irq-clps711x.c
+++ b/drivers/irqchip/irq-clps711x.c
@@ -182,7 +182,7 @@ static int __init _clps711x_intc_init(struct device_node *np,
writel_relaxed(0, clps711x_intc->intmr[2]);
err = irq_alloc_descs(-1, 0, ARRAY_SIZE(clps711x_irqs), numa_node_id());
- if (IS_ERR_VALUE(err))
+ if (err < 0)
goto out_iounmap;
clps711x_intc->ops.map = clps711x_intc_irq_map;
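This and the IS_ERR_VALUE() removals below share one rationale: the macro compares its argument against (unsigned long)-MAX_ERRNO, which behaves as intended only for values that sign-extend to unsigned long. For a plain int error code it works by accident, and for narrower or unsigned types it silently misses errors, so the explicit `err < 0` test is both clearer and safer. A standalone userspace illustration, not kernel code:

    /* Illustration only: how IS_ERR_VALUE() can miss an error code. */
    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define IS_ERR_VALUE(x) \
            ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

    int main(void)
    {
            int err = -22;             /* -EINVAL sign-extends: caught */
            unsigned short terr = -22; /* truncates to 65514: missed   */

            printf("int:   %d\n", IS_ERR_VALUE(err));  /* prints 1 */
            printf("short: %d\n", IS_ERR_VALUE(terr)); /* prints 0 */
            return 0;
    }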
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index b4e647179346..fbc4ae2afd29 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1123,7 +1123,7 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
numa_node_id());
- if (IS_ERR_VALUE(irq_base)) {
+ if (irq_base < 0) {
WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
irq_start);
irq_base = irq_start;
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 9688d2e2a636..9e25d8ce08e5 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -402,7 +402,7 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
nr_irqs -= hwirq_base; /* calculate # of irqs to allocate */
irq_base = irq_alloc_descs(-1, hwirq_base, nr_irqs, numa_node_id());
- if (IS_ERR_VALUE(irq_base)) {
+ if (irq_base < 0) {
pr_err("failed to allocate IRQ numbers\n");
return -EINVAL;
}
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index c089f49b63fb..3b5e10aa48ab 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -968,7 +968,7 @@ static void __init __gic_init(unsigned long gic_base_addr,
unsigned int cpu_vec, unsigned int irqbase,
struct device_node *node)
{
- unsigned int gicconfig;
+ unsigned int gicconfig, cpu;
unsigned int v[2];
__gic_base_addr = gic_base_addr;
@@ -985,6 +985,14 @@ static void __init __gic_init(unsigned long gic_base_addr,
gic_vpes = gic_vpes + 1;
if (cpu_has_veic) {
+ /* Set EIC mode for all VPEs */
+ for_each_present_cpu(cpu) {
+ gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
+ mips_cm_vp_id(cpu));
+ gic_write(GIC_REG(VPE_OTHER, GIC_VPE_CTL),
+ GIC_VPE_CTL_EIC_MODE_MSK);
+ }
+
/* Always use vector 1 in EIC mode */
gic_cpu_pin = 0;
timer_cpu_pin = gic_cpu_pin;
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index 1ccd2abed65f..1518ba31a80c 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -232,7 +232,7 @@ static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
nr_irqs += shirq_blocks[i]->nr_irqs;
virq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
- if (IS_ERR_VALUE(virq_base)) {
+ if (virq_base < 0) {
pr_err("%s: irq desc alloc failed\n", __func__);
goto err_unmap;
}
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 8eeab72b93e2..ca4abe1ccd8d 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -64,7 +64,6 @@
#include "btree.h"
#include <linux/blkdev.h>
-#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>
@@ -288,7 +287,6 @@ do { \
if (kthread_should_stop()) \
return 0; \
\
- try_to_freeze(); \
schedule(); \
mutex_lock(&(ca)->set->bucket_lock); \
} \
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 22b9e34ceb75..eab505ee0027 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -27,7 +27,6 @@
#include <linux/slab.h>
#include <linux/bitops.h>
-#include <linux/freezer.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
@@ -1787,7 +1786,6 @@ again:
mutex_unlock(&c->bucket_lock);
- try_to_freeze();
schedule();
}
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index b9346cd9cda1..60123677b382 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -12,7 +12,6 @@
#include "writeback.h"
#include <linux/delay.h>
-#include <linux/freezer.h>
#include <linux/kthread.h>
#include <trace/events/bcache.h>
@@ -228,7 +227,6 @@ static void read_dirty(struct cached_dev *dc)
*/
while (!kthread_should_stop()) {
- try_to_freeze();
w = bch_keybuf_next(&dc->writeback_keys);
if (!w)
@@ -433,7 +431,6 @@ static int bch_writeback_thread(void *arg)
if (kthread_should_stop())
return 0;
- try_to_freeze();
schedule();
continue;
}
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index 9e1731c565e7..e191e295c951 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -95,7 +95,7 @@ static int adp1653_get_fault(struct adp1653_flash *flash)
int rval;
fault = i2c_smbus_read_byte_data(client, ADP1653_REG_FAULT);
- if (IS_ERR_VALUE(fault))
+ if (fault < 0)
return fault;
flash->fault |= fault;
@@ -105,13 +105,13 @@ static int adp1653_get_fault(struct adp1653_flash *flash)
/* Clear faults. */
rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, 0);
- if (IS_ERR_VALUE(rval))
+ if (rval < 0)
return rval;
flash->led_mode->val = V4L2_FLASH_LED_MODE_NONE;
rval = adp1653_update_hw(flash);
- if (IS_ERR_VALUE(rval))
+ if (rval)
return rval;
return flash->fault;
@@ -158,7 +158,7 @@ static int adp1653_get_ctrl(struct v4l2_ctrl *ctrl)
int rval;
rval = adp1653_get_fault(flash);
- if (IS_ERR_VALUE(rval))
+ if (rval)
return rval;
ctrl->cur.val = 0;
@@ -184,7 +184,7 @@ static int adp1653_set_ctrl(struct v4l2_ctrl *ctrl)
int rval;
rval = adp1653_get_fault(flash);
- if (IS_ERR_VALUE(rval))
+ if (rval)
return rval;
if ((rval & (ADP1653_REG_FAULT_FLT_SCP |
ADP1653_REG_FAULT_FLT_OT |
diff --git a/drivers/media/platform/s5p-tv/mixer_drv.c b/drivers/media/platform/s5p-tv/mixer_drv.c
index 5ef67774971d..8a5d19469ddc 100644
--- a/drivers/media/platform/s5p-tv/mixer_drv.c
+++ b/drivers/media/platform/s5p-tv/mixer_drv.c
@@ -146,7 +146,7 @@ int mxr_power_get(struct mxr_device *mdev)
/* returning 1 means that power is already enabled,
* so zero is returned to indicate success */
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
return 0;
}
diff --git a/drivers/media/usb/dvb-usb-v2/af9015.c b/drivers/media/usb/dvb-usb-v2/af9015.c
index 95a7388e89d4..09e0f58f6bb7 100644
--- a/drivers/media/usb/dvb-usb-v2/af9015.c
+++ b/drivers/media/usb/dvb-usb-v2/af9015.c
@@ -398,6 +398,8 @@ error:
}
#define AF9015_EEPROM_SIZE 256
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
/* hash (and dump) eeprom */
static int af9015_eeprom_hash(struct dvb_usb_device *d)
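GOLDEN_RATIO_PRIME_32 is the 32-bit golden-ratio prime used for multiplicative hashing, and the expansion in the comment checks out: 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 = 2654404609 = 0x9e370001. The af9015 hash routine itself is outside this hunk, so the mixing step below is an assumption; this is a userspace sketch of folding a buffer into a 32-bit hash with that constant.

    /* Sketch: multiplicative hash over 32-bit words (illustrative). */
    #include <stdint.h>
    #include <stdio.h>

    #define GOLDEN_RATIO_PRIME_32 0x9e370001UL

    static uint32_t hash_words(const uint32_t *buf, size_t n)
    {
            uint32_t hash = 0;

            while (n--)
                    hash = (hash ^ *buf++) * GOLDEN_RATIO_PRIME_32;
            return hash;
    }

    int main(void)
    {
            uint32_t eeprom[4] = { 0xdeadbeef, 0x12345678, 0x0, 0x1 };

            printf("eeprom hash: 0x%08x\n", hash_words(eeprom, 4));
            return 0;
    }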
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 40e51b0baa46..b46c0cfc27d9 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -696,7 +696,7 @@ int twl4030_init_irq(struct device *dev, int irq_num)
nr_irqs = TWL4030_PWR_NR_IRQS + TWL4030_CORE_NR_IRQS;
irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
- if (IS_ERR_VALUE(irq_base)) {
+ if (irq_base < 0) {
dev_err(dev, "Fail to allocate IRQ descs\n");
return irq_base;
}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index ddc96206288a..e62fde3ac431 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -618,6 +618,10 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
+ /* Always switch back to main area after RPMB access */
+ if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
+ mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
+
mmc_put_card(card);
err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
@@ -685,6 +689,10 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
for (i = 0; i < num_of_cmds && !ioc_err; i++)
ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
+ /* Always switch back to main area after RPMB access */
+ if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
+ mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
+
mmc_put_card(card);
/* copy to user if data and response */
@@ -748,16 +756,25 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
if (mmc_card_mmc(card)) {
u8 part_config = card->ext_csd.part_config;
+ if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
+ mmc_retune_pause(card->host);
+
part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
part_config |= md->part_type;
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_PART_CONFIG, part_config,
card->ext_csd.part_time);
- if (ret)
+ if (ret) {
+ if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
+ mmc_retune_unpause(card->host);
return ret;
+ }
card->ext_csd.part_config = part_config;
+
+ if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)
+ mmc_retune_unpause(card->host);
}
main_md->part_curr = md->part_type;
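The two hunks above bracket RPMB access from both ioctl paths: the card is always switched back to the main area afterwards, and the part switch itself pauses re-tuning around the RPMB window, presumably because tuning commands cannot be issued while the RPMB partition is selected. A condensed sketch of the resulting call order; md_main, md_rpmb and idata are assumed handles, and error handling is omitted:

    /* Sketch of the ioctl flow after this change (names assumed). */
    mmc_get_card(card);
    mmc_blk_part_switch(card, md_rpmb);     /* pauses re-tuning   */
    __mmc_blk_ioctl_cmd(card, md_rpmb, idata);
    mmc_blk_part_switch(card, md_main);     /* back to main area, */
    mmc_put_card(card);                     /* re-tuning unpaused */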
@@ -2519,11 +2536,12 @@ static const struct mmc_fixup blk_fixups[] =
MMC_QUIRK_BLK_NO_CMD23),
/*
- * Some Micron MMC cards needs longer data read timeout than
- * indicated in CSD.
+ * Some MMC cards need a longer data read timeout than indicated in CSD.
*/
MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
MMC_QUIRK_LONG_READ_TIME),
+ MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
/*
* On these Samsung MoviNAND parts, performing secure erase or
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 99275e40bf2f..8b4dfd45433b 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -875,11 +875,11 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
/*
* Some cards require longer data read timeout than indicated in CSD.
* Address this by setting the read timeout to a "reasonably high"
- * value. For the cards tested, 300ms has proven enough. If necessary,
+ * value. For the cards tested, 600ms has proven enough. If necessary,
* this value can be increased if other problematic cards require this.
*/
if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
- data->timeout_ns = 300000000;
+ data->timeout_ns = 600000000;
data->timeout_clks = 0;
}
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index e0a3ee16c0d3..1be42fab1a30 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -68,8 +68,32 @@ void mmc_retune_enable(struct mmc_host *host)
jiffies + host->retune_period * HZ);
}
+/*
+ * Pause re-tuning for a small set of operations. The pause begins after the
+ * next command and after first doing re-tuning.
+ */
+void mmc_retune_pause(struct mmc_host *host)
+{
+ if (!host->retune_paused) {
+ host->retune_paused = 1;
+ mmc_retune_needed(host);
+ mmc_retune_hold(host);
+ }
+}
+EXPORT_SYMBOL(mmc_retune_pause);
+
+void mmc_retune_unpause(struct mmc_host *host)
+{
+ if (host->retune_paused) {
+ host->retune_paused = 0;
+ mmc_retune_release(host);
+ }
+}
+EXPORT_SYMBOL(mmc_retune_unpause);
+
void mmc_retune_disable(struct mmc_host *host)
{
+ mmc_retune_unpause(host);
host->can_retune = 0;
del_timer_sync(&host->retune_timer);
host->retune_now = 0;
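Per the comment, mmc_retune_pause() marks a re-tune as needed before taking the hold, so the host enters the paused window freshly tuned, and mmc_retune_unpause() simply drops that hold again. The intended usage is to bracket a short sequence that cannot tolerate tuning commands, as the RPMB partition switch in this series does:

    /* Usage pattern (sketch): protect a tuning-intolerant sequence. */
    mmc_retune_pause(host);
    /* ... commands during which re-tuning must not run ... */
    mmc_retune_unpause(host);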
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index b81b08f81325..c984321d1881 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1276,7 +1276,7 @@ static int mmc_select_hs200(struct mmc_card *card)
* switch to HS200 mode if bus width is set successfully.
*/
err = mmc_select_bus_width(card);
- if (!IS_ERR_VALUE(err)) {
+ if (!err) {
val = EXT_CSD_TIMING_HS200 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1583,7 +1583,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
} else if (mmc_card_hs(card)) {
/* Select the desired bus width optionally */
err = mmc_select_bus_width(card);
- if (!IS_ERR_VALUE(err)) {
+ if (!err) {
err = mmc_select_hs_ddr(card);
if (err)
goto free_card;
diff --git a/drivers/mmc/host/dw_mmc-rockchip.c b/drivers/mmc/host/dw_mmc-rockchip.c
index 8c20b81cafd8..358b0dc853b0 100644
--- a/drivers/mmc/host/dw_mmc-rockchip.c
+++ b/drivers/mmc/host/dw_mmc-rockchip.c
@@ -66,6 +66,70 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
/* Make sure we use phases which we can enumerate with */
if (!IS_ERR(priv->sample_clk))
clk_set_phase(priv->sample_clk, priv->default_sample_phase);
+
+ /*
+ * Set the drive phase offset based on speed mode to achieve hold times.
+ *
+ * NOTE: this is _not_ a value that is dynamically tuned and is also
+ * _not_ a value that will vary from board to board. It is a value
+ * that could vary between different SoC models if they had massively
+ * different output clock delays inside their dw_mmc IP block (delay_o),
+ * but since it's OK to overshoot a little we don't need to do complex
+ * calculations and can pick values that will just work for everyone.
+ *
+ * When picking values we'll stick with picking 0/90/180/270 since
+ * those can be made very accurately on all known Rockchip SoCs.
+ *
+ * Note that these values match values from the DesignWare Databook
+ * tables for the most part except for SDR12 and "ID mode". For those
+ * two modes the databook calculations assume a clock in of 50MHz. As
+ * seen above, we always use a clock in rate that is exactly the
+ * card's input clock (times RK3288_CLKGEN_DIV, but that gets divided
+ * back out before the controller sees it).
+ *
+ * From measurement of a single device, it appears that delay_o is
+ * about .5 ns. Since we try to leave a bit of margin, it's expected
+ * that numbers here will be fine even with much larger delay_o
+ * (the 1.4 ns assumed by the DesignWare Databook would result in the
+ * same results, for instance).
+ */
+ if (!IS_ERR(priv->drv_clk)) {
+ int phase;
+
+ /*
+ * In almost all cases a 90 degree phase offset will provide
+ * sufficient hold times across all valid input clock rates
+ * assuming delay_o is not absurd for a given SoC. We'll use
+ * that as a default.
+ */
+ phase = 90;
+
+ switch (ios->timing) {
+ case MMC_TIMING_MMC_DDR52:
+ /*
+ * Since clock in rate with MMC_DDR52 is doubled when
+ * bus width is 8 we need to double the phase offset
+ * to get the same timings.
+ */
+ if (ios->bus_width == MMC_BUS_WIDTH_8)
+ phase = 180;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ case MMC_TIMING_MMC_HS200:
+ /*
+ * In the case of 150 MHz clock (typical max for
+ * Rockchip SoCs), 90 degree offset will add a delay
+ * of 1.67 ns. That will meet min hold time of .8 ns
+ * as long as clock output delay is < .87 ns. On
+ * SoCs measured this seems to be OK, but it doesn't
+ * hurt to give margin here, so we use 180.
+ */
+ phase = 180;
+ break;
+ }
+
+ clk_set_phase(priv->drv_clk, phase);
+ }
}
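The 1.67 ns figure in the comment follows directly from the phase-to-delay conversion delay = (phase / 360) / f_clk. A small standalone check of the numbers quoted for the assumed 150 MHz case:

    /* Worked check of the comment's arithmetic (illustration only). */
    #include <stdio.h>

    int main(void)
    {
            double f_clk = 150e6;  /* 150 MHz, per the comment */
            double delay_90 = (90.0 / 360.0) / f_clk;

            /* 90 degrees at 150 MHz adds 1.67 ns of hold delay. */
            printf("delay @ 90 deg: %.2f ns\n", delay_90 * 1e9);
            /* The 0.8 ns min hold is met while delay_o < 0.87 ns. */
            printf("delay_o budget: %.2f ns\n", delay_90 * 1e9 - 0.8);
            return 0;
    }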
#define NUM_PHASES 360
@@ -233,10 +297,10 @@ static int dw_mci_rockchip_init(struct dw_mci *host)
/* Common capabilities of RK3288 SoC */
static unsigned long dw_mci_rk3288_dwmmc_caps[4] = {
- MMC_CAP_ERASE,
- MMC_CAP_ERASE,
- MMC_CAP_ERASE,
- MMC_CAP_ERASE,
+ MMC_CAP_ERASE | MMC_CAP_CMD23,
+ MMC_CAP_ERASE | MMC_CAP_CMD23,
+ MMC_CAP_ERASE | MMC_CAP_CMD23,
+ MMC_CAP_ERASE | MMC_CAP_CMD23,
};
static const struct dw_mci_drv_data rk2928_drv_data = {
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 9dd1bd358434..2cc6123b1df9 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1431,7 +1431,7 @@ static int dw_mci_get_ro(struct mmc_host *mmc)
int gpio_ro = mmc_gpio_get_ro(mmc);
/* Use platform get_ro function, else try on board write protect */
- if (!IS_ERR_VALUE(gpio_ro))
+ if (gpio_ro >= 0)
read_only = gpio_ro;
else
read_only =
@@ -1454,7 +1454,7 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
if ((mmc->caps & MMC_CAP_NEEDS_POLL) ||
(mmc->caps & MMC_CAP_NONREMOVABLE))
present = 1;
- else if (!IS_ERR_VALUE(gpio_cd))
+ else if (gpio_cd >= 0)
present = gpio_cd;
else
present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
@@ -2595,13 +2595,13 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
/* Useful defaults if platform data is unset. */
if (host->use_dma == TRANS_MODE_IDMAC) {
mmc->max_segs = host->ring_size;
- mmc->max_blk_size = 65536;
+ mmc->max_blk_size = 65535;
mmc->max_seg_size = 0x1000;
mmc->max_req_size = mmc->max_seg_size * host->ring_size;
mmc->max_blk_count = mmc->max_req_size / 512;
} else if (host->use_dma == TRANS_MODE_EDMAC) {
mmc->max_segs = 64;
- mmc->max_blk_size = 65536;
+ mmc->max_blk_size = 65535;
mmc->max_blk_count = 65535;
mmc->max_req_size =
mmc->max_blk_size * mmc->max_blk_count;
@@ -2609,7 +2609,7 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
} else {
/* TRANS_MODE_PIO */
mmc->max_segs = 64;
- mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
+ mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
mmc->max_blk_count = 512;
mmc->max_req_size = mmc->max_blk_size *
mmc->max_blk_count;
@@ -2927,7 +2927,7 @@ static void dw_mci_enable_cd(struct dw_mci *host)
if (slot->mmc->caps & MMC_CAP_NEEDS_POLL)
return;
- if (IS_ERR_VALUE(mmc_gpio_get_cd(slot->mmc)))
+ if (mmc_gpio_get_cd(slot->mmc) < 0)
break;
}
if (i == host->num_slots)
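The 65536 to 65535 changes above all stem from the same constraint noted in the PIO branch: BLKSIZ is a 16-bit register field, so 65536 is unrepresentable and would truncate to a block size of 0 if ever programmed. A trivial standalone demonstration:

    /* Why max_blk_size must be 65535: 65536 truncates to 0 in 16 bits. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t blksiz_ok = 65535;
            uint16_t blksiz_bad = (uint16_t)65536;

            printf("65535 -> %u, 65536 -> %u\n", blksiz_ok, blksiz_bad);
            return 0;
    }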
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index b2d70ba6caa7..458ffb7637e5 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -274,7 +274,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
.chip = &sdhci_acpi_chip_int,
.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
- MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
+ MMC_CAP_WAIT_WHILE_BUSY,
.caps2 = MMC_CAP2_HC_ERASE_SZ,
.flags = SDHCI_ACPI_RUNTIME_PM,
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
@@ -289,7 +289,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
.caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD |
- MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
+ MMC_CAP_WAIT_WHILE_BUSY,
.flags = SDHCI_ACPI_RUNTIME_PM,
.pm_caps = MMC_PM_KEEP_POWER,
.probe_slot = sdhci_acpi_sdio_probe_slot,
@@ -301,7 +301,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
SDHCI_QUIRK2_STOP_WITH_TC,
- .caps = MMC_CAP_BUS_WIDTH_TEST | MMC_CAP_WAIT_WHILE_BUSY,
+ .caps = MMC_CAP_WAIT_WHILE_BUSY,
.probe_slot = sdhci_acpi_sd_probe_slot,
};
@@ -378,7 +378,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
acpi_handle handle = ACPI_HANDLE(dev);
- struct acpi_device *device;
+ struct acpi_device *device, *child;
struct sdhci_acpi_host *c;
struct sdhci_host *host;
struct resource *iomem;
@@ -390,6 +390,11 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (acpi_bus_get_device(handle, &device))
return -ENODEV;
+ /* Power on the SDHCI controller and its children */
+ acpi_device_fix_up_power(device);
+ list_for_each_entry(child, &device->children, node)
+ acpi_device_fix_up_power(child);
+
if (acpi_bus_get_status(device) || !device->status.present)
return -ENODEV;
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 2d300d87cda8..9d3ae1f4bd3c 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -1011,7 +1011,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
if (ret)
return ret;
- if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
+ if (mmc_gpio_get_cd(host->mmc) >= 0)
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
return 0;
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 25f779e09d8e..d4cef713d246 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -289,7 +289,7 @@ static int sdhci_at91_probe(struct platform_device *pdev)
* to enable polling via device tree with broken-cd property.
*/
if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) &&
- IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc))) {
+ mmc_gpio_get_cd(host->mmc) < 0) {
host->mmc->caps |= MMC_CAP_NEEDS_POLL;
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
}
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 97d4eebd6bf5..a4dbf7421edc 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -356,7 +356,6 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
- MMC_CAP_BUS_WIDTH_TEST |
MMC_CAP_WAIT_WHILE_BUSY;
slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ;
slot->hw_reset = sdhci_pci_int_hw_reset;
@@ -372,15 +371,13 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE |
- MMC_CAP_BUS_WIDTH_TEST |
MMC_CAP_WAIT_WHILE_BUSY;
return 0;
}
static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
{
- slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST |
- MMC_CAP_WAIT_WHILE_BUSY;
+ slot->host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
slot->cd_con_id = NULL;
slot->cd_idx = 0;
slot->cd_override_level = true;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index e010ea4eb6f5..0e3d7c056cb1 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1624,7 +1624,7 @@ static int sdhci_get_cd(struct mmc_host *mmc)
* Try slot gpio detect; if defined, it takes precedence
* over built-in controller functionality
*/
- if (!IS_ERR_VALUE(gpio_cd))
+ if (gpio_cd >= 0)
return !!gpio_cd;
/* If polling, assume that the card is always present. */
@@ -3077,7 +3077,7 @@ int sdhci_add_host(struct sdhci_host *host)
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
!(mmc->caps & MMC_CAP_NONREMOVABLE) &&
- IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
+ mmc_gpio_get_cd(host->mmc) < 0)
mmc->caps |= MMC_CAP_NEEDS_POLL;
/* If there are external regulators, get them */
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index efc8ea250c1d..68b9160108c9 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -67,10 +67,6 @@ struct atmel_nand_caps {
uint8_t pmecc_max_correction;
};
-struct atmel_nand_nfc_caps {
- uint32_t rb_mask;
-};
-
/*
* oob layout for large page size
* bad block info is on bytes 0 and 1
@@ -129,7 +125,6 @@ struct atmel_nfc {
/* Points to the sram bank which contains the data read in via NFC */
void *data_in_sram;
bool will_write_sram;
- const struct atmel_nand_nfc_caps *caps;
};
static struct atmel_nfc nand_nfc;
@@ -1715,9 +1710,9 @@ static irqreturn_t hsmc_interrupt(int irq, void *dev_id)
nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE);
ret = IRQ_HANDLED;
}
- if (pending & host->nfc->caps->rb_mask) {
+ if (pending & NFC_SR_RB_EDGE) {
complete(&host->nfc->comp_ready);
- nfc_writel(host->nfc->hsmc_regs, IDR, host->nfc->caps->rb_mask);
+ nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE);
ret = IRQ_HANDLED;
}
if (pending & NFC_SR_CMD_DONE) {
@@ -1735,7 +1730,7 @@ static void nfc_prepare_interrupt(struct atmel_nand_host *host, u32 flag)
if (flag & NFC_SR_XFR_DONE)
init_completion(&host->nfc->comp_xfer_done);
- if (flag & host->nfc->caps->rb_mask)
+ if (flag & NFC_SR_RB_EDGE)
init_completion(&host->nfc->comp_ready);
if (flag & NFC_SR_CMD_DONE)
@@ -1753,7 +1748,7 @@ static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag)
if (flag & NFC_SR_XFR_DONE)
comp[index++] = &host->nfc->comp_xfer_done;
- if (flag & host->nfc->caps->rb_mask)
+ if (flag & NFC_SR_RB_EDGE)
comp[index++] = &host->nfc->comp_ready;
if (flag & NFC_SR_CMD_DONE)
@@ -1821,7 +1816,7 @@ static int nfc_device_ready(struct mtd_info *mtd)
dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n",
mask & status);
- return status & host->nfc->caps->rb_mask;
+ return status & NFC_SR_RB_EDGE;
}
static void nfc_select_chip(struct mtd_info *mtd, int chip)
@@ -1994,8 +1989,8 @@ static void nfc_nand_command(struct mtd_info *mtd, unsigned int command,
}
/* fall through */
default:
- nfc_prepare_interrupt(host, host->nfc->caps->rb_mask);
- nfc_wait_interrupt(host, host->nfc->caps->rb_mask);
+ nfc_prepare_interrupt(host, NFC_SR_RB_EDGE);
+ nfc_wait_interrupt(host, NFC_SR_RB_EDGE);
}
}
@@ -2426,11 +2421,6 @@ static int atmel_nand_nfc_probe(struct platform_device *pdev)
}
}
- nfc->caps = (const struct atmel_nand_nfc_caps *)
- of_device_get_match_data(&pdev->dev);
- if (!nfc->caps)
- return -ENODEV;
-
nfc_writel(nfc->hsmc_regs, IDR, 0xffffffff);
nfc_readl(nfc->hsmc_regs, SR); /* clear the NFC_SR */
@@ -2459,17 +2449,8 @@ static int atmel_nand_nfc_remove(struct platform_device *pdev)
return 0;
}
-static const struct atmel_nand_nfc_caps sama5d3_nfc_caps = {
- .rb_mask = NFC_SR_RB_EDGE0,
-};
-
-static const struct atmel_nand_nfc_caps sama5d4_nfc_caps = {
- .rb_mask = NFC_SR_RB_EDGE3,
-};
-
static const struct of_device_id atmel_nand_nfc_match[] = {
- { .compatible = "atmel,sama5d3-nfc", .data = &sama5d3_nfc_caps },
- { .compatible = "atmel,sama5d4-nfc", .data = &sama5d4_nfc_caps },
+ { .compatible = "atmel,sama5d3-nfc" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match);
diff --git a/drivers/mtd/nand/atmel_nand_nfc.h b/drivers/mtd/nand/atmel_nand_nfc.h
index 0bbc1fa97dba..4d5d26221a7e 100644
--- a/drivers/mtd/nand/atmel_nand_nfc.h
+++ b/drivers/mtd/nand/atmel_nand_nfc.h
@@ -42,8 +42,7 @@
#define NFC_SR_UNDEF (1 << 21)
#define NFC_SR_AWB (1 << 22)
#define NFC_SR_ASE (1 << 23)
-#define NFC_SR_RB_EDGE0 (1 << 24)
-#define NFC_SR_RB_EDGE3 (1 << 27)
+#define NFC_SR_RB_EDGE (1 << 24)
#define ATMEL_HSMC_NFC_IER 0x0c
#define ATMEL_HSMC_NFC_IDR 0x10
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index a7d1febf667a..16baeb51b2bd 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -149,6 +149,8 @@ static struct device_attribute dev_bgt_enabled =
__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_mtd_num =
__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_ro_mode =
+ __ATTR(ro_mode, S_IRUGO, dev_attribute_show, NULL);
/**
* ubi_volume_notify - send a volume change notification.
@@ -385,6 +387,8 @@ static ssize_t dev_attribute_show(struct device *dev,
ret = sprintf(buf, "%d\n", ubi->thread_enabled);
else if (attr == &dev_mtd_num)
ret = sprintf(buf, "%d\n", ubi->mtd->index);
+ else if (attr == &dev_ro_mode)
+ ret = sprintf(buf, "%d\n", ubi->ro_mode);
else
ret = -EINVAL;
@@ -404,6 +408,7 @@ static struct attribute *ubi_dev_attrs[] = {
&dev_min_io_size.attr,
&dev_bgt_enabled.attr,
&dev_mtd_num.attr,
+ &dev_ro_mode.attr,
NULL
};
ATTRIBUTE_GROUPS(ubi_dev);
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index c4cb15a3098c..f101a4985a7c 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -352,7 +352,8 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
} else if (dent == d->dfs_emulate_power_cut) {
if (kstrtoint(buf, 0, &val) != 0)
count = -EINVAL;
- d->emulate_power_cut = val;
+ else
+ d->emulate_power_cut = val;
goto out;
}
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 5b9834cf2820..5780dd1ba79d 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -426,8 +426,25 @@ retry:
pnum, vol_id, lnum);
err = -EBADMSG;
} else {
- err = -EINVAL;
- ubi_ro_mode(ubi);
+ /*
+ * Ending up here in the non-Fastmap case
+ * is a clear bug as the VID header had to
+ * be present at scan time to have it referenced.
+ * With fastmap the story is more complicated.
+ * Fastmap has the mapping info without needing
+ * a full scan, so the LEB could have been
+ * unmapped; Fastmap cannot know this and keeps
+ * the LEB referenced.
+ * This is valid and works as the layer above UBI
+ * has to do bookkeeping about used/referenced
+ * LEBs in any case.
+ */
+ if (ubi->fast_attach) {
+ err = -EBADMSG;
+ } else {
+ err = -EINVAL;
+ ubi_ro_mode(ubi);
+ }
}
}
goto out_free;
@@ -1202,32 +1219,6 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
}
cond_resched();
-
- /*
- * We've written the data and are going to read it back to make
- * sure it was written correctly.
- */
- memset(ubi->peb_buf, 0xFF, aldata_size);
- err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
- if (err) {
- if (err != UBI_IO_BITFLIPS) {
- ubi_warn(ubi, "error %d while reading data back from PEB %d",
- err, to);
- if (is_error_sane(err))
- err = MOVE_TARGET_RD_ERR;
- } else
- err = MOVE_TARGET_BITFLIPS;
- goto out_unlock_buf;
- }
-
- cond_resched();
-
- if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
- ubi_warn(ubi, "read data back from PEB %d and it is different",
- to);
- err = -EINVAL;
- goto out_unlock_buf;
- }
}
ubi_assert(vol->eba_tbl[lnum] == from);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 263b439e21a8..990898b9dc72 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1058,6 +1058,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
ubi_msg(ubi, "fastmap WL pool size: %d",
ubi->fm_wl_pool.max_size);
ubi->fm_disabled = 0;
+ ubi->fast_attach = 1;
ubi_free_vid_hdr(ubi, vh);
kfree(ech);
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 437757c89b9e..348dbbcbedc8 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -705,7 +705,7 @@ int ubi_leb_map(struct ubi_volume_desc *desc, int lnum)
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
- dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum);
+ dbg_gen("map LEB %d:%d", vol->vol_id, lnum);
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index dadc6a9d5755..61d4e99755a4 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -466,6 +466,7 @@ struct ubi_debug_info {
* @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes
* @fm_work: fastmap work queue
* @fm_work_scheduled: non-zero if fastmap work was scheduled
+ * @fast_attach: non-zero if UBI was attached by fastmap
*
* @used: RB-tree of used physical eraseblocks
* @erroneous: RB-tree of erroneous used physical eraseblocks
@@ -574,6 +575,7 @@ struct ubi_device {
size_t fm_size;
struct work_struct fm_work;
int fm_work_scheduled;
+ int fast_attach;
/* Wear-leveling sub-system's stuff */
struct rb_root used;
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 1ae17bb9b889..10059dfdc1b6 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -405,7 +405,7 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
if (!no_vtbl)
self_check_volumes(ubi);
- return err;
+ return 0;
out_err:
ubi_err(ubi, "cannot remove volume %d, error %d", vol_id, err);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 17ec948ac40e..959c7b12e0b1 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1534,6 +1534,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
INIT_LIST_HEAD(&ubi->pq[i]);
ubi->pq_head = 0;
+ ubi->free_count = 0;
list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
cond_resched();
@@ -1552,7 +1553,6 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
found_pebs++;
}
- ubi->free_count = 0;
list_for_each_entry(aeb, &ai->free, u.list) {
cond_resched();
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index bcb9dccada4d..1de2e1e51c2b 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -615,7 +615,7 @@ struct fman {
struct fman_cfg *cfg;
struct muram_info *muram;
/* cam section in muram */
- int cam_offset;
+ unsigned long cam_offset;
size_t cam_size;
/* Fifo in MURAM */
int fifo_offset;
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
index 4eb0e9ac7182..47394c45b6e8 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.c
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -129,7 +129,7 @@ unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
*
* Return: address of the allocated memory; NULL otherwise.
*/
-int fman_muram_alloc(struct muram_info *muram, size_t size)
+unsigned long fman_muram_alloc(struct muram_info *muram, size_t size)
{
unsigned long vaddr;
@@ -150,7 +150,7 @@ int fman_muram_alloc(struct muram_info *muram, size_t size)
*
* Free an allocated memory from FM-MURAM partition.
*/
-void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size)
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size)
{
unsigned long addr = fman_muram_offset_to_vbase(muram, offset);
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h
index dbf0af9e5bb5..889649ad8931 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.h
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -44,8 +44,8 @@ struct muram_info *fman_muram_init(phys_addr_t base, size_t size);
unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
unsigned long offset);
-int fman_muram_alloc(struct muram_info *muram, size_t size);
+unsigned long fman_muram_alloc(struct muram_info *muram, size_t size);
-void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size);
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size);
#endif /* __FM_MURAM_EXT */
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 020ac1a4b408..cea9443c22a6 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -382,7 +382,7 @@ static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue,
ret = of_property_read_u32(dt_node, "ref-clock-frequency",
&pdev_data->ref_clock_freq);
- if (IS_ERR_VALUE(ret)) {
+ if (ret) {
dev_err(glue->dev,
"can't get reference clock frequency (%d)\n", ret);
return ret;
@@ -425,7 +425,7 @@ static int wl1271_probe(struct spi_device *spi)
}
ret = wlcore_probe_of(spi, glue, &pdev_data);
- if (IS_ERR_VALUE(ret)) {
+ if (ret) {
dev_err(glue->dev,
"can't get device tree parameters (%d)\n", ret);
return ret;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 042baec56931..608fc4464574 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -164,14 +164,22 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
}
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
- void __pmem **kaddr, pfn_t *pfn)
+ void __pmem **kaddr, pfn_t *pfn, long size)
{
struct pmem_device *pmem = bdev->bd_queue->queuedata;
resource_size_t offset = sector * 512 + pmem->data_offset;
+ if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
+ return -EIO;
*kaddr = pmem->virt_addr + offset;
*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
+ /*
+ * If badblocks are present, limit known good range to the
+ * requested range.
+ */
+ if (unlikely(pmem->bb.count))
+ return size;
return pmem->size - pmem->pfn_pad - offset;
}
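With the new size argument, ->direct_access() can reject a range that intersects known badblocks with -EIO and, whenever any badblocks exist on the device, cap its answer at exactly the requested length instead of advertising everything up to the end of the device. A hedged sketch of a caller honouring that contract; bdev_direct_access_sketch() is a placeholder name, not an API from this hunk:

    /* Sketch: consume the size-aware direct_access contract. */
    static long dax_map_sketch(struct block_device *bdev, sector_t sector,
                               long wanted)
    {
            void __pmem *kaddr;
            pfn_t pfn;
            long avail;

            avail = bdev_direct_access_sketch(bdev, sector, &kaddr, &pfn,
                                              wanted);
            if (avail < 0)
                    return avail;  /* e.g. -EIO over bad pmem */
            /* The device may offer more than asked; never exceed it. */
            return avail < wanted ? avail : wanted;
    }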
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2de248bd462b..1a51584a382b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -95,6 +95,15 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
break;
}
break;
+ case NVME_CTRL_DEAD:
+ switch (old_state) {
+ case NVME_CTRL_DELETING:
+ changed = true;
+ /* FALLTHRU */
+ default:
+ break;
+ }
+ break;
default:
break;
}
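The new NVME_CTRL_DEAD state is terminal and reachable only from NVME_CTRL_DELETING; it marks a controller that vanished without a graceful disconnect (see nvme_remove() below). A standalone model of just the transition rule this hunk adds:

    /* Illustration: the single edge added to the state machine. */
    #include <stdbool.h>
    #include <stdio.h>

    enum state { LIVE, RESETTING, DELETING, DEAD };

    static bool can_enter_dead(enum state old)
    {
            return old == DELETING; /* the only edge the hunk allows */
    }

    int main(void)
    {
            printf("DELETING -> DEAD: %d\n", can_enter_dead(DELETING));
            printf("LIVE     -> DEAD: %d\n", can_enter_dead(LIVE));
            return 0;
    }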
@@ -720,10 +729,14 @@ static void nvme_init_integrity(struct nvme_ns *ns)
switch (ns->pi_type) {
case NVME_NS_DPS_PI_TYPE3:
integrity.profile = &t10_pi_type3_crc;
+ integrity.tag_size = sizeof(u16) + sizeof(u32);
+ integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
break;
case NVME_NS_DPS_PI_TYPE1:
case NVME_NS_DPS_PI_TYPE2:
integrity.profile = &t10_pi_type1_crc;
+ integrity.tag_size = sizeof(u16);
+ integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
break;
default:
integrity.profile = NULL;
@@ -1212,6 +1225,9 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
return ctrl->ops->reset_ctrl(ctrl);
case NVME_IOCTL_SUBSYS_RESET:
return nvme_reset_subsystem(ctrl);
+ case NVME_IOCTL_RESCAN:
+ nvme_queue_scan(ctrl);
+ return 0;
default:
return -ENOTTY;
}
@@ -1239,6 +1255,17 @@ static ssize_t nvme_sysfs_reset(struct device *dev,
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
+static ssize_t nvme_sysfs_rescan(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ nvme_queue_scan(ctrl);
+ return count;
+}
+static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
+
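The write-only rescan_controller attribute lets userspace force a namespace rescan without resetting the controller, mirroring the new NVME_IOCTL_RESCAN ioctl above. A minimal userspace illustration follows; the sysfs path assumes the attribute lands under the nvme0 class device, so treat it as an assumption.

    /* Sketch: trigger a rescan from userspace via sysfs. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/class/nvme/nvme0/rescan_controller", "w");

            if (!f) {
                    perror("rescan_controller");
                    return 1;
            }
            fputs("1\n", f);
            return fclose(f) ? 1 : 0;
    }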
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1342,6 +1369,7 @@ nvme_show_int_function(cntlid);
static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr,
+ &dev_attr_rescan_controller.attr,
&dev_attr_model.attr,
&dev_attr_serial.attr,
&dev_attr_firmware_rev.attr,
@@ -1580,6 +1608,15 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns, *next;
+ /*
+ * The dead state indicates the controller was not gracefully
+ * disconnected. In that case, we won't be able to flush any data while
+ * removing the namespaces' disks; fail all the queues now to avoid
+ * potentially having to clean up the failed sync later.
+ */
+ if (ctrl->state == NVME_CTRL_DEAD)
+ nvme_kill_queues(ctrl);
+
mutex_lock(&ctrl->namespaces_mutex);
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
nvme_ns_remove(ns);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 114b92873894..1daa0482de0e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -72,6 +72,7 @@ enum nvme_ctrl_state {
NVME_CTRL_LIVE,
NVME_CTRL_RESETTING,
NVME_CTRL_DELETING,
+ NVME_CTRL_DEAD,
};
struct nvme_ctrl {
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0f093f14d348..78dca3193ca4 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1394,7 +1394,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
struct pci_dev *pdev = to_pci_dev(dev->dev);
int result, i, vecs, nr_io_queues, size;
- nr_io_queues = num_possible_cpus();
+ nr_io_queues = num_online_cpus();
result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
if (result < 0)
return result;
@@ -1551,12 +1551,12 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
static void nvme_disable_io_queues(struct nvme_dev *dev)
{
- int pass;
+ int pass, queues = dev->online_queues - 1;
unsigned long timeout;
u8 opcode = nvme_admin_delete_sq;
for (pass = 0; pass < 2; pass++) {
- int sent = 0, i = dev->queue_count - 1;
+ int sent = 0, i = queues;
reinit_completion(&dev->ioq_wait);
retry:
@@ -1857,7 +1857,7 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
nvme_kill_queues(&dev->ctrl);
if (pci_get_drvdata(pdev))
- pci_stop_and_remove_bus_device_locked(pdev);
+ device_release_driver(&pdev->dev);
nvme_put_ctrl(&dev->ctrl);
}
@@ -2017,6 +2017,10 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
pci_set_drvdata(pdev, NULL);
+
+ if (!pci_device_is_present(pdev))
+ nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
+
flush_work(&dev->reset_work);
nvme_uninit_ctrl(&dev->ctrl);
nvme_dev_disable(dev, true);
@@ -2060,14 +2064,17 @@ static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
* shutdown the controller to quiesce. The controller will be restarted
* after the slot reset through the driver's slot_reset callback.
*/
- dev_warn(dev->ctrl.device, "error detected: state:%d\n", state);
switch (state) {
case pci_channel_io_normal:
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
+ dev_warn(dev->ctrl.device,
+ "frozen state error detected, reset controller\n");
nvme_dev_disable(dev, false);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
+ dev_warn(dev->ctrl.device,
+ "failure state error detected, request disconnect\n");
return PCI_ERS_RESULT_DISCONNECT;
}
return PCI_ERS_RESULT_NEED_RESET;
@@ -2102,6 +2109,12 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x0953),
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DISCARD_ZEROES, },
+ { PCI_VDEVICE(INTEL, 0x0a53),
+ .driver_data = NVME_QUIRK_STRIPE_SIZE |
+ NVME_QUIRK_DISCARD_ZEROES, },
+ { PCI_VDEVICE(INTEL, 0x0a54),
+ .driver_data = NVME_QUIRK_STRIPE_SIZE |
+ NVME_QUIRK_DISCARD_ZEROES, },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index bb4ea123547f..965911d9b36a 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -113,7 +113,7 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
rc = nvmem_reg_read(nvmem, pos, buf, count);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
return count;
@@ -147,7 +147,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
rc = nvmem_reg_write(nvmem, pos, buf, count);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
return count;
@@ -366,7 +366,7 @@ static int nvmem_add_cells(struct nvmem_device *nvmem,
}
rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
- if (IS_ERR_VALUE(rval)) {
+ if (rval) {
kfree(cells[i]);
goto err;
}
@@ -963,7 +963,7 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem,
rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
/* shift bits in-place */
@@ -998,7 +998,7 @@ void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
return ERR_PTR(-ENOMEM);
rc = __nvmem_cell_read(nvmem, cell, buf, len);
- if (IS_ERR_VALUE(rc)) {
+ if (rc) {
kfree(buf);
return ERR_PTR(rc);
}
@@ -1083,7 +1083,7 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
if (cell->bit_offset || cell->nbits)
kfree(buf);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
return len;
@@ -1111,11 +1111,11 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
return -EINVAL;
rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
return len;
@@ -1141,7 +1141,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem,
return -EINVAL;
rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
return nvmem_cell_write(&cell, buf, cell.bytes);
@@ -1170,7 +1170,7 @@ int nvmem_device_read(struct nvmem_device *nvmem,
rc = nvmem_reg_read(nvmem, offset, buf, bytes);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
return bytes;
@@ -1198,7 +1198,7 @@ int nvmem_device_write(struct nvmem_device *nvmem,
rc = nvmem_reg_write(nvmem, offset, buf, bytes);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 55182fc58c6a..677a811b3a6f 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -153,8 +153,10 @@ struct byt_community {
.name = (n), \
.pins = (p), \
.npins = ARRAY_SIZE((p)), \
- .has_simple_funcs = 1, \
- .simple_funcs = (f), \
+ .has_simple_funcs = 1, \
+ { \
+ .simple_funcs = (f), \
+ }, \
.nfuncs = ARRAY_SIZE((f)), \
}
#define PIN_GROUP_MIXED(n, p, f) \
@@ -163,7 +165,9 @@ struct byt_community {
.pins = (p), \
.npins = ARRAY_SIZE((p)), \
.has_simple_funcs = 0, \
- .mixed_funcs = (f), \
+ { \
+ .mixed_funcs = (f), \
+ }, \
.nfuncs = ARRAY_SIZE((f)), \
}
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index d03df4a60d05..76bdae1a93bb 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -64,4 +64,14 @@ config CROS_EC_PROTO
help
ChromeOS EC communication protocol helpers.
+config CROS_KBD_LED_BACKLIGHT
+ tristate "Backlight LED support for Chrome OS keyboards"
+ depends on LEDS_CLASS && ACPI
+ help
+ This option enables support for the keyboard backlight LEDs on
+ select Chrome OS systems.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_kbd_led_backlight.
+
endif # CHROMEOS_PLATFORMS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index bc498bda8211..4f3462783a3c 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -1,8 +1,9 @@
-obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
-obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o
-cros_ec_devs-objs := cros_ec_dev.o cros_ec_sysfs.o \
- cros_ec_lightbar.o cros_ec_vbc.o
-obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_devs.o
-obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpc.o
-obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o
+obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
+obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o
+cros_ec_devs-objs := cros_ec_dev.o cros_ec_sysfs.o \
+ cros_ec_lightbar.o cros_ec_vbc.o
+obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_devs.o
+obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpc.o
+obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o
+obj-$(CONFIG_CROS_KBD_LED_BACKLIGHT) += cros_kbd_led_backlight.o
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 2b441e9ae593..e8a44a9bc916 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -34,6 +34,7 @@
#define ATMEL_TS_I2C_ADDR 0x4a
#define ATMEL_TS_I2C_BL_ADDR 0x26
#define CYAPA_TP_I2C_ADDR 0x67
+#define ELAN_TP_I2C_ADDR 0x15
#define ISL_ALS_I2C_ADDR 0x44
#define TAOS_ALS_I2C_ADDR 0x29
@@ -73,7 +74,7 @@ struct i2c_peripheral {
int tries;
};
-#define MAX_I2C_PERIPHERALS 3
+#define MAX_I2C_PERIPHERALS 4
struct chromeos_laptop {
struct i2c_peripheral i2c_peripherals[MAX_I2C_PERIPHERALS];
@@ -86,6 +87,11 @@ static struct i2c_board_info cyapa_device = {
.flags = I2C_CLIENT_WAKE,
};
+static struct i2c_board_info elantech_device = {
+ I2C_BOARD_INFO("elan_i2c", ELAN_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+};
+
static struct i2c_board_info isl_als_device = {
I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
};
@@ -306,6 +312,16 @@ static int setup_atmel_224s_tp(enum i2c_adapter_type type)
return (!tp) ? -EAGAIN : 0;
}
+static int setup_elantech_tp(enum i2c_adapter_type type)
+{
+ if (tp)
+ return 0;
+
+ /* add elantech touchpad */
+ tp = add_i2c_device("trackpad", type, &elantech_device);
+ return (!tp) ? -EAGAIN : 0;
+}
+
static int setup_atmel_1664s_ts(enum i2c_adapter_type type)
{
const unsigned short addr_list[] = { ATMEL_TS_I2C_BL_ADDR,
@@ -445,6 +461,8 @@ static struct chromeos_laptop dell_chromebook_11 = {
.i2c_peripherals = {
/* Touchpad. */
{ .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
+ /* Elan Touchpad option. */
+ { .add = setup_elantech_tp, I2C_ADAPTER_DESIGNWARE_0 },
},
};
@@ -475,6 +493,8 @@ static struct chromeos_laptop acer_c720 = {
{ .add = setup_atmel_1664s_ts, I2C_ADAPTER_DESIGNWARE_1 },
/* Touchpad. */
{ .add = setup_cyapa_tp, I2C_ADAPTER_DESIGNWARE_0 },
+ /* Elan Touchpad option. */
+ { .add = setup_elantech_tp, I2C_ADAPTER_DESIGNWARE_0 },
/* Light Sensor. */
{ .add = setup_isl29018_als, I2C_ADAPTER_DESIGNWARE_1 },
},
diff --git a/drivers/platform/chrome/chromeos_pstore.c b/drivers/platform/chrome/chromeos_pstore.c
index 34749200e4ab..308a853ac4f1 100644
--- a/drivers/platform/chrome/chromeos_pstore.c
+++ b/drivers/platform/chrome/chromeos_pstore.c
@@ -8,6 +8,7 @@
* the Free Software Foundation, version 2 of the License.
*/
+#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -58,7 +59,7 @@ MODULE_DEVICE_TABLE(dmi, chromeos_pstore_dmi_table);
static struct ramoops_platform_data chromeos_ramoops_data = {
.mem_size = 0x100000,
.mem_address = 0xf00000,
- .record_size = 0x20000,
+ .record_size = 0x40000,
.console_size = 0x20000,
.ftrace_size = 0x20000,
.dump_oops = 1,
@@ -71,9 +72,59 @@ static struct platform_device chromeos_ramoops = {
},
};
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cros_ramoops_acpi_match[] = {
+ { "GOOG9999", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_ramoops_acpi_match);
+
+static struct platform_driver chromeos_ramoops_acpi = {
+ .driver = {
+ .name = "chromeos_pstore",
+ .acpi_match_table = ACPI_PTR(cros_ramoops_acpi_match),
+ },
+};
+
+static int __init chromeos_probe_acpi(struct platform_device *pdev)
+{
+ struct resource *res;
+ resource_size_t len;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOMEM;
+
+ len = resource_size(res);
+ if (!res->start || !len)
+ return -ENOMEM;
+
+ pr_info("chromeos ramoops using acpi device.\n");
+
+ chromeos_ramoops_data.mem_size = len;
+ chromeos_ramoops_data.mem_address = res->start;
+
+ return 0;
+}
+
+static bool __init chromeos_check_acpi(void)
+{
+ if (!platform_driver_probe(&chromeos_ramoops_acpi, chromeos_probe_acpi))
+ return true;
+ return false;
+}
+#else
+static inline bool chromeos_check_acpi(void) { return false; }
+#endif
+
static int __init chromeos_pstore_init(void)
{
- if (dmi_check_system(chromeos_pstore_dmi_table))
+ bool acpi_dev_found;
+
+ /* First check ACPI for non-hardcoded values from firmware. */
+ acpi_dev_found = chromeos_check_acpi();
+
+ if (acpi_dev_found || dmi_check_system(chromeos_pstore_dmi_table))
return platform_device_register(&chromeos_ramoops);
return -ENODEV;
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c
index d45cd254ed1c..6d8ee3b15872 100644
--- a/drivers/platform/chrome/cros_ec_dev.c
+++ b/drivers/platform/chrome/cros_ec_dev.c
@@ -137,6 +137,10 @@ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
if (copy_from_user(&u_cmd, arg, sizeof(u_cmd)))
return -EFAULT;
+ if ((u_cmd.outsize > EC_MAX_MSG_BYTES) ||
+ (u_cmd.insize > EC_MAX_MSG_BYTES))
+ return -EINVAL;
+
s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
GFP_KERNEL);
if (!s_cmd)
@@ -208,6 +212,9 @@ static const struct file_operations fops = {
.release = ec_device_release,
.read = ec_device_read,
.unlocked_ioctl = ec_device_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ec_device_ioctl,
+#endif
};
static void __remove(struct device *dev)
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
index ff7640575c75..8df3d447cacf 100644
--- a/drivers/platform/chrome/cros_ec_lightbar.c
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -412,9 +412,13 @@ static umode_t cros_ec_lightbar_attrs_are_visible(struct kobject *kobj,
struct device *dev = container_of(kobj, struct device, kobj);
struct cros_ec_dev *ec = container_of(dev,
struct cros_ec_dev, class_dev);
- struct platform_device *pdev = container_of(ec->dev,
- struct platform_device, dev);
- if (pdev->id != 0)
+ struct platform_device *pdev = to_platform_device(ec->dev);
+ struct cros_ec_platform *pdata = pdev->dev.platform_data;
+ int is_cros_ec;
+
+ is_cros_ec = strcmp(pdata->ec_name, CROS_EC_DEV_NAME);
+
+ if (is_cros_ec != 0)
return 0;
/* Only instantiate this stuff if the EC has a lightbar */
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 990308ca384f..b6e161f71b26 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -298,8 +298,8 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
ec_dev->max_response = EC_PROTO2_MAX_PARAM_SIZE;
ec_dev->max_passthru = 0;
ec_dev->pkt_xfer = NULL;
- ec_dev->din_size = EC_MSG_BYTES;
- ec_dev->dout_size = EC_MSG_BYTES;
+ ec_dev->din_size = EC_PROTO2_MSG_BYTES;
+ ec_dev->dout_size = EC_PROTO2_MSG_BYTES;
} else {
/*
* It's possible for a test to occur too early when
diff --git a/drivers/platform/chrome/cros_kbd_led_backlight.c b/drivers/platform/chrome/cros_kbd_led_backlight.c
new file mode 100644
index 000000000000..ca3e4da852b4
--- /dev/null
+++ b/drivers/platform/chrome/cros_kbd_led_backlight.c
@@ -0,0 +1,122 @@
+/*
+ * Keyboard backlight LED driver for Chrome OS.
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/leds.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Keyboard LED ACPI Device must be defined in firmware */
+#define ACPI_KEYBOARD_BACKLIGHT_DEVICE "\\_SB.KBLT"
+#define ACPI_KEYBOARD_BACKLIGHT_READ ACPI_KEYBOARD_BACKLIGHT_DEVICE ".KBQC"
+#define ACPI_KEYBOARD_BACKLIGHT_WRITE ACPI_KEYBOARD_BACKLIGHT_DEVICE ".KBCM"
+
+#define ACPI_KEYBOARD_BACKLIGHT_MAX 100
+
+static void keyboard_led_set_brightness(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ union acpi_object param;
+ struct acpi_object_list input;
+ acpi_status status;
+
+ param.type = ACPI_TYPE_INTEGER;
+ param.integer.value = brightness;
+ input.count = 1;
+ input.pointer = &param;
+
+ status = acpi_evaluate_object(NULL, ACPI_KEYBOARD_BACKLIGHT_WRITE,
+ &input, NULL);
+ if (ACPI_FAILURE(status))
+ dev_err(cdev->dev, "Error setting keyboard LED value: %d\n",
+ status);
+}
+
+static enum led_brightness
+keyboard_led_get_brightness(struct led_classdev *cdev)
+{
+ unsigned long long brightness;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(NULL, ACPI_KEYBOARD_BACKLIGHT_READ,
+ NULL, &brightness);
+ if (ACPI_FAILURE(status)) {
+ dev_err(cdev->dev, "Error getting keyboard LED value: %d\n",
+ status);
+ return -EIO;
+ }
+
+ return brightness;
+}
+
+static int keyboard_led_probe(struct platform_device *pdev)
+{
+ struct led_classdev *cdev;
+ acpi_handle handle;
+ acpi_status status;
+ int error;
+
+ /* Look for the keyboard LED ACPI Device */
+ status = acpi_get_handle(ACPI_ROOT_OBJECT,
+ ACPI_KEYBOARD_BACKLIGHT_DEVICE,
+ &handle);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&pdev->dev, "Unable to find ACPI device %s: %d\n",
+ ACPI_KEYBOARD_BACKLIGHT_DEVICE, status);
+ return -ENXIO;
+ }
+
+ cdev = devm_kzalloc(&pdev->dev, sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+
+ cdev->name = "chromeos::kbd_backlight";
+ cdev->max_brightness = ACPI_KEYBOARD_BACKLIGHT_MAX;
+ cdev->flags |= LED_CORE_SUSPENDRESUME;
+ cdev->brightness_set = keyboard_led_set_brightness;
+ cdev->brightness_get = keyboard_led_get_brightness;
+
+ error = devm_led_classdev_register(&pdev->dev, cdev);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static const struct acpi_device_id keyboard_led_id[] = {
+ { "GOOG0002", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, keyboard_led_id);
+
+static struct platform_driver keyboard_led_driver = {
+ .driver = {
+ .name = "chromeos-keyboard-leds",
+ .acpi_match_table = ACPI_PTR(keyboard_led_id),
+ },
+ .probe = keyboard_led_probe,
+};
+module_platform_driver(keyboard_led_driver);
+
+MODULE_AUTHOR("Simon Que <sque@chromium.org>");
+MODULE_DESCRIPTION("ChromeOS Keyboard backlight LED Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:chromeos-keyboard-leds");
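
Once bound, the LED class core exposes this device as /sys/class/leds/chromeos::kbd_backlight; reads of its brightness attribute go through keyboard_led_get_brightness() (the KBQC method) and writes through keyboard_led_set_brightness() (KBCM), with values bounded by max_brightness (100). The LED_CORE_SUSPENDRESUME flag lets the LED core save and restore the level across suspend/resume without driver-specific PM hooks.
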
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index ed2004be13cf..c06bb85c2839 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -846,6 +846,18 @@ config INTEL_IMR
If you are running on a Galileo/Quark say Y here.
+config INTEL_PMC_CORE
+ bool "Intel PMC Core driver"
+ depends on X86 && PCI
+ ---help---
+	  The Intel Platform Controller Hub for Intel Core SoCs provides access
+	  to Power Management Controller registers via a PCI interface. This
+	  driver exposes the debugging capabilities and supported features of
+	  the Power Management Controller.
+
+ Supported features:
+ - SLP_S0_RESIDENCY counter.
+
config IBM_RTL
tristate "Device driver to enable PRTL support"
depends on X86 && PCI
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 448443c3baba..9b11b4073e03 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -69,3 +69,4 @@ obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o
obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o \
intel_telemetry_pltdrv.o \
intel_telemetry_debugfs.o
+obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index f2b5d0a8adf0..15f131146501 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -771,12 +771,14 @@ static int asus_read_brightness(struct backlight_device *bd)
{
struct asus_laptop *asus = bl_get_data(bd);
unsigned long long value;
- acpi_status rv = AE_OK;
+ acpi_status rv;
rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET,
NULL, &value);
- if (ACPI_FAILURE(rv))
+ if (ACPI_FAILURE(rv)) {
pr_warn("Error reading brightness\n");
+ return 0;
+ }
return value;
}
@@ -865,7 +867,7 @@ static ssize_t infos_show(struct device *dev, struct device_attribute *attr,
int len = 0;
unsigned long long temp;
char buf[16]; /* enough for all info */
- acpi_status rv = AE_OK;
+ acpi_status rv;
/*
	 * We use the easy way, we don't care about off and count,
@@ -946,11 +948,10 @@ static ssize_t sysfs_acpi_set(struct asus_laptop *asus,
const char *method)
{
int rv, value;
- int out = 0;
rv = parse_arg(buf, count, &value);
- if (rv > 0)
- out = value ? 1 : 0;
+ if (rv <= 0)
+ return rv;
if (write_acpi_int(asus->handle, method, value))
return -ENODEV;
@@ -1265,7 +1266,7 @@ static DEVICE_ATTR_RO(ls_value);
static int asus_gps_status(struct asus_laptop *asus)
{
unsigned long long status;
- acpi_status rv = AE_OK;
+ acpi_status rv;
rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS,
NULL, &status);
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index a96630d52346..a26dca3640ea 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -114,6 +114,7 @@ MODULE_LICENSE("GPL");
#define ASUS_WMI_DEVID_LED6 0x00020016
/* Backlight and Brightness */
+#define ASUS_WMI_DEVID_ALS_ENABLE 0x00050001 /* Ambient Light Sensor */
#define ASUS_WMI_DEVID_BACKLIGHT 0x00050011
#define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012
#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021
@@ -1730,6 +1731,7 @@ ASUS_WMI_CREATE_DEVICE_ATTR(touchpad, 0644, ASUS_WMI_DEVID_TOUCHPAD);
ASUS_WMI_CREATE_DEVICE_ATTR(camera, 0644, ASUS_WMI_DEVID_CAMERA);
ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER);
ASUS_WMI_CREATE_DEVICE_ATTR(lid_resume, 0644, ASUS_WMI_DEVID_LID_RESUME);
+ASUS_WMI_CREATE_DEVICE_ATTR(als_enable, 0644, ASUS_WMI_DEVID_ALS_ENABLE);
static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -1756,6 +1758,7 @@ static struct attribute *platform_attributes[] = {
&dev_attr_cardr.attr,
&dev_attr_touchpad.attr,
&dev_attr_lid_resume.attr,
+ &dev_attr_als_enable.attr,
NULL
};
@@ -1776,6 +1779,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
devid = ASUS_WMI_DEVID_TOUCHPAD;
else if (attr == &dev_attr_lid_resume.attr)
devid = ASUS_WMI_DEVID_LID_RESUME;
+ else if (attr == &dev_attr_als_enable.attr)
+ devid = ASUS_WMI_DEVID_ALS_ENABLE;
if (devid != -1)
ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c
index b51a2008d782..dcd9f40a4b18 100644
--- a/drivers/platform/x86/dell-rbtn.c
+++ b/drivers/platform/x86/dell-rbtn.c
@@ -28,6 +28,7 @@ struct rbtn_data {
enum rbtn_type type;
struct rfkill *rfkill;
struct input_dev *input_dev;
+ bool suspended;
};
@@ -235,9 +236,55 @@ static const struct acpi_device_id rbtn_ids[] = {
{ "", 0 },
};
+#ifdef CONFIG_PM_SLEEP
+static void ACPI_SYSTEM_XFACE rbtn_clear_suspended_flag(void *context)
+{
+ struct rbtn_data *rbtn_data = context;
+
+ rbtn_data->suspended = false;
+}
+
+static int rbtn_suspend(struct device *dev)
+{
+ struct acpi_device *device = to_acpi_device(dev);
+ struct rbtn_data *rbtn_data = acpi_driver_data(device);
+
+ rbtn_data->suspended = true;
+
+ return 0;
+}
+
+static int rbtn_resume(struct device *dev)
+{
+ struct acpi_device *device = to_acpi_device(dev);
+ struct rbtn_data *rbtn_data = acpi_driver_data(device);
+ acpi_status status;
+
+ /*
+	 * Upon resume, some BIOSes send an ACPI notification that triggers
+ * an unwanted input event. In order to ignore it, we use a flag
+ * that we set at suspend and clear once we have received the extra
+ * ACPI notification. Since ACPI notifications are delivered
+ * asynchronously to drivers, we clear the flag from the workqueue
+ * used to deliver the notifications. This should be enough
+ * to have the flag cleared only after we received the extra
+ * notification, if any.
+ */
+ status = acpi_os_execute(OSL_NOTIFY_HANDLER,
+ rbtn_clear_suspended_flag, rbtn_data);
+ if (ACPI_FAILURE(status))
+ rbtn_clear_suspended_flag(rbtn_data);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(rbtn_pm_ops, rbtn_suspend, rbtn_resume);
+
static struct acpi_driver rbtn_driver = {
.name = "dell-rbtn",
.ids = rbtn_ids,
+ .drv.pm = &rbtn_pm_ops,
.ops = {
.add = rbtn_add,
.remove = rbtn_remove,
@@ -399,6 +446,15 @@ static void rbtn_notify(struct acpi_device *device, u32 event)
{
struct rbtn_data *rbtn_data = device->driver_data;
+ /*
+ * Some BIOSes send a notification at resume.
+ * Ignore it to prevent unwanted input events.
+ */
+ if (rbtn_data->suspended) {
+ dev_dbg(&device->dev, "ACPI notification ignored\n");
+ return;
+ }
+
if (event != 0x80) {
dev_info(&device->dev, "Received unknown event (0x%x)\n",
event);
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index ffc84cc7b1c7..ce41bc34288d 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -69,7 +69,7 @@
#include <linux/kfifo.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
#include <linux/leds.h>
#endif
#include <acpi/video.h>
@@ -100,13 +100,14 @@
/* FUNC interface - responses */
#define UNSUPPORTED_CMD 0x80000000
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
/* FUNC interface - LED control */
#define FUNC_LED_OFF 0x1
#define FUNC_LED_ON 0x30001
#define KEYBOARD_LAMPS 0x100
#define LOGOLAMP_POWERON 0x2000
#define LOGOLAMP_ALWAYS 0x4000
+#define RADIO_LED_ON 0x20
#endif
/* Hotkey details */
@@ -174,13 +175,14 @@ struct fujitsu_hotkey_t {
int rfkill_state;
int logolamp_registered;
int kblamps_registered;
+ int radio_led_registered;
};
static struct fujitsu_hotkey_t *fujitsu_hotkey;
static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event);
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
static enum led_brightness logolamp_get(struct led_classdev *cdev);
static void logolamp_set(struct led_classdev *cdev,
enum led_brightness brightness);
@@ -200,6 +202,16 @@ static struct led_classdev kblamps_led = {
.brightness_get = kblamps_get,
.brightness_set = kblamps_set
};
+
+static enum led_brightness radio_led_get(struct led_classdev *cdev);
+static void radio_led_set(struct led_classdev *cdev,
+ enum led_brightness brightness);
+
+static struct led_classdev radio_led = {
+ .name = "fujitsu::radio_led",
+ .brightness_get = radio_led_get,
+ .brightness_set = radio_led_set
+};
#endif
#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
@@ -249,7 +261,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
return value;
}
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
/* LED class callbacks */
static void logolamp_set(struct led_classdev *cdev,
@@ -275,6 +287,15 @@ static void kblamps_set(struct led_classdev *cdev,
call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF);
}
+static void radio_led_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ if (brightness >= LED_FULL)
+ call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON);
+ else
+ call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0);
+}
+
static enum led_brightness logolamp_get(struct led_classdev *cdev)
{
enum led_brightness brightness = LED_OFF;
@@ -299,6 +320,16 @@ static enum led_brightness kblamps_get(struct led_classdev *cdev)
return brightness;
}
+
+static enum led_brightness radio_led_get(struct led_classdev *cdev)
+{
+ enum led_brightness brightness = LED_OFF;
+
+ if (call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0) & RADIO_LED_ON)
+ brightness = LED_FULL;
+
+ return brightness;
+}
#endif
/* Hardware access for LCD brightness control */
@@ -872,7 +903,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
/* Suspect this is a keymap of the application panel, print it */
pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
result = led_classdev_register(&fujitsu->pf_device->dev,
&logolamp_led);
@@ -895,6 +926,23 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
result);
}
}
+
+ /*
+ * BTNI bit 24 seems to indicate the presence of a radio toggle
+ * button in place of a slide switch, and all such machines appear
+ * to also have an RF LED. Therefore use bit 24 as an indicator
+ * that an RF LED is present.
+ */
+ if (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) {
+ result = led_classdev_register(&fujitsu->pf_device->dev,
+ &radio_led);
+ if (result == 0) {
+ fujitsu_hotkey->radio_led_registered = 1;
+ } else {
+ pr_err("Could not register LED handler for radio LED, error %i\n",
+ result);
+ }
+ }
#endif
return result;
@@ -915,12 +963,15 @@ static int acpi_fujitsu_hotkey_remove(struct acpi_device *device)
struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device);
struct input_dev *input = fujitsu_hotkey->input;
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
if (fujitsu_hotkey->logolamp_registered)
led_classdev_unregister(&logolamp_led);
if (fujitsu_hotkey->kblamps_registered)
led_classdev_unregister(&kblamps_led);
+
+ if (fujitsu_hotkey->radio_led_registered)
+ led_classdev_unregister(&radio_led);
#endif
input_unregister_device(input);
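
Note on the recurring cleanup in this patch: IS_ENABLED(CONFIG_LEDS_CLASS) evaluates to 1 when the option is built in (=y) or modular (=m), making it an exact, shorter replacement for the open-coded defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) test.
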
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index be3bc2f4edd4..4a23fbc66b71 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -48,7 +48,10 @@
#define CFG_CAMERA_BIT (19)
#if IS_ENABLED(CONFIG_ACPI_WMI)
-static const char ideapad_wmi_fnesc_event[] = "26CAB2E5-5CF1-46AE-AAC3-4A12B6BA50E6";
+static const char *const ideapad_wmi_fnesc_events[] = {
+ "26CAB2E5-5CF1-46AE-AAC3-4A12B6BA50E6", /* Yoga 3 */
+ "56322276-8493-4CE8-A783-98C991274F5E", /* Yoga 700 */
+};
#endif
enum {
@@ -93,6 +96,7 @@ struct ideapad_private {
struct dentry *debug;
unsigned long cfg;
bool has_hw_rfkill_switch;
+ const char *fnesc_guid;
};
static bool no_bt_rfkill;
@@ -989,8 +993,16 @@ static int ideapad_acpi_add(struct platform_device *pdev)
ACPI_DEVICE_NOTIFY, ideapad_acpi_notify, priv);
if (ret)
goto notification_failed;
+
#if IS_ENABLED(CONFIG_ACPI_WMI)
- ret = wmi_install_notify_handler(ideapad_wmi_fnesc_event, ideapad_wmi_notify, priv);
+ for (i = 0; i < ARRAY_SIZE(ideapad_wmi_fnesc_events); i++) {
+ ret = wmi_install_notify_handler(ideapad_wmi_fnesc_events[i],
+ ideapad_wmi_notify, priv);
+ if (ret == AE_OK) {
+ priv->fnesc_guid = ideapad_wmi_fnesc_events[i];
+ break;
+ }
+ }
if (ret != AE_OK && ret != AE_NOT_EXIST)
goto notification_failed_wmi;
#endif
@@ -1020,7 +1032,8 @@ static int ideapad_acpi_remove(struct platform_device *pdev)
int i;
#if IS_ENABLED(CONFIG_ACPI_WMI)
- wmi_remove_notify_handler(ideapad_wmi_fnesc_event);
+ if (priv->fnesc_guid)
+ wmi_remove_notify_handler(priv->fnesc_guid);
#endif
acpi_remove_notify_handler(priv->adev->handle,
ACPI_DEVICE_NOTIFY, ideapad_acpi_notify);
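
The probe-side loop installs a notify handler on the first WMI GUID actually present on the machine and records it in priv->fnesc_guid, so remove() unregisters exactly the handler that was installed; when neither GUID exists, the loop leaves ret at AE_NOT_EXIST, which the existing check deliberately tolerates.
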
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index 0a919d81662c..cbe01021c939 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -306,33 +306,32 @@ static int sensor_set_auxtrip(acpi_handle handle, int index, int value)
#define to_intel_menlow_attr(_attr) \
container_of(_attr, struct intel_menlow_attribute, attr)
-static ssize_t aux0_show(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+static ssize_t aux_show(struct device *dev, struct device_attribute *dev_attr,
+ char *buf, int idx)
{
struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
unsigned long long value;
int result;
- result = sensor_get_auxtrip(attr->handle, 0, &value);
+ result = sensor_get_auxtrip(attr->handle, idx, &value);
return result ? result : sprintf(buf, "%lu", DECI_KELVIN_TO_CELSIUS(value));
}
-static ssize_t aux1_show(struct device *dev,
+static ssize_t aux0_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
- struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
- unsigned long long value;
- int result;
-
- result = sensor_get_auxtrip(attr->handle, 1, &value);
+ return aux_show(dev, dev_attr, buf, 0);
+}
- return result ? result : sprintf(buf, "%lu", DECI_KELVIN_TO_CELSIUS(value));
+static ssize_t aux1_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ return aux_show(dev, dev_attr, buf, 1);
}
-static ssize_t aux0_store(struct device *dev,
- struct device_attribute *dev_attr,
- const char *buf, size_t count)
+static ssize_t aux_store(struct device *dev, struct device_attribute *dev_attr,
+ const char *buf, size_t count, int idx)
{
struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
int value;
@@ -345,27 +344,23 @@ static ssize_t aux0_store(struct device *dev,
if (value < 0)
return -EINVAL;
- result = sensor_set_auxtrip(attr->handle, 0, CELSIUS_TO_DECI_KELVIN(value));
+ result = sensor_set_auxtrip(attr->handle, idx,
+ CELSIUS_TO_DECI_KELVIN(value));
return result ? result : count;
}
-static ssize_t aux1_store(struct device *dev,
+static ssize_t aux0_store(struct device *dev,
struct device_attribute *dev_attr,
const char *buf, size_t count)
{
- struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
- int value;
- int result;
-
- /*Sanity check; should be a positive integer */
- if (!sscanf(buf, "%d", &value))
- return -EINVAL;
-
- if (value < 0)
- return -EINVAL;
+ return aux_store(dev, dev_attr, buf, count, 0);
+}
- result = sensor_set_auxtrip(attr->handle, 1, CELSIUS_TO_DECI_KELVIN(value));
- return result ? result : count;
+static ssize_t aux1_store(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ return aux_store(dev, dev_attr, buf, count, 1);
}
/* BIOS can enable/disable the thermal user application in dabney platform */
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
new file mode 100644
index 000000000000..2776bec89c88
--- /dev/null
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -0,0 +1,200 @@
+/*
+ * Intel Core SoC Power Management Controller Driver
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
+ * Vishwanath Somayaji <vishwanath.somayaji@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/pmc_core.h>
+
+#include "intel_pmc_core.h"
+
+static struct pmc_dev pmc;
+
+static const struct pci_device_id pmc_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), (kernel_ulong_t)NULL },
+ { 0, },
+};
+
+static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
+{
+ return readl(pmcdev->regbase + reg_offset);
+}
+
+static inline u32 pmc_core_adjust_slp_s0_step(u32 value)
+{
+ return value * SPT_PMC_SLP_S0_RES_COUNTER_STEP;
+}
+
+/**
+ * intel_pmc_slp_s0_counter_read() - Read SLP_S0 residency.
+ * @data: Out param that contains current SLP_S0 count.
+ *
+ * This API currently supports Intel Skylake SoC and Sunrise
+ * Point Platform Controller Hub. Future platform support
+ * should be added for platforms that support low power modes
+ * beyond Package C10 state.
+ *
+ * The SLP_S0_RESIDENCY counter counts in steps of 100 us,
+ * hence this function populates the scaled value in the out
+ * parameter @data.
+ *
+ * Return: an error code or 0 on success.
+ */
+int intel_pmc_slp_s0_counter_read(u32 *data)
+{
+ struct pmc_dev *pmcdev = &pmc;
+ u32 value;
+
+ if (!pmcdev->has_slp_s0_res)
+ return -EACCES;
+
+ value = pmc_core_reg_read(pmcdev, SPT_PMC_SLP_S0_RES_COUNTER_OFFSET);
+ *data = pmc_core_adjust_slp_s0_step(value);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_pmc_slp_s0_counter_read);
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static int pmc_core_dev_state_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ u32 counter_val;
+
+ counter_val = pmc_core_reg_read(pmcdev,
+ SPT_PMC_SLP_S0_RES_COUNTER_OFFSET);
+ seq_printf(s, "%u\n", pmc_core_adjust_slp_s0_step(counter_val));
+
+ return 0;
+}
+
+static int pmc_core_dev_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmc_core_dev_state_show, inode->i_private);
+}
+
+static const struct file_operations pmc_core_dev_state_ops = {
+ .open = pmc_core_dev_state_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
+{
+ debugfs_remove_recursive(pmcdev->dbgfs_dir);
+}
+
+static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
+{
+ struct dentry *dir, *file;
+
+ dir = debugfs_create_dir("pmc_core", NULL);
+ if (!dir)
+ return -ENOMEM;
+
+ pmcdev->dbgfs_dir = dir;
+ file = debugfs_create_file("slp_s0_residency_usec", S_IFREG | S_IRUGO,
+ dir, pmcdev, &pmc_core_dev_state_ops);
+
+ if (!file) {
+ pmc_core_dbgfs_unregister(pmcdev);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+#else
+static inline int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
+{
+ return 0;
+}
+
+static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static const struct x86_cpu_id intel_pmc_core_ids[] = {
+ { X86_VENDOR_INTEL, 6, 0x4e, X86_FEATURE_MWAIT,
+ (kernel_ulong_t)NULL}, /* Skylake CPUID Signature */
+ { X86_VENDOR_INTEL, 6, 0x5e, X86_FEATURE_MWAIT,
+ (kernel_ulong_t)NULL}, /* Skylake CPUID Signature */
+ {}
+};
+
+static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ struct device *ptr_dev = &dev->dev;
+ struct pmc_dev *pmcdev = &pmc;
+ const struct x86_cpu_id *cpu_id;
+ int err;
+
+ cpu_id = x86_match_cpu(intel_pmc_core_ids);
+ if (!cpu_id) {
+ dev_dbg(&dev->dev, "PMC Core: cpuid mismatch.\n");
+ return -EINVAL;
+ }
+
+ err = pcim_enable_device(dev);
+ if (err < 0) {
+ dev_dbg(&dev->dev, "PMC Core: failed to enable Power Management Controller.\n");
+ return err;
+ }
+
+ err = pci_read_config_dword(dev,
+ SPT_PMC_BASE_ADDR_OFFSET,
+ &pmcdev->base_addr);
+ if (err < 0) {
+ dev_dbg(&dev->dev, "PMC Core: failed to read PCI config space.\n");
+ return err;
+ }
+ dev_dbg(&dev->dev, "PMC Core: PWRMBASE is %#x\n", pmcdev->base_addr);
+
+ pmcdev->regbase = devm_ioremap_nocache(ptr_dev,
+ pmcdev->base_addr,
+ SPT_PMC_MMIO_REG_LEN);
+ if (!pmcdev->regbase) {
+ dev_dbg(&dev->dev, "PMC Core: ioremap failed.\n");
+ return -ENOMEM;
+ }
+
+ err = pmc_core_dbgfs_register(pmcdev);
+ if (err < 0) {
+ dev_err(&dev->dev, "PMC Core: debugfs register failed.\n");
+ return err;
+ }
+
+ pmc.has_slp_s0_res = true;
+ return 0;
+}
+
+static struct pci_driver intel_pmc_core_driver = {
+ .name = "intel_pmc_core",
+ .id_table = pmc_pci_ids,
+ .probe = pmc_core_probe,
+};
+
+builtin_pci_driver(intel_pmc_core_driver);
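
A minimal sketch of a kernel-side consumer of the exported counter API (assuming only the declaration pulled in via <asm/pmc_core.h> above; variable names are illustrative):

	u32 slp_s0_usec;

	/*
	 * Returns 0 on success and -EACCES until pmc_core_probe() has
	 * mapped the PMC MMIO space and set has_slp_s0_res. The value
	 * is already scaled to microseconds (counter ticks * 100 us).
	 */
	if (!intel_pmc_slp_s0_counter_read(&slp_s0_usec))
		pr_info("SLP_S0 residency: %u usec\n", slp_s0_usec);
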
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
new file mode 100644
index 000000000000..a9dadaf787c1
--- /dev/null
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -0,0 +1,51 @@
+/*
+ * Intel Core SoC Power Management Controller Header File
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
+ * Vishwanath Somayaji <vishwanath.somayaji@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef PMC_CORE_H
+#define PMC_CORE_H
+
+/* Sunrise Point Power Management Controller PCI Device ID */
+#define SPT_PMC_PCI_DEVICE_ID 0x9d21
+#define SPT_PMC_BASE_ADDR_OFFSET 0x48
+#define SPT_PMC_SLP_S0_RES_COUNTER_OFFSET 0x13c
+#define SPT_PMC_MMIO_REG_LEN 0x100
+#define SPT_PMC_SLP_S0_RES_COUNTER_STEP 0x64
+
+/**
+ * struct pmc_dev - pmc device structure
+ * @base_addr:		contains the PMC base address
+ * @regbase: pointer to io-remapped memory location
+ * @dbgfs_dir: path to debug fs interface
+ * @has_slp_s0_res:	flag indicating whether SLP_S0 residency
+ *			reporting is available on this platform
+ *
+ * pmc_dev contains info about the power management controller device.
+ */
+struct pmc_dev {
+ u32 base_addr;
+ void __iomem *regbase;
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+ struct dentry *dbgfs_dir;
+#endif /* CONFIG_DEBUG_FS */
+ bool has_slp_s0_res;
+};
+
+#endif /* PMC_CORE_H */
diff --git a/drivers/platform/x86/intel_telemetry_core.c b/drivers/platform/x86/intel_telemetry_core.c
index a695a436a1c3..0d4c3808a6d8 100644
--- a/drivers/platform/x86/intel_telemetry_core.c
+++ b/drivers/platform/x86/intel_telemetry_core.c
@@ -25,7 +25,7 @@
struct telemetry_core_config {
struct telemetry_plt_config *plt_config;
- struct telemetry_core_ops *telem_ops;
+ const struct telemetry_core_ops *telem_ops;
};
static struct telemetry_core_config telm_core_conf;
@@ -95,7 +95,7 @@ static int telemetry_def_reset_events(void)
return 0;
}
-static struct telemetry_core_ops telm_defpltops = {
+static const struct telemetry_core_ops telm_defpltops = {
.set_sampling_period = telemetry_def_set_sampling_period,
.get_sampling_period = telemetry_def_get_sampling_period,
.get_trace_verbosity = telemetry_def_get_trace_verbosity,
@@ -332,7 +332,7 @@ EXPORT_SYMBOL_GPL(telemetry_set_trace_verbosity);
*
* Return: 0 success, < 0 for failure
*/
-int telemetry_set_pltdata(struct telemetry_core_ops *ops,
+int telemetry_set_pltdata(const struct telemetry_core_ops *ops,
struct telemetry_plt_config *pltconfig)
{
if (ops)
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index 781bd10ca7ac..09c84a2b1c2c 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -1081,7 +1081,7 @@ out:
return ret;
}
-static struct telemetry_core_ops telm_pltops = {
+static const struct telemetry_core_ops telm_pltops = {
.get_trace_verbosity = telemetry_plt_get_trace_verbosity,
.set_trace_verbosity = telemetry_plt_set_trace_verbosity,
.set_sampling_period = telemetry_plt_set_sampling_period,
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index e9caa347a9bf..1dba3598cfcb 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1446,6 +1446,9 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
{
unsigned int i, result, bitmask, handle;
+ if (!handles)
+ return;
+
/* get enabled events and disable them */
sony_nc_int_call(sony_nc_acpi_handle, "SN01", NULL, &bitmask);
sony_nc_int_call(sony_nc_acpi_handle, "SN03", &bitmask, &result);
diff --git a/drivers/platform/x86/surfacepro3_button.c b/drivers/platform/x86/surfacepro3_button.c
index 700e0fa0eec2..6505c97705e1 100644
--- a/drivers/platform/x86/surfacepro3_button.c
+++ b/drivers/platform/x86/surfacepro3_button.c
@@ -24,6 +24,8 @@
#define SURFACE_BUTTON_OBJ_NAME "VGBI"
#define SURFACE_BUTTON_DEVICE_NAME "Surface Pro 3/4 Buttons"
+#define SURFACE_BUTTON_NOTIFY_TABLET_MODE 0xc8
+
#define SURFACE_BUTTON_NOTIFY_PRESS_POWER 0xc6
#define SURFACE_BUTTON_NOTIFY_RELEASE_POWER 0xc7
@@ -33,7 +35,7 @@
#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP 0xc0
#define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP 0xc1
-#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN 0xc2
+#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN 0xc2
#define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN 0xc3
ACPI_MODULE_NAME("surface pro 3 button");
@@ -105,9 +107,12 @@ static void surface_button_notify(struct acpi_device *device, u32 event)
case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN:
key_code = KEY_VOLUMEDOWN;
break;
+ case SURFACE_BUTTON_NOTIFY_TABLET_MODE:
+ dev_warn_once(&device->dev, "Tablet mode is not supported\n");
+ break;
default:
dev_info_ratelimited(&device->dev,
- "Unsupported event [0x%x]\n", event);
+ "Unsupported event [0x%x]\n", event);
break;
}
input = button->input;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 9255ff3ee81a..c3bfa1fe95bf 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -5001,6 +5001,8 @@ static int kbdlight_set_level(int level)
return 0;
}
+static int kbdlight_set_level_and_update(int level);
+
static int kbdlight_get_level(void)
{
int status = 0;
@@ -5068,7 +5070,7 @@ static void kbdlight_set_worker(struct work_struct *work)
container_of(work, struct tpacpi_led_classdev, work);
if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING))
- kbdlight_set_level(data->new_state);
+ kbdlight_set_level_and_update(data->new_state);
}
static void kbdlight_sysfs_set(struct led_classdev *led_cdev,
@@ -5099,7 +5101,6 @@ static struct tpacpi_led_classdev tpacpi_led_kbdlight = {
.max_brightness = 2,
.brightness_set = &kbdlight_sysfs_set,
.brightness_get = &kbdlight_sysfs_get,
- .flags = LED_CORE_SUSPENDRESUME,
}
};
@@ -5137,6 +5138,20 @@ static void kbdlight_exit(void)
flush_workqueue(tpacpi_wq);
}
+static int kbdlight_set_level_and_update(int level)
+{
+ int ret;
+ struct led_classdev *led_cdev;
+
+ ret = kbdlight_set_level(level);
+ led_cdev = &tpacpi_led_kbdlight.led_classdev;
+
+ if (ret == 0 && !(led_cdev->flags & LED_SUSPENDED))
+ led_cdev->brightness = level;
+
+ return ret;
+}
+
static int kbdlight_read(struct seq_file *m)
{
int level;
@@ -5177,13 +5192,35 @@ static int kbdlight_write(char *buf)
if (level == -1)
return -EINVAL;
- return kbdlight_set_level(level);
+ return kbdlight_set_level_and_update(level);
+}
+
+static void kbdlight_suspend(void)
+{
+ struct led_classdev *led_cdev;
+
+ if (!tp_features.kbdlight)
+ return;
+
+ led_cdev = &tpacpi_led_kbdlight.led_classdev;
+ led_update_brightness(led_cdev);
+ led_classdev_suspend(led_cdev);
+}
+
+static void kbdlight_resume(void)
+{
+ if (!tp_features.kbdlight)
+ return;
+
+ led_classdev_resume(&tpacpi_led_kbdlight.led_classdev);
}
static struct ibm_struct kbdlight_driver_data = {
.name = "kbdlight",
.read = kbdlight_read,
.write = kbdlight_write,
+ .suspend = kbdlight_suspend,
+ .resume = kbdlight_resume,
.exit = kbdlight_exit,
};
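
Net effect of the keyboard-light changes: the LED core no longer suspends this LED on its own (the LED_CORE_SUSPENDRESUME flag is gone); kbdlight_suspend() first refreshes the cached level with led_update_brightness(), since firmware can change it behind the driver's back, before led_classdev_suspend() switches the LED off, and kbdlight_set_level_and_update() keeps led_cdev->brightness coherent whenever the level is set while not suspended.
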
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index b83908670a9a..bed53c46dd90 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -31,7 +31,7 @@ static void dcssblk_release(struct gendisk *disk, fmode_t mode);
static blk_qc_t dcssblk_make_request(struct request_queue *q,
struct bio *bio);
static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
- void __pmem **kaddr, pfn_t *pfn);
+ void __pmem **kaddr, pfn_t *pfn, long size);
static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
@@ -884,7 +884,7 @@ fail:
static long
dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
- void __pmem **kaddr, pfn_t *pfn)
+ void __pmem **kaddr, pfn_t *pfn, long size)
{
struct dcssblk_dev_info *dev_info;
unsigned long offset, dev_sz;
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 8f90d9e77104..969c312de1be 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -621,6 +621,11 @@ struct aac_driver_ident
#define AAC_QUIRK_SCSI_32 0x0020
/*
+ * SRC based adapters support the AifReqEvent functions
+ */
+#define AAC_QUIRK_SRC 0x0040
+
+/*
* The adapter interface specs all queues to be located in the same
* physically contiguous block. The host structure that defines the
 * communication queues will assume they are each a separate physically
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index a943bd230bc2..79871f3519ff 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -236,10 +236,10 @@ static struct aac_driver_ident aac_drivers[] = {
{ aac_rx_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Catch All */
{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec Rocket Catch All */
{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec NEMER/ARK Catch All */
- { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 6 (Tupelo) */
- { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 7 (Denali) */
- { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 }, /* Adaptec PMC Series 8 */
- { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2 } /* Adaptec PMC Series 9 */
+ { aac_src_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
+ { aac_srcv_init, "aacraid", "ADAPTEC ", "RAID ", 2, AAC_QUIRK_SRC } /* Adaptec PMC Series 9 */
};
/**
@@ -1299,7 +1299,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
else
shost->this_id = shost->max_id;
- aac_intr_normal(aac, 0, 2, 0, NULL);
+ if (aac_drivers[index].quirks & AAC_QUIRK_SRC)
+ aac_intr_normal(aac, 0, 2, 0, NULL);
/*
* dmb - we may need to move the setting of these parms somewhere else once
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 6a4df5a315e9..6bff13e7afc7 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -7975,13 +7975,14 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
ActiveCableEventData =
(Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
if (ActiveCableEventData->ReasonCode ==
- MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER)
+ MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER) {
pr_info(MPT3SAS_FMT "Currently an active cable with ReceptacleID %d",
ioc->name, ActiveCableEventData->ReceptacleID);
pr_info("cannot be powered and devices connected to this active cable");
pr_info("will not be seen. This active cable");
pr_info("requires %d mW of power",
ActiveCableEventData->ActiveCablePowerRequirement);
+ }
break;
default: /* ignore the rest */
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 10aa18ba05fd..67c0d5aa3212 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -36,3 +36,12 @@ config TCM_QLA2XXX
default n
---help---
Say Y here to enable the TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs
+
+if TCM_QLA2XXX
+config TCM_QLA2XXX_DEBUG
+ bool "TCM_QLA2XXX fabric module DEBUG mode for QLogic 24xx+ series target mode HBAs"
+ default n
+ ---help---
+	  Say Y here to enable DEBUG mode in the TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs.
+	  This includes the code that enables the SCSI command jammer.
+endif
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 8a44d1541eb4..ca39deb4ff5b 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -637,8 +637,10 @@ static void qlt_free_session_done(struct work_struct *work)
}
/* ha->tgt.sess_lock supposed to be held on entry */
-void qlt_unreg_sess(struct qla_tgt_sess *sess)
+static void qlt_release_session(struct kref *kref)
{
+ struct qla_tgt_sess *sess =
+ container_of(kref, struct qla_tgt_sess, sess_kref);
struct scsi_qla_host *vha = sess->vha;
if (sess->se_sess)
@@ -651,8 +653,16 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
INIT_WORK(&sess->free_work, qlt_free_session_done);
schedule_work(&sess->free_work);
}
-EXPORT_SYMBOL(qlt_unreg_sess);
+void qlt_put_sess(struct qla_tgt_sess *sess)
+{
+ if (!sess)
+ return;
+
+ assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
+ kref_put(&sess->sess_kref, qlt_release_session);
+}
+EXPORT_SYMBOL(qlt_put_sess);
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
@@ -857,12 +867,9 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
"Timeout: sess %p about to be deleted\n",
sess);
- if (sess->se_sess) {
+ if (sess->se_sess)
ha->tgt.tgt_ops->shutdown_sess(sess);
- ha->tgt.tgt_ops->put_sess(sess);
- } else {
- qlt_unreg_sess(sess);
- }
+ qlt_put_sess(sess);
} else {
schedule_delayed_work(&tgt->sess_del_work,
sess->expires - elapsed);
@@ -917,7 +924,7 @@ static struct qla_tgt_sess *qlt_create_sess(
}
}
- kref_get(&sess->se_sess->sess_kref);
+ kref_get(&sess->sess_kref);
ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
(fcport->flags & FCF_CONF_COMP_SUPPORTED));
@@ -947,6 +954,7 @@ static struct qla_tgt_sess *qlt_create_sess(
sess->s_id = fcport->d_id;
sess->loop_id = fcport->loop_id;
sess->local = local;
+ kref_init(&sess->sess_kref);
INIT_LIST_HEAD(&sess->del_list_entry);
/* Under normal circumstances we want to logout from firmware when
@@ -991,7 +999,7 @@ static struct qla_tgt_sess *qlt_create_sess(
* Take an extra reference to ->sess_kref here to handle qla_tgt_sess
* access across ->tgt.sess_lock reaquire.
*/
- kref_get(&sess->se_sess->sess_kref);
+ kref_get(&sess->sess_kref);
}
return sess;
@@ -1035,7 +1043,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return;
} else {
- kref_get(&sess->se_sess->sess_kref);
+ kref_get(&sess->sess_kref);
if (sess->deleted) {
qlt_undelete_sess(sess);
@@ -1060,7 +1068,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
fcport->port_name, sess->loop_id);
sess->local = 0;
}
- ha->tgt.tgt_ops->put_sess(sess);
+ qlt_put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
@@ -3817,7 +3825,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
* Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
*/
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- ha->tgt.tgt_ops->put_sess(sess);
+ qlt_put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return;
@@ -3836,7 +3844,7 @@ out_term:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
- ha->tgt.tgt_ops->put_sess(sess);
+ qlt_put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
@@ -3936,13 +3944,13 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
if (!cmd) {
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_send_busy(vha, &op->atio, SAM_STAT_BUSY);
- ha->tgt.tgt_ops->put_sess(sess);
+ qlt_put_sess(sess);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
kfree(op);
return;
}
/*
- * __qlt_do_work() will call ha->tgt.tgt_ops->put_sess() to release
+ * __qlt_do_work() will call qlt_put_sess() to release
* the extra reference taken above by qlt_make_local_sess()
*/
__qlt_do_work(cmd);
@@ -4003,13 +4011,13 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
/*
* Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
*/
- kref_get(&sess->se_sess->sess_kref);
+ kref_get(&sess->sess_kref);
cmd = qlt_get_tag(vha, sess, atio);
if (!cmd) {
ql_dbg(ql_dbg_io, vha, 0x3062,
"qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
- ha->tgt.tgt_ops->put_sess(sess);
+ qlt_put_sess(sess);
return -ENOMEM;
}
@@ -5911,7 +5919,7 @@ static void qlt_abort_work(struct qla_tgt *tgt,
goto out_term2;
}
- kref_get(&sess->se_sess->sess_kref);
+ kref_get(&sess->sess_kref);
}
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -5924,7 +5932,7 @@ static void qlt_abort_work(struct qla_tgt *tgt,
goto out_term;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- ha->tgt.tgt_ops->put_sess(sess);
+ qlt_put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
return;
@@ -5935,8 +5943,7 @@ out_term:
qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (sess)
- ha->tgt.tgt_ops->put_sess(sess);
+ qlt_put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
}
@@ -5976,7 +5983,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
goto out_term;
}
- kref_get(&sess->se_sess->sess_kref);
+ kref_get(&sess->sess_kref);
}
iocb = a;
@@ -5988,14 +5995,13 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
if (rc != 0)
goto out_term;
- ha->tgt.tgt_ops->put_sess(sess);
+ qlt_put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return;
out_term:
qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
- if (sess)
- ha->tgt.tgt_ops->put_sess(sess);
+ qlt_put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
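
The refcounting conversion above moves session lifetime onto a kref embedded in qla_tgt_sess, with qlt_release_session() as the single teardown path. The generic pattern it follows, as a minimal self-contained sketch (hypothetical structure, not the driver's own):

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct sess {
		struct kref kref;
		/* ... payload ... */
	};

	static void sess_release(struct kref *kref)
	{
		struct sess *s = container_of(kref, struct sess, kref);

		kfree(s);	/* runs exactly once, at refcount zero */
	}

	/*
	 * kref_init() starts the count at 1; each additional user pairs
	 * a kref_get() with a kref_put(), and the final put invokes the
	 * release callback.
	 */
	static void sess_example(struct sess *s)
	{
		kref_init(&s->kref);			/* count = 1 */
		kref_get(&s->kref);			/* count = 2 */
		kref_put(&s->kref, sess_release);	/* count = 1 */
		kref_put(&s->kref, sess_release);	/* count = 0, released */
	}
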
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index d857feeb6514..f26c5f60eedd 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -738,7 +738,6 @@ struct qla_tgt_func_tmpl {
struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *,
const uint8_t *);
void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *);
- void (*put_sess)(struct qla_tgt_sess *);
void (*shutdown_sess)(struct qla_tgt_sess *);
};
@@ -930,6 +929,7 @@ struct qla_tgt_sess {
int generation;
struct se_session *se_sess;
+ struct kref sess_kref;
struct scsi_qla_host *vha;
struct qla_tgt *tgt;
@@ -1101,7 +1101,7 @@ extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *);
extern int qlt_lport_register(void *, u64, u64, u64,
int (*callback)(struct scsi_qla_host *, void *, u64, u64));
extern void qlt_lport_deregister(struct scsi_qla_host *);
-extern void qlt_unreg_sess(struct qla_tgt_sess *);
+void qlt_put_sess(struct qla_tgt_sess *sess);
extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
extern int __init qlt_init(void);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index c1461d225f08..6643f6fc7795 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -339,22 +339,6 @@ static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
qlt_free_cmd(cmd);
}
-static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
-{
- struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
- struct scsi_qla_host *vha;
- unsigned long flags;
-
- BUG_ON(!sess);
- vha = sess->vha;
-
- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- target_sess_cmd_list_set_waiting(se_sess);
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
-
- return 1;
-}
-
static void tcm_qla2xxx_close_session(struct se_session *se_sess)
{
struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
@@ -365,7 +349,8 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess)
vha = sess->vha;
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- qlt_unreg_sess(sess);
+ target_sess_cmd_list_set_waiting(se_sess);
+ qlt_put_sess(sess);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}
@@ -457,6 +442,10 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
struct se_cmd *se_cmd = &cmd->se_cmd;
struct se_session *se_sess;
struct qla_tgt_sess *sess;
+#ifdef CONFIG_TCM_QLA2XXX_DEBUG
+ struct se_portal_group *se_tpg;
+ struct tcm_qla2xxx_tpg *tpg;
+#endif
int flags = TARGET_SCF_ACK_KREF;
if (bidi)
@@ -477,6 +466,15 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
return -EINVAL;
}
+#ifdef CONFIG_TCM_QLA2XXX_DEBUG
+ se_tpg = se_sess->se_tpg;
+ tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg);
+ if (unlikely(tpg->tpg_attrib.jam_host)) {
+		/* return without running target_submit_cmd(), discarding the command */
+ return 0;
+ }
+#endif
+
cmd->vha->tgt_counters.qla_core_sbt_cmd++;
return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
cmd->unpacked_lun, data_length, fcp_task_attr,
@@ -758,23 +756,6 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
}
-static void tcm_qla2xxx_release_session(struct kref *kref)
-{
- struct se_session *se_sess = container_of(kref,
- struct se_session, sess_kref);
-
- qlt_unreg_sess(se_sess->fabric_sess_ptr);
-}
-
-static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
-{
- if (!sess)
- return;
-
- assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
- kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session);
-}
-
static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
{
assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
@@ -844,6 +825,9 @@ DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
DEF_QLA_TPG_ATTRIB(demo_mode_login_only);
+#ifdef CONFIG_TCM_QLA2XXX_DEBUG
+DEF_QLA_TPG_ATTRIB(jam_host);
+#endif
static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
&tcm_qla2xxx_tpg_attrib_attr_generate_node_acls,
@@ -851,6 +835,9 @@ static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
&tcm_qla2xxx_tpg_attrib_attr_demo_mode_write_protect,
&tcm_qla2xxx_tpg_attrib_attr_prod_mode_write_protect,
&tcm_qla2xxx_tpg_attrib_attr_demo_mode_login_only,
+#ifdef CONFIG_TCM_QLA2XXX_DEBUG
+ &tcm_qla2xxx_tpg_attrib_attr_jam_host,
+#endif
NULL,
};
@@ -1023,6 +1010,7 @@ static struct se_portal_group *tcm_qla2xxx_make_tpg(
tpg->tpg_attrib.demo_mode_write_protect = 1;
tpg->tpg_attrib.cache_dynamic_acls = 1;
tpg->tpg_attrib.demo_mode_login_only = 1;
+ tpg->tpg_attrib.jam_host = 0;
ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);
if (ret < 0) {
@@ -1579,7 +1567,6 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id,
.find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id,
.clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
- .put_sess = tcm_qla2xxx_put_sess,
.shutdown_sess = tcm_qla2xxx_shutdown_sess,
};
@@ -1847,7 +1834,6 @@ static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
.tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
.check_stop_free = tcm_qla2xxx_check_stop_free,
.release_cmd = tcm_qla2xxx_release_cmd,
- .shutdown_session = tcm_qla2xxx_shutdown_session,
.close_session = tcm_qla2xxx_close_session,
.sess_get_index = tcm_qla2xxx_sess_get_index,
.sess_get_initiator_sid = NULL,
@@ -1890,7 +1876,6 @@ static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
.check_stop_free = tcm_qla2xxx_check_stop_free,
.release_cmd = tcm_qla2xxx_release_cmd,
- .shutdown_session = tcm_qla2xxx_shutdown_session,
.close_session = tcm_qla2xxx_close_session,
.sess_get_index = tcm_qla2xxx_sess_get_index,
.sess_get_initiator_sid = NULL,
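
With CONFIG_TCM_QLA2XXX_DEBUG enabled, the jammer is armed per target portal group through the new jam_host attribute; under the usual target configfs layout that is a write along the lines of "echo 1 > /sys/kernel/config/target/qla2xxx/<wwpn>/tpgt_1/attrib/jam_host" (wwpn and tpgt purely illustrative), after which tcm_qla2xxx_handle_cmd() returns 0 before calling target_submit_cmd(), silently discarding incoming commands on that TPG.
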
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 3bbf4cb6fd97..37e026a4823d 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -34,6 +34,7 @@ struct tcm_qla2xxx_tpg_attrib {
int prod_mode_write_protect;
int demo_mode_login_only;
int fabric_prot_type;
+ int jam_host;
};
struct tcm_qla2xxx_tpg {
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b2e332af0f51..c71344aebdbb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -821,9 +821,12 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
}
/*
- * If we finished all bytes in the request we are done now.
+ * special case: failed zero length commands always need to
+ * drop down into the retry code. Otherwise, if we finished
+ * all bytes in the request we are done now.
*/
- if (!scsi_end_request(req, error, good_bytes, 0))
+ if (!(blk_rq_bytes(req) == 0 && error) &&
+ !scsi_end_request(req, error, good_bytes, 0))
return;
/*
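
Rationale for the new special case: a zero-length command has blk_rq_bytes(req) == 0, so even on failure scsi_end_request() would treat the request as fully completed and the early return would fire, skipping the sense-decoding and retry handling further down; exempting failed zero-length commands keeps them on the normal error path.
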
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 428c03ef02b2..f459dff30512 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1398,11 +1398,15 @@ static int media_not_present(struct scsi_disk *sdkp,
**/
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
- struct scsi_disk *sdkp = scsi_disk(disk);
- struct scsi_device *sdp = sdkp->device;
+ struct scsi_disk *sdkp = scsi_disk_get(disk);
+ struct scsi_device *sdp;
struct scsi_sense_hdr *sshdr = NULL;
int retval;
+ if (!sdkp)
+ return 0;
+
+ sdp = sdkp->device;
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
/*
@@ -1459,6 +1463,7 @@ out:
kfree(sshdr);
retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
sdp->changed = 0;
+ scsi_disk_put(sdkp);
return retval;
}
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index bb00be8d1851..17a6387e20b5 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -567,7 +567,7 @@ static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
if (IS_ERR(txd)) {
ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
- dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd));
+ dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
msg->status = PTR_ERR(txd);
return;
}
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 5bac28a3944e..7c197d1a1231 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -66,8 +66,6 @@ source "drivers/staging/nvec/Kconfig"
source "drivers/staging/media/Kconfig"
-source "drivers/staging/rdma/Kconfig"
-
source "drivers/staging/android/Kconfig"
source "drivers/staging/board/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index a954242b0f2c..a470c7276142 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_FB_XGI) += xgifb/
obj-$(CONFIG_USB_EMXX) += emxx_udc/
obj-$(CONFIG_SPEAKUP) += speakup/
obj-$(CONFIG_MFD_NVEC) += nvec/
-obj-$(CONFIG_STAGING_RDMA) += rdma/
obj-$(CONFIG_ANDROID) += android/
obj-$(CONFIG_STAGING_BOARD) += board/
obj-$(CONFIG_LTE_GDM724X) += gdm724x/
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index ce1f949430f1..3f2f30b6542c 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -976,8 +976,8 @@ static inline __u64 ll_file_maxbytes(struct inode *inode)
}
/* llite/xattr.c */
-int ll_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags);
+int ll_setxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value, size_t size, int flags);
ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
const char *name, void *buffer, size_t size);
ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index ed4de04381c3..608014b0dbcd 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -211,11 +211,9 @@ int ll_setxattr_common(struct inode *inode, const char *name,
return 0;
}
-int ll_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+int ll_setxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value, size_t size, int flags)
{
- struct inode *inode = d_inode(dentry);
-
LASSERT(inode);
LASSERT(name);
diff --git a/drivers/staging/rdma/Kconfig b/drivers/staging/rdma/Kconfig
deleted file mode 100644
index f1f3ecadf0fb..000000000000
--- a/drivers/staging/rdma/Kconfig
+++ /dev/null
@@ -1,27 +0,0 @@
-menuconfig STAGING_RDMA
- tristate "RDMA staging drivers"
- depends on INFINIBAND
- depends on PCI || BROKEN
- depends on HAS_IOMEM
- depends on NET
- depends on INET
- default n
- ---help---
- This option allows you to select a number of RDMA drivers that
- fall into one of two categories: deprecated drivers being held
- here before finally being removed or new drivers that still need
- some work before being moved to the normal RDMA driver area.
-
- If you wish to work on these drivers, to help improve them, or
- to report problems you have with them, please use the
- linux-rdma@vger.kernel.org mailing list.
-
- If in doubt, say N here.
-
-
-# Please keep entries in alphabetic order
-if STAGING_RDMA
-
-source "drivers/staging/rdma/hfi1/Kconfig"
-
-endif
diff --git a/drivers/staging/rdma/Makefile b/drivers/staging/rdma/Makefile
deleted file mode 100644
index 8c7fc1de48a7..000000000000
--- a/drivers/staging/rdma/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# Entries for RDMA_STAGING tree
-obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
deleted file mode 100644
index 4c6f1d7d2eaf..000000000000
--- a/drivers/staging/rdma/hfi1/TODO
+++ /dev/null
@@ -1,6 +0,0 @@
-July, 2015
-
-- Remove unneeded file entries in sysfs
-- Remove software processing of IB protocol and place in library for use
- by qib, ipath (if still present), hfi1, and eventually soft-roce
-- Replace incorrect uAPI
diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c
deleted file mode 100644
index bb2409ad891a..000000000000
--- a/drivers/staging/rdma/hfi1/diag.c
+++ /dev/null
@@ -1,1925 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-/*
- * This file contains support for diagnostic functions. It is accessed by
- * opening the hfi1_diag device, normally minor number 129. Diagnostic use
- * of the chip may render the chip or board unusable until the driver
- * is unloaded, or in some cases, until the system is rebooted.
- *
- * Accesses to the chip through this interface are not similar to going
- * through the /sys/bus/pci resource mmap interface.
- */
-
-#include <linux/io.h>
-#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/vmalloc.h>
-#include <linux/export.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-#include <linux/module.h>
-#include <rdma/ib_smi.h>
-#include "hfi.h"
-#include "device.h"
-#include "common.h"
-#include "verbs_txreq.h"
-#include "trace.h"
-
-#undef pr_fmt
-#define pr_fmt(fmt) DRIVER_NAME ": " fmt
-#define snoop_dbg(fmt, ...) \
- hfi1_cdbg(SNOOP, fmt, ##__VA_ARGS__)
-
-/* Snoop option mask */
-#define SNOOP_DROP_SEND BIT(0)
-#define SNOOP_USE_METADATA BIT(1)
-#define SNOOP_SET_VL0TOVL15 BIT(2)
-
-static u8 snoop_flags;
-
-/*
- * Extract packet length from LRH header.
- * This is in Dwords so multiply by 4 to get size in bytes
- */
-#define HFI1_GET_PKT_LEN(x) (((be16_to_cpu((x)->lrh[2]) & 0xFFF)) << 2)
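-
-/*
- * Illustrative example (not part of the original driver): for an LRH
- * whose lrh[2] holds 0x0020 after the byte swap, the macro yields
- * (0x0020 & 0xFFF) << 2 == 0x80, i.e. a 32-dword, 128-byte packet.
- */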
-
-enum hfi1_filter_status {
- HFI1_FILTER_HIT,
- HFI1_FILTER_ERR,
- HFI1_FILTER_MISS
-};
-
-/* snoop processing functions */
-rhf_rcv_function_ptr snoop_rhf_rcv_functions[8] = {
- [RHF_RCV_TYPE_EXPECTED] = snoop_recv_handler,
- [RHF_RCV_TYPE_EAGER] = snoop_recv_handler,
- [RHF_RCV_TYPE_IB] = snoop_recv_handler,
- [RHF_RCV_TYPE_ERROR] = snoop_recv_handler,
- [RHF_RCV_TYPE_BYPASS] = snoop_recv_handler,
- [RHF_RCV_TYPE_INVALID5] = process_receive_invalid,
- [RHF_RCV_TYPE_INVALID6] = process_receive_invalid,
- [RHF_RCV_TYPE_INVALID7] = process_receive_invalid
-};
-
-/* Snoop packet structure */
-struct snoop_packet {
- struct list_head list;
- u32 total_len;
- u8 data[];
-};
-
-/* Do not make these an enum or it will blow up the capture_md */
-#define PKT_DIR_EGRESS 0x0
-#define PKT_DIR_INGRESS 0x1
-
-/* Packet capture metadata returned to the user with the packet. */
-struct capture_md {
- u8 port;
- u8 dir;
- u8 reserved[6];
- union {
- u64 pbc;
- u64 rhf;
- } u;
-};
-
-static atomic_t diagpkt_count = ATOMIC_INIT(0);
-static struct cdev diagpkt_cdev;
-static struct device *diagpkt_device;
-
-static ssize_t diagpkt_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off);
-
-static const struct file_operations diagpkt_file_ops = {
- .owner = THIS_MODULE,
- .write = diagpkt_write,
- .llseek = noop_llseek,
-};
-
-/*
- * This is used for communication with user space for snoop extended IOCTLs
- */
-struct hfi1_link_info {
- __be64 node_guid;
- u8 port_mode;
- u8 port_state;
- u16 link_speed_active;
- u16 link_width_active;
- u16 vl15_init;
- u8 port_number;
- /*
- * Add padding to make this a full IB SMP payload. Note: changing the
- * size of this structure will make the IOCTLs created with _IOWR
- * change.
- * Be sure to run tests on all IOCTLs when making changes to this
- * structure.
- */
- u8 res[47];
-};
-
-/*
- * This starts our ioctl sequence numbers *way* off from the ones
- * defined in ib_core.
- */
-#define SNOOP_CAPTURE_VERSION 0x1
-
-#define IB_IOCTL_MAGIC 0x1b /* See Documentation/ioctl-number.txt */
-#define HFI1_SNOOP_IOC_MAGIC IB_IOCTL_MAGIC
-#define HFI1_SNOOP_IOC_BASE_SEQ 0x80
-
-#define HFI1_SNOOP_IOCGETLINKSTATE \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ)
-#define HFI1_SNOOP_IOCSETLINKSTATE \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 1)
-#define HFI1_SNOOP_IOCCLEARQUEUE \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 2)
-#define HFI1_SNOOP_IOCCLEARFILTER \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 3)
-#define HFI1_SNOOP_IOCSETFILTER \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 4)
-#define HFI1_SNOOP_IOCGETVERSION \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 5)
-#define HFI1_SNOOP_IOCSET_OPTS \
- _IO(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 6)
-
-/*
- * These offsets +6/+7 could change, but these are already known and used
- * IOCTL numbers so don't change them without a good reason.
- */
-#define HFI1_SNOOP_IOCGETLINKSTATE_EXTRA \
- _IOWR(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 6, \
- struct hfi1_link_info)
-#define HFI1_SNOOP_IOCSETLINKSTATE_EXTRA \
- _IOWR(HFI1_SNOOP_IOC_MAGIC, HFI1_SNOOP_IOC_BASE_SEQ + 7, \
- struct hfi1_link_info)
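-
-/*
- * Illustrative user-space sketch (not part of the original driver; the
- * device path is hypothetical). The plain _IO ioctls above still pass a
- * pointer through 'arg': the driver writes the result with __put_user(),
- * so the version query round-trips an int. CAP_SYS_ADMIN is required.
- */
-#if 0	/* user-space example */
-#include <fcntl.h>
-#include <sys/ioctl.h>
-#include <unistd.h>
-
-static int example_get_snoop_version(void)
-{
- int fd = open("/dev/hfi1_diagpkt0", O_RDWR); /* hypothetical node */
- int version = -1;
-
- if (fd < 0)
- return -1;
- if (ioctl(fd, HFI1_SNOOP_IOCGETVERSION, &version) != 0)
- version = -1;
- close(fd);
- return version; /* expect SNOOP_CAPTURE_VERSION (0x1) */
-}
-#endif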
-
-static int hfi1_snoop_open(struct inode *in, struct file *fp);
-static ssize_t hfi1_snoop_read(struct file *fp, char __user *data,
- size_t pkt_len, loff_t *off);
-static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off);
-static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
-static unsigned int hfi1_snoop_poll(struct file *fp,
- struct poll_table_struct *wait);
-static int hfi1_snoop_release(struct inode *in, struct file *fp);
-
-struct hfi1_packet_filter_command {
- int opcode;
- int length;
- void *value_ptr;
-};
-
-/* Can't re-use PKT_DIR_*GRESS here because 0 means no packets for this */
-#define HFI1_SNOOP_INGRESS 0x1
-#define HFI1_SNOOP_EGRESS 0x2
-
-enum hfi1_packet_filter_opcodes {
- FILTER_BY_LID,
- FILTER_BY_DLID,
- FILTER_BY_MAD_MGMT_CLASS,
- FILTER_BY_QP_NUMBER,
- FILTER_BY_PKT_TYPE,
- FILTER_BY_SERVICE_LEVEL,
- FILTER_BY_PKEY,
- FILTER_BY_DIRECTION,
-};
-
-static const struct file_operations snoop_file_ops = {
- .owner = THIS_MODULE,
- .open = hfi1_snoop_open,
- .read = hfi1_snoop_read,
- .unlocked_ioctl = hfi1_ioctl,
- .poll = hfi1_snoop_poll,
- .write = hfi1_snoop_write,
- .release = hfi1_snoop_release
-};
-
-struct hfi1_filter_array {
- int (*filter)(void *, void *, void *);
-};
-
-static int hfi1_filter_lid(void *ibhdr, void *packet_data, void *value);
-static int hfi1_filter_dlid(void *ibhdr, void *packet_data, void *value);
-static int hfi1_filter_mad_mgmt_class(void *ibhdr, void *packet_data,
- void *value);
-static int hfi1_filter_qp_number(void *ibhdr, void *packet_data, void *value);
-static int hfi1_filter_ibpacket_type(void *ibhdr, void *packet_data,
- void *value);
-static int hfi1_filter_ib_service_level(void *ibhdr, void *packet_data,
- void *value);
-static int hfi1_filter_ib_pkey(void *ibhdr, void *packet_data, void *value);
-static int hfi1_filter_direction(void *ibhdr, void *packet_data, void *value);
-
-static const struct hfi1_filter_array hfi1_filters[] = {
- { hfi1_filter_lid },
- { hfi1_filter_dlid },
- { hfi1_filter_mad_mgmt_class },
- { hfi1_filter_qp_number },
- { hfi1_filter_ibpacket_type },
- { hfi1_filter_ib_service_level },
- { hfi1_filter_ib_pkey },
- { hfi1_filter_direction },
-};
-
-#define HFI1_MAX_FILTERS ARRAY_SIZE(hfi1_filters)
-#define HFI1_DIAG_MINOR_BASE 129
-
-static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name);
-
-int hfi1_diag_add(struct hfi1_devdata *dd)
-{
- char name[16];
- int ret = 0;
-
- snprintf(name, sizeof(name), "%s_diagpkt%d", class_name(),
- dd->unit);
- /*
- * Do this for each device as opposed to the normal diagpkt
- * interface which is one per host
- */
- ret = hfi1_snoop_add(dd, name);
- if (ret)
- dd_dev_err(dd, "Unable to init snoop/capture device");
-
- snprintf(name, sizeof(name), "%s_diagpkt", class_name());
- if (atomic_inc_return(&diagpkt_count) == 1) {
- ret = hfi1_cdev_init(HFI1_DIAGPKT_MINOR, name,
- &diagpkt_file_ops, &diagpkt_cdev,
- &diagpkt_device, false);
- }
-
- return ret;
-}
-
-/* this must be called w/ dd->snoop_in_lock held */
-static void drain_snoop_list(struct list_head *queue)
-{
- struct list_head *pos, *q;
- struct snoop_packet *packet;
-
- list_for_each_safe(pos, q, queue) {
- packet = list_entry(pos, struct snoop_packet, list);
- list_del(pos);
- kfree(packet);
- }
-}
-
-static void hfi1_snoop_remove(struct hfi1_devdata *dd)
-{
- unsigned long flags = 0;
-
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- drain_snoop_list(&dd->hfi1_snoop.queue);
- hfi1_cdev_cleanup(&dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev);
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
-}
-
-void hfi1_diag_remove(struct hfi1_devdata *dd)
-{
- hfi1_snoop_remove(dd);
- if (atomic_dec_and_test(&diagpkt_count))
- hfi1_cdev_cleanup(&diagpkt_cdev, &diagpkt_device);
- hfi1_cdev_cleanup(&dd->diag_cdev, &dd->diag_device);
-}
-
-/*
- * Allocated structure shared between the credit return mechanism and
- * diagpkt_send().
- */
-struct diagpkt_wait {
- struct completion credits_returned;
- int code;
- atomic_t count;
-};
-
-/*
- * When each side is finished with the structure, they call this.
- * The last user frees the structure.
- */
-static void put_diagpkt_wait(struct diagpkt_wait *wait)
-{
- if (atomic_dec_and_test(&wait->count))
- kfree(wait);
-}
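-
-/*
- * Usage sketch (not original code): diagpkt_send() sets the count to 2,
- * one reference for the sender and one for the credit-return callback,
- * so the structure survives even if the sender is interrupted before
- * diagpkt_complete() runs:
- *
- *	atomic_set(&wait->count, 2);
- *	...
- *	put_diagpkt_wait(wait);		// sender drops its reference
- *	// diagpkt_complete() drops the callback's reference
- */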
-
-/*
- * Callback from the credit return code. Set the complete, which
- * will let diagpkt_send() continue.
- */
-static void diagpkt_complete(void *arg, int code)
-{
- struct diagpkt_wait *wait = (struct diagpkt_wait *)arg;
-
- wait->code = code;
- complete(&wait->credits_returned);
- put_diagpkt_wait(wait); /* finished with the structure */
-}
-
-/**
- * diagpkt_send - send a packet
- * @dp: diag packet descriptor
- */
-static ssize_t diagpkt_send(struct diag_pkt *dp)
-{
- struct hfi1_devdata *dd;
- struct send_context *sc;
- struct pio_buf *pbuf;
- u32 *tmpbuf = NULL;
- ssize_t ret = 0;
- u32 pkt_len, total_len;
- pio_release_cb credit_cb = NULL;
- void *credit_arg = NULL;
- struct diagpkt_wait *wait = NULL;
- int trycount = 0;
-
- dd = hfi1_lookup(dp->unit);
- if (!dd || !(dd->flags & HFI1_PRESENT) || !dd->kregbase) {
- ret = -ENODEV;
- goto bail;
- }
- if (!(dd->flags & HFI1_INITTED)) {
- /* no hardware, freeze, etc. */
- ret = -ENODEV;
- goto bail;
- }
-
- if (dp->version != _DIAG_PKT_VERS) {
- dd_dev_err(dd, "Invalid version %u for diagpkt_write\n",
- dp->version);
- ret = -EINVAL;
- goto bail;
- }
-
- /* send count must be an exact number of dwords */
- if (dp->len & 3) {
- ret = -EINVAL;
- goto bail;
- }
-
- /* there is only port 1 */
- if (dp->port != 1) {
- ret = -EINVAL;
- goto bail;
- }
-
- /* need a valid context */
- if (dp->sw_index >= dd->num_send_contexts) {
- ret = -EINVAL;
- goto bail;
- }
- /* can only use kernel contexts */
- if (dd->send_contexts[dp->sw_index].type != SC_KERNEL &&
- dd->send_contexts[dp->sw_index].type != SC_VL15) {
- ret = -EINVAL;
- goto bail;
- }
- /* must be allocated */
- sc = dd->send_contexts[dp->sw_index].sc;
- if (!sc) {
- ret = -EINVAL;
- goto bail;
- }
- /* must be enabled */
- if (!(sc->flags & SCF_ENABLED)) {
- ret = -EINVAL;
- goto bail;
- }
-
- /* allocate a buffer and copy the data in */
- tmpbuf = vmalloc(dp->len);
- if (!tmpbuf) {
- ret = -ENOMEM;
- goto bail;
- }
-
- if (copy_from_user(tmpbuf,
- (const void __user *)(unsigned long)dp->data,
- dp->len)) {
- ret = -EFAULT;
- goto bail;
- }
-
- /*
- * pkt_len is how much data we have to write, in dwords; it includes
- * the header and data. total_len is the packet length in dwords plus
- * the PBC, and must not include the CRC.
- */
- pkt_len = dp->len >> 2;
- total_len = pkt_len + 2; /* PBC + packet */
-
- /* if 0, fill in a default */
- if (dp->pbc == 0) {
- struct hfi1_pportdata *ppd = dd->pport;
-
- hfi1_cdbg(PKT, "Generating PBC");
- dp->pbc = create_pbc(ppd, 0, 0, 0, total_len);
- } else {
- hfi1_cdbg(PKT, "Using passed in PBC");
- }
-
- hfi1_cdbg(PKT, "Egress PBC content is 0x%llx", dp->pbc);
-
- /*
- * The caller wants to wait until the packet is sent and to
- * check for errors. The best we can do is wait until
- * the buffer credits are returned and check if any packet
- * error has occurred. If there are any late errors, this
- * could miss them. If other senders generate an error, this
- * may pick it up instead. However, in general, it should
- * catch most errors.
- */
- if (dp->flags & F_DIAGPKT_WAIT) {
- /* always force a credit return */
- dp->pbc |= PBC_CREDIT_RETURN;
- /* turn on credit return interrupts */
- sc_add_credit_return_intr(sc);
- wait = kmalloc(sizeof(*wait), GFP_KERNEL);
- if (!wait) {
- ret = -ENOMEM;
- goto bail;
- }
- init_completion(&wait->credits_returned);
- atomic_set(&wait->count, 2);
- wait->code = PRC_OK;
-
- credit_cb = diagpkt_complete;
- credit_arg = wait;
- }
-
-retry:
- pbuf = sc_buffer_alloc(sc, total_len, credit_cb, credit_arg);
- if (!pbuf) {
- if (trycount == 0) {
- /* force a credit return and try again */
- sc_return_credits(sc);
- trycount = 1;
- goto retry;
- }
- /*
- * No send buffer means no credit callback. Undo
- * the wait set-up that was done above. We free wait
- * because the callback will never be called.
- */
- if (dp->flags & F_DIAGPKT_WAIT) {
- sc_del_credit_return_intr(sc);
- kfree(wait);
- wait = NULL;
- }
- ret = -ENOSPC;
- goto bail;
- }
-
- pio_copy(dd, pbuf, dp->pbc, tmpbuf, pkt_len);
- /* no flush needed as the HW knows the packet size */
-
- ret = sizeof(*dp);
-
- if (dp->flags & F_DIAGPKT_WAIT) {
- /* wait for credit return */
- ret = wait_for_completion_interruptible(
- &wait->credits_returned);
- /*
- * If the wait returns an error, the wait was interrupted,
- * e.g. with a ^C in the user program. The callback is
- * still pending. This is OK as the wait structure is
- * kmalloc'ed and the structure will free itself when
- * all users are done with it.
- *
- * A context disable occurs on a send context restart, so
- * include that in the list of errors below to check for.
- * NOTE: PRC_FILL_ERR is at best informational and cannot
- * be depended on.
- */
- if (!ret && (((wait->code & PRC_STATUS_ERR) ||
- (wait->code & PRC_FILL_ERR) ||
- (wait->code & PRC_SC_DISABLE))))
- ret = -EIO;
-
- put_diagpkt_wait(wait); /* finished with the structure */
- sc_del_credit_return_intr(sc);
- }
-
-bail:
- vfree(tmpbuf);
- return ret;
-}
-
-static ssize_t diagpkt_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off)
-{
- struct hfi1_devdata *dd;
- struct send_context *sc;
- u8 vl;
-
- struct diag_pkt dp;
-
- if (count != sizeof(dp))
- return -EINVAL;
-
- if (copy_from_user(&dp, data, sizeof(dp)))
- return -EFAULT;
-
- /*
- * The Send Context is derived from the PbcVL value
- * if PBC is populated
- */
- if (dp.pbc) {
- dd = hfi1_lookup(dp.unit);
- if (!dd)
- return -ENODEV;
- vl = (dp.pbc >> PBC_VL_SHIFT) & PBC_VL_MASK;
- sc = dd->vld[vl].sc;
- if (sc) {
- dp.sw_index = sc->sw_index;
- hfi1_cdbg(
- PKT,
- "Packet sent over VL %d via Send Context %u(%u)",
- vl, sc->sw_index, sc->hw_context);
- }
- }
-
- return diagpkt_send(&dp);
-}
-
-static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name)
-{
- int ret = 0;
-
- dd->hfi1_snoop.mode_flag = 0;
- spin_lock_init(&dd->hfi1_snoop.snoop_lock);
- INIT_LIST_HEAD(&dd->hfi1_snoop.queue);
- init_waitqueue_head(&dd->hfi1_snoop.waitq);
-
- ret = hfi1_cdev_init(HFI1_SNOOP_CAPTURE_BASE + dd->unit, name,
- &snoop_file_ops,
- &dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev,
- false);
-
- if (ret) {
- dd_dev_err(dd, "Couldn't create %s device: %d", name, ret);
- hfi1_cdev_cleanup(&dd->hfi1_snoop.cdev,
- &dd->hfi1_snoop.class_dev);
- }
-
- return ret;
-}
-
-static struct hfi1_devdata *hfi1_dd_from_sc_inode(struct inode *in)
-{
- int unit = iminor(in) - HFI1_SNOOP_CAPTURE_BASE;
- struct hfi1_devdata *dd;
-
- dd = hfi1_lookup(unit);
- return dd;
-}
-
-/* clear or restore send context integrity checks */
-static void adjust_integrity_checks(struct hfi1_devdata *dd)
-{
- struct send_context *sc;
- unsigned long sc_flags;
- int i;
-
- spin_lock_irqsave(&dd->sc_lock, sc_flags);
- for (i = 0; i < dd->num_send_contexts; i++) {
- int enable;
-
- sc = dd->send_contexts[i].sc;
-
- if (!sc)
- continue; /* not allocated */
-
- enable = likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) &&
- dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE;
-
- set_pio_integrity(sc);
-
- if (enable) /* take HFI_CAP_* flags into account */
- hfi1_init_ctxt(sc);
- }
- spin_unlock_irqrestore(&dd->sc_lock, sc_flags);
-}
-
-static int hfi1_snoop_open(struct inode *in, struct file *fp)
-{
- int ret;
- int mode_flag = 0;
- unsigned long flags = 0;
- struct hfi1_devdata *dd;
- struct list_head *queue;
-
- mutex_lock(&hfi1_mutex);
-
- dd = hfi1_dd_from_sc_inode(in);
- if (!dd) {
- ret = -ENODEV;
- goto bail;
- }
-
- /*
- * File mode determines snoop or capture. Some existing user
- * applications expect the capture device to be able to be opened RDWR
- * because they expect a dedicated capture device. For this reason we
- * support a module param to force capture mode even if the file open
- * mode matches snoop.
- */
- if ((fp->f_flags & O_ACCMODE) == O_RDONLY) {
- snoop_dbg("Capture Enabled");
- mode_flag = HFI1_PORT_CAPTURE_MODE;
- } else if ((fp->f_flags & O_ACCMODE) == O_RDWR) {
- snoop_dbg("Snoop Enabled");
- mode_flag = HFI1_PORT_SNOOP_MODE;
- } else {
- snoop_dbg("Invalid");
- ret = -EINVAL;
- goto bail;
- }
- queue = &dd->hfi1_snoop.queue;
-
- /*
- * We are not supporting snoop and capture at the same time.
- */
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- if (dd->hfi1_snoop.mode_flag) {
- ret = -EBUSY;
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- goto bail;
- }
-
- dd->hfi1_snoop.mode_flag = mode_flag;
- drain_snoop_list(queue);
-
- dd->hfi1_snoop.filter_callback = NULL;
- dd->hfi1_snoop.filter_value = NULL;
-
- /*
- * Send side packet integrity checks are not helpful when snooping so
- * disable and re-enable when we stop snooping.
- */
- if (mode_flag == HFI1_PORT_SNOOP_MODE) {
- /* clear after snoop mode is on */
- adjust_integrity_checks(dd); /* clear */
-
- /*
- * We also do not want to be doing the DLID LMC check for
- * ingressed packets.
- */
- dd->hfi1_snoop.dcc_cfg = read_csr(dd, DCC_CFG_PORT_CONFIG1);
- write_csr(dd, DCC_CFG_PORT_CONFIG1,
- (dd->hfi1_snoop.dcc_cfg >> 32) << 32);
- }
-
- /*
- * As soon as we set these function pointers the recv and send handlers
- * are active. This is a race condition so we must make sure to drain
- * the queue and init filter values above. Technically we should add
- * locking here but all that will happen is on recv a packet will get
- * allocated and get stuck on the snoop_lock before getting added to the
- * queue. Same goes for send.
- */
- dd->rhf_rcv_function_map = snoop_rhf_rcv_functions;
- dd->process_pio_send = snoop_send_pio_handler;
- dd->process_dma_send = snoop_send_pio_handler;
- dd->pio_inline_send = snoop_inline_pio_send;
-
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- ret = 0;
-
-bail:
- mutex_unlock(&hfi1_mutex);
-
- return ret;
-}
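-
-/*
- * Illustrative user-space sketch (not original code; the device path is
- * hypothetical): per the open-mode convention above, O_RDONLY selects
- * capture mode and O_RDWR selects snoop mode.
- */
-#if 0	/* user-space example */
-int cap_fd = open("/dev/hfi1_diagpkt0", O_RDONLY); /* capture mode */
-int snp_fd = open("/dev/hfi1_diagpkt0", O_RDWR); /* snoop mode; a second
-						  * open returns -EBUSY */
-#endif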
-
-static int hfi1_snoop_release(struct inode *in, struct file *fp)
-{
- unsigned long flags = 0;
- struct hfi1_devdata *dd;
- int mode_flag;
-
- dd = hfi1_dd_from_sc_inode(in);
- if (!dd)
- return -ENODEV;
-
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
-
- /* clear the snoop mode before re-adjusting send context CSRs */
- mode_flag = dd->hfi1_snoop.mode_flag;
- dd->hfi1_snoop.mode_flag = 0;
-
- /*
- * Drain the queue and clear the filters; we are done with them.
- * Don't forget to restore the packet integrity checks.
- */
- drain_snoop_list(&dd->hfi1_snoop.queue);
- if (mode_flag == HFI1_PORT_SNOOP_MODE) {
- /* restore after snoop mode is clear */
- adjust_integrity_checks(dd); /* restore */
-
- /*
- * Also reset the DCC_CFG_PORT_CONFIG1 register so that DLID
- * checking on incoming packets is performed again. Use the value
- * saved when opening the snoop device.
- */
- write_csr(dd, DCC_CFG_PORT_CONFIG1, dd->hfi1_snoop.dcc_cfg);
- }
-
- dd->hfi1_snoop.filter_callback = NULL;
- kfree(dd->hfi1_snoop.filter_value);
- dd->hfi1_snoop.filter_value = NULL;
-
- /*
- * User is done snooping and capturing, return control to the normal
- * handler. Re-enable SDMA handling.
- */
- dd->rhf_rcv_function_map = dd->normal_rhf_rcv_functions;
- dd->process_pio_send = hfi1_verbs_send_pio;
- dd->process_dma_send = hfi1_verbs_send_dma;
- dd->pio_inline_send = pio_copy;
-
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
-
- snoop_dbg("snoop/capture device released");
-
- return 0;
-}
-
-static unsigned int hfi1_snoop_poll(struct file *fp,
- struct poll_table_struct *wait)
-{
- int ret = 0;
- unsigned long flags = 0;
-
- struct hfi1_devdata *dd;
-
- dd = hfi1_dd_from_sc_inode(fp->f_inode);
- if (!dd)
- return -ENODEV;
-
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
-
- poll_wait(fp, &dd->hfi1_snoop.waitq, wait);
- if (!list_empty(&dd->hfi1_snoop.queue))
- ret |= POLLIN | POLLRDNORM;
-
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- return ret;
-}
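-
-/*
- * Illustrative user-space sketch (not original code; snoop_fd, buf and
- * nread are assumed to be set up elsewhere): since the queue signals
- * POLLIN | POLLRDNORM, a reader can block in poll() before reading.
- */
-#if 0	/* user-space example */
-#include <poll.h>
-#include <unistd.h>
-
-struct pollfd pfd = { .fd = snoop_fd, .events = POLLIN };
-
-if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
- nread = read(snoop_fd, buf, sizeof(buf)); /* one packet per read */
-#endif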
-
-static ssize_t hfi1_snoop_write(struct file *fp, const char __user *data,
- size_t count, loff_t *off)
-{
- struct diag_pkt dpkt;
- struct hfi1_devdata *dd;
- size_t ret;
- u8 byte_two, sl, sc5, sc4, vl, byte_one;
- struct send_context *sc;
- u32 len;
- u64 pbc;
- struct hfi1_ibport *ibp;
- struct hfi1_pportdata *ppd;
-
- dd = hfi1_dd_from_sc_inode(fp->f_inode);
- if (!dd)
- return -ENODEV;
-
- ppd = dd->pport;
- snoop_dbg("received %lu bytes from user", count);
-
- memset(&dpkt, 0, sizeof(struct diag_pkt));
- dpkt.version = _DIAG_PKT_VERS;
- dpkt.unit = dd->unit;
- dpkt.port = 1;
-
- if (likely(!(snoop_flags & SNOOP_USE_METADATA))) {
- /*
- * We need to generate the PBC and not let diagpkt_send do it,
- * to do this we need the VL and the length in dwords.
- * The VL can be determined by using the SL and looking up the
- * SC. Then the SC can be converted into VL. The exception to
- * this is those packets which are from an SMI queue pair.
- * Since we can't detect anything about the QP here we have to
- * rely on the SC. If it's 0xF then we assume it's SMI and
- * do not look at the SL.
- */
- if (copy_from_user(&byte_one, data, 1))
- return -EINVAL;
-
- if (copy_from_user(&byte_two, data + 1, 1))
- return -EINVAL;
-
- sc4 = (byte_one >> 4) & 0xf;
- if (sc4 == 0xF) {
- snoop_dbg("Detected VL15 packet ignoring SL in packet");
- vl = sc4;
- } else {
- sl = (byte_two >> 4) & 0xf;
- ibp = to_iport(&dd->verbs_dev.rdi.ibdev, 1);
- sc5 = ibp->sl_to_sc[sl];
- vl = sc_to_vlt(dd, sc5);
- if (vl != sc4) {
- snoop_dbg("VL %d does not match SC %d of packet",
- vl, sc4);
- return -EINVAL;
- }
- }
-
- sc = dd->vld[vl].sc; /* Look up the context based on VL */
- if (sc) {
- dpkt.sw_index = sc->sw_index;
- snoop_dbg("Sending on context %u(%u)", sc->sw_index,
- sc->hw_context);
- } else {
- snoop_dbg("Could not find context for vl %d", vl);
- return -EINVAL;
- }
-
- len = (count >> 2) + 2; /* Add in PBC */
- pbc = create_pbc(ppd, 0, 0, vl, len);
- } else {
- if (copy_from_user(&pbc, data, sizeof(pbc)))
- return -EINVAL;
- vl = (pbc >> PBC_VL_SHIFT) & PBC_VL_MASK;
- sc = dd->vld[vl].sc; /* Look up the context based on VL */
- if (sc) {
- dpkt.sw_index = sc->sw_index;
- } else {
- snoop_dbg("Could not find context for vl %d", vl);
- return -EINVAL;
- }
- data += sizeof(pbc);
- count -= sizeof(pbc);
- }
- dpkt.len = count;
- dpkt.data = (unsigned long)data;
-
- snoop_dbg("PBC: vl=0x%llx Length=0x%llx",
- (pbc >> 12) & 0xf,
- (pbc & 0xfff));
-
- dpkt.pbc = pbc;
- ret = diagpkt_send(&dpkt);
- /*
- * diagpkt_send only returns number of bytes in the diagpkt so patch
- * that up here before returning.
- */
- if (ret == sizeof(dpkt))
- return count;
-
- return ret;
-}
-
-static ssize_t hfi1_snoop_read(struct file *fp, char __user *data,
- size_t pkt_len, loff_t *off)
-{
- ssize_t ret = 0;
- unsigned long flags = 0;
- struct snoop_packet *packet = NULL;
- struct hfi1_devdata *dd;
-
- dd = hfi1_dd_from_sc_inode(fp->f_inode);
- if (!dd)
- return -ENODEV;
-
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
-
- while (list_empty(&dd->hfi1_snoop.queue)) {
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
-
- if (fp->f_flags & O_NONBLOCK)
- return -EAGAIN;
-
- if (wait_event_interruptible(
- dd->hfi1_snoop.waitq,
- !list_empty(&dd->hfi1_snoop.queue)))
- return -EINTR;
-
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- }
-
- if (!list_empty(&dd->hfi1_snoop.queue)) {
- packet = list_entry(dd->hfi1_snoop.queue.next,
- struct snoop_packet, list);
- list_del(&packet->list);
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- if (pkt_len >= packet->total_len) {
- if (copy_to_user(data, packet->data,
- packet->total_len))
- ret = -EFAULT;
- else
- ret = packet->total_len;
- } else {
- ret = -EINVAL;
- }
-
- kfree(packet);
- } else {
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- }
-
- return ret;
-}
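-
-/*
- * Illustrative user-space sketch (not original code; assumes snoop_fd
- * and buf exist, and struct capture_md as defined earlier in this
- * file): when metadata is enabled, each read() returns a capture_md
- * followed by the raw packet, so a consumer peels the metadata off the
- * front.
- */
-#if 0	/* user-space example */
-#include <stdio.h>
-#include <string.h>
-#include <unistd.h>
-
-ssize_t n = read(snoop_fd, buf, sizeof(buf));
-
-if (n >= (ssize_t)sizeof(struct capture_md)) {
- struct capture_md md;
-
- memcpy(&md, buf, sizeof(md));
- printf("port %u dir %s payload %zd bytes\n", md.port,
-        md.dir == PKT_DIR_INGRESS ? "ingress" : "egress",
-        n - (ssize_t)sizeof(md));
-}
-#endif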
-
-/**
- * hfi1_assign_snoop_link_credits -- Set up credits for VL15 and others
- * @ppd : ptr to hfi1 port data
- * @value : options from user space
- *
- * Assumes the rest of the CM credit registers are zero from a
- * previous global or credit reset.
- * Leave the shared count at zero for both global and all VLs; in
- * snoop mode we ideally don't use shared credits.
- * Reserve 8.5 KB for VL15; if the total credits are less than
- * 8.5 KB, return an error.
- * Divide the rest of the credits across VL0 to VL7, and if any of
- * these levels would get fewer than 34 credits (at least 2048 + 128
- * bytes), return an error.
- * The credit registers will be reset to zero on link negotiation or link up
- * so this function should be activated from user space only if the port has
- * gone past link negotiation and link up.
- *
- * Return -- 0 if successful else error condition
- *
- */
-static long hfi1_assign_snoop_link_credits(struct hfi1_pportdata *ppd,
- int value)
-{
-#define OPA_MIN_PER_VL_CREDITS 34 /* 2048 + 128 bytes */
- struct buffer_control t;
- int i;
- struct hfi1_devdata *dd = ppd->dd;
- u16 total_credits = (value >> 16) & 0xffff;
- u16 vl15_credits = dd->vl15_init / 2;
- u16 per_vl_credits;
- __be16 be_per_vl_credits;
-
- if (!(ppd->host_link_state & HLS_UP))
- goto err_exit;
- if (total_credits < vl15_credits)
- goto err_exit;
-
- per_vl_credits = (total_credits - vl15_credits) / TXE_NUM_DATA_VL;
-
- if (per_vl_credits < OPA_MIN_PER_VL_CREDITS)
- goto err_exit;
-
- memset(&t, 0, sizeof(t));
- be_per_vl_credits = cpu_to_be16(per_vl_credits);
-
- for (i = 0; i < TXE_NUM_DATA_VL; i++)
- t.vl[i].dedicated = be_per_vl_credits;
-
- t.vl[15].dedicated = cpu_to_be16(vl15_credits);
- return set_buffer_control(ppd, &t);
-
-err_exit:
- snoop_dbg("port_state = 0x%x, total_credits = %d, vl15_credits = %d",
- ppd->host_link_state, total_credits, vl15_credits);
-
- return -EINVAL;
-}
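-
-/*
- * Worked example with illustrative numbers (a 64-byte credit is implied
- * by the 34-credit floor above: 34 * 64 = 2048 + 128 bytes): if user
- * space passes total_credits = 1160 and vl15_credits works out to 136
- * (8.5 KB at 64 bytes per credit, from dd->vl15_init / 2), the
- * remaining 1024 credits divide across the eight data VLs (VL0 to VL7)
- * as 128 dedicated credits each, well above the 34-credit minimum.
- */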
-
-static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
-{
- struct hfi1_devdata *dd;
- void *filter_value = NULL;
- long ret = 0;
- int value = 0;
- u8 phys_state = 0;
- u8 link_state = 0;
- u16 dev_state = 0;
- unsigned long flags = 0;
- unsigned long *argp = NULL;
- struct hfi1_packet_filter_command filter_cmd = {0};
- int mode_flag = 0;
- struct hfi1_pportdata *ppd = NULL;
- unsigned int index;
- struct hfi1_link_info link_info;
- int read_cmd, write_cmd, read_ok, write_ok;
-
- dd = hfi1_dd_from_sc_inode(fp->f_inode);
- if (!dd)
- return -ENODEV;
-
- mode_flag = dd->hfi1_snoop.mode_flag;
- read_cmd = _IOC_DIR(cmd) & _IOC_READ;
- write_cmd = _IOC_DIR(cmd) & _IOC_WRITE;
- write_ok = access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd));
- read_ok = access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd));
-
- if ((read_cmd && !write_ok) || (write_cmd && !read_ok))
- return -EFAULT;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- if ((mode_flag & HFI1_PORT_CAPTURE_MODE) &&
- (cmd != HFI1_SNOOP_IOCCLEARQUEUE) &&
- (cmd != HFI1_SNOOP_IOCCLEARFILTER) &&
- (cmd != HFI1_SNOOP_IOCSETFILTER))
- /* Capture devices are allowed only 3 operations:
- * 1. Clear capture queue
- * 2. Clear capture filter
- * 3. Set capture filter
- * Others are invalid.
- */
- return -EINVAL;
-
- switch (cmd) {
- case HFI1_SNOOP_IOCSETLINKSTATE_EXTRA:
- memset(&link_info, 0, sizeof(link_info));
-
- if (copy_from_user(&link_info,
- (struct hfi1_link_info __user *)arg,
- sizeof(link_info)))
- return -EFAULT;
-
- value = link_info.port_state;
- index = link_info.port_number;
- if (index > dd->num_pports - 1)
- return -EINVAL;
-
- ppd = &dd->pport[index];
- if (!ppd)
- return -EINVAL;
-
- /* What we want to transition to */
- phys_state = (value >> 4) & 0xF;
- link_state = value & 0xF;
- snoop_dbg("Setting link state 0x%x", value);
-
- switch (link_state) {
- case IB_PORT_NOP:
- if (phys_state == 0)
- break;
- /* fall through */
- case IB_PORT_DOWN:
- switch (phys_state) {
- case 0:
- dev_state = HLS_DN_DOWNDEF;
- break;
- case 2:
- dev_state = HLS_DN_POLL;
- break;
- case 3:
- dev_state = HLS_DN_DISABLE;
- break;
- default:
- return -EINVAL;
- }
- ret = set_link_state(ppd, dev_state);
- break;
- case IB_PORT_ARMED:
- ret = set_link_state(ppd, HLS_UP_ARMED);
- if (!ret)
- send_idle_sma(dd, SMA_IDLE_ARM);
- break;
- case IB_PORT_ACTIVE:
- ret = set_link_state(ppd, HLS_UP_ACTIVE);
- if (!ret)
- send_idle_sma(dd, SMA_IDLE_ACTIVE);
- break;
- default:
- return -EINVAL;
- }
-
- if (ret)
- break;
- /* fall through */
- case HFI1_SNOOP_IOCGETLINKSTATE:
- case HFI1_SNOOP_IOCGETLINKSTATE_EXTRA:
- if (cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) {
- memset(&link_info, 0, sizeof(link_info));
- if (copy_from_user(&link_info,
- (struct hfi1_link_info __user *)arg,
- sizeof(link_info)))
- return -EFAULT;
- index = link_info.port_number;
- } else {
- ret = __get_user(index, (int __user *)arg);
- if (ret != 0)
- break;
- }
-
- if (index > dd->num_pports - 1)
- return -EINVAL;
-
- ppd = &dd->pport[index];
- if (!ppd)
- return -EINVAL;
-
- value = hfi1_ibphys_portstate(ppd);
- value <<= 4;
- value |= driver_lstate(ppd);
-
- snoop_dbg("Link port | Link State: %d", value);
-
- if ((cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) ||
- (cmd == HFI1_SNOOP_IOCSETLINKSTATE_EXTRA)) {
- link_info.port_state = value;
- link_info.node_guid = cpu_to_be64(ppd->guid);
- link_info.link_speed_active =
- ppd->link_speed_active;
- link_info.link_width_active =
- ppd->link_width_active;
- if (copy_to_user((struct hfi1_link_info __user *)arg,
- &link_info, sizeof(link_info)))
- return -EFAULT;
- } else {
- ret = __put_user(value, (int __user *)arg);
- }
- break;
-
- case HFI1_SNOOP_IOCCLEARQUEUE:
- snoop_dbg("Clearing snoop queue");
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- drain_snoop_list(&dd->hfi1_snoop.queue);
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- break;
-
- case HFI1_SNOOP_IOCCLEARFILTER:
- snoop_dbg("Clearing filter");
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- if (dd->hfi1_snoop.filter_callback) {
- /* Drain packets first */
- drain_snoop_list(&dd->hfi1_snoop.queue);
- dd->hfi1_snoop.filter_callback = NULL;
- }
- kfree(dd->hfi1_snoop.filter_value);
- dd->hfi1_snoop.filter_value = NULL;
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- break;
-
- case HFI1_SNOOP_IOCSETFILTER:
- snoop_dbg("Setting filter");
- /* just copy command structure */
- argp = (unsigned long *)arg;
- if (copy_from_user(&filter_cmd, (void __user *)argp,
- sizeof(filter_cmd)))
- return -EFAULT;
-
- if (filter_cmd.opcode >= HFI1_MAX_FILTERS) {
- pr_alert("Invalid opcode in request\n");
- return -EINVAL;
- }
-
- snoop_dbg("Opcode %d Len %d Ptr %p",
- filter_cmd.opcode, filter_cmd.length,
- filter_cmd.value_ptr);
-
- filter_value = kcalloc(filter_cmd.length, sizeof(u8),
- GFP_KERNEL);
- if (!filter_value)
- return -ENOMEM;
-
- /* copy remaining data from userspace */
- if (copy_from_user((u8 *)filter_value,
- (void __user *)filter_cmd.value_ptr,
- filter_cmd.length)) {
- kfree(filter_value);
- return -EFAULT;
- }
- /* Drain packets first */
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- drain_snoop_list(&dd->hfi1_snoop.queue);
- dd->hfi1_snoop.filter_callback =
- hfi1_filters[filter_cmd.opcode].filter;
- /* just in case we see back to back sets */
- kfree(dd->hfi1_snoop.filter_value);
- dd->hfi1_snoop.filter_value = filter_value;
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- break;
- case HFI1_SNOOP_IOCGETVERSION:
- value = SNOOP_CAPTURE_VERSION;
- snoop_dbg("Getting version: %d", value);
- ret = __put_user(value, (int __user *)arg);
- break;
- case HFI1_SNOOP_IOCSET_OPTS:
- snoop_flags = 0;
- ret = __get_user(value, (int __user *)arg);
- if (ret != 0)
- break;
-
- snoop_dbg("Setting snoop option %d", value);
- if (value & SNOOP_DROP_SEND)
- snoop_flags |= SNOOP_DROP_SEND;
- if (value & SNOOP_USE_METADATA)
- snoop_flags |= SNOOP_USE_METADATA;
- if (value & (SNOOP_SET_VL0TOVL15)) {
- ppd = &dd->pport[0]; /* first port will do */
- ret = hfi1_assign_snoop_link_credits(ppd, value);
- }
- break;
- default:
- return -ENOTTY;
- }
-
- return ret;
-}
-
-static void snoop_list_add_tail(struct snoop_packet *packet,
- struct hfi1_devdata *dd)
-{
- unsigned long flags = 0;
-
- spin_lock_irqsave(&dd->hfi1_snoop.snoop_lock, flags);
- if (likely((dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) ||
- (dd->hfi1_snoop.mode_flag & HFI1_PORT_CAPTURE_MODE))) {
- list_add_tail(&packet->list, &dd->hfi1_snoop.queue);
- snoop_dbg("Added packet to list");
- }
-
- /*
- * Technically we could have closed the snoop device while waiting
- * on the above lock and it is gone now. The snoop mode_flag will
- * prevent us from adding the packet to the queue though.
- */
-
- spin_unlock_irqrestore(&dd->hfi1_snoop.snoop_lock, flags);
- wake_up_interruptible(&dd->hfi1_snoop.waitq);
-}
-
-static inline int hfi1_filter_check(void *val, const char *msg)
-{
- if (!val) {
- snoop_dbg("Error invalid %s value for filter", msg);
- return HFI1_FILTER_ERR;
- }
- return 0;
-}
-
-static int hfi1_filter_lid(void *ibhdr, void *packet_data, void *value)
-{
- struct hfi1_ib_header *hdr;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- if (*((u16 *)value) == be16_to_cpu(hdr->lrh[3])) /* matches slid */
- return HFI1_FILTER_HIT; /* matched */
-
- return HFI1_FILTER_MISS; /* Not matched */
-}
-
-static int hfi1_filter_dlid(void *ibhdr, void *packet_data, void *value)
-{
- struct hfi1_ib_header *hdr;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- if (*((u16 *)value) == be16_to_cpu(hdr->lrh[1]))
- return HFI1_FILTER_HIT;
-
- return HFI1_FILTER_MISS;
-}
-
-/* Not valid for outgoing packets; the send handler passes NULL for data */
-static int hfi1_filter_mad_mgmt_class(void *ibhdr, void *packet_data,
- void *value)
-{
- struct hfi1_ib_header *hdr;
- struct hfi1_other_headers *ohdr = NULL;
- struct ib_smp *smp = NULL;
- u32 qpn = 0;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(packet_data, "packet_data");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- /* Check for GRH */
- if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth; /* LRH + BTH + DETH */
- else
- ohdr = &hdr->u.l.oth; /* LRH + GRH + BTH + DETH */
-
- qpn = be32_to_cpu(ohdr->bth[1]) & 0x00FFFFFF;
- if (qpn <= 1) {
- smp = (struct ib_smp *)packet_data;
- if (*((u8 *)value) == smp->mgmt_class)
- return HFI1_FILTER_HIT;
- else
- return HFI1_FILTER_MISS;
- }
- return HFI1_FILTER_ERR;
-}
-
-static int hfi1_filter_qp_number(void *ibhdr, void *packet_data, void *value)
-{
- struct hfi1_ib_header *hdr;
- struct hfi1_other_headers *ohdr = NULL;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- /* Check for GRH */
- if ((be16_to_cpu(hdr->lrh[0]) & 3) == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth; /* LRH + BTH + DETH */
- else
- ohdr = &hdr->u.l.oth; /* LRH + GRH + BTH + DETH */
- if (*((u32 *)value) == (be32_to_cpu(ohdr->bth[1]) & 0x00FFFFFF))
- return HFI1_FILTER_HIT;
-
- return HFI1_FILTER_MISS;
-}
-
-static int hfi1_filter_ibpacket_type(void *ibhdr, void *packet_data,
- void *value)
-{
- u32 lnh = 0;
- u8 opcode = 0;
- struct hfi1_ib_header *hdr;
- struct hfi1_other_headers *ohdr = NULL;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- lnh = (be16_to_cpu(hdr->lrh[0]) & 3);
-
- if (lnh == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else if (lnh == HFI1_LRH_GRH)
- ohdr = &hdr->u.l.oth;
- else
- return HFI1_FILTER_ERR;
-
- opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
-
- if (*((u8 *)value) == ((opcode >> 5) & 0x7))
- return HFI1_FILTER_HIT;
-
- return HFI1_FILTER_MISS;
-}
-
-static int hfi1_filter_ib_service_level(void *ibhdr, void *packet_data,
- void *value)
-{
- struct hfi1_ib_header *hdr;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- if ((*((u8 *)value)) == ((be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF))
- return HFI1_FILTER_HIT;
-
- return HFI1_FILTER_MISS;
-}
-
-static int hfi1_filter_ib_pkey(void *ibhdr, void *packet_data, void *value)
-{
- u32 lnh = 0;
- struct hfi1_ib_header *hdr;
- struct hfi1_other_headers *ohdr = NULL;
- int ret;
-
- ret = hfi1_filter_check(ibhdr, "header");
- if (ret)
- return ret;
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- hdr = (struct hfi1_ib_header *)ibhdr;
-
- lnh = (be16_to_cpu(hdr->lrh[0]) & 3);
- if (lnh == HFI1_LRH_BTH)
- ohdr = &hdr->u.oth;
- else if (lnh == HFI1_LRH_GRH)
- ohdr = &hdr->u.l.oth;
- else
- return HFI1_FILTER_ERR;
-
- /* A P_key is a 16-bit entity whose top-most bit indicates the
- * type of membership: 0 for limited and 1 for full. Limited
- * members cannot accept information from other limited members,
- * but communication is allowed between every other combination
- * of membership. Hence we omit the top-most bit when comparing
- * during filtering.
- */
-
- if ((*(u16 *)value & 0x7FFF) ==
- ((be32_to_cpu(ohdr->bth[0])) & 0x7FFF))
- return HFI1_FILTER_HIT;
-
- return HFI1_FILTER_MISS;
-}
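-
-/*
- * Minimal standalone sketch of the comparison above (not original
- * code): masking with 0x7FFF drops the membership bit, so a limited
- * P_key such as 0x0001 matches its full counterpart 0x8001.
- */
-static inline int example_pkey_match(u16 a, u16 b)
-{
- return (a & 0x7FFF) == (b & 0x7FFF); /* 0x8001 matches 0x0001 */
-}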
-
-/*
- * If packet_data is NULL then this is coming from one of the send functions.
- * Thus we know if its an ingressed or egressed packet.
- */
-static int hfi1_filter_direction(void *ibhdr, void *packet_data, void *value)
-{
- u8 user_dir = *(u8 *)value;
- int ret;
-
- ret = hfi1_filter_check(value, "user");
- if (ret)
- return ret;
-
- if (packet_data) {
- /* Incoming packet */
- if (user_dir & HFI1_SNOOP_INGRESS)
- return HFI1_FILTER_HIT;
- } else {
- /* Outgoing packet */
- if (user_dir & HFI1_SNOOP_EGRESS)
- return HFI1_FILTER_HIT;
- }
-
- return HFI1_FILTER_MISS;
-}
-
-/*
- * Allocate a snoop packet. This is the structure stored in the ring
- * buffer, not to be confused with an hfi packet type.
- */
-static struct snoop_packet *allocate_snoop_packet(u32 hdr_len,
- u32 data_len,
- u32 md_len)
-{
- struct snoop_packet *packet;
-
- packet = kzalloc(sizeof(*packet) + hdr_len + data_len
- + md_len,
- GFP_ATOMIC | __GFP_NOWARN);
- if (likely(packet))
- INIT_LIST_HEAD(&packet->list);
-
- return packet;
-}
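-
-/*
- * Sizing sketch (not original code): struct snoop_packet ends in a
- * flexible array member, so a single allocation carries the list
- * linkage, the length, and the [metadata][header][data] payload
- * contiguously in packet->data[].
- */
-static inline size_t example_snoop_alloc_size(u32 hdr_len, u32 data_len,
-					      u32 md_len)
-{
- /* mirrors the kzalloc() above */
- return sizeof(struct snoop_packet) + hdr_len + data_len + md_len;
-}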
-
-/*
- * Instead of having snoop and capture code intermixed with the recv
- * functions (both the interrupt handler and hfi1_ib_rcv()), we hijack the
- * call and land in here when snoop/capture is enabled; if not enabled the
- * call goes through as before. This gives us a single point to constrain
- * all of the snoop recv logic. There is nothing special that needs to
- * happen for bypass packets. This routine should not try to look into the
- * packet; it just copies it. There is no guarantee for filters when it
- * comes to bypass packets as there is no specific support. Bottom line:
- * this routine does not even know what a bypass packet is.
- */
-int snoop_recv_handler(struct hfi1_packet *packet)
-{
- struct hfi1_pportdata *ppd = packet->rcd->ppd;
- struct hfi1_ib_header *hdr = packet->hdr;
- int header_size = packet->hlen;
- void *data = packet->ebuf;
- u32 tlen = packet->tlen;
- struct snoop_packet *s_packet = NULL;
- int ret;
- int snoop_mode = 0;
- u32 md_len = 0;
- struct capture_md md;
-
- snoop_dbg("PACKET IN: hdr size %d tlen %d data %p", header_size, tlen,
- data);
-
- trace_snoop_capture(ppd->dd, header_size, hdr, tlen - header_size,
- data);
-
- if (!ppd->dd->hfi1_snoop.filter_callback) {
- snoop_dbg("filter not set");
- ret = HFI1_FILTER_HIT;
- } else {
- ret = ppd->dd->hfi1_snoop.filter_callback(hdr, data,
- ppd->dd->hfi1_snoop.filter_value);
- }
-
- switch (ret) {
- case HFI1_FILTER_ERR:
- snoop_dbg("Error in filter call");
- break;
- case HFI1_FILTER_MISS:
- snoop_dbg("Filter Miss");
- break;
- case HFI1_FILTER_HIT:
-
- if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE)
- snoop_mode = 1;
- if ((snoop_mode == 0) ||
- unlikely(snoop_flags & SNOOP_USE_METADATA))
- md_len = sizeof(struct capture_md);
-
- s_packet = allocate_snoop_packet(header_size,
- tlen - header_size,
- md_len);
-
- if (unlikely(!s_packet)) {
- dd_dev_warn_ratelimited(ppd->dd, "Unable to allocate snoop/capture packet\n");
- break;
- }
-
- if (md_len > 0) {
- memset(&md, 0, sizeof(struct capture_md));
- md.port = 1;
- md.dir = PKT_DIR_INGRESS;
- md.u.rhf = packet->rhf;
- memcpy(s_packet->data, &md, md_len);
- }
-
- /* We should always have a header */
- if (hdr) {
- memcpy(s_packet->data + md_len, hdr, header_size);
- } else {
- dd_dev_err(ppd->dd, "Unable to copy header to snoop/capture packet\n");
- kfree(s_packet);
- break;
- }
-
- /*
- * Packets with no data are possible. If there is no data, we still
- * need to take care of the last 4 bytes, which are normally
- * included with data buffers and are counted in tlen. Since we
- * kzalloc the buffer we do not need to set any values, but if we
- * decide not to use kzalloc we should zero them.
- */
- if (data)
- memcpy(s_packet->data + header_size + md_len, data,
- tlen - header_size);
-
- s_packet->total_len = tlen + md_len;
- snoop_list_add_tail(s_packet, ppd->dd);
-
- /*
- * If we are snooping the packet, not capturing, then throw it
- * away after adding it to the list.
- */
- snoop_dbg("Capturing packet");
- if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE) {
- snoop_dbg("Throwing packet away");
- /*
- * If we are dropping the packet we still may need to
- * handle the case where error flags are set, this is
- * normally done by the type specific handler but that
- * won't be called in this case.
- */
- if (unlikely(rhf_err_flags(packet->rhf)))
- handle_eflags(packet);
-
- /* throw the packet on the floor */
- return RHF_RCV_CONTINUE;
- }
- break;
- default:
- break;
- }
-
- /*
- * We do not care what type of packet came in here - just pass it off
- * to the normal handler.
- */
- return ppd->dd->normal_rhf_rcv_functions[rhf_rcv_type(packet->rhf)]
- (packet);
-}
-
-/*
- * Handle snooping and capturing packets when sdma is being used.
- */
-int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
- u64 pbc)
-{
- pr_alert("Snooping/Capture of Send DMA Packets Is Not Supported!\n");
- snoop_dbg("Unsupported Operation");
- return hfi1_verbs_send_dma(qp, ps, 0);
-}
-
-/*
- * Handle snooping and capturing packets when pio is being used. Does not handle
- * bypass packets. The only way to send a bypass packet currently is to use the
- * diagpkt interface. When that interface is enabled, snoop/capture is not.
- */
-int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
- u64 pbc)
-{
- u32 hdrwords = qp->s_hdrwords;
- struct rvt_sge_state *ss = qp->s_cur_sge;
- u32 len = qp->s_cur_size;
- u32 dwords = (len + 3) >> 2;
- u32 plen = hdrwords + dwords + 2; /* includes pbc */
- struct hfi1_pportdata *ppd = ps->ppd;
- struct snoop_packet *s_packet = NULL;
- u32 *hdr = (u32 *)&ps->s_txreq->phdr.hdr;
- u32 length = 0;
- struct rvt_sge_state temp_ss;
- void *data = NULL;
- void *data_start = NULL;
- int ret;
- int snoop_mode = 0;
- int md_len = 0;
- struct capture_md md;
- u32 vl;
- u32 hdr_len = hdrwords << 2;
- u32 tlen = HFI1_GET_PKT_LEN(&ps->s_txreq->phdr.hdr);
-
- md.u.pbc = 0;
-
- snoop_dbg("PACKET OUT: hdrword %u len %u plen %u dwords %u tlen %u",
- hdrwords, len, plen, dwords, tlen);
- if (ppd->dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE)
- snoop_mode = 1;
- if ((snoop_mode == 0) ||
- unlikely(snoop_flags & SNOOP_USE_METADATA))
- md_len = sizeof(struct capture_md);
-
- /* not using ss->total_len as arg 2 b/c that does not count CRC */
- s_packet = allocate_snoop_packet(hdr_len, tlen - hdr_len, md_len);
-
- if (unlikely(!s_packet)) {
- dd_dev_warn_ratelimited(ppd->dd, "Unable to allocate snoop/capture packet\n");
- goto out;
- }
-
- s_packet->total_len = tlen + md_len;
-
- if (md_len > 0) {
- memset(&md, 0, sizeof(struct capture_md));
- md.port = 1;
- md.dir = PKT_DIR_EGRESS;
- if (likely(pbc == 0)) {
- vl = be16_to_cpu(ps->s_txreq->phdr.hdr.lrh[0]) >> 12;
- md.u.pbc = create_pbc(ppd, 0, qp->s_srate, vl, plen);
- } else {
- md.u.pbc = 0;
- }
- memcpy(s_packet->data, &md, md_len);
- } else {
- md.u.pbc = pbc;
- }
-
- /* Copy header */
- if (likely(hdr)) {
- memcpy(s_packet->data + md_len, hdr, hdr_len);
- } else {
- dd_dev_err(ppd->dd,
- "Unable to copy header to snoop/capture packet\n");
- kfree(s_packet);
- goto out;
- }
-
- if (ss) {
- data = s_packet->data + hdr_len + md_len;
- data_start = data;
-
- /*
- * Copy SGE State
- * The update_sge() function below will not modify the
- * individual SGEs in the array. It will make a copy each time
- * and operate on that. So we only need to copy this instance
- * and it won't impact PIO.
- */
- temp_ss = *ss;
- length = len;
-
- snoop_dbg("Need to copy %d bytes", length);
- while (length) {
- void *addr = temp_ss.sge.vaddr;
- u32 slen = temp_ss.sge.length;
-
- if (slen > length) {
- slen = length;
- snoop_dbg("slen %d > len %d", slen, length);
- }
- snoop_dbg("copy %d to %p", slen, addr);
- memcpy(data, addr, slen);
- update_sge(&temp_ss, slen);
- length -= slen;
- data += slen;
- snoop_dbg("data is now %p bytes left %d", data, length);
- }
- snoop_dbg("Completed SGE copy");
- }
-
- /*
- * Why do the filter check down here? Because the event tracing has its
- * own filtering and we need to have walked the SGE list.
- */
- if (!ppd->dd->hfi1_snoop.filter_callback) {
- snoop_dbg("filter not set\n");
- ret = HFI1_FILTER_HIT;
- } else {
- ret = ppd->dd->hfi1_snoop.filter_callback(
- &ps->s_txreq->phdr.hdr,
- NULL,
- ppd->dd->hfi1_snoop.filter_value);
- }
-
- switch (ret) {
- case HFI1_FILTER_ERR:
- snoop_dbg("Error in filter call");
- /* fall through */
- case HFI1_FILTER_MISS:
- snoop_dbg("Filter Miss");
- kfree(s_packet);
- break;
- case HFI1_FILTER_HIT:
- snoop_dbg("Capturing packet");
- snoop_list_add_tail(s_packet, ppd->dd);
-
- if (unlikely((snoop_flags & SNOOP_DROP_SEND) &&
- (ppd->dd->hfi1_snoop.mode_flag &
- HFI1_PORT_SNOOP_MODE))) {
- unsigned long flags;
-
- snoop_dbg("Dropping packet");
- if (qp->s_wqe) {
- spin_lock_irqsave(&qp->s_lock, flags);
- hfi1_send_complete(
- qp,
- qp->s_wqe,
- IB_WC_SUCCESS);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- } else if (qp->ibqp.qp_type == IB_QPT_RC) {
- spin_lock_irqsave(&qp->s_lock, flags);
- hfi1_rc_send_complete(qp,
- &ps->s_txreq->phdr.hdr);
- spin_unlock_irqrestore(&qp->s_lock, flags);
- }
-
- /*
- * If snoop is dropping the packet we need to put the
- * txreq back because no one else will.
- */
- hfi1_put_txreq(ps->s_txreq);
- return 0;
- }
- break;
- default:
- kfree(s_packet);
- break;
- }
-out:
- return hfi1_verbs_send_pio(qp, ps, md.u.pbc);
-}
-
-/*
- * Callers of this must pass an hfi1_ib_header type for the from ptr. Currently
- * this can be used anywhere, but the intention is for inline ACKs for RC and
- * CCA packets. We don't restrict this usage though.
- */
-void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf,
- u64 pbc, const void *from, size_t count)
-{
- int snoop_mode = 0;
- int md_len = 0;
- struct capture_md md;
- struct snoop_packet *s_packet = NULL;
-
- /*
- * count is in dwords so we need to convert to bytes.
- * We also need to account for CRC which would be tacked on by hardware.
- */
- int packet_len = (count << 2) + 4;
- int ret;
-
- snoop_dbg("ACK OUT: len %d", packet_len);
-
- if (!dd->hfi1_snoop.filter_callback) {
- snoop_dbg("filter not set");
- ret = HFI1_FILTER_HIT;
- } else {
- ret = dd->hfi1_snoop.filter_callback(
- (struct hfi1_ib_header *)from,
- NULL,
- dd->hfi1_snoop.filter_value);
- }
-
- switch (ret) {
- case HFI1_FILTER_ERR:
- snoop_dbg("Error in filter call");
- /* fall through */
- case HFI1_FILTER_MISS:
- snoop_dbg("Filter Miss");
- break;
- case HFI1_FILTER_HIT:
- snoop_dbg("Capturing packet");
- if (dd->hfi1_snoop.mode_flag & HFI1_PORT_SNOOP_MODE)
- snoop_mode = 1;
- if ((snoop_mode == 0) ||
- unlikely(snoop_flags & SNOOP_USE_METADATA))
- md_len = sizeof(struct capture_md);
-
- s_packet = allocate_snoop_packet(packet_len, 0, md_len);
-
- if (unlikely(!s_packet)) {
- dd_dev_warn_ratelimited(dd, "Unable to allocate snoop/capture packet\n");
- goto inline_pio_out;
- }
-
- s_packet->total_len = packet_len + md_len;
-
- /* Fill in the metadata for the packet */
- if (md_len > 0) {
- memset(&md, 0, sizeof(struct capture_md));
- md.port = 1;
- md.dir = PKT_DIR_EGRESS;
- md.u.pbc = pbc;
- memcpy(s_packet->data, &md, md_len);
- }
-
- /* Add the packet data which is a single buffer */
- memcpy(s_packet->data + md_len, from, packet_len);
-
- snoop_list_add_tail(s_packet, dd);
-
- if (unlikely((snoop_flags & SNOOP_DROP_SEND) && snoop_mode)) {
- snoop_dbg("Dropping packet");
- return;
- }
- break;
- default:
- break;
- }
-
-inline_pio_out:
- pio_copy(dd, pbuf, pbc, from, count);
-}
diff --git a/drivers/staging/rdma/hfi1/eprom.c b/drivers/staging/rdma/hfi1/eprom.c
deleted file mode 100644
index bd8771570f81..000000000000
--- a/drivers/staging/rdma/hfi1/eprom.c
+++ /dev/null
@@ -1,471 +0,0 @@
-/*
- * Copyright(c) 2015, 2016 Intel Corporation.
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * BSD LICENSE
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * - Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * - Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * - Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-#include <linux/delay.h>
-#include "hfi.h"
-#include "common.h"
-#include "eprom.h"
-
-/*
- * The EPROM is logically divided into three partitions:
- * partition 0: the first 128K, visible from PCI ROM BAR
- * partition 1: 4K config file (sector size)
- * partition 2: the rest
- */
-#define P0_SIZE (128 * 1024)
-#define P1_SIZE (4 * 1024)
-#define P1_START P0_SIZE
-#define P2_START (P0_SIZE + P1_SIZE)
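-
-/*
- * Worked example: with the sizes above, partition 1 (the 4K config
- * file) starts at byte 0x20000 (128K) and partition 2 at byte 0x21000
- * (132K).
- */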
-
-/* erase sizes supported by the controller */
-#define SIZE_4KB (4 * 1024)
-#define MASK_4KB (SIZE_4KB - 1)
-
-#define SIZE_32KB (32 * 1024)
-#define MASK_32KB (SIZE_32KB - 1)
-
-#define SIZE_64KB (64 * 1024)
-#define MASK_64KB (SIZE_64KB - 1)
-
-/* controller page size, in bytes */
-#define EP_PAGE_SIZE 256
-#define EEP_PAGE_MASK (EP_PAGE_SIZE - 1)
-
-/* controller commands */
-#define CMD_SHIFT 24
-#define CMD_NOP (0)
-#define CMD_PAGE_PROGRAM(addr) ((0x02 << CMD_SHIFT) | addr)
-#define CMD_READ_DATA(addr) ((0x03 << CMD_SHIFT) | addr)
-#define CMD_READ_SR1 ((0x05 << CMD_SHIFT))
-#define CMD_WRITE_ENABLE ((0x06 << CMD_SHIFT))
-#define CMD_SECTOR_ERASE_4KB(addr) ((0x20 << CMD_SHIFT) | addr)
-#define CMD_SECTOR_ERASE_32KB(addr) ((0x52 << CMD_SHIFT) | addr)
-#define CMD_CHIP_ERASE ((0x60 << CMD_SHIFT))
-#define CMD_READ_MANUF_DEV_ID ((0x90 << CMD_SHIFT))
-#define CMD_RELEASE_POWERDOWN_NOID ((0xab << CMD_SHIFT))
-#define CMD_SECTOR_ERASE_64KB(addr) ((0xd8 << CMD_SHIFT) | addr)
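-
-/*
- * Encoding example: each command packs an 8-bit opcode above a 24-bit
- * address, so CMD_READ_DATA(0x20000) == (0x03 << 24) | 0x20000
- * == 0x03020000.
- */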
-
-/* controller interface speeds */
-#define EP_SPEED_FULL 0x2 /* full speed */
-
-/* controller status register 1 bits */
-#define SR1_BUSY 0x1ull /* the BUSY bit in SR1 */
-
-/* sleep length while waiting for controller */
-#define WAIT_SLEEP_US 100 /* must be larger than 5 (see usage) */
-#define COUNT_DELAY_SEC(n) ((n) * (1000000 / WAIT_SLEEP_US))
-
-/* GPIO pins */
-#define EPROM_WP_N BIT_ULL(14) /* EPROM write line */
-
-/*
- * How long to wait for the EPROM to become available, in ms.
- * Per the spec, a 32 Mb EPROM takes around 40 s to erase then write.
- * Double it for safety.
- */
-#define EPROM_TIMEOUT 80000 /* ms */
-
-/*
- * Turn on external enable line that allows writing on the flash.
- */
-static void write_enable(struct hfi1_devdata *dd)
-{
- /* raise signal */
- write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) | EPROM_WP_N);
- /* raise enable */
- write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) | EPROM_WP_N);
-}
-
-/*
- * Turn off external enable line that allows writing on the flash.
- */
-static void write_disable(struct hfi1_devdata *dd)
-{
- /* lower signal */
- write_csr(dd, ASIC_GPIO_OUT, read_csr(dd, ASIC_GPIO_OUT) & ~EPROM_WP_N);
- /* lower enable */
- write_csr(dd, ASIC_GPIO_OE, read_csr(dd, ASIC_GPIO_OE) & ~EPROM_WP_N);
-}
-
-/*
- * Wait for the device to become not busy. Must be called after all
- * write or erase operations.
- */
-static int wait_for_not_busy(struct hfi1_devdata *dd)
-{
- unsigned long count = 0;
- u64 reg;
- int ret = 0;
-
- /* starts page mode */
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_SR1);
- while (1) {
- usleep_range(WAIT_SLEEP_US - 5, WAIT_SLEEP_US + 5);
- count++;
- reg = read_csr(dd, ASIC_EEP_DATA);
- if ((reg & SR1_BUSY) == 0)
- break;
- /* 200s is the largest time for a 128Mb device */
- if (count > COUNT_DELAY_SEC(200)) {
- dd_dev_err(dd, "waited too long for SPI FLASH busy to clear - failing\n");
- ret = -ETIMEDOUT;
- break; /* break, not goto - must stop page mode */
- }
- }
-
- /* stop page mode with a NOP */
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP);
-
- return ret;
-}
-
-/*
- * Read the device ID from the SPI controller.
- */
-static u32 read_device_id(struct hfi1_devdata *dd)
-{
- /* read the Manufacture Device ID */
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_MANUF_DEV_ID);
- return (u32)read_csr(dd, ASIC_EEP_DATA);
-}
-
-/*
- * Erase the whole flash.
- */
-static int erase_chip(struct hfi1_devdata *dd)
-{
- int ret;
-
- write_enable(dd);
-
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_CHIP_ERASE);
- ret = wait_for_not_busy(dd);
-
- write_disable(dd);
-
- return ret;
-}
-
-/*
- * Erase a range.
- */
-static int erase_range(struct hfi1_devdata *dd, u32 start, u32 len)
-{
- u32 end = start + len;
- int ret = 0;
-
- if (end < start)
- return -EINVAL;
-
- /* check the end points for the minimum erase */
- if ((start & MASK_4KB) || (end & MASK_4KB)) {
- dd_dev_err(dd,
- "%s: non-aligned range (0x%x,0x%x) for a 4KB erase\n",
- __func__, start, end);
- return -EINVAL;
- }
-
- write_enable(dd);
-
- while (start < end) {
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
- /* check in order of largest to smallest */
- if (((start & MASK_64KB) == 0) && (start + SIZE_64KB <= end)) {
- write_csr(dd, ASIC_EEP_ADDR_CMD,
- CMD_SECTOR_ERASE_64KB(start));
- start += SIZE_64KB;
- } else if (((start & MASK_32KB) == 0) &&
- (start + SIZE_32KB <= end)) {
- write_csr(dd, ASIC_EEP_ADDR_CMD,
- CMD_SECTOR_ERASE_32KB(start));
- start += SIZE_32KB;
- } else { /* 4KB will work */
- write_csr(dd, ASIC_EEP_ADDR_CMD,
- CMD_SECTOR_ERASE_4KB(start));
- start += SIZE_4KB;
- }
- ret = wait_for_not_busy(dd);
- if (ret)
- goto done;
- }
-
-done:
- write_disable(dd);
-
- return ret;
-}
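-
-/*
- * Worked example (illustrative range): erasing start = 0x21000,
- * len = 0x1F000 (end = 0x40000) proceeds greedily from the largest
- * aligned size: seven 4KB erases (0x21000..0x27000) reach 32KB
- * alignment, one 32KB erase at 0x28000 reaches 64KB alignment, and
- * one 64KB erase at 0x30000 finishes the range.
- */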
-
-/*
- * Read a 256 byte (64 dword) EPROM page.
- * All callers have verified the offset is at a page boundary.
- */
-static void read_page(struct hfi1_devdata *dd, u32 offset, u32 *result)
-{
- int i;
-
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_READ_DATA(offset));
- for (i = 0; i < EP_PAGE_SIZE / sizeof(u32); i++)
- result[i] = (u32)read_csr(dd, ASIC_EEP_DATA);
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_NOP); /* close open page */
-}
-
-/*
- * Read length bytes starting at offset. Copy to user address addr.
- */
-static int read_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr)
-{
- u32 offset;
- u32 buffer[EP_PAGE_SIZE / sizeof(u32)];
- int ret = 0;
-
- /* reject anything not on an EPROM page boundary */
- if ((start & EEP_PAGE_MASK) || (len & EEP_PAGE_MASK))
- return -EINVAL;
-
- for (offset = 0; offset < len; offset += EP_PAGE_SIZE) {
- read_page(dd, start + offset, buffer);
- if (copy_to_user((void __user *)(addr + offset),
- buffer, EP_PAGE_SIZE)) {
- ret = -EFAULT;
- goto done;
- }
- }
-
-done:
- return ret;
-}
-
-/*
- * Write a 256 byte (64 dword) EPROM page.
- * All callers have verified the offset is at a page boundary.
- */
-static int write_page(struct hfi1_devdata *dd, u32 offset, u32 *data)
-{
- int i;
-
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_WRITE_ENABLE);
- write_csr(dd, ASIC_EEP_DATA, data[0]);
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_PAGE_PROGRAM(offset));
- for (i = 1; i < EP_PAGE_SIZE / sizeof(u32); i++)
- write_csr(dd, ASIC_EEP_DATA, data[i]);
- /* will close the open page */
- return wait_for_not_busy(dd);
-}
-
-/*
- * Write length bytes starting at offset. Read from user address addr.
- */
-static int write_length(struct hfi1_devdata *dd, u32 start, u32 len, u64 addr)
-{
- u32 offset;
- u32 buffer[EP_PAGE_SIZE / sizeof(u32)];
- int ret = 0;
-
- /* reject anything not on an EPROM page boundary */
- if ((start & EEP_PAGE_MASK) || (len & EEP_PAGE_MASK))
- return -EINVAL;
-
- write_enable(dd);
-
- for (offset = 0; offset < len; offset += EP_PAGE_SIZE) {
- if (copy_from_user(buffer, (void __user *)(addr + offset),
- EP_PAGE_SIZE)) {
- ret = -EFAULT;
- goto done;
- }
- ret = write_page(dd, start + offset, buffer);
- if (ret)
- goto done;
- }
-
-done:
- write_disable(dd);
- return ret;
-}
-
-/* convert a range composite to a length, in bytes (low 16 bits = count of 256-byte pages) */
-static inline u32 extract_rlen(u32 composite)
-{
- return (composite & 0xffff) * EP_PAGE_SIZE;
-}
-
-/* convert a range composite to a start, in bytes (high 16 bits = start page) */
-static inline u32 extract_rstart(u32 composite)
-{
- return (composite >> 16) * EP_PAGE_SIZE;
-}
-
-/*
- * Perform the given operation on the EPROM. Called from user space. The
- * user credentials have already been checked.
- *
- * Return 0 on success, -ERRNO on error
- */
-int handle_eprom_command(struct file *fp, const struct hfi1_cmd *cmd)
-{
- struct hfi1_devdata *dd;
- u32 dev_id;
- u32 rlen; /* range length */
- u32 rstart; /* range start */
- int i_minor;
- int ret = 0;
-
- /*
- * Map the device file to device data using the relative minor.
- * The device file minor number is the unit number + 1. 0 is
- * the generic device file - reject it.
- */
- i_minor = iminor(file_inode(fp)) - HFI1_USER_MINOR_BASE;
- if (i_minor <= 0)
- return -EINVAL;
- dd = hfi1_lookup(i_minor - 1);
- if (!dd) {
- pr_err("%s: cannot find unit %d!\n", __func__, i_minor);
- return -EINVAL;
- }
-
- /* some devices do not have an EPROM */
- if (!dd->eprom_available)
- return -EOPNOTSUPP;
-
- ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
- if (ret) {
- dd_dev_err(dd, "%s: unable to acquire EPROM resource\n",
- __func__);
- goto done_asic;
- }
-
- dd_dev_info(dd, "%s: cmd: type %d, len 0x%x, addr 0x%016llx\n",
- __func__, cmd->type, cmd->len, cmd->addr);
-
- switch (cmd->type) {
- case HFI1_CMD_EP_INFO:
- if (cmd->len != sizeof(u32)) {
- ret = -ERANGE;
- break;
- }
- dev_id = read_device_id(dd);
- /* addr points to a u32 user buffer */
- if (copy_to_user((void __user *)cmd->addr, &dev_id,
- sizeof(u32)))
- ret = -EFAULT;
- break;
-
- case HFI1_CMD_EP_ERASE_CHIP:
- ret = erase_chip(dd);
- break;
-
- case HFI1_CMD_EP_ERASE_RANGE:
- rlen = extract_rlen(cmd->len);
- rstart = extract_rstart(cmd->len);
- ret = erase_range(dd, rstart, rlen);
- break;
-
- case HFI1_CMD_EP_READ_RANGE:
- rlen = extract_rlen(cmd->len);
- rstart = extract_rstart(cmd->len);
- ret = read_length(dd, rstart, rlen, cmd->addr);
- break;
-
- case HFI1_CMD_EP_WRITE_RANGE:
- rlen = extract_rlen(cmd->len);
- rstart = extract_rstart(cmd->len);
- ret = write_length(dd, rstart, rlen, cmd->addr);
- break;
-
- default:
- dd_dev_err(dd, "%s: unexpected command %d\n",
- __func__, cmd->type);
- ret = -EINVAL;
- break;
- }
-
- release_chip_resource(dd, CR_EPROM);
-done_asic:
- return ret;
-}
-
-/*
- * Initialize the EPROM handler.
- */
-int eprom_init(struct hfi1_devdata *dd)
-{
- int ret = 0;
-
- /* only the discrete chip has an EPROM */
- if (dd->pcidev->device != PCI_DEVICE_ID_INTEL0)
- return 0;
-
- /*
- * It is OK if both HFIs reset the EPROM as long as they don't
- * do it at the same time.
- */
- ret = acquire_chip_resource(dd, CR_EPROM, EPROM_TIMEOUT);
- if (ret) {
- dd_dev_err(dd,
- "%s: unable to acquire EPROM resource, no EPROM support\n",
- __func__);
- goto done_asic;
- }
-
- /* reset EPROM to be sure it is in a good state */
-
- /* set reset */
- write_csr(dd, ASIC_EEP_CTL_STAT, ASIC_EEP_CTL_STAT_EP_RESET_SMASK);
- /* clear reset, set speed */
- write_csr(dd, ASIC_EEP_CTL_STAT,
- EP_SPEED_FULL << ASIC_EEP_CTL_STAT_RATE_SPI_SHIFT);
-
- /* wake the device with command "release powerdown NoID" */
- write_csr(dd, ASIC_EEP_ADDR_CMD, CMD_RELEASE_POWERDOWN_NOID);
-
- dd->eprom_available = true;
- release_chip_resource(dd, CR_EPROM);
-done_asic:
- return ret;
-}
diff --git a/drivers/target/iscsi/Kconfig b/drivers/target/iscsi/Kconfig
index 8345fb457a40..bbdbf9c4e93a 100644
--- a/drivers/target/iscsi/Kconfig
+++ b/drivers/target/iscsi/Kconfig
@@ -7,3 +7,5 @@ config ISCSI_TARGET
help
Say M here to enable the ConfigFS enabled Linux-iSCSI.org iSCSI
Target Mode Stack.
+
+source "drivers/target/iscsi/cxgbit/Kconfig"
diff --git a/drivers/target/iscsi/Makefile b/drivers/target/iscsi/Makefile
index 0f43be9c3453..0f18295e05bc 100644
--- a/drivers/target/iscsi/Makefile
+++ b/drivers/target/iscsi/Makefile
@@ -18,3 +18,4 @@ iscsi_target_mod-y += iscsi_target_parameters.o \
iscsi_target_transport.o
obj-$(CONFIG_ISCSI_TARGET) += iscsi_target_mod.o
+obj-$(CONFIG_ISCSI_TARGET_CXGB4) += cxgbit/
diff --git a/drivers/target/iscsi/cxgbit/Kconfig b/drivers/target/iscsi/cxgbit/Kconfig
new file mode 100644
index 000000000000..c9b6a3c758b1
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/Kconfig
@@ -0,0 +1,7 @@
+config ISCSI_TARGET_CXGB4
+ tristate "Chelsio iSCSI target offload driver"
+ depends on ISCSI_TARGET && CHELSIO_T4 && INET
+ select CHELSIO_T4_UWIRE
+ ---help---
+ To compile this driver as a module, choose M here: the module
+ will be called cxgbit.
diff --git a/drivers/target/iscsi/cxgbit/Makefile b/drivers/target/iscsi/cxgbit/Makefile
new file mode 100644
index 000000000000..bd56c073dff6
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/Makefile
@@ -0,0 +1,6 @@
+ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4
+ccflags-y += -Idrivers/target/iscsi
+
+obj-$(CONFIG_ISCSI_TARGET_CXGB4) += cxgbit.o
+
+cxgbit-y := cxgbit_main.o cxgbit_cm.o cxgbit_target.o cxgbit_ddp.o
diff --git a/drivers/target/iscsi/cxgbit/cxgbit.h b/drivers/target/iscsi/cxgbit/cxgbit.h
new file mode 100644
index 000000000000..625c7f6de6b2
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit.h
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CXGBIT_H__
+#define __CXGBIT_H__
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/idr.h>
+#include <linux/completion.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/inet.h>
+#include <linux/wait.h>
+#include <linux/kref.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+
+#include <asm/byteorder.h>
+
+#include <net/net_namespace.h>
+
+#include <target/iscsi/iscsi_transport.h>
+#include <iscsi_target_parameters.h>
+#include <iscsi_target_login.h>
+
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "cxgb4.h"
+#include "cxgb4_uld.h"
+#include "l2t.h"
+#include "cxgb4_ppm.h"
+#include "cxgbit_lro.h"
+
+extern struct mutex cdev_list_lock;
+extern struct list_head cdev_list_head;
+struct cxgbit_np;
+
+struct cxgbit_sock;
+
+struct cxgbit_cmd {
+ struct scatterlist sg;
+ struct cxgbi_task_tag_info ttinfo;
+ bool setup_ddp;
+ bool release;
+};
+
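+/*
+ * Note: ISO bursts are capped both by skb fragment capacity and by
+ * the 65535-byte (16-bit) length ceiling.
+ */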
+#define CXGBIT_MAX_ISO_PAYLOAD \
+ min_t(u32, MAX_SKB_FRAGS * PAGE_SIZE, 65535)
+
+struct cxgbit_iso_info {
+ u8 flags;
+ u32 mpdu;
+ u32 len;
+ u32 burst_len;
+};
+
+enum cxgbit_skcb_flags {
+ SKCBF_TX_NEED_HDR = (1 << 0), /* packet needs a header */
+ SKCBF_TX_FLAG_COMPL = (1 << 1), /* wr completion flag */
+ SKCBF_TX_ISO = (1 << 2), /* iso cpl in tx skb */
+ SKCBF_RX_LRO = (1 << 3), /* lro skb */
+};
+
+struct cxgbit_skb_rx_cb {
+ u8 opcode;
+ void *pdu_cb;
+ void (*backlog_fn)(struct cxgbit_sock *, struct sk_buff *);
+};
+
+struct cxgbit_skb_tx_cb {
+ u8 submode;
+ u32 extra_len;
+};
+
+union cxgbit_skb_cb {
+ struct {
+ u8 flags;
+ union {
+ struct cxgbit_skb_tx_cb tx;
+ struct cxgbit_skb_rx_cb rx;
+ };
+ };
+
+ struct {
+ /* This member must be first. */
+ struct l2t_skb_cb l2t;
+ struct sk_buff *wr_next;
+ };
+};
+
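+/*
+ * The macros below overlay the 48-byte private skb->cb area with the
+ * union above, so per-skb driver state needs no extra allocation.
+ */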
+#define CXGBIT_SKB_CB(skb) ((union cxgbit_skb_cb *)&((skb)->cb[0]))
+#define cxgbit_skcb_flags(skb) (CXGBIT_SKB_CB(skb)->flags)
+#define cxgbit_skcb_submode(skb) (CXGBIT_SKB_CB(skb)->tx.submode)
+#define cxgbit_skcb_tx_wr_next(skb) (CXGBIT_SKB_CB(skb)->wr_next)
+#define cxgbit_skcb_tx_extralen(skb) (CXGBIT_SKB_CB(skb)->tx.extra_len)
+#define cxgbit_skcb_rx_opcode(skb) (CXGBIT_SKB_CB(skb)->rx.opcode)
+#define cxgbit_skcb_rx_backlog_fn(skb) (CXGBIT_SKB_CB(skb)->rx.backlog_fn)
+#define cxgbit_rx_pdu_cb(skb) (CXGBIT_SKB_CB(skb)->rx.pdu_cb)
+
+static inline void *cplhdr(struct sk_buff *skb)
+{
+ return skb->data;
+}
+
+enum cxgbit_cdev_flags {
+ CDEV_STATE_UP = 0,
+ CDEV_ISO_ENABLE,
+ CDEV_DDP_ENABLE,
+};
+
+#define NP_INFO_HASH_SIZE 32
+
+struct np_info {
+ struct np_info *next;
+ struct cxgbit_np *cnp;
+ unsigned int stid;
+};
+
+struct cxgbit_list_head {
+ struct list_head list;
+ /* device lock */
+ spinlock_t lock;
+};
+
+struct cxgbit_device {
+ struct list_head list;
+ struct cxgb4_lld_info lldi;
+ struct np_info *np_hash_tab[NP_INFO_HASH_SIZE];
+ /* np lock */
+ spinlock_t np_lock;
+ u8 selectq[MAX_NPORTS][2];
+ struct cxgbit_list_head cskq;
+ u32 mdsl;
+ struct kref kref;
+ unsigned long flags;
+};
+
+struct cxgbit_wr_wait {
+ struct completion completion;
+ int ret;
+};
+
+enum cxgbit_csk_state {
+ CSK_STATE_IDLE = 0,
+ CSK_STATE_LISTEN,
+ CSK_STATE_CONNECTING,
+ CSK_STATE_ESTABLISHED,
+ CSK_STATE_ABORTING,
+ CSK_STATE_CLOSING,
+ CSK_STATE_MORIBUND,
+ CSK_STATE_DEAD,
+};
+
+enum cxgbit_csk_flags {
+ CSK_TX_DATA_SENT = 0,
+ CSK_LOGIN_PDU_DONE,
+ CSK_LOGIN_DONE,
+ CSK_DDP_ENABLE,
+};
+
+struct cxgbit_sock_common {
+ struct cxgbit_device *cdev;
+ struct sockaddr_storage local_addr;
+ struct sockaddr_storage remote_addr;
+ struct cxgbit_wr_wait wr_wait;
+ enum cxgbit_csk_state state;
+ unsigned long flags;
+};
+
+struct cxgbit_np {
+ struct cxgbit_sock_common com;
+ wait_queue_head_t accept_wait;
+ struct iscsi_np *np;
+ struct completion accept_comp;
+ struct list_head np_accept_list;
+ /* np accept lock */
+ spinlock_t np_accept_lock;
+ struct kref kref;
+ unsigned int stid;
+};
+
+struct cxgbit_sock {
+ struct cxgbit_sock_common com;
+ struct cxgbit_np *cnp;
+ struct iscsi_conn *conn;
+ struct l2t_entry *l2t;
+ struct dst_entry *dst;
+ struct list_head list;
+ struct sk_buff_head rxq;
+ struct sk_buff_head txq;
+ struct sk_buff_head ppodq;
+ struct sk_buff_head backlogq;
+ struct sk_buff_head skbq;
+ struct sk_buff *wr_pending_head;
+ struct sk_buff *wr_pending_tail;
+ struct sk_buff *skb;
+ struct sk_buff *lro_skb;
+ struct sk_buff *lro_hskb;
+ struct list_head accept_node;
+ /* socket lock */
+ spinlock_t lock;
+ wait_queue_head_t waitq;
+ wait_queue_head_t ack_waitq;
+ bool lock_owner;
+ struct kref kref;
+ u32 max_iso_npdu;
+ u32 wr_cred;
+ u32 wr_una_cred;
+ u32 wr_max_cred;
+ u32 snd_una;
+ u32 tid;
+ u32 snd_nxt;
+ u32 rcv_nxt;
+ u32 smac_idx;
+ u32 tx_chan;
+ u32 mtu;
+ u32 write_seq;
+ u32 rx_credits;
+ u32 snd_win;
+ u32 rcv_win;
+ u16 mss;
+ u16 emss;
+ u16 plen;
+ u16 rss_qid;
+ u16 txq_idx;
+ u16 ctrlq_idx;
+ u8 tos;
+ u8 port_id;
+#define CXGBIT_SUBMODE_HCRC 0x1
+#define CXGBIT_SUBMODE_DCRC 0x2
+ u8 submode;
+#ifdef CONFIG_CHELSIO_T4_DCB
+ u8 dcb_priority;
+#endif
+ u8 snd_wscale;
+};
+
+void _cxgbit_free_cdev(struct kref *kref);
+void _cxgbit_free_csk(struct kref *kref);
+void _cxgbit_free_cnp(struct kref *kref);
+
+static inline void cxgbit_get_cdev(struct cxgbit_device *cdev)
+{
+ kref_get(&cdev->kref);
+}
+
+static inline void cxgbit_put_cdev(struct cxgbit_device *cdev)
+{
+ kref_put(&cdev->kref, _cxgbit_free_cdev);
+}
+
+static inline void cxgbit_get_csk(struct cxgbit_sock *csk)
+{
+ kref_get(&csk->kref);
+}
+
+static inline void cxgbit_put_csk(struct cxgbit_sock *csk)
+{
+ kref_put(&csk->kref, _cxgbit_free_csk);
+}
+
+static inline void cxgbit_get_cnp(struct cxgbit_np *cnp)
+{
+ kref_get(&cnp->kref);
+}
+
+static inline void cxgbit_put_cnp(struct cxgbit_np *cnp)
+{
+ kref_put(&cnp->kref, _cxgbit_free_cnp);
+}
+
+static inline void cxgbit_sock_reset_wr_list(struct cxgbit_sock *csk)
+{
+ csk->wr_pending_tail = NULL;
+ csk->wr_pending_head = NULL;
+}
+
+static inline struct sk_buff *cxgbit_sock_peek_wr(const struct cxgbit_sock *csk)
+{
+ return csk->wr_pending_head;
+}
+
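+/*
+ * The pending-WR queue is a singly linked list threaded through the
+ * skb control block; enqueue takes its own reference via skb_get() so
+ * the skb lives until the firmware completion dequeues and frees it.
+ */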
+static inline void
+cxgbit_sock_enqueue_wr(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ cxgbit_skcb_tx_wr_next(skb) = NULL;
+
+ skb_get(skb);
+
+ if (!csk->wr_pending_head)
+ csk->wr_pending_head = skb;
+ else
+ cxgbit_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
+ csk->wr_pending_tail = skb;
+}
+
+static inline struct sk_buff *cxgbit_sock_dequeue_wr(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb = csk->wr_pending_head;
+
+ if (likely(skb)) {
+ csk->wr_pending_head = cxgbit_skcb_tx_wr_next(skb);
+ cxgbit_skcb_tx_wr_next(skb) = NULL;
+ }
+ return skb;
+}
+
+typedef void (*cxgbit_cplhandler_func)(struct cxgbit_device *,
+ struct sk_buff *);
+
+int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
+int cxgbit_setup_conn_digest(struct cxgbit_sock *);
+int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *);
+void cxgbit_free_np(struct iscsi_np *);
+void cxgbit_free_conn(struct iscsi_conn *);
+extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];
+int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
+int cxgbit_rx_data_ack(struct cxgbit_sock *);
+int cxgbit_l2t_send(struct cxgbit_device *, struct sk_buff *,
+ struct l2t_entry *);
+void cxgbit_push_tx_frames(struct cxgbit_sock *);
+int cxgbit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
+int cxgbit_xmit_pdu(struct iscsi_conn *, struct iscsi_cmd *,
+ struct iscsi_datain_req *, const void *, u32);
+void cxgbit_get_r2t_ttt(struct iscsi_conn *, struct iscsi_cmd *,
+ struct iscsi_r2t *);
+u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *);
+int cxgbit_ofld_send(struct cxgbit_device *, struct sk_buff *);
+void cxgbit_get_rx_pdu(struct iscsi_conn *);
+int cxgbit_validate_params(struct iscsi_conn *);
+struct cxgbit_device *cxgbit_find_device(struct net_device *, u8 *);
+
+/* DDP */
+int cxgbit_ddp_init(struct cxgbit_device *);
+int cxgbit_setup_conn_pgidx(struct cxgbit_sock *, u32);
+int cxgbit_reserve_ttt(struct cxgbit_sock *, struct iscsi_cmd *);
+void cxgbit_release_cmd(struct iscsi_conn *, struct iscsi_cmd *);
+
+static inline
+struct cxgbi_ppm *cdev2ppm(struct cxgbit_device *cdev)
+{
+ return (struct cxgbi_ppm *)(*cdev->lldi.iscsi_ppm);
+}
+#endif /* __CXGBIT_H__ */
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
new file mode 100644
index 000000000000..0ae0b131abfc
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -0,0 +1,2086 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+#include <linux/notifier.h>
+#include <linux/inetdevice.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+
+#include <net/neighbour.h>
+#include <net/netevent.h>
+#include <net/route.h>
+#include <net/tcp.h>
+#include <net/ip6_route.h>
+#include <net/addrconf.h>
+
+#include "cxgbit.h"
+#include "clip_tbl.h"
+
+static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp)
+{
+ wr_waitp->ret = 0;
+ reinit_completion(&wr_waitp->completion);
+}
+
+static void
+cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret)
+{
+ if (ret == CPL_ERR_NONE)
+ wr_waitp->ret = 0;
+ else
+ wr_waitp->ret = -EIO;
+
+ if (wr_waitp->ret)
+ pr_err("%s: err:%u", func, ret);
+
+ complete(&wr_waitp->completion);
+}
+
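+/*
+ * Sleep until cxgbit_wake_up() reports the firmware reply for this
+ * request, or the timeout (in seconds) expires; returns 0 on success
+ * or a negative errno.
+ */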
+static int
+cxgbit_wait_for_reply(struct cxgbit_device *cdev,
+ struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout,
+ const char *func)
+{
+ int ret;
+
+ if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
+ wr_waitp->ret = -EIO;
+ goto out;
+ }
+
+ ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ);
+ if (!ret) {
+ pr_info("%s - Device %s not responding tid %u\n",
+ func, pci_name(cdev->lldi.pdev), tid);
+ wr_waitp->ret = -ETIMEDOUT;
+ }
+out:
+ if (wr_waitp->ret)
+ pr_info("%s: FW reply %d tid %u\n",
+ pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
+ return wr_waitp->ret;
+}
+
+/* Returns whether a CPL status conveys negative advice. */
+static int cxgbit_is_neg_adv(unsigned int status)
+{
+ return status == CPL_ERR_RTX_NEG_ADVICE ||
+ status == CPL_ERR_PERSIST_NEG_ADVICE ||
+ status == CPL_ERR_KEEPALV_NEG_ADVICE;
+}
+
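+/*
+ * Hash a listening endpoint pointer into one of the 32 np_hash_tab
+ * buckets; the >> 10 discards low-order bits shared by slab objects.
+ */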
+static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
+{
+ return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
+}
+
+static struct np_info *
+cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
+ unsigned int stid)
+{
+ struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL);
+
+ if (p) {
+ int bucket = cxgbit_np_hashfn(cnp);
+
+ p->cnp = cnp;
+ p->stid = stid;
+ spin_lock(&cdev->np_lock);
+ p->next = cdev->np_hash_tab[bucket];
+ cdev->np_hash_tab[bucket] = p;
+ spin_unlock(&cdev->np_lock);
+ }
+
+ return p;
+}
+
+static int
+cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
+{
+ int stid = -1, bucket = cxgbit_np_hashfn(cnp);
+ struct np_info *p;
+
+ spin_lock(&cdev->np_lock);
+ for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
+ if (p->cnp == cnp) {
+ stid = p->stid;
+ break;
+ }
+ }
+ spin_unlock(&cdev->np_lock);
+
+ return stid;
+}
+
+static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
+{
+ int stid = -1, bucket = cxgbit_np_hashfn(cnp);
+ struct np_info *p, **prev = &cdev->np_hash_tab[bucket];
+
+ spin_lock(&cdev->np_lock);
+ for (p = *prev; p; prev = &p->next, p = p->next) {
+ if (p->cnp == cnp) {
+ stid = p->stid;
+ *prev = p->next;
+ kfree(p);
+ break;
+ }
+ }
+ spin_unlock(&cdev->np_lock);
+
+ return stid;
+}
+
+void _cxgbit_free_cnp(struct kref *kref)
+{
+ struct cxgbit_np *cnp;
+
+ cnp = container_of(kref, struct cxgbit_np, kref);
+ kfree(cnp);
+}
+
+static int
+cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
+ struct cxgbit_np *cnp)
+{
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
+ &cnp->com.local_addr;
+ int addr_type;
+ int ret;
+
+ pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n",
+ __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);
+
+ addr_type = ipv6_addr_type((const struct in6_addr *)
+ &sin6->sin6_addr);
+ if (addr_type != IPV6_ADDR_ANY) {
+ ret = cxgb4_clip_get(cdev->lldi.ports[0],
+ (const u32 *)&sin6->sin6_addr.s6_addr, 1);
+ if (ret) {
+ pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n",
+ sin6->sin6_addr.s6_addr, ret);
+ return -ENOMEM;
+ }
+ }
+
+ cxgbit_get_cnp(cnp);
+ cxgbit_init_wr_wait(&cnp->com.wr_wait);
+
+ ret = cxgb4_create_server6(cdev->lldi.ports[0],
+ stid, &sin6->sin6_addr,
+ sin6->sin6_port,
+ cdev->lldi.rxq_ids[0]);
+ if (!ret)
+ ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
+ 0, 10, __func__);
+ else if (ret > 0)
+ ret = net_xmit_errno(ret);
+ else
+ cxgbit_put_cnp(cnp);
+
+ if (ret) {
+ if (ret != -ETIMEDOUT)
+ cxgb4_clip_release(cdev->lldi.ports[0],
+ (const u32 *)&sin6->sin6_addr.s6_addr, 1);
+
+ pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n",
+ ret, stid, sin6->sin6_addr.s6_addr,
+ ntohs(sin6->sin6_port));
+ }
+
+ return ret;
+}
+
+static int
+cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
+ struct cxgbit_np *cnp)
+{
+ struct sockaddr_in *sin = (struct sockaddr_in *)
+ &cnp->com.local_addr;
+ int ret;
+
+ pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n",
+ __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);
+
+ cxgbit_get_cnp(cnp);
+ cxgbit_init_wr_wait(&cnp->com.wr_wait);
+
+ ret = cxgb4_create_server(cdev->lldi.ports[0],
+ stid, sin->sin_addr.s_addr,
+ sin->sin_port, 0,
+ cdev->lldi.rxq_ids[0]);
+ if (!ret)
+ ret = cxgbit_wait_for_reply(cdev,
+ &cnp->com.wr_wait,
+ 0, 10, __func__);
+ else if (ret > 0)
+ ret = net_xmit_errno(ret);
+ else
+ cxgbit_put_cnp(cnp);
+
+ if (ret)
+ pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n",
+ ret, stid, &sin->sin_addr, ntohs(sin->sin_port));
+ return ret;
+}
+
+struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id)
+{
+ struct cxgbit_device *cdev;
+ u8 i;
+
+ list_for_each_entry(cdev, &cdev_list_head, list) {
+ struct cxgb4_lld_info *lldi = &cdev->lldi;
+
+ for (i = 0; i < lldi->nports; i++) {
+ if (lldi->ports[i] == ndev) {
+ if (port_id)
+ *port_id = i;
+ return cdev;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+static struct net_device *cxgbit_get_real_dev(struct net_device *ndev)
+{
+ if (ndev->priv_flags & IFF_BONDING) {
+ pr_err("Bond devices are not supported. Interface:%s\n",
+ ndev->name);
+ return NULL;
+ }
+
+ if (is_vlan_dev(ndev))
+ return vlan_dev_real_dev(ndev);
+
+ return ndev;
+}
+
+static struct net_device *cxgbit_ipv4_netdev(__be32 saddr)
+{
+ struct net_device *ndev;
+
+ ndev = __ip_dev_find(&init_net, saddr, false);
+ if (!ndev)
+ return NULL;
+
+ return cxgbit_get_real_dev(ndev);
+}
+
+static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6)
+{
+ struct net_device *ndev = NULL;
+ bool found = false;
+
+ if (IS_ENABLED(CONFIG_IPV6)) {
+ for_each_netdev_rcu(&init_net, ndev)
+ if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ return NULL;
+ return cxgbit_get_real_dev(ndev);
+}
+
+static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp)
+{
+ struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
+ int ss_family = sockaddr->ss_family;
+ struct net_device *ndev = NULL;
+ struct cxgbit_device *cdev = NULL;
+
+ rcu_read_lock();
+ if (ss_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)sockaddr;
+ ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr);
+ } else if (ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)sockaddr;
+ ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr);
+ }
+ if (!ndev)
+ goto out;
+
+ cdev = cxgbit_find_device(ndev, NULL);
+out:
+ rcu_read_unlock();
+ return cdev;
+}
+
+static bool cxgbit_inaddr_any(struct cxgbit_np *cnp)
+{
+ struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
+ int ss_family = sockaddr->ss_family;
+ int addr_type;
+
+ if (ss_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)sockaddr;
+ if (sin->sin_addr.s_addr == htonl(INADDR_ANY))
+ return true;
+ } else if (ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)sockaddr;
+ addr_type = ipv6_addr_type((const struct in6_addr *)
+ &sin6->sin6_addr);
+ if (addr_type == IPV6_ADDR_ANY)
+ return true;
+ }
+ return false;
+}
+
+static int
+__cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
+{
+ int stid, ret;
+ int ss_family = cnp->com.local_addr.ss_family;
+
+ if (!test_bit(CDEV_STATE_UP, &cdev->flags))
+ return -EINVAL;
+
+ stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
+ if (stid < 0)
+ return -EINVAL;
+
+ if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
+ cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
+ return -EINVAL;
+ }
+
+ if (ss_family == AF_INET)
+ ret = cxgbit_create_server4(cdev, stid, cnp);
+ else
+ ret = cxgbit_create_server6(cdev, stid, cnp);
+
+ if (ret) {
+ if (ret != -ETIMEDOUT)
+ cxgb4_free_stid(cdev->lldi.tids, stid,
+ ss_family);
+ cxgbit_np_hash_del(cdev, cnp);
+ return ret;
+ }
+ return ret;
+}
+
+static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp)
+{
+ struct cxgbit_device *cdev;
+ int ret = -1;
+
+ mutex_lock(&cdev_list_lock);
+ cdev = cxgbit_find_np_cdev(cnp);
+ if (!cdev)
+ goto out;
+
+ if (cxgbit_np_hash_find(cdev, cnp) >= 0)
+ goto out;
+
+ if (__cxgbit_setup_cdev_np(cdev, cnp))
+ goto out;
+
+ cnp->com.cdev = cdev;
+ ret = 0;
+out:
+ mutex_unlock(&cdev_list_lock);
+ return ret;
+}
+
+static int cxgbit_setup_all_np(struct cxgbit_np *cnp)
+{
+ struct cxgbit_device *cdev;
+ int ret;
+ u32 count = 0;
+
+ mutex_lock(&cdev_list_lock);
+ list_for_each_entry(cdev, &cdev_list_head, list) {
+ if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
+ mutex_unlock(&cdev_list_lock);
+ return -1;
+ }
+ }
+
+ list_for_each_entry(cdev, &cdev_list_head, list) {
+ ret = __cxgbit_setup_cdev_np(cdev, cnp);
+ if (ret == -ETIMEDOUT)
+ break;
+ if (ret != 0)
+ continue;
+ count++;
+ }
+ mutex_unlock(&cdev_list_lock);
+
+ return count ? 0 : -1;
+}
+
+int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)
+{
+ struct cxgbit_np *cnp;
+ int ret;
+
+ if ((ksockaddr->ss_family != AF_INET) &&
+ (ksockaddr->ss_family != AF_INET6))
+ return -EINVAL;
+
+ cnp = kzalloc(sizeof(*cnp), GFP_KERNEL);
+ if (!cnp)
+ return -ENOMEM;
+
+ init_waitqueue_head(&cnp->accept_wait);
+ init_completion(&cnp->com.wr_wait.completion);
+ init_completion(&cnp->accept_comp);
+ INIT_LIST_HEAD(&cnp->np_accept_list);
+ spin_lock_init(&cnp->np_accept_lock);
+ kref_init(&cnp->kref);
+ memcpy(&np->np_sockaddr, ksockaddr,
+ sizeof(struct sockaddr_storage));
+ memcpy(&cnp->com.local_addr, &np->np_sockaddr,
+ sizeof(cnp->com.local_addr));
+
+ cnp->np = np;
+ cnp->com.cdev = NULL;
+
+ if (cxgbit_inaddr_any(cnp))
+ ret = cxgbit_setup_all_np(cnp);
+ else
+ ret = cxgbit_setup_cdev_np(cnp);
+
+ if (ret) {
+ cxgbit_put_cnp(cnp);
+ return -EINVAL;
+ }
+
+ np->np_context = cnp;
+ cnp->com.state = CSK_STATE_LISTEN;
+ return 0;
+}
+
+static void
+cxgbit_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
+ struct cxgbit_sock *csk)
+{
+ conn->login_family = np->np_sockaddr.ss_family;
+ conn->login_sockaddr = csk->com.remote_addr;
+ conn->local_sockaddr = csk->com.local_addr;
+}
+
+int cxgbit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
+{
+ struct cxgbit_np *cnp = np->np_context;
+ struct cxgbit_sock *csk;
+ int ret = 0;
+
+accept_wait:
+ ret = wait_for_completion_interruptible(&cnp->accept_comp);
+ if (ret)
+ return -ENODEV;
+
+ spin_lock_bh(&np->np_thread_lock);
+ if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
+ spin_unlock_bh(&np->np_thread_lock);
+ /**
+ * No point in stalling here when np_thread
+ * is in state RESET/SHUTDOWN/EXIT - bail
+ **/
+ return -ENODEV;
+ }
+ spin_unlock_bh(&np->np_thread_lock);
+
+ spin_lock_bh(&cnp->np_accept_lock);
+ if (list_empty(&cnp->np_accept_list)) {
+ spin_unlock_bh(&cnp->np_accept_lock);
+ goto accept_wait;
+ }
+
+ csk = list_first_entry(&cnp->np_accept_list,
+ struct cxgbit_sock,
+ accept_node);
+
+ list_del_init(&csk->accept_node);
+ spin_unlock_bh(&cnp->np_accept_lock);
+ conn->context = csk;
+ csk->conn = conn;
+
+ cxgbit_set_conn_info(np, conn, csk);
+ return 0;
+}
+
+static int
+__cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
+{
+ int stid, ret;
+ bool ipv6 = false;
+
+ stid = cxgbit_np_hash_del(cdev, cnp);
+ if (stid < 0)
+ return -EINVAL;
+ if (!test_bit(CDEV_STATE_UP, &cdev->flags))
+ return -EINVAL;
+
+ if (cnp->np->np_sockaddr.ss_family == AF_INET6)
+ ipv6 = true;
+
+ cxgbit_get_cnp(cnp);
+ cxgbit_init_wr_wait(&cnp->com.wr_wait);
+ ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
+ cdev->lldi.rxq_ids[0], ipv6);
+
+ if (ret > 0)
+ ret = net_xmit_errno(ret);
+
+ if (ret) {
+ cxgbit_put_cnp(cnp);
+ return ret;
+ }
+
+ ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
+ 0, 10, __func__);
+ if (ret == -ETIMEDOUT)
+ return ret;
+
+ if (ipv6 && cnp->com.cdev) {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr;
+ cxgb4_clip_release(cdev->lldi.ports[0],
+ (const u32 *)&sin6->sin6_addr.s6_addr,
+ 1);
+ }
+
+ cxgb4_free_stid(cdev->lldi.tids, stid,
+ cnp->com.local_addr.ss_family);
+ return 0;
+}
+
+static void cxgbit_free_all_np(struct cxgbit_np *cnp)
+{
+ struct cxgbit_device *cdev;
+ int ret;
+
+ mutex_lock(&cdev_list_lock);
+ list_for_each_entry(cdev, &cdev_list_head, list) {
+ ret = __cxgbit_free_cdev_np(cdev, cnp);
+ if (ret == -ETIMEDOUT)
+ break;
+ }
+ mutex_unlock(&cdev_list_lock);
+}
+
+static void cxgbit_free_cdev_np(struct cxgbit_np *cnp)
+{
+ struct cxgbit_device *cdev;
+ bool found = false;
+
+ mutex_lock(&cdev_list_lock);
+ list_for_each_entry(cdev, &cdev_list_head, list) {
+ if (cdev == cnp->com.cdev) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ goto out;
+
+ __cxgbit_free_cdev_np(cdev, cnp);
+out:
+ mutex_unlock(&cdev_list_lock);
+}
+
+void cxgbit_free_np(struct iscsi_np *np)
+{
+ struct cxgbit_np *cnp = np->np_context;
+
+ cnp->com.state = CSK_STATE_DEAD;
+ if (cnp->com.cdev)
+ cxgbit_free_cdev_np(cnp);
+ else
+ cxgbit_free_all_np(cnp);
+
+ np->np_context = NULL;
+ cxgbit_put_cnp(cnp);
+}
+
+static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb;
+ struct cpl_close_con_req *req;
+ unsigned int len = roundup(sizeof(struct cpl_close_con_req), 16);
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ req = (struct cpl_close_con_req *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
+ INIT_TP_WR(req, csk->tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
+ csk->tid));
+ req->rsvd = 0;
+
+ cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
+ __skb_queue_tail(&csk->txq, skb);
+ cxgbit_push_tx_frames(csk);
+}
+
+static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
+{
+ pr_debug("%s cxgbit_device %p\n", __func__, handle);
+ kfree_skb(skb);
+}
+
+static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
+{
+ struct cxgbit_device *cdev = handle;
+ struct cpl_abort_req *req = cplhdr(skb);
+
+ pr_debug("%s cdev %p\n", __func__, cdev);
+ req->cmd = CPL_ABORT_NO_RST;
+ cxgbit_ofld_send(cdev, skb);
+}
+
+static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
+{
+ struct cpl_abort_req *req;
+ unsigned int len = roundup(sizeof(*req), 16);
+ struct sk_buff *skb;
+
+ pr_debug("%s: csk %p tid %u; state %d\n",
+ __func__, csk, csk->tid, csk->com.state);
+
+ __skb_queue_purge(&csk->txq);
+
+ if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
+ cxgbit_send_tx_flowc_wr(csk);
+
+ skb = __skb_dequeue(&csk->skbq);
+ req = (struct cpl_abort_req *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
+ t4_set_arp_err_handler(skb, csk->com.cdev, cxgbit_abort_arp_failure);
+ INIT_TP_WR(req, csk->tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ,
+ csk->tid));
+ req->cmd = CPL_ABORT_SEND_RST;
+ return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
+}
+
+void cxgbit_free_conn(struct iscsi_conn *conn)
+{
+ struct cxgbit_sock *csk = conn->context;
+ bool release = false;
+
+ pr_debug("%s: state %d\n",
+ __func__, csk->com.state);
+
+ spin_lock_bh(&csk->lock);
+ switch (csk->com.state) {
+ case CSK_STATE_ESTABLISHED:
+ if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
+ csk->com.state = CSK_STATE_CLOSING;
+ cxgbit_send_halfclose(csk);
+ } else {
+ csk->com.state = CSK_STATE_ABORTING;
+ cxgbit_send_abort_req(csk);
+ }
+ break;
+ case CSK_STATE_CLOSING:
+ csk->com.state = CSK_STATE_MORIBUND;
+ cxgbit_send_halfclose(csk);
+ break;
+ case CSK_STATE_DEAD:
+ release = true;
+ break;
+ default:
+ pr_err("%s: csk %p; state %d\n",
+ __func__, csk, csk->com.state);
+ }
+ spin_unlock_bh(&csk->lock);
+
+ if (release)
+ cxgbit_put_csk(csk);
+}
+
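+/*
+ * Derive the effective MSS from the negotiated hardware MTU entry:
+ * subtract the IP and TCP headers, and the 12-byte timestamp option
+ * when timestamps were negotiated, with a floor of 128 bytes.
+ */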
+static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
+{
+ csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
+ ((csk->com.remote_addr.ss_family == AF_INET) ?
+ sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
+ sizeof(struct tcphdr);
+ csk->mss = csk->emss;
+ if (TCPOPT_TSTAMP_G(opt))
+ csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
+ if (csk->emss < 128)
+ csk->emss = 128;
+ if (csk->emss & 7)
+ pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n",
+ TCPOPT_MSS_G(opt), csk->mss, csk->emss);
+ pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
+ csk->mss, csk->emss);
+}
+
+static void cxgbit_free_skb(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb;
+
+ __skb_queue_purge(&csk->txq);
+ __skb_queue_purge(&csk->rxq);
+ __skb_queue_purge(&csk->backlogq);
+ __skb_queue_purge(&csk->ppodq);
+ __skb_queue_purge(&csk->skbq);
+
+ while ((skb = cxgbit_sock_dequeue_wr(csk)))
+ kfree_skb(skb);
+
+ __kfree_skb(csk->lro_hskb);
+}
+
+void _cxgbit_free_csk(struct kref *kref)
+{
+ struct cxgbit_sock *csk;
+ struct cxgbit_device *cdev;
+
+ csk = container_of(kref, struct cxgbit_sock, kref);
+
+ pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);
+
+ if (csk->com.local_addr.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
+ &csk->com.local_addr;
+ cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
+ (const u32 *)
+ &sin6->sin6_addr.s6_addr, 1);
+ }
+
+ cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid);
+ dst_release(csk->dst);
+ cxgb4_l2t_release(csk->l2t);
+
+ cdev = csk->com.cdev;
+ spin_lock_bh(&cdev->cskq.lock);
+ list_del(&csk->list);
+ spin_unlock_bh(&cdev->cskq.lock);
+
+ cxgbit_free_skb(csk);
+ cxgbit_put_cdev(cdev);
+
+ kfree(csk);
+}
+
+static void
+cxgbit_get_tuple_info(struct cpl_pass_accept_req *req, int *iptype,
+ __u8 *local_ip, __u8 *peer_ip, __be16 *local_port,
+ __be16 *peer_port)
+{
+ u32 eth_len = ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len));
+ u32 ip_len = IP_HDR_LEN_G(be32_to_cpu(req->hdr_len));
+ struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
+ struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
+ struct tcphdr *tcp = (struct tcphdr *)
+ ((u8 *)(req + 1) + eth_len + ip_len);
+
+ if (ip->version == 4) {
+ pr_debug("%s saddr 0x%x daddr 0x%x sport %u dport %u\n",
+ __func__,
+ ntohl(ip->saddr), ntohl(ip->daddr),
+ ntohs(tcp->source),
+ ntohs(tcp->dest));
+ *iptype = 4;
+ memcpy(peer_ip, &ip->saddr, 4);
+ memcpy(local_ip, &ip->daddr, 4);
+ } else {
+ pr_debug("%s saddr %pI6 daddr %pI6 sport %u dport %u\n",
+ __func__,
+ ip6->saddr.s6_addr, ip6->daddr.s6_addr,
+ ntohs(tcp->source),
+ ntohs(tcp->dest));
+ *iptype = 6;
+ memcpy(peer_ip, ip6->saddr.s6_addr, 16);
+ memcpy(local_ip, ip6->daddr.s6_addr, 16);
+ }
+
+ *peer_port = tcp->source;
+ *local_port = tcp->dest;
+}
+
+static int
+cxgbit_our_interface(struct cxgbit_device *cdev, struct net_device *egress_dev)
+{
+ u8 i;
+
+ egress_dev = cxgbit_get_real_dev(egress_dev);
+ for (i = 0; i < cdev->lldi.nports; i++)
+ if (cdev->lldi.ports[i] == egress_dev)
+ return 1;
+ return 0;
+}
+
+static struct dst_entry *
+cxgbit_find_route6(struct cxgbit_device *cdev, __u8 *local_ip, __u8 *peer_ip,
+ __be16 local_port, __be16 peer_port, u8 tos,
+ __u32 sin6_scope_id)
+{
+ struct dst_entry *dst = NULL;
+
+ if (IS_ENABLED(CONFIG_IPV6)) {
+ struct flowi6 fl6;
+
+ memset(&fl6, 0, sizeof(fl6));
+ memcpy(&fl6.daddr, peer_ip, 16);
+ memcpy(&fl6.saddr, local_ip, 16);
+ if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+ fl6.flowi6_oif = sin6_scope_id;
+ dst = ip6_route_output(&init_net, NULL, &fl6);
+ if (!dst)
+ goto out;
+ if (!cxgbit_our_interface(cdev, ip6_dst_idev(dst)->dev) &&
+ !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
+ dst_release(dst);
+ dst = NULL;
+ }
+ }
+out:
+ return dst;
+}
+
+static struct dst_entry *
+cxgbit_find_route(struct cxgbit_device *cdev, __be32 local_ip, __be32 peer_ip,
+ __be16 local_port, __be16 peer_port, u8 tos)
+{
+ struct rtable *rt;
+ struct flowi4 fl4;
+ struct neighbour *n;
+
+ rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip,
+ local_ip,
+ peer_port, local_port, IPPROTO_TCP,
+ tos, 0);
+ if (IS_ERR(rt))
+ return NULL;
+ n = dst_neigh_lookup(&rt->dst, &peer_ip);
+ if (!n)
+ return NULL;
+ if (!cxgbit_our_interface(cdev, n->dev) &&
+ !(n->dev->flags & IFF_LOOPBACK)) {
+ neigh_release(n);
+ dst_release(&rt->dst);
+ return NULL;
+ }
+ neigh_release(n);
+ return &rt->dst;
+}
+
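+/*
+ * Scale the default 256KB send and receive windows linearly with the
+ * port speed relative to 10G (e.g. a 40G link gets 1MB windows).
+ */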
+static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
+{
+ unsigned int linkspeed;
+ u8 scale;
+
+ linkspeed = pi->link_cfg.speed;
+ scale = linkspeed / SPEED_10000;
+
+#define CXGBIT_10G_RCV_WIN (256 * 1024)
+ csk->rcv_win = CXGBIT_10G_RCV_WIN;
+ if (scale)
+ csk->rcv_win *= scale;
+
+#define CXGBIT_10G_SND_WIN (256 * 1024)
+ csk->snd_win = CXGBIT_10G_SND_WIN;
+ if (scale)
+ csk->snd_win *= scale;
+
+ pr_debug("%s snd_win %d rcv_win %d\n",
+ __func__, csk->snd_win, csk->rcv_win);
+}
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev)
+{
+ return ndev->dcbnl_ops->getstate(ndev);
+}
+
+static int cxgbit_select_priority(int pri_mask)
+{
+ if (!pri_mask)
+ return 0;
+
+ return (ffs(pri_mask) - 1);
+}
+
+static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
+{
+ int ret;
+ u8 caps;
+
+ struct dcb_app iscsi_dcb_app = {
+ .protocol = local_port
+ };
+
+ ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
+
+ if (ret)
+ return 0;
+
+ if (caps & DCB_CAP_DCBX_VER_IEEE) {
+ iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
+
+ ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
+
+ } else if (caps & DCB_CAP_DCBX_VER_CEE) {
+ iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
+
+ ret = dcb_getapp(ndev, &iscsi_dcb_app);
+ }
+
+ pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret));
+
+ return cxgbit_select_priority(ret);
+}
+#endif
+
+static int
+cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
+ u16 local_port, struct dst_entry *dst,
+ struct cxgbit_device *cdev)
+{
+ struct neighbour *n;
+ int ret, step;
+ struct net_device *ndev;
+ u16 rxq_idx, port_id;
+#ifdef CONFIG_CHELSIO_T4_DCB
+ u8 priority = 0;
+#endif
+
+ n = dst_neigh_lookup(dst, peer_ip);
+ if (!n)
+ return -ENODEV;
+
+ rcu_read_lock();
+ ret = -ENOMEM;
+ if (n->dev->flags & IFF_LOOPBACK) {
+ if (iptype == 4)
+ ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip);
+ else if (IS_ENABLED(CONFIG_IPV6))
+ ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip);
+ else
+ ndev = NULL;
+
+ if (!ndev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
+ n, ndev, 0);
+ if (!csk->l2t)
+ goto out;
+ csk->mtu = ndev->mtu;
+ csk->tx_chan = cxgb4_port_chan(ndev);
+ csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1;
+ step = cdev->lldi.ntxq /
+ cdev->lldi.nchan;
+ csk->txq_idx = cxgb4_port_idx(ndev) * step;
+ step = cdev->lldi.nrxq /
+ cdev->lldi.nchan;
+ csk->ctrlq_idx = cxgb4_port_idx(ndev);
+ csk->rss_qid = cdev->lldi.rxq_ids[
+ cxgb4_port_idx(ndev) * step];
+ csk->port_id = cxgb4_port_idx(ndev);
+ cxgbit_set_tcp_window(csk,
+ (struct port_info *)netdev_priv(ndev));
+ } else {
+ ndev = cxgbit_get_real_dev(n->dev);
+ if (!ndev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+ if (cxgbit_get_iscsi_dcb_state(ndev))
+ priority = cxgbit_get_iscsi_dcb_priority(ndev,
+ local_port);
+
+ csk->dcb_priority = priority;
+
+ csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
+#else
+ csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
+#endif
+ if (!csk->l2t)
+ goto out;
+ port_id = cxgb4_port_idx(ndev);
+ csk->mtu = dst_mtu(dst);
+ csk->tx_chan = cxgb4_port_chan(ndev);
+ csk->smac_idx = (cxgb4_port_viid(ndev) & 0x7F) << 1;
+ step = cdev->lldi.ntxq /
+ cdev->lldi.nports;
+ csk->txq_idx = (port_id * step) +
+ (cdev->selectq[port_id][0]++ % step);
+ csk->ctrlq_idx = cxgb4_port_idx(ndev);
+ step = cdev->lldi.nrxq /
+ cdev->lldi.nports;
+ rxq_idx = (port_id * step) +
+ (cdev->selectq[port_id][1]++ % step);
+ csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
+ csk->port_id = port_id;
+ cxgbit_set_tcp_window(csk,
+ (struct port_info *)netdev_priv(ndev));
+ }
+ ret = 0;
+out:
+ rcu_read_unlock();
+ neigh_release(n);
+ return ret;
+}
+
+int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ int ret = 0;
+
+ if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
+ kfree_skb(skb);
+ pr_err("%s - device not up - dropping\n", __func__);
+ return -EIO;
+ }
+
+ ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
+ if (ret < 0)
+ kfree_skb(skb);
+ return ret < 0 ? ret : 0;
+}
+
+static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
+{
+ struct cpl_tid_release *req;
+ unsigned int len = roundup(sizeof(*req), 16);
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ req = (struct cpl_tid_release *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ INIT_TP_WR(req, tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(
+ CPL_TID_RELEASE, tid));
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
+ cxgbit_ofld_send(cdev, skb);
+}
+
+int
+cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
+ struct l2t_entry *l2e)
+{
+ int ret = 0;
+
+ if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
+ kfree_skb(skb);
+ pr_err("%s - device not up - dropping\n", __func__);
+ return -EIO;
+ }
+
+ ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
+ if (ret < 0)
+ kfree_skb(skb);
+ return ret < 0 ? ret : 0;
+}
+
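+/*
+ * Choose the closest entry in the adapter MTU table such that the TCP
+ * payload (MTU minus IP/TCP headers and any timestamp option) stays
+ * 8-byte aligned.
+ */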
+static void
+cxgbit_best_mtu(const unsigned short *mtus, unsigned short mtu,
+ unsigned int *idx, int use_ts, int ipv6)
+{
+ unsigned short hdr_size = (ipv6 ? sizeof(struct ipv6hdr) :
+ sizeof(struct iphdr)) +
+ sizeof(struct tcphdr) +
+ (use_ts ? round_up(TCPOLEN_TIMESTAMP,
+ 4) : 0);
+ unsigned short data_size = mtu - hdr_size;
+
+ cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
+}
+
+static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ if (csk->com.state != CSK_STATE_ESTABLISHED) {
+ __kfree_skb(skb);
+ return;
+ }
+
+ cxgbit_ofld_send(csk->com.cdev, skb);
+}
+
+/*
+ * CPL connection rx data ack: host ->
+ * Send RX credits through an RX_DATA_ACK CPL message.
+ * Returns the number of credits sent.
+ */
+int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb;
+ struct cpl_rx_data_ack *req;
+ unsigned int len = roundup(sizeof(*req), 16);
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return -1;
+
+ req = (struct cpl_rx_data_ack *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ set_wr_txq(skb, CPL_PRIORITY_ACK, csk->ctrlq_idx);
+ INIT_TP_WR(req, csk->tid);
+ OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
+ csk->tid));
+ req->credit_dack = cpu_to_be32(RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
+ RX_CREDITS_V(csk->rx_credits));
+
+ csk->rx_credits = 0;
+
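+ /*
+ * If another context currently owns the socket lock, defer the
+ * send: queue the skb on the backlog with a callback to be run
+ * once the lock owner drains the backlog.
+ */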
+ spin_lock_bh(&csk->lock);
+ if (csk->lock_owner) {
+ cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits;
+ __skb_queue_tail(&csk->backlogq, skb);
+ spin_unlock_bh(&csk->lock);
+ return 0;
+ }
+
+ cxgbit_send_rx_credits(csk, skb);
+ spin_unlock_bh(&csk->lock);
+
+ return 0;
+}
+
+#define FLOWC_WR_NPARAMS_MIN 9
+#define FLOWC_WR_NPARAMS_MAX 11
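+/*
+ * 9 FLOWC mnemonics are always sent; window scale and DCB priority
+ * are the two optional ones, giving the maximum of 11.
+ */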
+static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb;
+ u32 len, flowclen;
+ u8 i;
+
+ flowclen = offsetof(struct fw_flowc_wr,
+ mnemval[FLOWC_WR_NPARAMS_MAX]);
+
+ len = max_t(u32, sizeof(struct cpl_abort_req),
+ sizeof(struct cpl_abort_rpl));
+
+ len = max(len, flowclen);
+ len = roundup(len, 16);
+
+ for (i = 0; i < 3; i++) {
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+ __skb_queue_tail(&csk->skbq, skb);
+ }
+
+ skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
+ memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
+ csk->lro_hskb = skb;
+
+ return 0;
+out:
+ __skb_queue_purge(&csk->skbq);
+ return -ENOMEM;
+}
+
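+/*
+ * Smallest TCP window-scale shift such that 65535 << wscale covers
+ * the requested window, capped at 14 per RFC 1323.
+ */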
+static u32 cxgbit_compute_wscale(u32 win)
+{
+ u32 wscale = 0;
+
+ while (wscale < 14 && (65535 << wscale) < win)
+ wscale++;
+ return wscale;
+}
+
+static void
+cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
+{
+ struct sk_buff *skb;
+ const struct tcphdr *tcph;
+ struct cpl_t5_pass_accept_rpl *rpl5;
+ unsigned int len = roundup(sizeof(*rpl5), 16);
+ unsigned int mtu_idx;
+ u64 opt0;
+ u32 opt2, hlen;
+ u32 wscale;
+ u32 win;
+
+ pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb) {
+ cxgbit_put_csk(csk);
+ return;
+ }
+
+ rpl5 = (struct cpl_t5_pass_accept_rpl *)__skb_put(skb, len);
+ memset(rpl5, 0, len);
+
+ INIT_TP_WR(rpl5, csk->tid);
+ OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
+ csk->tid));
+ cxgbit_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
+ req->tcpopt.tstamp,
+ (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
+ wscale = cxgbit_compute_wscale(csk->rcv_win);
+ /*
+ * Specify the largest window that will fit in opt0. The
+ * remainder will be specified in the rx_data_ack.
+ */
+ win = csk->rcv_win >> 10;
+ if (win > RCV_BUFSIZ_M)
+ win = RCV_BUFSIZ_M;
+ opt0 = TCAM_BYPASS_F |
+ WND_SCALE_V(wscale) |
+ MSS_IDX_V(mtu_idx) |
+ L2T_IDX_V(csk->l2t->idx) |
+ TX_CHAN_V(csk->tx_chan) |
+ SMAC_SEL_V(csk->smac_idx) |
+ DSCP_V(csk->tos >> 2) |
+ ULP_MODE_V(ULP_MODE_ISCSI) |
+ RCV_BUFSIZ_V(win);
+
+ opt2 = RX_CHANNEL_V(0) |
+ RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);
+
+ if (req->tcpopt.tstamp)
+ opt2 |= TSTAMPS_EN_F;
+ if (req->tcpopt.sack)
+ opt2 |= SACK_EN_F;
+ if (wscale)
+ opt2 |= WND_SCALE_EN_F;
+
+ hlen = ntohl(req->hdr_len);
+ tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
+ IP_HDR_LEN_G(hlen);
+
+ if (tcph->ece && tcph->cwr)
+ opt2 |= CCTRL_ECN_V(1);
+
+ opt2 |= RX_COALESCE_V(3);
+ opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);
+
+ opt2 |= T5_ISS_F;
+ rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);
+
+ opt2 |= T5_OPT_2_VALID_F;
+
+ rpl5->opt0 = cpu_to_be64(opt0);
+ rpl5->opt2 = cpu_to_be32(opt2);
+ set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
+ t4_set_arp_err_handler(skb, NULL, cxgbit_arp_failure_discard);
+ cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
+}
+
+static void
+cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbit_sock *csk = NULL;
+ struct cxgbit_np *cnp;
+ struct cpl_pass_accept_req *req = cplhdr(skb);
+ unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
+ struct tid_info *t = cdev->lldi.tids;
+ unsigned int tid = GET_TID(req);
+ u16 peer_mss = ntohs(req->tcpopt.mss);
+ unsigned short hdrs;
+
+ struct dst_entry *dst;
+ __u8 local_ip[16], peer_ip[16];
+ __be16 local_port, peer_port;
+ int ret;
+ int iptype;
+
+ pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
+ __func__, cdev, stid, tid);
+
+ cnp = lookup_stid(t, stid);
+ if (!cnp) {
+ pr_err("%s connect request on invalid stid %d\n",
+ __func__, stid);
+ goto rel_skb;
+ }
+
+ if (cnp->com.state != CSK_STATE_LISTEN) {
+ pr_err("%s - listening parent not in CSK_STATE_LISTEN\n",
+ __func__);
+ goto reject;
+ }
+
+ csk = lookup_tid(t, tid);
+ if (csk) {
+ pr_err("%s csk not null tid %u\n",
+ __func__, tid);
+ goto rel_skb;
+ }
+
+ cxgbit_get_tuple_info(req, &iptype, local_ip, peer_ip,
+ &local_port, &peer_port);
+
+ /* Find output route */
+ if (iptype == 4) {
+ pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 "
+ "lport %d rport %d peer_mss %d\n"
+ , __func__, cnp, tid,
+ local_ip, peer_ip, ntohs(local_port),
+ ntohs(peer_port), peer_mss);
+ dst = cxgbit_find_route(cdev, *(__be32 *)local_ip,
+ *(__be32 *)peer_ip,
+ local_port, peer_port,
+ PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
+ } else {
+ pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
+ "lport %d rport %d peer_mss %d\n"
+ , __func__, cnp, tid,
+ local_ip, peer_ip, ntohs(local_port),
+ ntohs(peer_port), peer_mss);
+ dst = cxgbit_find_route6(cdev, local_ip, peer_ip,
+ local_port, peer_port,
+ PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
+ ((struct sockaddr_in6 *)
+ &cnp->com.local_addr)->sin6_scope_id);
+ }
+ if (!dst) {
+ pr_err("%s - failed to find dst entry!\n",
+ __func__);
+ goto reject;
+ }
+
+ csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
+ if (!csk) {
+ dst_release(dst);
+ goto rel_skb;
+ }
+
+ ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
+ dst, cdev);
+ if (ret) {
+ pr_err("%s - failed to allocate l2t entry!\n",
+ __func__);
+ dst_release(dst);
+ kfree(csk);
+ goto reject;
+ }
+
+ kref_init(&csk->kref);
+ init_completion(&csk->com.wr_wait.completion);
+
+ INIT_LIST_HEAD(&csk->accept_node);
+
+ hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
+ sizeof(struct tcphdr) + (req->tcpopt.tstamp ? 12 : 0);
+ if (peer_mss && csk->mtu > (peer_mss + hdrs))
+ csk->mtu = peer_mss + hdrs;
+
+ csk->com.state = CSK_STATE_CONNECTING;
+ csk->com.cdev = cdev;
+ csk->cnp = cnp;
+ csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
+ csk->dst = dst;
+ csk->tid = tid;
+ csk->wr_cred = cdev->lldi.wr_cred -
+ DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
+ csk->wr_max_cred = csk->wr_cred;
+ csk->wr_una_cred = 0;
+
+ if (iptype == 4) {
+ struct sockaddr_in *sin = (struct sockaddr_in *)
+ &csk->com.local_addr;
+ sin->sin_family = AF_INET;
+ sin->sin_port = local_port;
+ sin->sin_addr.s_addr = *(__be32 *)local_ip;
+
+ sin = (struct sockaddr_in *)&csk->com.remote_addr;
+ sin->sin_family = AF_INET;
+ sin->sin_port = peer_port;
+ sin->sin_addr.s_addr = *(__be32 *)peer_ip;
+ } else {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
+ &csk->com.local_addr;
+
+ sin6->sin6_family = PF_INET6;
+ sin6->sin6_port = local_port;
+ memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
+ cxgb4_clip_get(cdev->lldi.ports[0],
+ (const u32 *)&sin6->sin6_addr.s6_addr,
+ 1);
+
+ sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
+ sin6->sin6_family = PF_INET6;
+ sin6->sin6_port = peer_port;
+ memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
+ }
+
+ skb_queue_head_init(&csk->rxq);
+ skb_queue_head_init(&csk->txq);
+ skb_queue_head_init(&csk->ppodq);
+ skb_queue_head_init(&csk->backlogq);
+ skb_queue_head_init(&csk->skbq);
+ cxgbit_sock_reset_wr_list(csk);
+ spin_lock_init(&csk->lock);
+ init_waitqueue_head(&csk->waitq);
+ init_waitqueue_head(&csk->ack_waitq);
+ csk->lock_owner = false;
+
+ if (cxgbit_alloc_csk_skb(csk)) {
+ dst_release(dst);
+ kfree(csk);
+ goto rel_skb;
+ }
+
+ cxgbit_get_cdev(cdev);
+
+ spin_lock(&cdev->cskq.lock);
+ list_add_tail(&csk->list, &cdev->cskq.list);
+ spin_unlock(&cdev->cskq.lock);
+
+ cxgb4_insert_tid(t, csk, tid);
+ cxgbit_pass_accept_rpl(csk, req);
+ goto rel_skb;
+
+reject:
+ cxgbit_release_tid(cdev, tid);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static u32
+cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
+ u32 *flowclenp)
+{
+ u32 nparams, flowclen16, flowclen;
+
+ nparams = FLOWC_WR_NPARAMS_MIN;
+
+ if (csk->snd_wscale)
+ nparams++;
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+ nparams++;
+#endif
+ flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
+ flowclen16 = DIV_ROUND_UP(flowclen, 16);
+ flowclen = flowclen16 * 16;
+ /*
+ * Return the number of 16-byte credits used by the flowc request.
+ * Pass back the nparams and actual flowc length if requested.
+ */
+ if (nparamsp)
+ *nparamsp = nparams;
+ if (flowclenp)
+ *flowclenp = flowclen;
+ return flowclen16;
+}
+
+u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
+{
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct fw_flowc_wr *flowc;
+ u32 nparams, flowclen16, flowclen;
+ struct sk_buff *skb;
+ u8 index;
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+ u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
+#endif
+
+ flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);
+
+ skb = __skb_dequeue(&csk->skbq);
+ flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
+ memset(flowc, 0, flowclen);
+
+ flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
+ FW_FLOWC_WR_NPARAMS_V(nparams));
+ flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
+ FW_WR_FLOWID_V(csk->tid));
+ flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
+ flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
+ (csk->com.cdev->lldi.pf));
+ flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
+ flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
+ flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
+ flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
+ flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
+ flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
+ flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
+ flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
+ flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
+ flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
+ flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
+ flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
+ flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
+ flowc->mnemval[7].val = cpu_to_be32(csk->emss);
+
+ flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
+ if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
+ flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD);
+ else
+ flowc->mnemval[8].val = cpu_to_be32(16384);
+
+ index = 9;
+
+ if (csk->snd_wscale) {
+ flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
+ flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
+ index++;
+ }
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+ flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
+ if (vlan == VLAN_NONE) {
+ pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
+ flowc->mnemval[index].val = cpu_to_be32(0);
+ } else
+ flowc->mnemval[index].val = cpu_to_be32(
+ (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
+#endif
+
+ pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;"
+ " rcv_seq = %u; snd_win = %u; emss = %u\n",
+ __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
+ csk->rcv_nxt, csk->snd_win, csk->emss);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
+ cxgbit_ofld_send(csk->com.cdev, skb);
+ return flowclen16;
+}
+
+int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb;
+ struct cpl_set_tcb_field *req;
+ u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
+ u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
+ unsigned int len = roundup(sizeof(*req), 16);
+ int ret;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ /* set up ulp submode */
+ req = (struct cpl_set_tcb_field *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ INIT_TP_WR(req, csk->tid);
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
+ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
+ req->word_cookie = htons(0);
+ req->mask = cpu_to_be64(0x3 << 4);
+ req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
+ (dcrc ? ULP_CRC_DATA : 0)) << 4);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
+
+ cxgbit_get_csk(csk);
+ cxgbit_init_wr_wait(&csk->com.wr_wait);
+
+ cxgbit_ofld_send(csk->com.cdev, skb);
+
+ ret = cxgbit_wait_for_reply(csk->com.cdev,
+ &csk->com.wr_wait,
+ csk->tid, 5, __func__);
+ if (ret)
+ return -1;
+
+ return 0;
+}
+
+int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
+{
+ struct sk_buff *skb;
+ struct cpl_set_tcb_field *req;
+ unsigned int len = roundup(sizeof(*req), 16);
+ int ret;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ req = (struct cpl_set_tcb_field *)__skb_put(skb, len);
+ memset(req, 0, len);
+
+ INIT_TP_WR(req, csk->tid);
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
+ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
+ req->word_cookie = htons(0);
+ req->mask = cpu_to_be64(0x3 << 8);
+ req->val = cpu_to_be64(pg_idx << 8);
+ set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);
+
+ cxgbit_get_csk(csk);
+ cxgbit_init_wr_wait(&csk->com.wr_wait);
+
+ cxgbit_ofld_send(csk->com.cdev, skb);
+
+ ret = cxgbit_wait_for_reply(csk->com.cdev,
+ &csk->com.wr_wait,
+ csk->tid, 5, __func__);
+ if (ret)
+ return -1;
+
+ return 0;
+}
+
+static void
+cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ struct cpl_pass_open_rpl *rpl = cplhdr(skb);
+ struct tid_info *t = cdev->lldi.tids;
+ unsigned int stid = GET_TID(rpl);
+ struct cxgbit_np *cnp = lookup_stid(t, stid);
+
+ pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
+ __func__, cnp, stid, rpl->status);
+
+ if (!cnp) {
+ pr_info("%s stid %u lookup failure\n", __func__, stid);
+ goto rel_skb;
+ }
+
+ cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
+ cxgbit_put_cnp(cnp);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void
+cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
+ struct tid_info *t = cdev->lldi.tids;
+ unsigned int stid = GET_TID(rpl);
+ struct cxgbit_np *cnp = lookup_stid(t, stid);
+
+ pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
+ __func__, cnp, stid, rpl->status);
+
+ if (!cnp) {
+ pr_info("%s stid %u lookup failure\n", __func__, stid);
+ goto rel_skb;
+ }
+
+ cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
+ cxgbit_put_cnp(cnp);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void
+cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ struct cpl_pass_establish *req = cplhdr(skb);
+ struct tid_info *t = cdev->lldi.tids;
+ unsigned int tid = GET_TID(req);
+ struct cxgbit_sock *csk;
+ struct cxgbit_np *cnp;
+ u16 tcp_opt = be16_to_cpu(req->tcp_opt);
+ u32 snd_isn = be32_to_cpu(req->snd_isn);
+ u32 rcv_isn = be32_to_cpu(req->rcv_isn);
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find connection for tid %u.\n", tid);
+ goto rel_skb;
+ }
+ cnp = csk->cnp;
+
+ pr_debug("%s: csk %p; tid %u; cnp %p\n",
+ __func__, csk, tid, cnp);
+
+ csk->write_seq = snd_isn;
+ csk->snd_una = snd_isn;
+ csk->snd_nxt = snd_isn;
+
+ csk->rcv_nxt = rcv_isn;
+
+ if (csk->rcv_win > (RCV_BUFSIZ_M << 10))
+ csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10));
+
+ csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
+ cxgbit_set_emss(csk, tcp_opt);
+ dst_confirm(csk->dst);
+ csk->com.state = CSK_STATE_ESTABLISHED;
+ spin_lock_bh(&cnp->np_accept_lock);
+ list_add_tail(&csk->accept_node, &cnp->np_accept_list);
+ spin_unlock_bh(&cnp->np_accept_lock);
+ complete(&cnp->accept_comp);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ cxgbit_skcb_flags(skb) = 0;
+ spin_lock_bh(&csk->rxq.lock);
+ __skb_queue_tail(&csk->rxq, skb);
+ spin_unlock_bh(&csk->rxq.lock);
+ wake_up(&csk->waitq);
+}
+
+static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ pr_debug("%s: csk %p; tid %u; state %d\n",
+ __func__, csk, csk->tid, csk->com.state);
+
+ switch (csk->com.state) {
+ case CSK_STATE_ESTABLISHED:
+ csk->com.state = CSK_STATE_CLOSING;
+ cxgbit_queue_rx_skb(csk, skb);
+ return;
+ case CSK_STATE_CLOSING:
+ /* simultaneous close */
+ csk->com.state = CSK_STATE_MORIBUND;
+ break;
+ case CSK_STATE_MORIBUND:
+ csk->com.state = CSK_STATE_DEAD;
+ cxgbit_put_csk(csk);
+ break;
+ case CSK_STATE_ABORTING:
+ break;
+ default:
+ pr_info("%s: cpl_peer_close in bad state %d\n",
+ __func__, csk->com.state);
+ }
+
+ __kfree_skb(skb);
+}
+
+static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ pr_debug("%s: csk %p; tid %u; state %d\n",
+ __func__, csk, csk->tid, csk->com.state);
+
+ switch (csk->com.state) {
+ case CSK_STATE_CLOSING:
+ csk->com.state = CSK_STATE_MORIBUND;
+ break;
+ case CSK_STATE_MORIBUND:
+ csk->com.state = CSK_STATE_DEAD;
+ cxgbit_put_csk(csk);
+ break;
+ case CSK_STATE_ABORTING:
+ case CSK_STATE_DEAD:
+ break;
+ default:
+ pr_info("%s: cpl_close_con_rpl in bad state %d\n",
+ __func__, csk->com.state);
+ }
+
+ __kfree_skb(skb);
+}
+
+static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ struct cpl_abort_req_rss *hdr = cplhdr(skb);
+ unsigned int tid = GET_TID(hdr);
+ struct cpl_abort_rpl *rpl;
+ struct sk_buff *rpl_skb;
+ bool release = false;
+ bool wakeup_thread = false;
+ unsigned int len = roundup(sizeof(*rpl), 16);
+
+ pr_debug("%s: csk %p; tid %u; state %d\n",
+ __func__, csk, tid, csk->com.state);
+
+ if (cxgbit_is_neg_adv(hdr->status)) {
+ pr_err("%s: got neg advise %d on tid %u\n",
+ __func__, hdr->status, tid);
+ goto rel_skb;
+ }
+
+ switch (csk->com.state) {
+ case CSK_STATE_CONNECTING:
+ case CSK_STATE_MORIBUND:
+ csk->com.state = CSK_STATE_DEAD;
+ release = true;
+ break;
+ case CSK_STATE_ESTABLISHED:
+ csk->com.state = CSK_STATE_DEAD;
+ wakeup_thread = true;
+ break;
+ case CSK_STATE_CLOSING:
+ csk->com.state = CSK_STATE_DEAD;
+ if (!csk->conn)
+ release = true;
+ break;
+ case CSK_STATE_ABORTING:
+ break;
+ default:
+ pr_info("%s: cpl_abort_req_rss in bad state %d\n",
+ __func__, csk->com.state);
+ csk->com.state = CSK_STATE_DEAD;
+ }
+
+ __skb_queue_purge(&csk->txq);
+
+ if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
+ cxgbit_send_tx_flowc_wr(csk);
+
+ rpl_skb = __skb_dequeue(&csk->skbq);
+ set_wr_txq(rpl_skb, CPL_PRIORITY_DATA, csk->txq_idx);
+
+ rpl = (struct cpl_abort_rpl *)__skb_put(rpl_skb, len);
+ memset(rpl, 0, len);
+
+ INIT_TP_WR(rpl, csk->tid);
+ OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
+ rpl->cmd = CPL_ABORT_NO_RST;
+ cxgbit_ofld_send(csk->com.cdev, rpl_skb);
+
+ if (wakeup_thread) {
+ cxgbit_queue_rx_skb(csk, skb);
+ return;
+ }
+
+ if (release)
+ cxgbit_put_csk(csk);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ pr_debug("%s: csk %p; tid %u; state %d\n",
+ __func__, csk, csk->tid, csk->com.state);
+
+ switch (csk->com.state) {
+ case CSK_STATE_ABORTING:
+ csk->com.state = CSK_STATE_DEAD;
+ cxgbit_put_csk(csk);
+ break;
+ default:
+ pr_info("%s: cpl_abort_rpl_rss in state %d\n",
+ __func__, csk->com.state);
+ }
+
+ __kfree_skb(skb);
+}
+
+static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
+{
+ const struct sk_buff *skb = csk->wr_pending_head;
+ u32 credit = 0;
+
+ if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
+ pr_err("csk 0x%p, tid %u, credit %u > %u\n",
+ csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
+ return true;
+ }
+
+ while (skb) {
+ credit += skb->csum;
+ skb = cxgbit_skcb_tx_wr_next(skb);
+ }
+
+ if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
+ pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
+ csk, csk->tid, csk->wr_cred,
+ credit, csk->wr_max_cred);
+
+ return true;
+ }
+
+ return false;
+}
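
A note on the arithmetic above: cxgbit_push_tx_frames() (later in this
patch) records each work request's cost in 16-byte credits in skb->csum
before handing it to hardware, so at any instant the free credits plus
the credits parked on unacknowledged WRs must equal the pool size. A
minimal userspace model of that conservation check, with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
        unsigned int wr_max_cred = 64;          /* assumed pool size */
        unsigned int pending[] = { 10, 6, 3 };  /* per-WR costs, as stashed in skb->csum */
        unsigned int wr_cred = 45, sum = 0;

        for (unsigned int i = 0; i < 3; i++)
            sum += pending[i];

        /* healthy connection: available + in-flight == pool */
        printf("%s\n", wr_cred + sum == wr_max_cred ? "ok" : "credit leak");
        return 0;
    }
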
+
+static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb);
+ u32 credits = rpl->credits;
+ u32 snd_una = ntohl(rpl->snd_una);
+
+ csk->wr_cred += credits;
+ if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
+ csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;
+
+ while (credits) {
+ struct sk_buff *p = cxgbit_sock_peek_wr(csk);
+
+ if (unlikely(!p)) {
+ pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
+ csk, csk->tid, credits,
+ csk->wr_cred, csk->wr_una_cred);
+ break;
+ }
+
+ if (unlikely(credits < p->csum)) {
+ pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
+ csk, csk->tid,
+ credits, csk->wr_cred, csk->wr_una_cred,
+ p->csum);
+ p->csum -= credits;
+ break;
+ }
+
+ cxgbit_sock_dequeue_wr(csk);
+ credits -= p->csum;
+ kfree_skb(p);
+ }
+
+ if (unlikely(cxgbit_credit_err(csk))) {
+ cxgbit_queue_rx_skb(csk, skb);
+ return;
+ }
+
+ if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
+ if (unlikely(before(snd_una, csk->snd_una))) {
+ pr_warn("csk 0x%p,%u, snd_una %u/%u.",
+ csk, csk->tid, snd_una,
+ csk->snd_una);
+ goto rel_skb;
+ }
+
+ if (csk->snd_una != snd_una) {
+ csk->snd_una = snd_una;
+ dst_confirm(csk->dst);
+ wake_up(&csk->ack_waitq);
+ }
+ }
+
+ if (skb_queue_len(&csk->txq))
+ cxgbit_push_tx_frames(csk);
+
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbit_sock *csk;
+ struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
+ unsigned int tid = GET_TID(rpl);
+ struct cxgb4_lld_info *lldi = &cdev->lldi;
+ struct tid_info *t = lldi->tids;
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find connection for tid %u.\n", tid);
+ goto rel_skb;
+ }
+
+ cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
+ cxgbit_put_csk(csk);
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbit_sock *csk;
+ struct cpl_rx_data *cpl = cplhdr(skb);
+ unsigned int tid = GET_TID(cpl);
+ struct cxgb4_lld_info *lldi = &cdev->lldi;
+ struct tid_info *t = lldi->tids;
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find conn. for tid %u.\n", tid);
+ goto rel_skb;
+ }
+
+ cxgbit_queue_rx_skb(csk, skb);
+ return;
+rel_skb:
+ __kfree_skb(skb);
+}
+
+static void
+__cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ spin_lock(&csk->lock);
+ if (csk->lock_owner) {
+ __skb_queue_tail(&csk->backlogq, skb);
+ spin_unlock(&csk->lock);
+ return;
+ }
+
+ cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
+ spin_unlock(&csk->lock);
+}
+
+static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ cxgbit_get_csk(csk);
+ __cxgbit_process_rx_cpl(csk, skb);
+ cxgbit_put_csk(csk);
+}
+
+static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
+{
+ struct cxgbit_sock *csk;
+ struct cpl_tx_data *cpl = cplhdr(skb);
+ struct cxgb4_lld_info *lldi = &cdev->lldi;
+ struct tid_info *t = lldi->tids;
+ unsigned int tid = GET_TID(cpl);
+ u8 opcode = cxgbit_skcb_rx_opcode(skb);
+ bool ref = true;
+
+ switch (opcode) {
+ case CPL_FW4_ACK:
+ cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack;
+ ref = false;
+ break;
+ case CPL_PEER_CLOSE:
+ cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close;
+ break;
+ case CPL_CLOSE_CON_RPL:
+ cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl;
+ break;
+ case CPL_ABORT_REQ_RSS:
+ cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss;
+ break;
+ case CPL_ABORT_RPL_RSS:
+ cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss;
+ break;
+ default:
+ goto rel_skb;
+ }
+
+ csk = lookup_tid(t, tid);
+ if (unlikely(!csk)) {
+ pr_err("can't find conn. for tid %u.\n", tid);
+ goto rel_skb;
+ }
+
+ if (ref)
+ cxgbit_process_rx_cpl(csk, skb);
+ else
+ __cxgbit_process_rx_cpl(csk, skb);
+
+ return;
+rel_skb:
+ __kfree_skb(skb);
+}
+
+cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = {
+ [CPL_PASS_OPEN_RPL] = cxgbit_pass_open_rpl,
+ [CPL_CLOSE_LISTSRV_RPL] = cxgbit_close_listsrv_rpl,
+ [CPL_PASS_ACCEPT_REQ] = cxgbit_pass_accept_req,
+ [CPL_PASS_ESTABLISH] = cxgbit_pass_establish,
+ [CPL_SET_TCB_RPL] = cxgbit_set_tcb_rpl,
+ [CPL_RX_DATA] = cxgbit_rx_data,
+ [CPL_FW4_ACK] = cxgbit_rx_cpl,
+ [CPL_PEER_CLOSE] = cxgbit_rx_cpl,
+ [CPL_CLOSE_CON_RPL] = cxgbit_rx_cpl,
+ [CPL_ABORT_REQ_RSS] = cxgbit_rx_cpl,
+ [CPL_ABORT_RPL_RSS] = cxgbit_rx_cpl,
+};
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_ddp.c b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
new file mode 100644
index 000000000000..5d78bdb7fc64
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_ddp.c
@@ -0,0 +1,325 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include "cxgbit.h"
+
+static void
+cxgbit_set_one_ppod(struct cxgbi_pagepod *ppod,
+ struct cxgbi_task_tag_info *ttinfo,
+ struct scatterlist **sg_pp, unsigned int *sg_off)
+{
+ struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
+ unsigned int offset = sg_off ? *sg_off : 0;
+ dma_addr_t addr = 0UL;
+ unsigned int len = 0;
+ int i;
+
+ memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));
+
+ if (sg) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ }
+
+ for (i = 0; i < PPOD_PAGES_MAX; i++) {
+ if (sg) {
+ ppod->addr[i] = cpu_to_be64(addr + offset);
+ offset += PAGE_SIZE;
+ if (offset == (len + sg->offset)) {
+ offset = 0;
+ sg = sg_next(sg);
+ if (sg) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ }
+ }
+ } else {
+ ppod->addr[i] = 0ULL;
+ }
+ }
+
+ /*
+ * the fifth address needs to be repeated in the next ppod, so do
+ * not move sg
+ */
+ if (sg_pp) {
+ *sg_pp = sg;
+ *sg_off = offset;
+ }
+
+ if (offset == len) {
+ offset = 0;
+ if (sg) {
+ sg = sg_next(sg);
+ if (sg)
+ addr = sg_dma_address(sg);
+ }
+ }
+ ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
+}
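
Each page pod carries PPOD_PAGES_MAX page addresses plus a fifth slot
that duplicates the first page of the next ppod, which is why the loop
above leaves sg in place for the overlap. A rough userspace model of the
resulting sizing, assuming PPOD_PAGES_MAX == 4 as in the libcxgb ppm code:

    #include <stdio.h>

    #define PPOD_PAGES_MAX 4

    static unsigned int ppods_needed(unsigned int nr_pages)
    {
        /* each ppod advances the mapping by four new pages */
        return (nr_pages + PPOD_PAGES_MAX - 1) / PPOD_PAGES_MAX;
    }

    int main(void)
    {
        printf("9 pages -> %u ppods\n", ppods_needed(9));   /* 3 */
        return 0;
    }
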
+
+static struct sk_buff *
+cxgbit_ppod_init_idata(struct cxgbit_device *cdev, struct cxgbi_ppm *ppm,
+ unsigned int idx, unsigned int npods, unsigned int tid)
+{
+ struct ulp_mem_io *req;
+ struct ulptx_idata *idata;
+ unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ppm->llimit;
+ unsigned int dlen = npods << PPOD_SIZE_SHIFT;
+ unsigned int wr_len = roundup(sizeof(struct ulp_mem_io) +
+ sizeof(struct ulptx_idata) + dlen, 16);
+ struct sk_buff *skb;
+
+ skb = alloc_skb(wr_len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
+ INIT_ULPTX_WR(req, wr_len, 0, tid);
+ req->wr.wr_hi = htonl(FW_WR_OP_V(FW_ULPTX_WR) |
+ FW_WR_ATOMIC_V(0));
+ req->cmd = htonl(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
+ ULP_MEMIO_ORDER_V(0) |
+ T5_ULP_MEMIO_IMM_V(1));
+ req->dlen = htonl(ULP_MEMIO_DATA_LEN_V(dlen >> 5));
+ req->lock_addr = htonl(ULP_MEMIO_ADDR_V(pm_addr >> 5));
+ req->len16 = htonl(DIV_ROUND_UP(wr_len - sizeof(req->wr), 16));
+
+ idata = (struct ulptx_idata *)(req + 1);
+ idata->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
+ idata->len = htonl(dlen);
+
+ return skb;
+}
+
+static int
+cxgbit_ppod_write_idata(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
+ struct cxgbi_task_tag_info *ttinfo, unsigned int idx,
+ unsigned int npods, struct scatterlist **sg_pp,
+ unsigned int *sg_off)
+{
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct sk_buff *skb;
+ struct ulp_mem_io *req;
+ struct ulptx_idata *idata;
+ struct cxgbi_pagepod *ppod;
+ unsigned int i;
+
+ skb = cxgbit_ppod_init_idata(cdev, ppm, idx, npods, csk->tid);
+ if (!skb)
+ return -ENOMEM;
+
+ req = (struct ulp_mem_io *)skb->data;
+ idata = (struct ulptx_idata *)(req + 1);
+ ppod = (struct cxgbi_pagepod *)(idata + 1);
+
+ for (i = 0; i < npods; i++, ppod++)
+ cxgbit_set_one_ppod(ppod, ttinfo, sg_pp, sg_off);
+
+ __skb_queue_tail(&csk->ppodq, skb);
+
+ return 0;
+}
+
+static int
+cxgbit_ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbit_sock *csk,
+ struct cxgbi_task_tag_info *ttinfo)
+{
+ unsigned int pidx = ttinfo->idx;
+ unsigned int npods = ttinfo->npods;
+ unsigned int i, cnt;
+ struct scatterlist *sg = ttinfo->sgl;
+ unsigned int offset = 0;
+ int ret = 0;
+
+ for (i = 0; i < npods; i += cnt, pidx += cnt) {
+ cnt = npods - i;
+
+ if (cnt > ULPMEM_IDATA_MAX_NPPODS)
+ cnt = ULPMEM_IDATA_MAX_NPPODS;
+
+ ret = cxgbit_ppod_write_idata(ppm, csk, ttinfo, pidx, cnt,
+ &sg, &offset);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+static int cxgbit_ddp_sgl_check(struct scatterlist *sg,
+ unsigned int nents)
+{
+ unsigned int last_sgidx = nents - 1;
+ unsigned int i;
+
+ for (i = 0; i < nents; i++, sg = sg_next(sg)) {
+ unsigned int len = sg->length + sg->offset;
+
+ if ((sg->offset & 0x3) || (i && sg->offset) ||
+ ((i != last_sgidx) && (len != PAGE_SIZE))) {
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
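
In words: DDP only accepts a scatterlist that describes one virtually
contiguous buffer, so only the first entry may start at a nonzero
(4-byte aligned) offset and every entry except the last must end exactly
on a page boundary. A userspace sketch of the same rule, using a mock
entry type (hypothetical, for illustration) instead of struct scatterlist:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    struct mock_sg { unsigned int offset, length; };

    static bool ddp_sgl_ok(const struct mock_sg *sg, unsigned int nents)
    {
        for (unsigned int i = 0; i < nents; i++) {
            unsigned int end = sg[i].offset + sg[i].length;

            if (sg[i].offset & 0x3)                  /* not 4-byte aligned */
                return false;
            if (i && sg[i].offset)                   /* hole before entry */
                return false;
            if (i != nents - 1 && end != PAGE_SIZE)  /* middle entry short */
                return false;
        }
        return true;
    }

    int main(void)
    {
        struct mock_sg good[] = { { 512, 3584 }, { 0, 4096 }, { 0, 1000 } };
        struct mock_sg bad[]  = { { 0, 2048 }, { 0, 4096 } };

        printf("good: %d, bad: %d\n", ddp_sgl_ok(good, 3), ddp_sgl_ok(bad, 2));
        return 0;
    }
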
+
+static int
+cxgbit_ddp_reserve(struct cxgbit_sock *csk, struct cxgbi_task_tag_info *ttinfo,
+ unsigned int xferlen)
+{
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct cxgbi_ppm *ppm = cdev2ppm(cdev);
+ struct scatterlist *sgl = ttinfo->sgl;
+ unsigned int sgcnt = ttinfo->nents;
+ unsigned int sg_offset = sgl->offset;
+ int ret;
+
+ if ((xferlen < DDP_THRESHOLD) || (!sgcnt)) {
+ pr_debug("ppm 0x%p, pgidx %u, xfer %u, sgcnt %u, NO ddp.\n",
+ ppm, ppm->tformat.pgsz_idx_dflt,
+ xferlen, ttinfo->nents);
+ return -EINVAL;
+ }
+
+ if (cxgbit_ddp_sgl_check(sgl, sgcnt) < 0)
+ return -EINVAL;
+
+ ttinfo->nr_pages = (xferlen + sgl->offset +
+ (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
+
+ /*
+ * the ddp tag will be used for the ttt in the outgoing r2t pdu
+ */
+ ret = cxgbi_ppm_ppods_reserve(ppm, ttinfo->nr_pages, 0, &ttinfo->idx,
+ &ttinfo->tag, 0);
+ if (ret < 0)
+ return ret;
+ ttinfo->npods = ret;
+
+ sgl->offset = 0;
+ ret = dma_map_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
+ sgl->offset = sg_offset;
+ if (!ret) {
+ pr_info("%s: 0x%x, xfer %u, sgl %u dma mapping err.\n",
+ __func__, 0, xferlen, sgcnt);
+ goto rel_ppods;
+ }
+
+ cxgbi_ppm_make_ppod_hdr(ppm, ttinfo->tag, csk->tid, sgl->offset,
+ xferlen, &ttinfo->hdr);
+
+ ret = cxgbit_ddp_set_map(ppm, csk, ttinfo);
+ if (ret < 0) {
+ __skb_queue_purge(&csk->ppodq);
+ dma_unmap_sg(&ppm->pdev->dev, sgl, sgcnt, DMA_FROM_DEVICE);
+ goto rel_ppods;
+ }
+
+ return 0;
+
+rel_ppods:
+ cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
+ return -EINVAL;
+}
+
+void
+cxgbit_get_r2t_ttt(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ struct iscsi_r2t *r2t)
+{
+ struct cxgbit_sock *csk = conn->context;
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
+ struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
+ int ret = -EINVAL;
+
+ if ((!ccmd->setup_ddp) ||
+ (!test_bit(CSK_DDP_ENABLE, &csk->com.flags)))
+ goto out;
+
+ ccmd->setup_ddp = false;
+
+ ttinfo->sgl = cmd->se_cmd.t_data_sg;
+ ttinfo->nents = cmd->se_cmd.t_data_nents;
+
+ ret = cxgbit_ddp_reserve(csk, ttinfo, cmd->se_cmd.data_length);
+ if (ret < 0) {
+ pr_info("csk 0x%p, cmd 0x%p, xfer len %u, sgcnt %u no ddp.\n",
+ csk, cmd, cmd->se_cmd.data_length, ttinfo->nents);
+
+ ttinfo->sgl = NULL;
+ ttinfo->nents = 0;
+ } else {
+ ccmd->release = true;
+ }
+out:
+ pr_debug("cdev 0x%p, cmd 0x%p, tag 0x%x\n", cdev, cmd, ttinfo->tag);
+ r2t->targ_xfer_tag = ttinfo->tag;
+}
+
+void cxgbit_release_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+ struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
+
+ if (ccmd->release) {
+ struct cxgbi_task_tag_info *ttinfo = &ccmd->ttinfo;
+
+ if (ttinfo->sgl) {
+ struct cxgbit_sock *csk = conn->context;
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct cxgbi_ppm *ppm = cdev2ppm(cdev);
+
+ cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
+
+ dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
+ ttinfo->nents, DMA_FROM_DEVICE);
+ } else {
+ put_page(sg_page(&ccmd->sg));
+ }
+
+ ccmd->release = false;
+ }
+}
+
+int cxgbit_ddp_init(struct cxgbit_device *cdev)
+{
+ struct cxgb4_lld_info *lldi = &cdev->lldi;
+ struct net_device *ndev = cdev->lldi.ports[0];
+ struct cxgbi_tag_format tformat;
+ unsigned int ppmax;
+ int ret, i;
+
+ if (!lldi->vr->iscsi.size) {
+ pr_warn("%s, iscsi NOT enabled, check config!\n", ndev->name);
+ return -EACCES;
+ }
+
+ ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT;
+
+ memset(&tformat, 0, sizeof(struct cxgbi_tag_format));
+ for (i = 0; i < 4; i++)
+ tformat.pgsz_order[i] = (lldi->iscsi_pgsz_order >> (i << 3))
+ & 0xF;
+ cxgbi_tagmask_check(lldi->iscsi_tagmask, &tformat);
+
+ ret = cxgbi_ppm_init(lldi->iscsi_ppm, cdev->lldi.ports[0],
+ cdev->lldi.pdev, &cdev->lldi, &tformat,
+ ppmax, lldi->iscsi_llimit,
+ lldi->vr->iscsi.start, 2);
+ if (ret >= 0) {
+ struct cxgbi_ppm *ppm = (struct cxgbi_ppm *)(*lldi->iscsi_ppm);
+
+ if ((ppm->tformat.pgsz_idx_dflt < DDP_PGIDX_MAX) &&
+ (ppm->ppmax >= 1024))
+ set_bit(CDEV_DDP_ENABLE, &cdev->flags);
+ ret = 0;
+ }
+
+ return ret;
+}
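
For scale: the page-pod table is carved out of the adapter's iSCSI
memory window at one ppod per 64 bytes (PPOD_SIZE_SHIFT == 6 in the
libcxgb ppm code, assumed here), and DDP is only switched on above when
at least 1024 ppods fit. With an assumed 16 MB window:

    #include <stdio.h>

    int main(void)
    {
        unsigned int iscsi_vr_size = 16u << 20; /* assumed 16 MB window */
        unsigned int ppod_size_shift = 6;       /* 64-byte page pods */

        printf("ppmax = %u\n", iscsi_vr_size >> ppod_size_shift); /* 262144 */
        return 0;
    }
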
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_lro.h b/drivers/target/iscsi/cxgbit/cxgbit_lro.h
new file mode 100644
index 000000000000..28c11bd1b930
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_lro.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#ifndef __CXGBIT_LRO_H__
+#define __CXGBIT_LRO_H__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#define LRO_FLUSH_LEN_MAX 65535
+
+struct cxgbit_lro_cb {
+ struct cxgbit_sock *csk;
+ u32 pdu_totallen;
+ u32 offset;
+ u8 pdu_idx;
+ bool complete;
+};
+
+enum cxgbit_pducb_flags {
+ PDUCBF_RX_HDR = (1 << 0), /* received pdu header */
+ PDUCBF_RX_DATA = (1 << 1), /* received pdu payload */
+ PDUCBF_RX_STATUS = (1 << 2), /* received ddp status */
+ PDUCBF_RX_DATA_DDPD = (1 << 3), /* pdu payload ddp'd */
+ PDUCBF_RX_HCRC_ERR = (1 << 4), /* header digest error */
+ PDUCBF_RX_DCRC_ERR = (1 << 5), /* data digest error */
+};
+
+struct cxgbit_lro_pdu_cb {
+ u8 flags;
+ u8 frags;
+ u8 hfrag_idx;
+ u8 nr_dfrags;
+ u8 dfrag_idx;
+ bool complete;
+ u32 seq;
+ u32 pdulen;
+ u32 hlen;
+ u32 dlen;
+ u32 doffset;
+ u32 ddigest;
+ void *hdr;
+};
+
+#define LRO_SKB_MAX_HEADROOM \
+ (sizeof(struct cxgbit_lro_cb) + \
+ (MAX_SKB_FRAGS * sizeof(struct cxgbit_lro_pdu_cb)))
+
+#define LRO_SKB_MIN_HEADROOM \
+ (sizeof(struct cxgbit_lro_cb) + \
+ sizeof(struct cxgbit_lro_pdu_cb))
+
+#define cxgbit_skb_lro_cb(skb) ((struct cxgbit_lro_cb *)skb->data)
+#define cxgbit_skb_lro_pdu_cb(skb, i) \
+ ((struct cxgbit_lro_pdu_cb *)(skb->data + sizeof(struct cxgbit_lro_cb) \
+ + (i * sizeof(struct cxgbit_lro_pdu_cb))))
+
+#define CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT 16 /* ddp'able */
+#define CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT 19 /* pad error */
+#define CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT 20 /* hcrc error */
+#define CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT 21 /* dcrc error */
+
+#endif /* __CXGBIT_LRO_H__ */
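
The two headroom macros size a control region at the front of each LRO
skb: one cxgbit_lro_cb followed by one cxgbit_lro_pdu_cb per coalesced
PDU, at most MAX_SKB_FRAGS of them (matching the flush check in
cxgbit_lro_receive() in cxgbit_main.c). A userspace sketch of the same
layout arithmetic, with mock structs mirroring the definitions above and
MAX_SKB_FRAGS assumed to be 17 (the usual value with 4 KB pages):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_SKB_FRAGS 17    /* assumed: 65536 / 4096 + 1 */

    struct lro_cb {
        void *csk;
        uint32_t pdu_totallen, offset;
        uint8_t pdu_idx;
        bool complete;
    };

    struct lro_pdu_cb {
        uint8_t flags, frags, hfrag_idx, nr_dfrags, dfrag_idx;
        bool complete;
        uint32_t seq, pdulen, hlen, dlen, doffset, ddigest;
        void *hdr;
    };

    int main(void)
    {
        /* pdu_cb(i) lives at data + sizeof(lro_cb) + i * sizeof(lro_pdu_cb) */
        size_t max = sizeof(struct lro_cb) +
                     MAX_SKB_FRAGS * sizeof(struct lro_pdu_cb);

        printf("max headroom: %zu bytes\n", max);
        return 0;
    }
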
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
new file mode 100644
index 000000000000..60dccd02bd85
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -0,0 +1,702 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define DRV_NAME "cxgbit"
+#define DRV_VERSION "1.0.0-ko"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
+#include "cxgbit.h"
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+#include <net/dcbevent.h>
+#include "cxgb4_dcb.h"
+#endif
+
+LIST_HEAD(cdev_list_head);
+/* cdev list lock */
+DEFINE_MUTEX(cdev_list_lock);
+
+void _cxgbit_free_cdev(struct kref *kref)
+{
+ struct cxgbit_device *cdev;
+
+ cdev = container_of(kref, struct cxgbit_device, kref);
+ kfree(cdev);
+}
+
+static void cxgbit_set_mdsl(struct cxgbit_device *cdev)
+{
+ struct cxgb4_lld_info *lldi = &cdev->lldi;
+ u32 mdsl;
+
+#define ULP2_MAX_PKT_LEN 16224
+#define ISCSI_PDU_NONPAYLOAD_LEN 312
+ mdsl = min_t(u32, lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN,
+ ULP2_MAX_PKT_LEN - ISCSI_PDU_NONPAYLOAD_LEN);
+ mdsl = min_t(u32, mdsl, 8192);
+ mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE);
+
+ cdev->mdsl = mdsl;
+}
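
The clamp chain above caps the advertised data segment length at the
adapter's iSCSI I/O length, the ULP_TX packet ceiling, a flat 8 KB, and
what one skb can carry in page frags. A standalone re-run with assumed
inputs (16 KB iolen, 4 KB pages, MAX_SKB_FRAGS == 17):

    #include <stdio.h>

    #define ULP2_MAX_PKT_LEN         16224u
    #define ISCSI_PDU_NONPAYLOAD_LEN 312u

    static unsigned int min_u32(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned int iolen = 16384, page_size = 4096, max_frags = 17;
        unsigned int mdsl;

        mdsl = min_u32(iolen - ISCSI_PDU_NONPAYLOAD_LEN,
                       ULP2_MAX_PKT_LEN - ISCSI_PDU_NONPAYLOAD_LEN);
        mdsl = min_u32(mdsl, 8192);
        mdsl = min_u32(mdsl, (max_frags - 1) * page_size);

        printf("mdsl = %u\n", mdsl);    /* 8192 */
        return 0;
    }
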
+
+static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi)
+{
+ struct cxgbit_device *cdev;
+
+ if (is_t4(lldi->adapter_type))
+ return ERR_PTR(-ENODEV);
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&cdev->kref);
+
+ cdev->lldi = *lldi;
+
+ cxgbit_set_mdsl(cdev);
+
+ if (cxgbit_ddp_init(cdev) < 0) {
+ kfree(cdev);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!test_bit(CDEV_DDP_ENABLE, &cdev->flags))
+ pr_info("cdev %s ddp init failed\n",
+ pci_name(lldi->pdev));
+
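+ /* iSCSI segmentation offload needs firmware 1.13.43.0 (0x10d2b00) or newer */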
+ if (lldi->fw_vers >= 0x10d2b00)
+ set_bit(CDEV_ISO_ENABLE, &cdev->flags);
+
+ spin_lock_init(&cdev->cskq.lock);
+ INIT_LIST_HEAD(&cdev->cskq.list);
+
+ mutex_lock(&cdev_list_lock);
+ list_add_tail(&cdev->list, &cdev_list_head);
+ mutex_unlock(&cdev_list_lock);
+
+ pr_info("cdev %s added for iSCSI target transport\n",
+ pci_name(lldi->pdev));
+
+ return cdev;
+}
+
+static void cxgbit_close_conn(struct cxgbit_device *cdev)
+{
+ struct cxgbit_sock *csk;
+ struct sk_buff *skb;
+ bool wakeup_thread = false;
+
+ spin_lock_bh(&cdev->cskq.lock);
+ list_for_each_entry(csk, &cdev->cskq.list, list) {
+ skb = alloc_skb(0, GFP_ATOMIC);
+ if (!skb)
+ continue;
+
+ spin_lock_bh(&csk->rxq.lock);
+ __skb_queue_tail(&csk->rxq, skb);
+ if (skb_queue_len(&csk->rxq) == 1)
+ wakeup_thread = true;
+ spin_unlock_bh(&csk->rxq.lock);
+
+ if (wakeup_thread) {
+ wake_up(&csk->waitq);
+ wakeup_thread = false;
+ }
+ }
+ spin_unlock_bh(&cdev->cskq.lock);
+}
+
+static void cxgbit_detach_cdev(struct cxgbit_device *cdev)
+{
+ bool free_cdev = false;
+
+ spin_lock_bh(&cdev->cskq.lock);
+ if (list_empty(&cdev->cskq.list))
+ free_cdev = true;
+ spin_unlock_bh(&cdev->cskq.lock);
+
+ if (free_cdev) {
+ mutex_lock(&cdev_list_lock);
+ list_del(&cdev->list);
+ mutex_unlock(&cdev_list_lock);
+
+ cxgbit_put_cdev(cdev);
+ } else {
+ cxgbit_close_conn(cdev);
+ }
+}
+
+static int cxgbit_uld_state_change(void *handle, enum cxgb4_state state)
+{
+ struct cxgbit_device *cdev = handle;
+
+ switch (state) {
+ case CXGB4_STATE_UP:
+ set_bit(CDEV_STATE_UP, &cdev->flags);
+ pr_info("cdev %s state UP.\n", pci_name(cdev->lldi.pdev));
+ break;
+ case CXGB4_STATE_START_RECOVERY:
+ clear_bit(CDEV_STATE_UP, &cdev->flags);
+ cxgbit_close_conn(cdev);
+ pr_info("cdev %s state RECOVERY.\n", pci_name(cdev->lldi.pdev));
+ break;
+ case CXGB4_STATE_DOWN:
+ pr_info("cdev %s state DOWN.\n", pci_name(cdev->lldi.pdev));
+ break;
+ case CXGB4_STATE_DETACH:
+ clear_bit(CDEV_STATE_UP, &cdev->flags);
+ pr_info("cdev %s state DETACH.\n", pci_name(cdev->lldi.pdev));
+ cxgbit_detach_cdev(cdev);
+ break;
+ default:
+ pr_info("cdev %s unknown state %d.\n",
+ pci_name(cdev->lldi.pdev), state);
+ break;
+ }
+ return 0;
+}
+
+static void
+cxgbit_proc_ddp_status(unsigned int tid, struct cpl_rx_data_ddp *cpl,
+ struct cxgbit_lro_pdu_cb *pdu_cb)
+{
+ unsigned int status = ntohl(cpl->ddpvld);
+
+ pdu_cb->flags |= PDUCBF_RX_STATUS;
+ pdu_cb->ddigest = ntohl(cpl->ulp_crc);
+ pdu_cb->pdulen = ntohs(cpl->len);
+
+ if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) {
+ pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", tid, status);
+ pdu_cb->flags |= PDUCBF_RX_HCRC_ERR;
+ }
+
+ if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) {
+ pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", tid, status);
+ pdu_cb->flags |= PDUCBF_RX_DCRC_ERR;
+ }
+
+ if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT))
+ pr_info("tid 0x%x, status 0x%x, pad bad.\n", tid, status);
+
+ if ((status & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) &&
+ (!(pdu_cb->flags & PDUCBF_RX_DATA))) {
+ pdu_cb->flags |= PDUCBF_RX_DATA_DDPD;
+ }
+}
+
+static void
+cxgbit_lro_add_packet_rsp(struct sk_buff *skb, u8 op, const __be64 *rsp)
+{
+ struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
+ lro_cb->pdu_idx);
+ struct cpl_rx_iscsi_ddp *cpl = (struct cpl_rx_iscsi_ddp *)(rsp + 1);
+
+ cxgbit_proc_ddp_status(lro_cb->csk->tid, cpl, pdu_cb);
+
+ if (pdu_cb->flags & PDUCBF_RX_HDR)
+ pdu_cb->complete = true;
+
+ lro_cb->complete = true;
+ lro_cb->pdu_totallen += pdu_cb->pdulen;
+ lro_cb->pdu_idx++;
+}
+
+static void
+cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl,
+ unsigned int offset)
+{
+ u8 skb_frag_idx = skb_shinfo(skb)->nr_frags;
+ u8 i;
+
+ /* usually there's just one frag */
+ __skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page,
+ gl->frags[0].offset + offset,
+ gl->frags[0].size - offset);
+ for (i = 1; i < gl->nfrags; i++)
+ __skb_fill_page_desc(skb, skb_frag_idx + i,
+ gl->frags[i].page,
+ gl->frags[i].offset,
+ gl->frags[i].size);
+
+ skb_shinfo(skb)->nr_frags += gl->nfrags;
+
+ /* get a reference to the last page, we don't own it */
+ get_page(gl->frags[gl->nfrags - 1].page);
+}
+
+static void
+cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl)
+{
+ struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
+ lro_cb->pdu_idx);
+ u32 len, offset;
+
+ if (op == CPL_ISCSI_HDR) {
+ struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)gl->va;
+
+ offset = sizeof(struct cpl_iscsi_hdr);
+ pdu_cb->flags |= PDUCBF_RX_HDR;
+ pdu_cb->seq = ntohl(cpl->seq);
+ len = ntohs(cpl->len);
+ pdu_cb->hdr = gl->va + offset;
+ pdu_cb->hlen = len;
+ pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;
+
+ if (unlikely(gl->nfrags > 1))
+ cxgbit_skcb_flags(skb) = 0;
+
+ lro_cb->complete = false;
+ } else {
+ struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va;
+
+ offset = sizeof(struct cpl_iscsi_data);
+ pdu_cb->flags |= PDUCBF_RX_DATA;
+ len = ntohs(cpl->len);
+ pdu_cb->dlen = len;
+ pdu_cb->doffset = lro_cb->offset;
+ pdu_cb->nr_dfrags = gl->nfrags;
+ pdu_cb->dfrag_idx = skb_shinfo(skb)->nr_frags;
+ }
+
+ cxgbit_copy_frags(skb, gl, offset);
+
+ pdu_cb->frags += gl->nfrags;
+ lro_cb->offset += len;
+ skb->len += len;
+ skb->data_len += len;
+ skb->truesize += len;
+}
+
+static struct sk_buff *
+cxgbit_lro_init_skb(struct cxgbit_sock *csk, u8 op, const struct pkt_gl *gl,
+ const __be64 *rsp, struct napi_struct *napi)
+{
+ struct sk_buff *skb;
+ struct cxgbit_lro_cb *lro_cb;
+
+ skb = napi_alloc_skb(napi, LRO_SKB_MAX_HEADROOM);
+
+ if (unlikely(!skb))
+ return NULL;
+
+ memset(skb->data, 0, LRO_SKB_MAX_HEADROOM);
+
+ cxgbit_skcb_flags(skb) |= SKCBF_RX_LRO;
+
+ lro_cb = cxgbit_skb_lro_cb(skb);
+
+ cxgbit_get_csk(csk);
+
+ lro_cb->csk = csk;
+
+ return skb;
+}
+
+static void cxgbit_queue_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ bool wakeup_thread = false;
+
+ spin_lock(&csk->rxq.lock);
+ __skb_queue_tail(&csk->rxq, skb);
+ if (skb_queue_len(&csk->rxq) == 1)
+ wakeup_thread = true;
+ spin_unlock(&csk->rxq.lock);
+
+ if (wakeup_thread)
+ wake_up(&csk->waitq);
+}
+
+static void cxgbit_lro_flush(struct t4_lro_mgr *lro_mgr, struct sk_buff *skb)
+{
+ struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+ struct cxgbit_sock *csk = lro_cb->csk;
+
+ csk->lro_skb = NULL;
+
+ __skb_unlink(skb, &lro_mgr->lroq);
+ cxgbit_queue_lro_skb(csk, skb);
+
+ cxgbit_put_csk(csk);
+
+ lro_mgr->lro_pkts++;
+ lro_mgr->lro_session_cnt--;
+}
+
+static void cxgbit_uld_lro_flush(struct t4_lro_mgr *lro_mgr)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_peek(&lro_mgr->lroq)))
+ cxgbit_lro_flush(lro_mgr, skb);
+}
+
+static int
+cxgbit_lro_receive(struct cxgbit_sock *csk, u8 op, const __be64 *rsp,
+ const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
+ struct napi_struct *napi)
+{
+ struct sk_buff *skb;
+ struct cxgbit_lro_cb *lro_cb;
+
+ if (!csk) {
+ pr_err("%s: csk NULL, op 0x%x.\n", __func__, op);
+ goto out;
+ }
+
+ if (csk->lro_skb)
+ goto add_packet;
+
+start_lro:
+ if (lro_mgr->lro_session_cnt >= MAX_LRO_SESSIONS) {
+ cxgbit_uld_lro_flush(lro_mgr);
+ goto start_lro;
+ }
+
+ skb = cxgbit_lro_init_skb(csk, op, gl, rsp, napi);
+ if (unlikely(!skb))
+ goto out;
+
+ csk->lro_skb = skb;
+
+ __skb_queue_tail(&lro_mgr->lroq, skb);
+ lro_mgr->lro_session_cnt++;
+
+add_packet:
+ skb = csk->lro_skb;
+ lro_cb = cxgbit_skb_lro_cb(skb);
+
+ if ((gl && (((skb_shinfo(skb)->nr_frags + gl->nfrags) >
+ MAX_SKB_FRAGS) || (lro_cb->pdu_totallen >= LRO_FLUSH_LEN_MAX))) ||
+ (lro_cb->pdu_idx >= MAX_SKB_FRAGS)) {
+ cxgbit_lro_flush(lro_mgr, skb);
+ goto start_lro;
+ }
+
+ if (gl)
+ cxgbit_lro_add_packet_gl(skb, op, gl);
+ else
+ cxgbit_lro_add_packet_rsp(skb, op, rsp);
+
+ lro_mgr->lro_merged++;
+
+ return 0;
+
+out:
+ return -1;
+}
+
+static int
+cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
+ const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
+ struct napi_struct *napi)
+{
+ struct cxgbit_device *cdev = hndl;
+ struct cxgb4_lld_info *lldi = &cdev->lldi;
+ struct cpl_tx_data *rpl = NULL;
+ struct cxgbit_sock *csk = NULL;
+ unsigned int tid = 0;
+ struct sk_buff *skb;
+ unsigned int op = *(u8 *)rsp;
+ bool lro_flush = true;
+
+ switch (op) {
+ case CPL_ISCSI_HDR:
+ case CPL_ISCSI_DATA:
+ case CPL_RX_ISCSI_DDP:
+ case CPL_FW4_ACK:
+ lro_flush = false;
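+ /* fall through */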
+ case CPL_ABORT_RPL_RSS:
+ case CPL_PASS_ESTABLISH:
+ case CPL_PEER_CLOSE:
+ case CPL_CLOSE_CON_RPL:
+ case CPL_ABORT_REQ_RSS:
+ case CPL_SET_TCB_RPL:
+ case CPL_RX_DATA:
+ rpl = gl ? (struct cpl_tx_data *)gl->va :
+ (struct cpl_tx_data *)(rsp + 1);
+ tid = GET_TID(rpl);
+ csk = lookup_tid(lldi->tids, tid);
+ break;
+ default:
+ break;
+ }
+
+ if (csk && csk->lro_skb && lro_flush)
+ cxgbit_lro_flush(lro_mgr, csk->lro_skb);
+
+ if (!gl) {
+ unsigned int len;
+
+ if (op == CPL_RX_ISCSI_DDP) {
+ if (!cxgbit_lro_receive(csk, op, rsp, NULL, lro_mgr,
+ napi))
+ return 0;
+ }
+
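+ /* 64-byte ingress descriptor minus the leading 8-byte RSS header
+ * (skipped via &rsp[1]) and the trailing struct rsp_ctrl
+ */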
+ len = 64 - sizeof(struct rsp_ctrl) - 8;
+ skb = napi_alloc_skb(napi, len);
+ if (!skb)
+ goto nomem;
+ __skb_put(skb, len);
+ skb_copy_to_linear_data(skb, &rsp[1], len);
+ } else {
+ if (unlikely(op != *(u8 *)gl->va)) {
+ pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
+ gl->va, be64_to_cpu(*rsp),
+ be64_to_cpu(*(u64 *)gl->va),
+ gl->tot_len);
+ return 0;
+ }
+
+ if (op == CPL_ISCSI_HDR || op == CPL_ISCSI_DATA) {
+ if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr,
+ napi))
+ return 0;
+ }
+
+#define RX_PULL_LEN 128
+ skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
+ if (unlikely(!skb))
+ goto nomem;
+ }
+
+ rpl = (struct cpl_tx_data *)skb->data;
+ op = rpl->ot.opcode;
+ cxgbit_skcb_rx_opcode(skb) = op;
+
+ pr_debug("cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
+ cdev, op, rpl->ot.opcode_tid,
+ ntohl(rpl->ot.opcode_tid), skb);
+
+ if (op < NUM_CPL_CMDS && cxgbit_cplhandlers[op]) {
+ cxgbit_cplhandlers[op](cdev, skb);
+ } else {
+ pr_err("No handler for opcode 0x%x.\n", op);
+ __kfree_skb(skb);
+ }
+ return 0;
+nomem:
+ pr_err("%s OOM bailing out.\n", __func__);
+ return 1;
+}
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+struct cxgbit_dcb_work {
+ struct dcb_app_type dcb_app;
+ struct work_struct work;
+};
+
+static void
+cxgbit_update_dcb_priority(struct cxgbit_device *cdev, u8 port_id,
+ u8 dcb_priority, u16 port_num)
+{
+ struct cxgbit_sock *csk;
+ struct sk_buff *skb;
+ u16 local_port;
+ bool wakeup_thread = false;
+
+ spin_lock_bh(&cdev->cskq.lock);
+ list_for_each_entry(csk, &cdev->cskq.list, list) {
+ if (csk->port_id != port_id)
+ continue;
+
+ if (csk->com.local_addr.ss_family == AF_INET6) {
+ struct sockaddr_in6 *sock_in6;
+
+ sock_in6 = (struct sockaddr_in6 *)&csk->com.local_addr;
+ local_port = ntohs(sock_in6->sin6_port);
+ } else {
+ struct sockaddr_in *sock_in;
+
+ sock_in = (struct sockaddr_in *)&csk->com.local_addr;
+ local_port = ntohs(sock_in->sin_port);
+ }
+
+ if (local_port != port_num)
+ continue;
+
+ if (csk->dcb_priority == dcb_priority)
+ continue;
+
+ skb = alloc_skb(0, GFP_ATOMIC);
+ if (!skb)
+ continue;
+
+ spin_lock(&csk->rxq.lock);
+ __skb_queue_tail(&csk->rxq, skb);
+ if (skb_queue_len(&csk->rxq) == 1)
+ wakeup_thread = true;
+ spin_unlock(&csk->rxq.lock);
+
+ if (wakeup_thread) {
+ wake_up(&csk->waitq);
+ wakeup_thread = false;
+ }
+ }
+ spin_unlock_bh(&cdev->cskq.lock);
+}
+
+static void cxgbit_dcb_workfn(struct work_struct *work)
+{
+ struct cxgbit_dcb_work *dcb_work;
+ struct net_device *ndev;
+ struct cxgbit_device *cdev = NULL;
+ struct dcb_app_type *iscsi_app;
+ u8 priority, port_id = 0xff;
+
+ dcb_work = container_of(work, struct cxgbit_dcb_work, work);
+ iscsi_app = &dcb_work->dcb_app;
+
+ if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
+ if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)
+ goto out;
+
+ priority = iscsi_app->app.priority;
+
+ } else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
+ if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
+ goto out;
+
+ if (!iscsi_app->app.priority)
+ goto out;
+
+ priority = ffs(iscsi_app->app.priority) - 1;
+ } else {
+ goto out;
+ }
+
+ pr_debug("priority for ifid %d is %u\n",
+ iscsi_app->ifindex, priority);
+
+ ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
+
+ if (!ndev)
+ goto out;
+
+ mutex_lock(&cdev_list_lock);
+ cdev = cxgbit_find_device(ndev, &port_id);
+
+ dev_put(ndev);
+
+ if (!cdev) {
+ mutex_unlock(&cdev_list_lock);
+ goto out;
+ }
+
+ cxgbit_update_dcb_priority(cdev, port_id, priority,
+ iscsi_app->app.protocol);
+ mutex_unlock(&cdev_list_lock);
+out:
+ kfree(dcb_work);
+}
+
+static int
+cxgbit_dcbevent_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct cxgbit_dcb_work *dcb_work;
+ struct dcb_app_type *dcb_app = data;
+
+ dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC);
+ if (!dcb_work)
+ return NOTIFY_DONE;
+
+ dcb_work->dcb_app = *dcb_app;
+ INIT_WORK(&dcb_work->work, cxgbit_dcb_workfn);
+ schedule_work(&dcb_work->work);
+ return NOTIFY_OK;
+}
+#endif
+
+static enum target_prot_op cxgbit_get_sup_prot_ops(struct iscsi_conn *conn)
+{
+ return TARGET_PROT_NORMAL;
+}
+
+static struct iscsit_transport cxgbit_transport = {
+ .name = DRV_NAME,
+ .transport_type = ISCSI_CXGBIT,
+ .rdma_shutdown = false,
+ .priv_size = sizeof(struct cxgbit_cmd),
+ .owner = THIS_MODULE,
+ .iscsit_setup_np = cxgbit_setup_np,
+ .iscsit_accept_np = cxgbit_accept_np,
+ .iscsit_free_np = cxgbit_free_np,
+ .iscsit_free_conn = cxgbit_free_conn,
+ .iscsit_get_login_rx = cxgbit_get_login_rx,
+ .iscsit_put_login_tx = cxgbit_put_login_tx,
+ .iscsit_immediate_queue = iscsit_immediate_queue,
+ .iscsit_response_queue = iscsit_response_queue,
+ .iscsit_get_dataout = iscsit_build_r2ts_for_cmd,
+ .iscsit_queue_data_in = iscsit_queue_rsp,
+ .iscsit_queue_status = iscsit_queue_rsp,
+ .iscsit_xmit_pdu = cxgbit_xmit_pdu,
+ .iscsit_get_r2t_ttt = cxgbit_get_r2t_ttt,
+ .iscsit_get_rx_pdu = cxgbit_get_rx_pdu,
+ .iscsit_validate_params = cxgbit_validate_params,
+ .iscsit_release_cmd = cxgbit_release_cmd,
+ .iscsit_aborted_task = iscsit_aborted_task,
+ .iscsit_get_sup_prot_ops = cxgbit_get_sup_prot_ops,
+};
+
+static struct cxgb4_uld_info cxgbit_uld_info = {
+ .name = DRV_NAME,
+ .add = cxgbit_uld_add,
+ .state_change = cxgbit_uld_state_change,
+ .lro_rx_handler = cxgbit_uld_lro_rx_handler,
+ .lro_flush = cxgbit_uld_lro_flush,
+};
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+static struct notifier_block cxgbit_dcbevent_nb = {
+ .notifier_call = cxgbit_dcbevent_notify,
+};
+#endif
+
+static int __init cxgbit_init(void)
+{
+ cxgb4_register_uld(CXGB4_ULD_ISCSIT, &cxgbit_uld_info);
+ iscsit_register_transport(&cxgbit_transport);
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+ pr_info("%s dcb enabled.\n", DRV_NAME);
+ register_dcbevent_notifier(&cxgbit_dcbevent_nb);
+#endif
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
+ sizeof(union cxgbit_skb_cb));
+ return 0;
+}
+
+static void __exit cxgbit_exit(void)
+{
+ struct cxgbit_device *cdev, *tmp;
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+ unregister_dcbevent_notifier(&cxgbit_dcbevent_nb);
+#endif
+ mutex_lock(&cdev_list_lock);
+ list_for_each_entry_safe(cdev, tmp, &cdev_list_head, list) {
+ list_del(&cdev->list);
+ cxgbit_put_cdev(cdev);
+ }
+ mutex_unlock(&cdev_list_lock);
+ iscsit_unregister_transport(&cxgbit_transport);
+ cxgb4_unregister_uld(CXGB4_ULD_ISCSIT);
+}
+
+module_init(cxgbit_init);
+module_exit(cxgbit_exit);
+
+MODULE_DESCRIPTION("Chelsio iSCSI target offload driver");
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
new file mode 100644
index 000000000000..d02bf58aea6d
--- /dev/null
+++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
@@ -0,0 +1,1561 @@
+/*
+ * Copyright (c) 2016 Chelsio Communications, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <asm/unaligned.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric.h>
+#include "cxgbit.h"
+
+struct sge_opaque_hdr {
+ void *dev;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
+static const u8 cxgbit_digest_len[] = {0, 4, 4, 8};
+
+#define TX_HDR_LEN (sizeof(struct sge_opaque_hdr) + \
+ sizeof(struct fw_ofld_tx_data_wr))
+
+static struct sk_buff *
+__cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len, bool iso)
+{
+ struct sk_buff *skb = NULL;
+ u8 submode = 0;
+ int errcode;
+ static const u32 hdr_len = TX_HDR_LEN + ISCSI_HDR_LEN;
+
+ if (len) {
+ skb = alloc_skb_with_frags(hdr_len, len,
+ 0, &errcode,
+ GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, TX_HDR_LEN);
+ skb_reset_transport_header(skb);
+ __skb_put(skb, ISCSI_HDR_LEN);
+ skb->data_len = len;
+ skb->len += len;
+ submode |= (csk->submode & CXGBIT_SUBMODE_DCRC);
+ } else {
+ u32 iso_len = iso ? sizeof(struct cpl_tx_data_iso) : 0;
+
+ skb = alloc_skb(hdr_len + iso_len, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, TX_HDR_LEN + iso_len);
+ skb_reset_transport_header(skb);
+ __skb_put(skb, ISCSI_HDR_LEN);
+ }
+
+ submode |= (csk->submode & CXGBIT_SUBMODE_HCRC);
+ cxgbit_skcb_submode(skb) = submode;
+ cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[submode];
+ cxgbit_skcb_flags(skb) |= SKCBF_TX_NEED_HDR;
+ return skb;
+}
+
+static struct sk_buff *cxgbit_alloc_skb(struct cxgbit_sock *csk, u32 len)
+{
+ return __cxgbit_alloc_skb(csk, len, false);
+}
+
+/*
+ * cxgbit_is_ofld_imm - check whether a packet can be sent as immediate data
+ * @skb: the packet
+ *
+ * Returns true if a packet can be sent as an offload WR with immediate
+ * data. We currently use the same limit as for Ethernet packets.
+ */
+static int cxgbit_is_ofld_imm(const struct sk_buff *skb)
+{
+ int length = skb->len;
+
+ if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
+ length += sizeof(struct fw_ofld_tx_data_wr);
+
+ if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO))
+ length += sizeof(struct cpl_tx_data_iso);
+
+#define MAX_IMM_TX_PKT_LEN 256
+ return length <= MAX_IMM_TX_PKT_LEN;
+}
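
Concretely: a bare 48-byte iSCSI header plus the offload TX WR header
(sizeof(struct fw_ofld_tx_data_wr), taken to be 16 bytes here as an
assumption about the firmware ABI) leaves about 192 bytes for payload
under the 256-byte cutoff, so small control PDUs go out as immediate
data while Data-IN payloads use a DSGL:

    #include <stdio.h>

    #define MAX_IMM_TX_PKT_LEN 256
    #define ISCSI_HDR_LEN      48
    #define OFLD_WR_HDR_LEN    16  /* assumed WR header size */

    int main(void)
    {
        int payload = 64;  /* e.g. a small login response */
        int length = ISCSI_HDR_LEN + payload + OFLD_WR_HDR_LEN;

        printf("%s\n", length <= MAX_IMM_TX_PKT_LEN ? "immediate" : "DSGL");
        return 0;
    }
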
+
+/*
+ * cxgbit_sgl_len - calculates the size of an SGL of the given capacity
+ * @n: the number of SGL entries
+ * Calculates the number of flits needed for a scatter/gather list that
+ * can hold the given number of entries.
+ */
+static inline unsigned int cxgbit_sgl_len(unsigned int n)
+{
+ n--;
+ return (3 * n) / 2 + (n & 1) + 2;
+}
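
A flit is 8 bytes on these adapters: the SGL header and first
address/length take two flits, and each further entry costs 1.5 flits
because address/length pairs pack two entries into three flits, with one
flit of rounding for an odd leftover. A standalone re-run of the formula:

    #include <stdio.h>

    static unsigned int sgl_len(unsigned int n)
    {
        n--;
        return (3 * n) / 2 + (n & 1) + 2;
    }

    int main(void)
    {
        printf("%u\n", sgl_len(9));  /* 9 entries -> 14 flits (112 bytes) */
        return 0;
    }
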
+
+/*
+ * cxgbit_calc_tx_flits_ofld - calculate # of flits for an offload packet
+ * @skb: the packet
+ *
+ * Returns the number of flits needed for the given offload packet.
+ * These packets are already fully constructed and no additional headers
+ * will be added.
+ */
+static unsigned int cxgbit_calc_tx_flits_ofld(const struct sk_buff *skb)
+{
+ unsigned int flits, cnt;
+
+ if (cxgbit_is_ofld_imm(skb))
+ return DIV_ROUND_UP(skb->len, 8);
+ flits = skb_transport_offset(skb) / 8;
+ cnt = skb_shinfo(skb)->nr_frags;
+ if (skb_tail_pointer(skb) != skb_transport_header(skb))
+ cnt++;
+ return flits + cxgbit_sgl_len(cnt);
+}
+
+#define CXGBIT_ISO_FSLICE 0x1
+#define CXGBIT_ISO_LSLICE 0x2
+static void
+cxgbit_cpl_tx_data_iso(struct sk_buff *skb, struct cxgbit_iso_info *iso_info)
+{
+ struct cpl_tx_data_iso *cpl;
+ unsigned int submode = cxgbit_skcb_submode(skb);
+ unsigned int fslice = !!(iso_info->flags & CXGBIT_ISO_FSLICE);
+ unsigned int lslice = !!(iso_info->flags & CXGBIT_ISO_LSLICE);
+
+ cpl = (struct cpl_tx_data_iso *)__skb_push(skb, sizeof(*cpl));
+
+ cpl->op_to_scsi = htonl(CPL_TX_DATA_ISO_OP_V(CPL_TX_DATA_ISO) |
+ CPL_TX_DATA_ISO_FIRST_V(fslice) |
+ CPL_TX_DATA_ISO_LAST_V(lslice) |
+ CPL_TX_DATA_ISO_CPLHDRLEN_V(0) |
+ CPL_TX_DATA_ISO_HDRCRC_V(submode & 1) |
+ CPL_TX_DATA_ISO_PLDCRC_V(((submode >> 1) & 1)) |
+ CPL_TX_DATA_ISO_IMMEDIATE_V(0) |
+ CPL_TX_DATA_ISO_SCSI_V(2));
+
+ cpl->ahs_len = 0;
+ cpl->mpdu = htons(DIV_ROUND_UP(iso_info->mpdu, 4));
+ cpl->burst_size = htonl(DIV_ROUND_UP(iso_info->burst_len, 4));
+ cpl->len = htonl(iso_info->len);
+ cpl->reserved2_seglen_offset = htonl(0);
+ cpl->datasn_offset = htonl(0);
+ cpl->buffer_offset = htonl(0);
+ cpl->reserved3 = 0;
+
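+ /* the CPL was written into headroom reserved by __cxgbit_alloc_skb();
+ * pull it back out so skb->data points at the iSCSI BHS again and
+ * cxgbit_tx_data_wr() can re-push it together with the WR header
+ */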
+ __skb_pull(skb, sizeof(*cpl));
+}
+
+static void
+cxgbit_tx_data_wr(struct cxgbit_sock *csk, struct sk_buff *skb, u32 dlen,
+ u32 len, u32 credits, u32 compl)
+{
+ struct fw_ofld_tx_data_wr *req;
+ u32 submode = cxgbit_skcb_submode(skb);
+ u32 wr_ulp_mode = 0;
+ u32 hdr_size = sizeof(*req);
+ u32 opcode = FW_OFLD_TX_DATA_WR;
+ u32 immlen = 0;
+ u32 force = TX_FORCE_V(!submode);
+
+ if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO) {
+ opcode = FW_ISCSI_TX_DATA_WR;
+ immlen += sizeof(struct cpl_tx_data_iso);
+ hdr_size += sizeof(struct cpl_tx_data_iso);
+ submode |= 8;
+ }
+
+ if (cxgbit_is_ofld_imm(skb))
+ immlen += dlen;
+
+ req = (struct fw_ofld_tx_data_wr *)__skb_push(skb,
+ hdr_size);
+ req->op_to_immdlen = cpu_to_be32(FW_WR_OP_V(opcode) |
+ FW_WR_COMPL_V(compl) |
+ FW_WR_IMMDLEN_V(immlen));
+ req->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(csk->tid) |
+ FW_WR_LEN16_V(credits));
+ req->plen = htonl(len);
+ wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE_V(ULP_MODE_ISCSI) |
+ FW_OFLD_TX_DATA_WR_ULPSUBMODE_V(submode);
+
+ req->tunnel_to_proxy = htonl((wr_ulp_mode) | force |
+ FW_OFLD_TX_DATA_WR_SHOVE_V(skb_peek(&csk->txq) ? 0 : 1));
+}
+
+static void cxgbit_arp_failure_skb_discard(void *handle, struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+void cxgbit_push_tx_frames(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb;
+
+ while (csk->wr_cred && ((skb = skb_peek(&csk->txq)) != NULL)) {
+ u32 dlen = skb->len;
+ u32 len = skb->len;
+ u32 credits_needed;
+ u32 compl = 0;
+ u32 flowclen16 = 0;
+ u32 iso_cpl_len = 0;
+
+ if (cxgbit_skcb_flags(skb) & SKCBF_TX_ISO)
+ iso_cpl_len = sizeof(struct cpl_tx_data_iso);
+
+ if (cxgbit_is_ofld_imm(skb))
+ credits_needed = DIV_ROUND_UP(dlen + iso_cpl_len, 16);
+ else
+ credits_needed = DIV_ROUND_UP((8 *
+ cxgbit_calc_tx_flits_ofld(skb)) +
+ iso_cpl_len, 16);
+
+ if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR))
+ credits_needed += DIV_ROUND_UP(
+ sizeof(struct fw_ofld_tx_data_wr), 16);
+ /*
+ * Assumes the initial credits is large enough to support
+ * fw_flowc_wr plus largest possible first payload
+ */
+
+ if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags)) {
+ flowclen16 = cxgbit_send_tx_flowc_wr(csk);
+ csk->wr_cred -= flowclen16;
+ csk->wr_una_cred += flowclen16;
+ }
+
+ if (csk->wr_cred < credits_needed) {
+ pr_debug("csk 0x%p, skb %u/%u, wr %d < %u.\n",
+ csk, skb->len, skb->data_len,
+ credits_needed, csk->wr_cred);
+ break;
+ }
+ __skb_unlink(skb, &csk->txq);
+ set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
+ skb->csum = credits_needed + flowclen16;
+ csk->wr_cred -= credits_needed;
+ csk->wr_una_cred += credits_needed;
+
+ pr_debug("csk 0x%p, skb %u/%u, wr %d, left %u, unack %u.\n",
+ csk, skb->len, skb->data_len, credits_needed,
+ csk->wr_cred, csk->wr_una_cred);
+
+ if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_NEED_HDR)) {
+ len += cxgbit_skcb_tx_extralen(skb);
+
+ if ((csk->wr_una_cred >= (csk->wr_max_cred / 2)) ||
+ (!before(csk->write_seq,
+ csk->snd_una + csk->snd_win))) {
+ compl = 1;
+ csk->wr_una_cred = 0;
+ }
+
+ cxgbit_tx_data_wr(csk, skb, dlen, len, credits_needed,
+ compl);
+ csk->snd_nxt += len;
+
+ } else if ((cxgbit_skcb_flags(skb) & SKCBF_TX_FLAG_COMPL) ||
+ (csk->wr_una_cred >= (csk->wr_max_cred / 2))) {
+ struct cpl_close_con_req *req =
+ (struct cpl_close_con_req *)skb->data;
+ req->wr.wr_hi |= htonl(FW_WR_COMPL_F);
+ csk->wr_una_cred = 0;
+ }
+
+ cxgbit_sock_enqueue_wr(csk, skb);
+ t4_set_arp_err_handler(skb, csk,
+ cxgbit_arp_failure_skb_discard);
+
+ pr_debug("csk 0x%p,%u, skb 0x%p, %u.\n",
+ csk, csk->tid, skb, len);
+
+ cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
+ }
+}
+
+static bool cxgbit_lock_sock(struct cxgbit_sock *csk)
+{
+ spin_lock_bh(&csk->lock);
+
+ if (before(csk->write_seq, csk->snd_una + csk->snd_win))
+ csk->lock_owner = true;
+
+ spin_unlock_bh(&csk->lock);
+
+ return csk->lock_owner;
+}
+
+static void cxgbit_unlock_sock(struct cxgbit_sock *csk)
+{
+ struct sk_buff_head backlogq;
+ struct sk_buff *skb;
+ void (*fn)(struct cxgbit_sock *, struct sk_buff *);
+
+ skb_queue_head_init(&backlogq);
+
+ spin_lock_bh(&csk->lock);
+ while (skb_queue_len(&csk->backlogq)) {
+ skb_queue_splice_init(&csk->backlogq, &backlogq);
+ spin_unlock_bh(&csk->lock);
+
+ while ((skb = __skb_dequeue(&backlogq))) {
+ fn = cxgbit_skcb_rx_backlog_fn(skb);
+ fn(csk, skb);
+ }
+
+ spin_lock_bh(&csk->lock);
+ }
+
+ csk->lock_owner = false;
+ spin_unlock_bh(&csk->lock);
+}
+
+static int cxgbit_queue_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ int ret = 0;
+
+ wait_event_interruptible(csk->ack_waitq, cxgbit_lock_sock(csk));
+
+ if (unlikely((csk->com.state != CSK_STATE_ESTABLISHED) ||
+ signal_pending(current))) {
+ __kfree_skb(skb);
+ __skb_queue_purge(&csk->ppodq);
+ ret = -1;
+ spin_lock_bh(&csk->lock);
+ if (csk->lock_owner) {
+ spin_unlock_bh(&csk->lock);
+ goto unlock;
+ }
+ spin_unlock_bh(&csk->lock);
+ return ret;
+ }
+
+ csk->write_seq += skb->len +
+ cxgbit_skcb_tx_extralen(skb);
+
+ skb_queue_splice_tail_init(&csk->ppodq, &csk->txq);
+ __skb_queue_tail(&csk->txq, skb);
+ cxgbit_push_tx_frames(csk);
+
+unlock:
+ cxgbit_unlock_sock(csk);
+ return ret;
+}
+
+static int
+cxgbit_map_skb(struct iscsi_cmd *cmd, struct sk_buff *skb, u32 data_offset,
+ u32 data_length)
+{
+ u32 i = 0, nr_frags = MAX_SKB_FRAGS;
+ u32 padding = ((-data_length) & 3);
+ struct scatterlist *sg;
+ struct page *page;
+ unsigned int page_off;
+
+ if (padding)
+ nr_frags--;
+
+ /*
+ * We know each entry in t_data_sg contains a page.
+ */
+ sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE];
+ page_off = (data_offset % PAGE_SIZE);
+
+ while (data_length && (i < nr_frags)) {
+ u32 cur_len = min_t(u32, data_length, sg->length - page_off);
+
+ page = sg_page(sg);
+
+ get_page(page);
+ skb_fill_page_desc(skb, i, page, sg->offset + page_off,
+ cur_len);
+ skb->data_len += cur_len;
+ skb->len += cur_len;
+ skb->truesize += cur_len;
+
+ data_length -= cur_len;
+ page_off = 0;
+ sg = sg_next(sg);
+ i++;
+ }
+
+ if (data_length)
+ return -1;
+
+ if (padding) {
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ return -1;
+ skb_fill_page_desc(skb, i, page, 0, padding);
+ skb->data_len += padding;
+ skb->len += padding;
+ skb->truesize += padding;
+ }
+
+ return 0;
+}
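
The padding term ((-data_length) & 3) is the branch-free way to round a
length up to the next 4-byte multiple, matching the iSCSI pad rule; a
quick check:

    #include <assert.h>
    #include <stdio.h>

    static unsigned int pad4(unsigned int len)
    {
        return (0u - len) & 3; /* bytes needed to reach a multiple of 4 */
    }

    int main(void)
    {
        assert(pad4(48) == 0);
        assert(pad4(49) == 3);
        assert(pad4(50) == 2);
        assert(pad4(51) == 1);
        printf("ok\n");
        return 0;
    }
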
+
+static int
+cxgbit_tx_datain_iso(struct cxgbit_sock *csk, struct iscsi_cmd *cmd,
+ struct iscsi_datain_req *dr)
+{
+ struct iscsi_conn *conn = csk->conn;
+ struct sk_buff *skb;
+ struct iscsi_datain datain;
+ struct cxgbit_iso_info iso_info;
+ u32 data_length = cmd->se_cmd.data_length;
+ u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;
+ u32 num_pdu, plen, tx_data = 0;
+ bool task_sense = !!(cmd->se_cmd.se_cmd_flags &
+ SCF_TRANSPORT_TASK_SENSE);
+ bool set_statsn = false;
+ int ret = -1;
+
+ while (data_length) {
+ num_pdu = (data_length + mrdsl - 1) / mrdsl;
+ if (num_pdu > csk->max_iso_npdu)
+ num_pdu = csk->max_iso_npdu;
+
+ plen = num_pdu * mrdsl;
+ if (plen > data_length)
+ plen = data_length;
+
+ skb = __cxgbit_alloc_skb(csk, 0, true);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ memset(skb->data, 0, ISCSI_HDR_LEN);
+ cxgbit_skcb_flags(skb) |= SKCBF_TX_ISO;
+ cxgbit_skcb_submode(skb) |= (csk->submode &
+ CXGBIT_SUBMODE_DCRC);
+ cxgbit_skcb_tx_extralen(skb) = (num_pdu *
+ cxgbit_digest_len[cxgbit_skcb_submode(skb)]) +
+ ((num_pdu - 1) * ISCSI_HDR_LEN);
+
+ memset(&datain, 0, sizeof(struct iscsi_datain));
+ memset(&iso_info, 0, sizeof(iso_info));
+
+ if (!tx_data)
+ iso_info.flags |= CXGBIT_ISO_FSLICE;
+
+ if (!(data_length - plen)) {
+ iso_info.flags |= CXGBIT_ISO_LSLICE;
+ if (!task_sense) {
+ datain.flags = ISCSI_FLAG_DATA_STATUS;
+ iscsit_increment_maxcmdsn(cmd, conn->sess);
+ cmd->stat_sn = conn->stat_sn++;
+ set_statsn = true;
+ }
+ }
+
+ iso_info.burst_len = num_pdu * mrdsl;
+ iso_info.mpdu = mrdsl;
+ iso_info.len = ISCSI_HDR_LEN + plen;
+
+ cxgbit_cpl_tx_data_iso(skb, &iso_info);
+
+ datain.offset = tx_data;
+ datain.data_sn = cmd->data_sn - 1;
+
+ iscsit_build_datain_pdu(cmd, conn, &datain,
+ (struct iscsi_data_rsp *)skb->data,
+ set_statsn);
+
+ ret = cxgbit_map_skb(cmd, skb, tx_data, plen);
+ if (unlikely(ret)) {
+ __kfree_skb(skb);
+ goto out;
+ }
+
+ ret = cxgbit_queue_skb(csk, skb);
+ if (unlikely(ret))
+ goto out;
+
+ tx_data += plen;
+ data_length -= plen;
+
+ cmd->read_data_done += plen;
+ cmd->data_sn += num_pdu;
+ }
+
+ dr->dr_complete = DATAIN_COMPLETE_NORMAL;
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static int
+cxgbit_tx_datain(struct cxgbit_sock *csk, struct iscsi_cmd *cmd,
+ const struct iscsi_datain *datain)
+{
+ struct sk_buff *skb;
+ int ret = 0;
+
+ skb = cxgbit_alloc_skb(csk, 0);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);
+
+ if (datain->length) {
+ cxgbit_skcb_submode(skb) |= (csk->submode &
+ CXGBIT_SUBMODE_DCRC);
+ cxgbit_skcb_tx_extralen(skb) =
+ cxgbit_digest_len[cxgbit_skcb_submode(skb)];
+ }
+
+ ret = cxgbit_map_skb(cmd, skb, datain->offset, datain->length);
+ if (ret < 0) {
+ __kfree_skb(skb);
+ return ret;
+ }
+
+ return cxgbit_queue_skb(csk, skb);
+}
+
+static int
+cxgbit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ struct iscsi_datain_req *dr,
+ const struct iscsi_datain *datain)
+{
+ struct cxgbit_sock *csk = conn->context;
+ u32 data_length = cmd->se_cmd.data_length;
+ u32 padding = ((-data_length) & 3);
+ u32 mrdsl = conn->conn_ops->MaxRecvDataSegmentLength;
+
+ if ((data_length > mrdsl) && (!dr->recovery) &&
+ (!padding) && (!datain->offset) && csk->max_iso_npdu) {
+ atomic_long_add(data_length - datain->length,
+ &conn->sess->tx_data_octets);
+ return cxgbit_tx_datain_iso(csk, cmd, dr);
+ }
+
+ return cxgbit_tx_datain(csk, cmd, datain);
+}
+
+static int
+cxgbit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ const void *data_buf, u32 data_buf_len)
+{
+ struct cxgbit_sock *csk = conn->context;
+ struct sk_buff *skb;
+ u32 padding = ((-data_buf_len) & 3);
+
+ skb = cxgbit_alloc_skb(csk, data_buf_len + padding);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ memcpy(skb->data, cmd->pdu, ISCSI_HDR_LEN);
+
+ if (data_buf_len) {
+ u32 pad_bytes = 0;
+
+ skb_store_bits(skb, ISCSI_HDR_LEN, data_buf, data_buf_len);
+
+ if (padding)
+ skb_store_bits(skb, ISCSI_HDR_LEN + data_buf_len,
+ &pad_bytes, padding);
+ }
+
+ cxgbit_skcb_tx_extralen(skb) = cxgbit_digest_len[
+ cxgbit_skcb_submode(skb)];
+
+ return cxgbit_queue_skb(csk, skb);
+}
+
+int
+cxgbit_xmit_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ struct iscsi_datain_req *dr, const void *buf, u32 buf_len)
+{
+ if (dr)
+ return cxgbit_xmit_datain_pdu(conn, cmd, dr, buf);
+ else
+ return cxgbit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
+}
+
+int cxgbit_validate_params(struct iscsi_conn *conn)
+{
+ struct cxgbit_sock *csk = conn->context;
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct iscsi_param *param;
+ u32 max_xmitdsl;
+
+ param = iscsi_find_param_from_key(MAXXMITDATASEGMENTLENGTH,
+ conn->param_list);
+ if (!param)
+ return -1;
+
+ if (kstrtou32(param->value, 0, &max_xmitdsl) < 0)
+ return -1;
+
+ if (max_xmitdsl > cdev->mdsl) {
+ if (iscsi_change_param_sprintf(
+ conn, "MaxXmitDataSegmentLength=%u", cdev->mdsl))
+ return -1;
+ }
+
+ return 0;
+}
+
+static int cxgbit_set_digest(struct cxgbit_sock *csk)
+{
+ struct iscsi_conn *conn = csk->conn;
+ struct iscsi_param *param;
+
+ param = iscsi_find_param_from_key(HEADERDIGEST, conn->param_list);
+ if (!param) {
+ pr_err("param not found key %s\n", HEADERDIGEST);
+ return -1;
+ }
+
+ if (!strcmp(param->value, CRC32C))
+ csk->submode |= CXGBIT_SUBMODE_HCRC;
+
+ param = iscsi_find_param_from_key(DATADIGEST, conn->param_list);
+ if (!param) {
+ csk->submode = 0;
+ pr_err("param not found key %s\n", DATADIGEST);
+ return -1;
+ }
+
+ if (!strcmp(param->value, CRC32C))
+ csk->submode |= CXGBIT_SUBMODE_DCRC;
+
+ if (cxgbit_setup_conn_digest(csk)) {
+ csk->submode = 0;
+ return -1;
+ }
+
+ return 0;
+}
+
+static int cxgbit_set_iso_npdu(struct cxgbit_sock *csk)
+{
+ struct iscsi_conn *conn = csk->conn;
+ struct iscsi_conn_ops *conn_ops = conn->conn_ops;
+ struct iscsi_param *param;
+ u32 mrdsl, mbl;
+ u32 max_npdu, max_iso_npdu;
+
+ if (conn->login->leading_connection) {
+ param = iscsi_find_param_from_key(DATASEQUENCEINORDER,
+ conn->param_list);
+ if (!param) {
+ pr_err("param not found key %s\n", DATASEQUENCEINORDER);
+ return -1;
+ }
+
+ if (strcmp(param->value, YES))
+ return 0;
+
+ param = iscsi_find_param_from_key(DATAPDUINORDER,
+ conn->param_list);
+ if (!param) {
+ pr_err("param not found key %s\n", DATAPDUINORDER);
+ return -1;
+ }
+
+ if (strcmp(param->value, YES))
+ return 0;
+
+ param = iscsi_find_param_from_key(MAXBURSTLENGTH,
+ conn->param_list);
+ if (!param) {
+ pr_err("param not found key %s\n", MAXBURSTLENGTH);
+ return -1;
+ }
+
+ if (kstrtou32(param->value, 0, &mbl) < 0)
+ return -1;
+ } else {
+ if (!conn->sess->sess_ops->DataSequenceInOrder)
+ return 0;
+ if (!conn->sess->sess_ops->DataPDUInOrder)
+ return 0;
+
+ mbl = conn->sess->sess_ops->MaxBurstLength;
+ }
+
+ mrdsl = conn_ops->MaxRecvDataSegmentLength;
+ max_npdu = mbl / mrdsl;
+
+ max_iso_npdu = CXGBIT_MAX_ISO_PAYLOAD /
+ (ISCSI_HDR_LEN + mrdsl +
+ cxgbit_digest_len[csk->submode]);
+
+ csk->max_iso_npdu = min(max_npdu, max_iso_npdu);
+
+ if (csk->max_iso_npdu <= 1)
+ csk->max_iso_npdu = 0;
+
+ return 0;
+}
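
Worked numbers for the division above, assuming CXGBIT_MAX_ISO_PAYLOAD
is on the order of 64 KB (its exact value lives in cxgbit.h) and both
digests negotiated (8 extra bytes per PDU, per cxgbit_digest_len):

    #include <stdio.h>

    #define ISCSI_HDR_LEN 48u

    int main(void)
    {
        unsigned int max_iso_payload = 65535;   /* assumed ceiling */
        unsigned int mbl = 262144, mrdsl = 8192, digest_len = 8;
        unsigned int max_npdu = mbl / mrdsl;    /* 32 */
        unsigned int max_iso_npdu = max_iso_payload /
                                    (ISCSI_HDR_LEN + mrdsl + digest_len);

        /* the connection then coalesces up to 7 Data-IN PDUs per WR */
        printf("%u\n", max_npdu < max_iso_npdu ? max_npdu : max_iso_npdu);
        return 0;
    }
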
+
+static int cxgbit_set_params(struct iscsi_conn *conn)
+{
+ struct cxgbit_sock *csk = conn->context;
+ struct cxgbit_device *cdev = csk->com.cdev;
+ struct cxgbi_ppm *ppm = *csk->com.cdev->lldi.iscsi_ppm;
+ struct iscsi_conn_ops *conn_ops = conn->conn_ops;
+ struct iscsi_param *param;
+ u8 erl;
+
+ if (conn_ops->MaxRecvDataSegmentLength > cdev->mdsl)
+ conn_ops->MaxRecvDataSegmentLength = cdev->mdsl;
+
+ if (conn->login->leading_connection) {
+ param = iscsi_find_param_from_key(ERRORRECOVERYLEVEL,
+ conn->param_list);
+ if (!param) {
+ pr_err("param not found key %s\n", ERRORRECOVERYLEVEL);
+ return -1;
+ }
+ if (kstrtou8(param->value, 0, &erl) < 0)
+ return -1;
+ } else {
+ erl = conn->sess->sess_ops->ErrorRecoveryLevel;
+ }
+
+ if (!erl) {
+ if (test_bit(CDEV_ISO_ENABLE, &cdev->flags)) {
+ if (cxgbit_set_iso_npdu(csk))
+ return -1;
+ }
+
+ if (test_bit(CDEV_DDP_ENABLE, &cdev->flags)) {
+ if (cxgbit_setup_conn_pgidx(csk,
+ ppm->tformat.pgsz_idx_dflt))
+ return -1;
+ set_bit(CSK_DDP_ENABLE, &csk->com.flags);
+ }
+ }
+
+ if (cxgbit_set_digest(csk))
+ return -1;
+
+ return 0;
+}
+
+int
+cxgbit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
+ u32 length)
+{
+ struct cxgbit_sock *csk = conn->context;
+ struct sk_buff *skb;
+ u32 padding_buf = 0;
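+ /*
+ * (-length) & 3 yields the pad needed to round the payload up to
+ * the 4-byte word boundary iSCSI requires.
+ */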
+ u8 padding = ((-length) & 3);
+
+ skb = cxgbit_alloc_skb(csk, length + padding);
+ if (!skb)
+ return -ENOMEM;
+ skb_store_bits(skb, 0, login->rsp, ISCSI_HDR_LEN);
+ skb_store_bits(skb, ISCSI_HDR_LEN, login->rsp_buf, length);
+
+ if (padding)
+ skb_store_bits(skb, ISCSI_HDR_LEN + length,
+ &padding_buf, padding);
+
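+ /*
+ * On the final login response, commit the negotiated connection
+ * settings (digests, DDP, ISO) before the PDU is queued.
+ */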
+ if (login->login_complete) {
+ if (cxgbit_set_params(conn)) {
+ kfree_skb(skb);
+ return -1;
+ }
+
+ set_bit(CSK_LOGIN_DONE, &csk->com.flags);
+ }
+
+ if (cxgbit_queue_skb(csk, skb))
+ return -1;
+
+ if ((!login->login_complete) && (!login->login_failed))
+ schedule_delayed_work(&conn->login_work, 0);
+
+ return 0;
+}
+
+static void
+cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
+ unsigned int nents)
+{
+ struct skb_seq_state st;
+ const u8 *buf;
+ unsigned int consumed = 0, buf_len;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(skb);
+
+ skb_prepare_seq_read(skb, pdu_cb->doffset,
+ pdu_cb->doffset + pdu_cb->dlen,
+ &st);
+
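+ /*
+ * Walk the (possibly fragmented) skb payload with the seq_read
+ * API, copying each chunk it returns into the scatterlist until
+ * the reader is exhausted.
+ */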
+ while (true) {
+ buf_len = skb_seq_read(consumed, &buf, &st);
+ if (!buf_len) {
+ skb_abort_seq_read(&st);
+ break;
+ }
+
+ consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf,
+ buf_len, consumed);
+ }
+}
+
+static struct iscsi_cmd *cxgbit_allocate_cmd(struct cxgbit_sock *csk)
+{
+ struct iscsi_conn *conn = csk->conn;
+ struct cxgbi_ppm *ppm = cdev2ppm(csk->com.cdev);
+ struct cxgbit_cmd *ccmd;
+ struct iscsi_cmd *cmd;
+
+ cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
+ if (!cmd) {
+ pr_err("Unable to allocate iscsi_cmd + cxgbit_cmd\n");
+ return NULL;
+ }
+
+ ccmd = iscsit_priv_cmd(cmd);
+ ccmd->ttinfo.tag = ppm->tformat.no_ddp_mask;
+ ccmd->setup_ddp = true;
+
+ return cmd;
+}
+
+static int
+cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
+ u32 length)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ struct cxgbit_sock *csk = conn->context;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+
+ if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
+ pr_err("ImmediateData CRC32C DataDigest error\n");
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to recover from"
+ " Immediate Data digest failure while"
+ " in ERL=0.\n");
+ iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
+ (unsigned char *)hdr);
+ return IMMEDIATE_DATA_CANNOT_RECOVER;
+ }
+
+ iscsit_reject_cmd(cmd, ISCSI_REASON_DATA_DIGEST_ERROR,
+ (unsigned char *)hdr);
+ return IMMEDIATE_DATA_ERL1_CRC_FAILURE;
+ }
+
+ if (cmd->se_cmd.se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
+ struct cxgbit_cmd *ccmd = iscsit_priv_cmd(cmd);
+ struct skb_shared_info *ssi = skb_shinfo(csk->skb);
+ skb_frag_t *dfrag = &ssi->frags[pdu_cb->dfrag_idx];
+
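+ /*
+ * Zero-copy path: point a single-entry sgl at the skb page
+ * fragment holding the payload and take a page reference so it
+ * outlives the skb.
+ */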
+ sg_init_table(&ccmd->sg, 1);
+ sg_set_page(&ccmd->sg, dfrag->page.p, skb_frag_size(dfrag),
+ dfrag->page_offset);
+ get_page(dfrag->page.p);
+
+ cmd->se_cmd.t_data_sg = &ccmd->sg;
+ cmd->se_cmd.t_data_nents = 1;
+
+ ccmd->release = true;
+ } else {
+ struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0];
+ u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE));
+
+ cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents);
+ }
+
+ cmd->write_data_done += pdu_cb->dlen;
+
+ if (cmd->write_data_done == cmd->se_cmd.data_length) {
+ spin_lock_bh(&cmd->istate_lock);
+ cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
+ cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
+ spin_unlock_bh(&cmd->istate_lock);
+ }
+
+ return IMMEDIATE_DATA_NORMAL_OPERATION;
+}
+
+static int
+cxgbit_get_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
+ bool dump_payload)
+{
+ struct iscsi_conn *conn = cmd->conn;
+ int cmdsn_ret = 0, immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
+ /*
+ * Special case for Unsupported SAM WRITE Opcodes and ImmediateData=Yes.
+ */
+ if (dump_payload)
+ goto after_immediate_data;
+
+ immed_ret = cxgbit_handle_immediate_data(cmd, hdr,
+ cmd->first_burst_len);
+after_immediate_data:
+ if (immed_ret == IMMEDIATE_DATA_NORMAL_OPERATION) {
+ /*
+ * A PDU/CmdSN carrying Immediate Data passed
+ * DataCRC, check against ExpCmdSN/MaxCmdSN if
+ * Immediate Bit is not set.
+ */
+ cmdsn_ret = iscsit_sequence_cmd(conn, cmd,
+ (unsigned char *)hdr,
+ hdr->cmdsn);
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ return -1;
+
+ if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ target_put_sess_cmd(&cmd->se_cmd);
+ return 0;
+ } else if (cmd->unsolicited_data) {
+ iscsit_set_unsoliticed_dataout(cmd);
+ }
+
+ } else if (immed_ret == IMMEDIATE_DATA_ERL1_CRC_FAILURE) {
+ /*
+ * Immediate Data failed DataCRC and ERL>=1,
+ * silently drop this PDU and let the initiator
+ * plug the CmdSN gap.
+ *
+ * FIXME: Send Unsolicited NOPIN with reserved
+ * TTT here to help the initiator figure out
+ * the missing CmdSN, although they should be
+ * intelligent enough to determine the missing
+ * CmdSN and issue a retry to plug the sequence.
+ */
+ cmd->i_state = ISTATE_REMOVE;
+ iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
+ } else /* immed_ret == IMMEDIATE_DATA_CANNOT_RECOVER */
+ return -1;
+
+ return 0;
+}
+
+static int
+cxgbit_handle_scsi_cmd(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
+{
+ struct iscsi_conn *conn = csk->conn;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)pdu_cb->hdr;
+ int rc;
+ bool dump_payload = false;
+
+ rc = iscsit_setup_scsi_cmd(conn, cmd, (unsigned char *)hdr);
+ if (rc < 0)
+ return rc;
+
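+ /*
+ * If the whole immediate-data payload arrived in one page fragment
+ * and matches the transfer length, hand that page straight to the
+ * target core instead of copying it out.
+ */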
+ if (pdu_cb->dlen && (pdu_cb->dlen == cmd->se_cmd.data_length) &&
+ (pdu_cb->nr_dfrags == 1))
+ cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+
+ rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
+ if (rc < 0)
+ return 0;
+ else if (rc > 0)
+ dump_payload = true;
+
+ if (!pdu_cb->dlen)
+ return 0;
+
+ return cxgbit_get_immediate_data(cmd, hdr, dump_payload);
+}
+
+static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
+{
+ struct scatterlist *sg_start;
+ struct iscsi_conn *conn = csk->conn;
+ struct iscsi_cmd *cmd = NULL;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_data *hdr = (struct iscsi_data *)pdu_cb->hdr;
+ u32 data_offset = be32_to_cpu(hdr->offset);
+ u32 data_len = pdu_cb->dlen;
+ int rc, sg_nents, sg_off;
+ bool dcrc_err = false;
+
+ rc = iscsit_check_dataout_hdr(conn, (unsigned char *)hdr, &cmd);
+ if (rc < 0)
+ return rc;
+ else if (!cmd)
+ return 0;
+
+ if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
+ pr_err("ITT: 0x%08x, Offset: %u, Length: %u,"
+ " DataSN: 0x%08x\n",
+ hdr->itt, hdr->offset, data_len,
+ hdr->datasn);
+
+ dcrc_err = true;
+ goto check_payload;
+ }
+
+ pr_debug("DataOut data_len: %u, "
+ "write_data_done: %u, data_length: %u\n",
+ data_len, cmd->write_data_done,
+ cmd->se_cmd.data_length);
+
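+ /*
+ * Unless the payload was already placed by DDP, copy it from the
+ * skb into the command's scatterlist at the DataOut offset.
+ */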
+ if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) {
+ sg_off = data_offset / PAGE_SIZE;
+ sg_start = &cmd->se_cmd.t_data_sg[sg_off];
+ sg_nents = max(1UL, DIV_ROUND_UP(data_len, PAGE_SIZE));
+
+ cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents);
+ }
+
+check_payload:
+
+ rc = iscsit_check_dataout_payload(cmd, hdr, dcrc_err);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+static int cxgbit_handle_nop_out(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
+{
+ struct iscsi_conn *conn = csk->conn;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_nopout *hdr = (struct iscsi_nopout *)pdu_cb->hdr;
+ unsigned char *ping_data = NULL;
+ u32 payload_length = pdu_cb->dlen;
+ int ret;
+
+ ret = iscsit_setup_nop_out(conn, cmd, hdr);
+ if (ret < 0)
+ return 0;
+
+ if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to recover from"
+ " NOPOUT Ping DataCRC failure while in"
+ " ERL=0.\n");
+ ret = -1;
+ goto out;
+ } else {
+ /*
+ * drop this PDU and let the
+ * initiator plug the CmdSN gap.
+ */
+ pr_info("Dropping NOPOUT"
+ " Command CmdSN: 0x%08x due to"
+ " DataCRC error.\n", hdr->cmdsn);
+ ret = 0;
+ goto out;
+ }
+ }
+
+ /*
+ * Handle NOP-OUT payload for traditional iSCSI sockets
+ */
+ if (payload_length && hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
+ ping_data = kzalloc(payload_length + 1, GFP_KERNEL);
+ if (!ping_data) {
+ pr_err("Unable to allocate memory for"
+ " NOPOUT ping data.\n");
+ ret = -1;
+ goto out;
+ }
+
+ skb_copy_bits(csk->skb, pdu_cb->doffset,
+ ping_data, payload_length);
+
+ ping_data[payload_length] = '\0';
+ /*
+ * Attach ping data to struct iscsi_cmd->buf_ptr.
+ */
+ cmd->buf_ptr = ping_data;
+ cmd->buf_ptr_size = payload_length;
+
+ pr_debug("Got %u bytes of NOPOUT ping"
+ " data.\n", payload_length);
+ pr_debug("Ping Data: \"%s\"\n", ping_data);
+ }
+
+ return iscsit_process_nop_out(conn, cmd, hdr);
+out:
+ if (cmd)
+ iscsit_free_cmd(cmd, false);
+ return ret;
+}
+
+static int
+cxgbit_handle_text_cmd(struct cxgbit_sock *csk, struct iscsi_cmd *cmd)
+{
+ struct iscsi_conn *conn = csk->conn;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_text *hdr = (struct iscsi_text *)pdu_cb->hdr;
+ u32 payload_length = pdu_cb->dlen;
+ int rc;
+ unsigned char *text_in = NULL;
+
+ rc = iscsit_setup_text_cmd(conn, cmd, hdr);
+ if (rc < 0)
+ return rc;
+
+ if (pdu_cb->flags & PDUCBF_RX_DCRC_ERR) {
+ if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
+ pr_err("Unable to recover from"
+ " Text Data digest failure while in"
+ " ERL=0.\n");
+ goto reject;
+ } else {
+ /*
+ * drop this PDU and let the
+ * initiator plug the CmdSN gap.
+ */
+ pr_info("Dropping Text"
+ " Command CmdSN: 0x%08x due to"
+ " DataCRC error.\n", hdr->cmdsn);
+ return 0;
+ }
+ }
+
+ if (payload_length) {
+ text_in = kzalloc(payload_length, GFP_KERNEL);
+ if (!text_in) {
+ pr_err("Unable to allocate text_in of payload_length: %u\n",
+ payload_length);
+ return -ENOMEM;
+ }
+ skb_copy_bits(csk->skb, pdu_cb->doffset,
+ text_in, payload_length);
+
+ text_in[payload_length - 1] = '\0';
+
+ cmd->text_in_ptr = text_in;
+ }
+
+ return iscsit_process_text_cmd(conn, cmd, hdr);
+
+reject:
+ return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+ pdu_cb->hdr);
+}
+
+static int cxgbit_target_rx_opcode(struct cxgbit_sock *csk)
+{
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_hdr *hdr = (struct iscsi_hdr *)pdu_cb->hdr;
+ struct iscsi_conn *conn = csk->conn;
+ struct iscsi_cmd *cmd = NULL;
+ u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
+ int ret = -EINVAL;
+
+ switch (opcode) {
+ case ISCSI_OP_SCSI_CMD:
+ cmd = cxgbit_allocate_cmd(csk);
+ if (!cmd)
+ goto reject;
+
+ ret = cxgbit_handle_scsi_cmd(csk, cmd);
+ break;
+ case ISCSI_OP_SCSI_DATA_OUT:
+ ret = cxgbit_handle_iscsi_dataout(csk);
+ break;
+ case ISCSI_OP_NOOP_OUT:
+ if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
+ cmd = cxgbit_allocate_cmd(csk);
+ if (!cmd)
+ goto reject;
+ }
+
+ ret = cxgbit_handle_nop_out(csk, cmd);
+ break;
+ case ISCSI_OP_SCSI_TMFUNC:
+ cmd = cxgbit_allocate_cmd(csk);
+ if (!cmd)
+ goto reject;
+
+ ret = iscsit_handle_task_mgt_cmd(conn, cmd,
+ (unsigned char *)hdr);
+ break;
+ case ISCSI_OP_TEXT:
+ if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
+ cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
+ if (!cmd)
+ goto reject;
+ } else {
+ cmd = cxgbit_allocate_cmd(csk);
+ if (!cmd)
+ goto reject;
+ }
+
+ ret = cxgbit_handle_text_cmd(csk, cmd);
+ break;
+ case ISCSI_OP_LOGOUT:
+ cmd = cxgbit_allocate_cmd(csk);
+ if (!cmd)
+ goto reject;
+
+ ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
+ if (ret > 0)
+ wait_for_completion_timeout(&conn->conn_logout_comp,
+ SECONDS_FOR_LOGOUT_COMP
+ * HZ);
+ break;
+ case ISCSI_OP_SNACK:
+ ret = iscsit_handle_snack(conn, (unsigned char *)hdr);
+ break;
+ default:
+ pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
+ dump_stack();
+ break;
+ }
+
+ return ret;
+
+reject:
+ return iscsit_add_reject(conn, ISCSI_REASON_BOOKMARK_NO_RESOURCES,
+ (unsigned char *)hdr);
+}
+
+static int cxgbit_rx_opcode(struct cxgbit_sock *csk)
+{
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_conn *conn = csk->conn;
+ struct iscsi_hdr *hdr = pdu_cb->hdr;
+ u8 opcode;
+
+ if (pdu_cb->flags & PDUCBF_RX_HCRC_ERR) {
+ atomic_long_inc(&conn->sess->conn_digest_errors);
+ goto transport_err;
+ }
+
+ if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
+ goto transport_err;
+
+ opcode = hdr->opcode & ISCSI_OPCODE_MASK;
+
+ /* Only Text and Logout requests are legal in a discovery session. */
+ if (conn->sess->sess_ops->SessionType &&
+     (opcode != ISCSI_OP_TEXT) && (opcode != ISCSI_OP_LOGOUT)) {
+ pr_err("Received illegal iSCSI Opcode: 0x%02x while in Discovery Session, rejecting.\n",
+        opcode);
+ iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
+ goto transport_err;
+ }
+
+ if (cxgbit_target_rx_opcode(csk) < 0)
+ goto transport_err;
+
+ return 0;
+
+transport_err:
+ return -1;
+}
+
+static int cxgbit_rx_login_pdu(struct cxgbit_sock *csk)
+{
+ struct iscsi_conn *conn = csk->conn;
+ struct iscsi_login *login = conn->login;
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_rx_pdu_cb(csk->skb);
+ struct iscsi_login_req *login_req;
+
+ login_req = (struct iscsi_login_req *)login->req;
+ memcpy(login_req, pdu_cb->hdr, sizeof(*login_req));
+
+ pr_debug("Got Login Command, Flags 0x%02x, ITT: 0x%08x,"
+ " CmdSN: 0x%08x, ExpStatSN: 0x%08x, CID: %hu, Length: %u\n",
+ login_req->flags, login_req->itt, login_req->cmdsn,
+ login_req->exp_statsn, login_req->cid, pdu_cb->dlen);
+ /*
+ * Setup the initial iscsi_login values from the leading
+ * login request PDU.
+ */
+ if (login->first_request) {
+ login->leading_connection = (!login_req->tsih) ? 1 : 0;
+ login->current_stage = ISCSI_LOGIN_CURRENT_STAGE(
+ login_req->flags);
+ login->version_min = login_req->min_version;
+ login->version_max = login_req->max_version;
+ memcpy(login->isid, login_req->isid, 6);
+ login->cmd_sn = be32_to_cpu(login_req->cmdsn);
+ login->init_task_tag = login_req->itt;
+ login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
+ login->cid = be16_to_cpu(login_req->cid);
+ login->tsih = be16_to_cpu(login_req->tsih);
+ }
+
+ if (iscsi_target_check_login_request(conn, login) < 0)
+ return -1;
+
+ memset(login->req_buf, 0, MAX_KEY_VALUE_PAIRS);
+ skb_copy_bits(csk->skb, pdu_cb->doffset, login->req_buf, pdu_cb->dlen);
+
+ return 0;
+}
+
+static int
+cxgbit_process_iscsi_pdu(struct cxgbit_sock *csk, struct sk_buff *skb, int idx)
+{
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, idx);
+ int ret;
+
+ cxgbit_rx_pdu_cb(skb) = pdu_cb;
+
+ csk->skb = skb;
+
+ if (!test_bit(CSK_LOGIN_DONE, &csk->com.flags)) {
+ ret = cxgbit_rx_login_pdu(csk);
+ set_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
+ } else {
+ ret = cxgbit_rx_opcode(csk);
+ }
+
+ return ret;
+}
+
+static void cxgbit_lro_skb_dump(struct sk_buff *skb)
+{
+ struct skb_shared_info *ssi = skb_shinfo(skb);
+ struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
+ u8 i;
+
+ pr_info("skb 0x%p, head 0x%p, 0x%p, len %u,%u, frags %u.\n",
+ skb, skb->head, skb->data, skb->len, skb->data_len,
+ ssi->nr_frags);
+ pr_info("skb 0x%p, lro_cb, csk 0x%p, pdu %u, %u.\n",
+ skb, lro_cb->csk, lro_cb->pdu_idx, lro_cb->pdu_totallen);
+
+ for (i = 0; i < lro_cb->pdu_idx; i++, pdu_cb++)
+ pr_info("skb 0x%p, pdu %d, %u, f 0x%x, seq 0x%x, dcrc 0x%x, "
+ "frags %u.\n",
+ skb, i, pdu_cb->pdulen, pdu_cb->flags, pdu_cb->seq,
+ pdu_cb->ddigest, pdu_cb->frags);
+ for (i = 0; i < ssi->nr_frags; i++)
+ pr_info("skb 0x%p, frag %d, off %u, sz %u.\n",
+ skb, i, ssi->frags[i].page_offset, ssi->frags[i].size);
+}
+
+static void cxgbit_lro_hskb_reset(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb = csk->lro_hskb;
+ struct skb_shared_info *ssi = skb_shinfo(skb);
+ u8 i;
+
+ memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
+ for (i = 0; i < ssi->nr_frags; i++)
+ put_page(skb_frag_page(&ssi->frags[i]));
+ ssi->nr_frags = 0;
+}
+
+static void
+cxgbit_lro_skb_merge(struct cxgbit_sock *csk, struct sk_buff *skb, u8 pdu_idx)
+{
+ struct sk_buff *hskb = csk->lro_hskb;
+ struct cxgbit_lro_pdu_cb *hpdu_cb = cxgbit_skb_lro_pdu_cb(hskb, 0);
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, pdu_idx);
+ struct skb_shared_info *hssi = skb_shinfo(hskb);
+ struct skb_shared_info *ssi = skb_shinfo(skb);
+ unsigned int len = 0;
+
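+ /*
+ * A PDU split across LRO skbs arrives in pieces: accumulate the
+ * header, data and status portions in the per-connection holding
+ * skb (lro_hskb) until the status (completion) piece is seen.
+ */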
+ if (pdu_cb->flags & PDUCBF_RX_HDR) {
+ hpdu_cb->flags = pdu_cb->flags;
+ hpdu_cb->seq = pdu_cb->seq;
+ hpdu_cb->hdr = pdu_cb->hdr;
+ hpdu_cb->hlen = pdu_cb->hlen;
+
+ memcpy(&hssi->frags[0], &ssi->frags[pdu_cb->hfrag_idx],
+ sizeof(skb_frag_t));
+
+ get_page(skb_frag_page(&hssi->frags[0]));
+ hssi->nr_frags = 1;
+ hpdu_cb->frags = 1;
+ hpdu_cb->hfrag_idx = 0;
+
+ len = hssi->frags[0].size;
+ hskb->len = len;
+ hskb->data_len = len;
+ hskb->truesize = len;
+ }
+
+ if (pdu_cb->flags & PDUCBF_RX_DATA) {
+ u8 hfrag_idx = 1, i;
+
+ hpdu_cb->flags |= pdu_cb->flags;
+
+ len = 0;
+ for (i = 0; i < pdu_cb->nr_dfrags; hfrag_idx++, i++) {
+ memcpy(&hssi->frags[hfrag_idx],
+ &ssi->frags[pdu_cb->dfrag_idx + i],
+ sizeof(skb_frag_t));
+
+ get_page(skb_frag_page(&hssi->frags[hfrag_idx]));
+
+ len += hssi->frags[hfrag_idx].size;
+
+ hssi->nr_frags++;
+ hpdu_cb->frags++;
+ }
+
+ hpdu_cb->dlen = pdu_cb->dlen;
+ hpdu_cb->doffset = hpdu_cb->hlen;
+ hpdu_cb->nr_dfrags = pdu_cb->nr_dfrags;
+ hpdu_cb->dfrag_idx = 1;
+ hskb->len += len;
+ hskb->data_len += len;
+ hskb->truesize += len;
+ }
+
+ if (pdu_cb->flags & PDUCBF_RX_STATUS) {
+ hpdu_cb->flags |= pdu_cb->flags;
+
+ if (hpdu_cb->flags & PDUCBF_RX_DATA)
+ hpdu_cb->flags &= ~PDUCBF_RX_DATA_DDPD;
+
+ hpdu_cb->ddigest = pdu_cb->ddigest;
+ hpdu_cb->pdulen = pdu_cb->pdulen;
+ }
+}
+
+static int cxgbit_process_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
+ u8 pdu_idx = 0, last_idx = 0;
+ int ret = 0;
+
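+ /*
+ * The first PDU may be the continuation of a partial PDU held
+ * over from the previous LRO skb; merge it into the holding skb
+ * and process it once its status arrives.
+ */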
+ if (!pdu_cb->complete) {
+ cxgbit_lro_skb_merge(csk, skb, 0);
+
+ if (pdu_cb->flags & PDUCBF_RX_STATUS) {
+ struct sk_buff *hskb = csk->lro_hskb;
+
+ ret = cxgbit_process_iscsi_pdu(csk, hskb, 0);
+
+ cxgbit_lro_hskb_reset(csk);
+
+ if (ret < 0)
+ goto out;
+ }
+
+ pdu_idx = 1;
+ }
+
+ if (lro_cb->pdu_idx)
+ last_idx = lro_cb->pdu_idx - 1;
+
+ for (; pdu_idx <= last_idx; pdu_idx++) {
+ ret = cxgbit_process_iscsi_pdu(csk, skb, pdu_idx);
+ if (ret < 0)
+ goto out;
+ }
+
+ if ((!lro_cb->complete) && lro_cb->pdu_idx)
+ cxgbit_lro_skb_merge(csk, skb, lro_cb->pdu_idx);
+
+out:
+ return ret;
+}
+
+static int cxgbit_rx_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
+ struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb, 0);
+ int ret = -1;
+
+ if ((pdu_cb->flags & PDUCBF_RX_HDR) &&
+ (pdu_cb->seq != csk->rcv_nxt)) {
+ pr_info("csk 0x%p, tid 0x%x, seq 0x%x != 0x%x.\n",
+ csk, csk->tid, pdu_cb->seq, csk->rcv_nxt);
+ cxgbit_lro_skb_dump(skb);
+ return ret;
+ }
+
+ csk->rcv_nxt += lro_cb->pdu_totallen;
+
+ ret = cxgbit_process_lro_skb(csk, skb);
+
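+ /* Return RX credits once a quarter of the receive window is consumed. */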
+ csk->rx_credits += lro_cb->pdu_totallen;
+
+ if (csk->rx_credits >= (csk->rcv_win / 4))
+ cxgbit_rx_data_ack(csk);
+
+ return ret;
+}
+
+static int cxgbit_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ int ret = -1;
+
+ if (likely(cxgbit_skcb_flags(skb) & SKCBF_RX_LRO))
+ ret = cxgbit_rx_lro_skb(csk, skb);
+
+ __kfree_skb(skb);
+ return ret;
+}
+
+static bool cxgbit_rxq_len(struct cxgbit_sock *csk, struct sk_buff_head *rxq)
+{
+ spin_lock_bh(&csk->rxq.lock);
+ if (skb_queue_len(&csk->rxq)) {
+ skb_queue_splice_init(&csk->rxq, rxq);
+ spin_unlock_bh(&csk->rxq.lock);
+ return true;
+ }
+ spin_unlock_bh(&csk->rxq.lock);
+ return false;
+}
+
+static int cxgbit_wait_rxq(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb;
+ struct sk_buff_head rxq;
+
+ skb_queue_head_init(&rxq);
+
+ wait_event_interruptible(csk->waitq, cxgbit_rxq_len(csk, &rxq));
+
+ if (signal_pending(current))
+ goto out;
+
+ while ((skb = __skb_dequeue(&rxq))) {
+ if (cxgbit_rx_skb(csk, skb))
+ goto out;
+ }
+
+ return 0;
+out:
+ __skb_queue_purge(&rxq);
+ return -1;
+}
+
+int cxgbit_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
+{
+ struct cxgbit_sock *csk = conn->context;
+ int ret = -1;
+
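+ /*
+ * Pump the receive queue until a complete login request PDU has
+ * been processed (CSK_LOGIN_PDU_DONE is set by the rx path).
+ */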
+ while (!test_and_clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags)) {
+ ret = cxgbit_wait_rxq(csk);
+ if (ret) {
+ clear_bit(CSK_LOGIN_PDU_DONE, &csk->com.flags);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+void cxgbit_get_rx_pdu(struct iscsi_conn *conn)
+{
+ struct cxgbit_sock *csk = conn->context;
+
+ while (!kthread_should_stop()) {
+ iscsit_thread_check_cpumask(conn, current, 0);
+ if (cxgbit_wait_rxq(csk))
+ return;
+ }
+}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 961202f4e9aa..50f3d3a0dd7b 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -478,16 +478,16 @@ int iscsit_del_np(struct iscsi_np *np)
return 0;
}
-static int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
-static int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
+static void iscsit_get_rx_pdu(struct iscsi_conn *);
-static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
return 0;
}
+EXPORT_SYMBOL(iscsit_queue_rsp);
-static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
@@ -498,6 +498,169 @@ static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
__iscsit_free_cmd(cmd, scsi_cmd, true);
}
+EXPORT_SYMBOL(iscsit_aborted_task);
+
+static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
+ u32, u32, u8 *, u8 *);
+static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *);
+
+static int
+iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ const void *data_buf, u32 data_buf_len)
+{
+ struct iscsi_hdr *hdr = (struct iscsi_hdr *)cmd->pdu;
+ struct kvec *iov;
+ u32 niov = 0, tx_size = ISCSI_HDR_LEN;
+ int ret;
+
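+ /*
+ * Build a kvec array: BHS (plus optional HeaderDigest), then the
+ * payload, pad and DataDigest, and push it out over the socket.
+ */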
+ iov = &cmd->iov_misc[0];
+ iov[niov].iov_base = cmd->pdu;
+ iov[niov++].iov_len = ISCSI_HDR_LEN;
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
+ ISCSI_HDR_LEN, 0, NULL,
+ (u8 *)header_digest);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attaching CRC32C HeaderDigest"
+ " to opcode 0x%x 0x%08x\n",
+ hdr->opcode, *header_digest);
+ }
+
+ if (data_buf_len) {
+ u32 padding = ((-data_buf_len) & 3);
+
+ iov[niov].iov_base = (void *)data_buf;
+ iov[niov++].iov_len = data_buf_len;
+ tx_size += data_buf_len;
+
+ if (padding != 0) {
+ iov[niov].iov_base = &cmd->pad_bytes;
+ iov[niov++].iov_len = padding;
+ tx_size += padding;
+ pr_debug("Attaching %u additional"
+ " padding bytes.\n", padding);
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
+ data_buf, data_buf_len,
+ padding,
+ (u8 *)&cmd->pad_bytes,
+ (u8 *)&cmd->data_crc);
+
+ iov[niov].iov_base = &cmd->data_crc;
+ iov[niov++].iov_len = ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+ pr_debug("Attached DataDigest for %u"
+ " bytes opcode 0x%x, CRC 0x%08x\n",
+ data_buf_len, hdr->opcode, cmd->data_crc);
+ }
+ }
+
+ cmd->iov_misc_count = niov;
+ cmd->tx_size = tx_size;
+
+ ret = iscsit_send_tx_data(cmd, conn, 1);
+ if (ret < 0) {
+ iscsit_tx_thread_wait_for_tcp(conn);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int iscsit_map_iovec(struct iscsi_cmd *, struct kvec *, u32, u32);
+static void iscsit_unmap_iovec(struct iscsi_cmd *);
+static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsi_cmd *,
+ u32, u32, u32, u8 *);
+static int
+iscsit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ const struct iscsi_datain *datain)
+{
+ struct kvec *iov;
+ u32 iov_count = 0, tx_size = 0;
+ int ret, iov_ret;
+
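+ /*
+ * DataIN takes the sendpage path: map the command's scatterlist
+ * pages into iov_data and transmit them without copying.
+ */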
+ iov = &cmd->iov_data[0];
+ iov[iov_count].iov_base = cmd->pdu;
+ iov[iov_count++].iov_len = ISCSI_HDR_LEN;
+ tx_size += ISCSI_HDR_LEN;
+
+ if (conn->conn_ops->HeaderDigest) {
+ u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
+
+ iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
+ ISCSI_HDR_LEN, 0, NULL,
+ (u8 *)header_digest);
+
+ iov[0].iov_len += ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+
+ pr_debug("Attaching CRC32 HeaderDigest for DataIN PDU 0x%08x\n",
+ *header_digest);
+ }
+
+ iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1],
+ datain->offset, datain->length);
+ if (iov_ret < 0)
+ return -1;
+
+ iov_count += iov_ret;
+ tx_size += datain->length;
+
+ cmd->padding = ((-datain->length) & 3);
+ if (cmd->padding) {
+ iov[iov_count].iov_base = cmd->pad_bytes;
+ iov[iov_count++].iov_len = cmd->padding;
+ tx_size += cmd->padding;
+
+ pr_debug("Attaching %u padding bytes\n", cmd->padding);
+ }
+
+ if (conn->conn_ops->DataDigest) {
+ cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash,
+ cmd, datain->offset,
+ datain->length,
+ cmd->padding,
+ cmd->pad_bytes);
+
+ iov[iov_count].iov_base = &cmd->data_crc;
+ iov[iov_count++].iov_len = ISCSI_CRC_LEN;
+ tx_size += ISCSI_CRC_LEN;
+
+ pr_debug("Attached CRC32C DataDigest %d bytes, crc 0x%08x\n",
+ datain->length + cmd->padding, cmd->data_crc);
+ }
+
+ cmd->iov_data_count = iov_count;
+ cmd->tx_size = tx_size;
+
+ ret = iscsit_fe_sendpage_sg(cmd, conn);
+
+ iscsit_unmap_iovec(cmd);
+
+ if (ret < 0) {
+ iscsit_tx_thread_wait_for_tcp(conn);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int iscsit_xmit_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+ struct iscsi_datain_req *dr, const void *buf,
+ u32 buf_len)
+{
+ if (dr)
+ return iscsit_xmit_datain_pdu(conn, cmd, buf);
+ else
+ return iscsit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);
+}
static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn)
{
@@ -507,6 +670,7 @@ static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn)
static struct iscsit_transport iscsi_target_transport = {
.name = "iSCSI/TCP",
.transport_type = ISCSI_TCP,
+ .rdma_shutdown = false,
.owner = NULL,
.iscsit_setup_np = iscsit_setup_np,
.iscsit_accept_np = iscsit_accept_np,
@@ -519,6 +683,8 @@ static struct iscsit_transport iscsi_target_transport = {
.iscsit_queue_data_in = iscsit_queue_rsp,
.iscsit_queue_status = iscsit_queue_rsp,
.iscsit_aborted_task = iscsit_aborted_task,
+ .iscsit_xmit_pdu = iscsit_xmit_pdu,
+ .iscsit_get_rx_pdu = iscsit_get_rx_pdu,
.iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
};
@@ -634,7 +800,7 @@ static void __exit iscsi_target_cleanup_module(void)
kfree(iscsit_global);
}
-static int iscsit_add_reject(
+int iscsit_add_reject(
struct iscsi_conn *conn,
u8 reason,
unsigned char *buf)
@@ -664,6 +830,7 @@ static int iscsit_add_reject(
return -1;
}
+EXPORT_SYMBOL(iscsit_add_reject);
static int iscsit_add_reject_from_cmd(
struct iscsi_cmd *cmd,
@@ -719,6 +886,7 @@ int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8 reason, unsigned char *buf)
{
return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
}
+EXPORT_SYMBOL(iscsit_reject_cmd);
/*
* Map some portion of the allocated scatterlist to an iovec, suitable for
@@ -737,7 +905,14 @@ static int iscsit_map_iovec(
/*
* We know each entry in t_data_sg contains a page.
*/
- sg = &cmd->se_cmd.t_data_sg[data_offset / PAGE_SIZE];
+ u32 ent = data_offset / PAGE_SIZE;
+
+ if (ent >= cmd->se_cmd.t_data_nents) {
+ pr_err("Initial page entry out-of-bounds\n");
+ return -1;
+ }
+
+ sg = &cmd->se_cmd.t_data_sg[ent];
page_off = (data_offset % PAGE_SIZE);
cmd->first_data_sg = sg;
@@ -2335,7 +2510,7 @@ iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
}
EXPORT_SYMBOL(iscsit_handle_logout_cmd);
-static int iscsit_handle_snack(
+int iscsit_handle_snack(
struct iscsi_conn *conn,
unsigned char *buf)
{
@@ -2388,6 +2563,7 @@ static int iscsit_handle_snack(
return 0;
}
+EXPORT_SYMBOL(iscsit_handle_snack);
static void iscsit_rx_thread_wait_for_tcp(struct iscsi_conn *conn)
{
@@ -2534,7 +2710,6 @@ static int iscsit_send_conn_drop_async_message(
{
struct iscsi_async *hdr;
- cmd->tx_size = ISCSI_HDR_LEN;
cmd->iscsi_opcode = ISCSI_OP_ASYNC_EVENT;
hdr = (struct iscsi_async *) cmd->pdu;
@@ -2552,25 +2727,11 @@ static int iscsit_send_conn_drop_async_message(
hdr->param2 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Wait);
hdr->param3 = cpu_to_be16(conn->sess->sess_ops->DefaultTime2Retain);
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- cmd->tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32C HeaderDigest to"
- " Async Message 0x%08x\n", *header_digest);
- }
-
- cmd->iov_misc[0].iov_base = cmd->pdu;
- cmd->iov_misc[0].iov_len = cmd->tx_size;
- cmd->iov_misc_count = 1;
-
pr_debug("Sending Connection Dropped Async Message StatSN:"
" 0x%08x, for CID: %hu on CID: %hu\n", cmd->stat_sn,
cmd->logout_cid, conn->cid);
- return 0;
+
+ return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
}
static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
@@ -2583,7 +2744,7 @@ static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *conn)
}
}
-static void
+void
iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
struct iscsi_datain *datain, struct iscsi_data_rsp *hdr,
bool set_statsn)
@@ -2627,15 +2788,14 @@ iscsit_build_datain_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
cmd->init_task_tag, ntohl(hdr->statsn), ntohl(hdr->datasn),
ntohl(hdr->offset), datain->length, conn->cid);
}
+EXPORT_SYMBOL(iscsit_build_datain_pdu);
static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct iscsi_data_rsp *hdr = (struct iscsi_data_rsp *)&cmd->pdu[0];
struct iscsi_datain datain;
struct iscsi_datain_req *dr;
- struct kvec *iov;
- u32 iov_count = 0, tx_size = 0;
- int eodr = 0, ret, iov_ret;
+ int eodr = 0, ret;
bool set_statsn = false;
memset(&datain, 0, sizeof(struct iscsi_datain));
@@ -2677,64 +2837,9 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
iscsit_build_datain_pdu(cmd, conn, &datain, hdr, set_statsn);
- iov = &cmd->iov_data[0];
- iov[iov_count].iov_base = cmd->pdu;
- iov[iov_count++].iov_len = ISCSI_HDR_LEN;
- tx_size += ISCSI_HDR_LEN;
-
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- iov[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
-
- pr_debug("Attaching CRC32 HeaderDigest"
- " for DataIN PDU 0x%08x\n", *header_digest);
- }
-
- iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[1],
- datain.offset, datain.length);
- if (iov_ret < 0)
- return -1;
-
- iov_count += iov_ret;
- tx_size += datain.length;
-
- cmd->padding = ((-datain.length) & 3);
- if (cmd->padding) {
- iov[iov_count].iov_base = cmd->pad_bytes;
- iov[iov_count++].iov_len = cmd->padding;
- tx_size += cmd->padding;
-
- pr_debug("Attaching %u padding bytes\n",
- cmd->padding);
- }
- if (conn->conn_ops->DataDigest) {
- cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash, cmd,
- datain.offset, datain.length, cmd->padding, cmd->pad_bytes);
-
- iov[iov_count].iov_base = &cmd->data_crc;
- iov[iov_count++].iov_len = ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
-
- pr_debug("Attached CRC32C DataDigest %d bytes, crc"
- " 0x%08x\n", datain.length+cmd->padding, cmd->data_crc);
- }
-
- cmd->iov_data_count = iov_count;
- cmd->tx_size = tx_size;
-
- ret = iscsit_fe_sendpage_sg(cmd, conn);
-
- iscsit_unmap_iovec(cmd);
-
- if (ret < 0) {
- iscsit_tx_thread_wait_for_tcp(conn);
+ ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, dr, &datain, 0);
+ if (ret < 0)
return ret;
- }
if (dr->dr_complete) {
eodr = (cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ?
@@ -2843,34 +2948,14 @@ EXPORT_SYMBOL(iscsit_build_logout_rsp);
static int
iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
- struct kvec *iov;
- int niov = 0, tx_size, rc;
+ int rc;
rc = iscsit_build_logout_rsp(cmd, conn,
(struct iscsi_logout_rsp *)&cmd->pdu[0]);
if (rc < 0)
return rc;
- tx_size = ISCSI_HDR_LEN;
- iov = &cmd->iov_misc[0];
- iov[niov].iov_base = cmd->pdu;
- iov[niov++].iov_len = ISCSI_HDR_LEN;
-
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, &cmd->pdu[0],
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- iov[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32C HeaderDigest to"
- " Logout Response 0x%08x\n", *header_digest);
- }
- cmd->iov_misc_count = niov;
- cmd->tx_size = tx_size;
-
- return 0;
+ return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
}
void
@@ -2910,34 +2995,16 @@ static int iscsit_send_unsolicited_nopin(
int want_response)
{
struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
- int tx_size = ISCSI_HDR_LEN, ret;
+ int ret;
iscsit_build_nopin_rsp(cmd, conn, hdr, false);
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32C HeaderDigest to"
- " NopIN 0x%08x\n", *header_digest);
- }
-
- cmd->iov_misc[0].iov_base = cmd->pdu;
- cmd->iov_misc[0].iov_len = tx_size;
- cmd->iov_misc_count = 1;
- cmd->tx_size = tx_size;
-
pr_debug("Sending Unsolicited NOPIN TTT: 0x%08x StatSN:"
" 0x%08x CID: %hu\n", hdr->ttt, cmd->stat_sn, conn->cid);
- ret = iscsit_send_tx_data(cmd, conn, 1);
- if (ret < 0) {
- iscsit_tx_thread_wait_for_tcp(conn);
+ ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
+ if (ret < 0)
return ret;
- }
spin_lock_bh(&cmd->istate_lock);
cmd->i_state = want_response ?
@@ -2951,75 +3018,24 @@ static int
iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct iscsi_nopin *hdr = (struct iscsi_nopin *)&cmd->pdu[0];
- struct kvec *iov;
- u32 padding = 0;
- int niov = 0, tx_size;
iscsit_build_nopin_rsp(cmd, conn, hdr, true);
- tx_size = ISCSI_HDR_LEN;
- iov = &cmd->iov_misc[0];
- iov[niov].iov_base = cmd->pdu;
- iov[niov++].iov_len = ISCSI_HDR_LEN;
-
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- iov[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32C HeaderDigest"
- " to NopIn 0x%08x\n", *header_digest);
- }
-
/*
* NOPOUT Ping Data is attached to struct iscsi_cmd->buf_ptr.
* NOPOUT DataSegmentLength is at struct iscsi_cmd->buf_ptr_size.
*/
- if (cmd->buf_ptr_size) {
- iov[niov].iov_base = cmd->buf_ptr;
- iov[niov++].iov_len = cmd->buf_ptr_size;
- tx_size += cmd->buf_ptr_size;
-
- pr_debug("Echoing back %u bytes of ping"
- " data.\n", cmd->buf_ptr_size);
-
- padding = ((-cmd->buf_ptr_size) & 3);
- if (padding != 0) {
- iov[niov].iov_base = &cmd->pad_bytes;
- iov[niov++].iov_len = padding;
- tx_size += padding;
- pr_debug("Attaching %u additional"
- " padding bytes.\n", padding);
- }
- if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
- cmd->buf_ptr, cmd->buf_ptr_size,
- padding, (u8 *)&cmd->pad_bytes,
- (u8 *)&cmd->data_crc);
-
- iov[niov].iov_base = &cmd->data_crc;
- iov[niov++].iov_len = ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attached DataDigest for %u"
- " bytes of ping data, CRC 0x%08x\n",
- cmd->buf_ptr_size, cmd->data_crc);
- }
- }
+ pr_debug("Echoing back %u bytes of ping data.\n", cmd->buf_ptr_size);
- cmd->iov_misc_count = niov;
- cmd->tx_size = tx_size;
-
- return 0;
+ return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
+ cmd->buf_ptr,
+ cmd->buf_ptr_size);
}
static int iscsit_send_r2t(
struct iscsi_cmd *cmd,
struct iscsi_conn *conn)
{
- int tx_size = 0;
struct iscsi_r2t *r2t;
struct iscsi_r2t_rsp *hdr;
int ret;
@@ -3035,7 +3051,10 @@ static int iscsit_send_r2t(
int_to_scsilun(cmd->se_cmd.orig_fe_lun,
(struct scsi_lun *)&hdr->lun);
hdr->itt = cmd->init_task_tag;
- r2t->targ_xfer_tag = session_get_next_ttt(conn->sess);
+ if (conn->conn_transport->iscsit_get_r2t_ttt)
+ conn->conn_transport->iscsit_get_r2t_ttt(conn, cmd, r2t);
+ else
+ r2t->targ_xfer_tag = session_get_next_ttt(conn->sess);
hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag);
hdr->statsn = cpu_to_be32(conn->stat_sn);
hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
@@ -3044,38 +3063,18 @@ static int iscsit_send_r2t(
hdr->data_offset = cpu_to_be32(r2t->offset);
hdr->data_length = cpu_to_be32(r2t->xfer_len);
- cmd->iov_misc[0].iov_base = cmd->pdu;
- cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
- tx_size += ISCSI_HDR_LEN;
-
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32 HeaderDigest for R2T"
- " PDU 0x%08x\n", *header_digest);
- }
-
pr_debug("Built %sR2T, ITT: 0x%08x, TTT: 0x%08x, StatSN:"
" 0x%08x, R2TSN: 0x%08x, Offset: %u, DDTL: %u, CID: %hu\n",
(!r2t->recovery_r2t) ? "" : "Recovery ", cmd->init_task_tag,
r2t->targ_xfer_tag, ntohl(hdr->statsn), r2t->r2t_sn,
r2t->offset, r2t->xfer_len, conn->cid);
- cmd->iov_misc_count = 1;
- cmd->tx_size = tx_size;
-
spin_lock_bh(&cmd->r2t_lock);
r2t->sent_r2t = 1;
spin_unlock_bh(&cmd->r2t_lock);
- ret = iscsit_send_tx_data(cmd, conn, 1);
+ ret = conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
if (ret < 0) {
- iscsit_tx_thread_wait_for_tcp(conn);
return ret;
}
@@ -3166,6 +3165,7 @@ int iscsit_build_r2ts_for_cmd(
return 0;
}
+EXPORT_SYMBOL(iscsit_build_r2ts_for_cmd);
void iscsit_build_rsp_pdu(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
bool inc_stat_sn, struct iscsi_scsi_rsp *hdr)
@@ -3204,18 +3204,12 @@ EXPORT_SYMBOL(iscsit_build_rsp_pdu);
static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)&cmd->pdu[0];
- struct kvec *iov;
- u32 padding = 0, tx_size = 0;
- int iov_count = 0;
bool inc_stat_sn = (cmd->i_state == ISTATE_SEND_STATUS);
+ void *data_buf = NULL;
+ u32 padding = 0, data_buf_len = 0;
iscsit_build_rsp_pdu(cmd, conn, inc_stat_sn, hdr);
- iov = &cmd->iov_misc[0];
- iov[iov_count].iov_base = cmd->pdu;
- iov[iov_count++].iov_len = ISCSI_HDR_LEN;
- tx_size += ISCSI_HDR_LEN;
-
/*
* Attach SENSE DATA payload to iSCSI Response PDU
*/
@@ -3227,56 +3221,23 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
padding = -(cmd->se_cmd.scsi_sense_length) & 3;
hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
- iov[iov_count].iov_base = cmd->sense_buffer;
- iov[iov_count++].iov_len =
- (cmd->se_cmd.scsi_sense_length + padding);
- tx_size += cmd->se_cmd.scsi_sense_length;
+ data_buf = cmd->sense_buffer;
+ data_buf_len = cmd->se_cmd.scsi_sense_length + padding;
if (padding) {
memset(cmd->sense_buffer +
cmd->se_cmd.scsi_sense_length, 0, padding);
- tx_size += padding;
pr_debug("Adding %u bytes of padding to"
" SENSE.\n", padding);
}
- if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
- cmd->sense_buffer,
- (cmd->se_cmd.scsi_sense_length + padding),
- 0, NULL, (u8 *)&cmd->data_crc);
-
- iov[iov_count].iov_base = &cmd->data_crc;
- iov[iov_count++].iov_len = ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
-
- pr_debug("Attaching CRC32 DataDigest for"
- " SENSE, %u bytes CRC 0x%08x\n",
- (cmd->se_cmd.scsi_sense_length + padding),
- cmd->data_crc);
- }
-
pr_debug("Attaching SENSE DATA: %u bytes to iSCSI"
" Response PDU\n",
cmd->se_cmd.scsi_sense_length);
}
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- iov[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32 HeaderDigest for Response"
- " PDU 0x%08x\n", *header_digest);
- }
-
- cmd->iov_misc_count = iov_count;
- cmd->tx_size = tx_size;
-
- return 0;
+ return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, data_buf,
+ data_buf_len);
}
static u8 iscsit_convert_tcm_tmr_rsp(struct se_tmr_req *se_tmr)
@@ -3323,30 +3284,10 @@ static int
iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
struct iscsi_tm_rsp *hdr = (struct iscsi_tm_rsp *)&cmd->pdu[0];
- u32 tx_size = 0;
iscsit_build_task_mgt_rsp(cmd, conn, hdr);
- cmd->iov_misc[0].iov_base = cmd->pdu;
- cmd->iov_misc[0].iov_len = ISCSI_HDR_LEN;
- tx_size += ISCSI_HDR_LEN;
-
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32 HeaderDigest for Task"
- " Mgmt Response PDU 0x%08x\n", *header_digest);
- }
-
- cmd->iov_misc_count = 1;
- cmd->tx_size = tx_size;
-
- return 0;
+ return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL, NULL, 0);
}
static bool iscsit_check_inaddr_any(struct iscsi_np *np)
@@ -3583,53 +3524,16 @@ static int iscsit_send_text_rsp(
struct iscsi_conn *conn)
{
struct iscsi_text_rsp *hdr = (struct iscsi_text_rsp *)cmd->pdu;
- struct kvec *iov;
- u32 tx_size = 0;
- int text_length, iov_count = 0, rc;
-
- rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_TCP);
- if (rc < 0)
- return rc;
-
- text_length = rc;
- iov = &cmd->iov_misc[0];
- iov[iov_count].iov_base = cmd->pdu;
- iov[iov_count++].iov_len = ISCSI_HDR_LEN;
- iov[iov_count].iov_base = cmd->buf_ptr;
- iov[iov_count++].iov_len = text_length;
-
- tx_size += (ISCSI_HDR_LEN + text_length);
-
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- iov[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32 HeaderDigest for"
- " Text Response PDU 0x%08x\n", *header_digest);
- }
-
- if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
- cmd->buf_ptr, text_length,
- 0, NULL, (u8 *)&cmd->data_crc);
-
- iov[iov_count].iov_base = &cmd->data_crc;
- iov[iov_count++].iov_len = ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
-
- pr_debug("Attaching DataDigest for %u bytes of text"
- " data, CRC 0x%08x\n", text_length,
- cmd->data_crc);
- }
+ int text_length;
- cmd->iov_misc_count = iov_count;
- cmd->tx_size = tx_size;
+ text_length = iscsit_build_text_rsp(cmd, conn, hdr,
+ conn->conn_transport->transport_type);
+ if (text_length < 0)
+ return text_length;
- return 0;
+ return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
+ cmd->buf_ptr,
+ text_length);
}
void
@@ -3654,49 +3558,15 @@ static int iscsit_send_reject(
struct iscsi_conn *conn)
{
struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
- struct kvec *iov;
- u32 iov_count = 0, tx_size;
iscsit_build_reject(cmd, conn, hdr);
- iov = &cmd->iov_misc[0];
- iov[iov_count].iov_base = cmd->pdu;
- iov[iov_count++].iov_len = ISCSI_HDR_LEN;
- iov[iov_count].iov_base = cmd->buf_ptr;
- iov[iov_count++].iov_len = ISCSI_HDR_LEN;
-
- tx_size = (ISCSI_HDR_LEN + ISCSI_HDR_LEN);
-
- if (conn->conn_ops->HeaderDigest) {
- u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
-
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
-
- iov[0].iov_len += ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32 HeaderDigest for"
- " REJECT PDU 0x%08x\n", *header_digest);
- }
-
- if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->buf_ptr,
- ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc);
-
- iov[iov_count].iov_base = &cmd->data_crc;
- iov[iov_count++].iov_len = ISCSI_CRC_LEN;
- tx_size += ISCSI_CRC_LEN;
- pr_debug("Attaching CRC32 DataDigest for REJECT"
- " PDU 0x%08x\n", cmd->data_crc);
- }
-
- cmd->iov_misc_count = iov_count;
- cmd->tx_size = tx_size;
-
pr_debug("Built Reject PDU StatSN: 0x%08x, Reason: 0x%02x,"
" CID: %hu\n", ntohl(hdr->statsn), hdr->reason, conn->cid);
- return 0;
+ return conn->conn_transport->iscsit_xmit_pdu(conn, cmd, NULL,
+ cmd->buf_ptr,
+ ISCSI_HDR_LEN);
}
void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
@@ -3724,33 +3594,7 @@ void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
cpumask_setall(conn->conn_cpumask);
}
-static inline void iscsit_thread_check_cpumask(
- struct iscsi_conn *conn,
- struct task_struct *p,
- int mode)
-{
- /*
- * mode == 1 signals iscsi_target_tx_thread() usage.
- * mode == 0 signals iscsi_target_rx_thread() usage.
- */
- if (mode == 1) {
- if (!conn->conn_tx_reset_cpumask)
- return;
- conn->conn_tx_reset_cpumask = 0;
- } else {
- if (!conn->conn_rx_reset_cpumask)
- return;
- conn->conn_rx_reset_cpumask = 0;
- }
- /*
- * Update the CPU mask for this single kthread so that
- * both TX and RX kthreads are scheduled to run on the
- * same CPU.
- */
- set_cpus_allowed_ptr(p, conn->conn_cpumask);
-}
-
-static int
+int
iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
int ret;
@@ -3792,6 +3636,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state
err:
return -1;
}
+EXPORT_SYMBOL(iscsit_immediate_queue);
static int
iscsit_handle_immediate_queue(struct iscsi_conn *conn)
@@ -3816,7 +3661,7 @@ iscsit_handle_immediate_queue(struct iscsi_conn *conn)
return 0;
}
-static int
+int
iscsit_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
int ret;
@@ -3889,13 +3734,6 @@ check_rsp_state:
if (ret < 0)
goto err;
- if (iscsit_send_tx_data(cmd, conn, 1) < 0) {
- iscsit_tx_thread_wait_for_tcp(conn);
- iscsit_unmap_iovec(cmd);
- goto err;
- }
- iscsit_unmap_iovec(cmd);
-
switch (state) {
case ISTATE_SEND_LOGOUTRSP:
if (!iscsit_logout_post_handler(cmd, conn))
@@ -3928,6 +3766,7 @@ check_rsp_state:
err:
return -1;
}
+EXPORT_SYMBOL(iscsit_response_queue);
static int iscsit_handle_response_queue(struct iscsi_conn *conn)
{
@@ -4087,36 +3926,12 @@ static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
return ret;
}
-int iscsi_target_rx_thread(void *arg)
+static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
{
- int ret, rc;
+ int ret;
u8 buffer[ISCSI_HDR_LEN], opcode;
u32 checksum = 0, digest = 0;
- struct iscsi_conn *conn = arg;
struct kvec iov;
- /*
- * Allow ourselves to be interrupted by SIGINT so that a
- * connection recovery / failure event can be triggered externally.
- */
- allow_signal(SIGINT);
- /*
- * Wait for iscsi_post_login_handler() to complete before allowing
- * incoming iscsi/tcp socket I/O, and/or failing the connection.
- */
- rc = wait_for_completion_interruptible(&conn->rx_login_comp);
- if (rc < 0 || iscsi_target_check_conn_state(conn))
- return 0;
-
- if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
- struct completion comp;
-
- init_completion(&comp);
- rc = wait_for_completion_interruptible(&comp);
- if (rc < 0)
- goto transport_err;
-
- goto transport_err;
- }
while (!kthread_should_stop()) {
/*
@@ -4134,7 +3949,7 @@ int iscsi_target_rx_thread(void *arg)
ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
if (ret != ISCSI_HDR_LEN) {
iscsit_rx_thread_wait_for_tcp(conn);
- goto transport_err;
+ return;
}
if (conn->conn_ops->HeaderDigest) {
@@ -4144,7 +3959,7 @@ int iscsi_target_rx_thread(void *arg)
ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
if (ret != ISCSI_CRC_LEN) {
iscsit_rx_thread_wait_for_tcp(conn);
- goto transport_err;
+ return;
}
iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
@@ -4168,7 +3983,7 @@ int iscsi_target_rx_thread(void *arg)
}
if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
- goto transport_err;
+ return;
opcode = buffer[0] & ISCSI_OPCODE_MASK;
@@ -4179,15 +3994,38 @@ int iscsi_target_rx_thread(void *arg)
" while in Discovery Session, rejecting.\n", opcode);
iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
buffer);
- goto transport_err;
+ return;
}
ret = iscsi_target_rx_opcode(conn, buffer);
if (ret < 0)
- goto transport_err;
+ return;
}
+}
+
+int iscsi_target_rx_thread(void *arg)
+{
+ int rc;
+ struct iscsi_conn *conn = arg;
+
+ /*
+ * Allow ourselves to be interrupted by SIGINT so that a
+ * connection recovery / failure event can be triggered externally.
+ */
+ allow_signal(SIGINT);
+ /*
+ * Wait for iscsi_post_login_handler() to complete before allowing
+ * incoming iscsi/tcp socket I/O, and/or failing the connection.
+ */
+ rc = wait_for_completion_interruptible(&conn->rx_login_comp);
+ if (rc < 0 || iscsi_target_check_conn_state(conn))
+ return 0;
+
+ if (!conn->conn_transport->iscsit_get_rx_pdu)
+ return 0;
+
+ conn->conn_transport->iscsit_get_rx_pdu(conn);
-transport_err:
if (!signal_pending(current))
atomic_set(&conn->transport_failed, 1);
iscsit_take_action_for_connection_exit(conn);
@@ -4240,16 +4078,17 @@ int iscsit_close_connection(
pr_debug("Closing iSCSI connection CID %hu on SID:"
" %u\n", conn->cid, sess->sid);
/*
- * Always up conn_logout_comp for the traditional TCP case just in case
- * the RX Thread in iscsi_target_rx_opcode() is sleeping and the logout
- * response never got sent because the connection failed.
+ * Always up conn_logout_comp for the traditional TCP and HW_OFFLOAD
+ * case just in case the RX Thread in iscsi_target_rx_opcode() is
+ * sleeping and the logout response never got sent because the
+ * connection failed.
*
* However for iser-target, isert_wait4logout() is using conn_logout_comp
* to signal logout response TX interrupt completion. Go ahead and skip
* this for iser since isert_rx_opcode() does not wait on logout failure,
* and to avoid iscsi_conn pointer dereference in iser-target code.
*/
- if (conn->conn_transport->transport_type == ISCSI_TCP)
+ if (!conn->conn_transport->rdma_shutdown)
complete(&conn->conn_logout_comp);
if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
@@ -4438,7 +4277,7 @@ int iscsit_close_connection(
if (!atomic_read(&sess->session_reinstatement) &&
atomic_read(&sess->session_fall_back_to_erl0)) {
spin_unlock_bh(&sess->conn_lock);
- target_put_session(sess->se_sess);
+ iscsit_close_session(sess);
return 0;
} else if (atomic_read(&sess->session_logout)) {
@@ -4467,6 +4306,10 @@ int iscsit_close_connection(
}
}
+/*
+ * If the iSCSI Session for the iSCSI Initiator Node exists,
+ * forcefully shutdown the iSCSI NEXUS.
+ */
int iscsit_close_session(struct iscsi_session *sess)
{
struct iscsi_portal_group *tpg = sess->tpg;
@@ -4556,7 +4399,7 @@ static void iscsit_logout_post_handler_closesession(
* always sleep waiting for RX/TX thread shutdown to complete
* within iscsit_close_connection().
*/
- if (conn->conn_transport->transport_type == ISCSI_TCP)
+ if (!conn->conn_transport->rdma_shutdown)
sleep = cmpxchg(&conn->tx_thread_active, true, false);
atomic_set(&conn->conn_logout_remove, 0);
@@ -4565,7 +4408,7 @@ static void iscsit_logout_post_handler_closesession(
iscsit_dec_conn_usage_count(conn);
iscsit_stop_session(sess, sleep, sleep);
iscsit_dec_session_usage_count(sess);
- target_put_session(sess->se_sess);
+ iscsit_close_session(sess);
}
static void iscsit_logout_post_handler_samecid(
@@ -4573,7 +4416,7 @@ static void iscsit_logout_post_handler_samecid(
{
int sleep = 1;
- if (conn->conn_transport->transport_type == ISCSI_TCP)
+ if (!conn->conn_transport->rdma_shutdown)
sleep = cmpxchg(&conn->tx_thread_active, true, false);
atomic_set(&conn->conn_logout_remove, 0);
@@ -4736,7 +4579,7 @@ int iscsit_free_session(struct iscsi_session *sess)
} else
spin_unlock_bh(&sess->conn_lock);
- target_put_session(sess->se_sess);
+ iscsit_close_session(sess);
return 0;
}
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 97e5b69e0668..923c032f0b95 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -43,14 +43,15 @@ static inline struct iscsi_tpg_np *to_iscsi_tpg_np(struct config_item *item)
return container_of(to_tpg_np(item), struct iscsi_tpg_np, se_tpg_np);
}
-static ssize_t lio_target_np_sctp_show(struct config_item *item, char *page)
+static ssize_t lio_target_np_driver_show(struct config_item *item, char *page,
+ enum iscsit_transport_type type)
{
struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item);
- struct iscsi_tpg_np *tpg_np_sctp;
+ struct iscsi_tpg_np *tpg_np_new;
ssize_t rb;
- tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
- if (tpg_np_sctp)
+ tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type);
+ if (tpg_np_new)
rb = sprintf(page, "1\n");
else
rb = sprintf(page, "0\n");
@@ -58,19 +59,20 @@ static ssize_t lio_target_np_sctp_show(struct config_item *item, char *page)
return rb;
}
-static ssize_t lio_target_np_sctp_store(struct config_item *item,
- const char *page, size_t count)
+static ssize_t lio_target_np_driver_store(struct config_item *item,
+ const char *page, size_t count, enum iscsit_transport_type type,
+ const char *mod_name)
{
struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item);
struct iscsi_np *np;
struct iscsi_portal_group *tpg;
- struct iscsi_tpg_np *tpg_np_sctp = NULL;
+ struct iscsi_tpg_np *tpg_np_new = NULL;
u32 op;
- int ret;
+ int rc;
- ret = kstrtou32(page, 0, &op);
- if (ret)
- return ret;
+ rc = kstrtou32(page, 0, &op);
+ if (rc)
+ return rc;
if ((op != 1) && (op != 0)) {
pr_err("Illegal value for tpg_enable: %u\n", op);
return -EINVAL;
@@ -87,107 +89,64 @@ static ssize_t lio_target_np_sctp_store(struct config_item *item,
return -EINVAL;
if (op) {
- /*
- * Use existing np->np_sockaddr for SCTP network portal reference
- */
- tpg_np_sctp = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
- tpg_np, ISCSI_SCTP_TCP);
- if (!tpg_np_sctp || IS_ERR(tpg_np_sctp))
- goto out;
- } else {
- tpg_np_sctp = iscsit_tpg_locate_child_np(tpg_np, ISCSI_SCTP_TCP);
- if (!tpg_np_sctp)
- goto out;
+ if (strlen(mod_name)) {
+ rc = request_module(mod_name);
+ if (rc != 0) {
+ pr_warn("Unable to request_module for %s\n",
+ mod_name);
+ rc = 0;
+ }
+ }
- ret = iscsit_tpg_del_network_portal(tpg, tpg_np_sctp);
- if (ret < 0)
+ tpg_np_new = iscsit_tpg_add_network_portal(tpg,
+ &np->np_sockaddr, tpg_np, type);
+ if (IS_ERR(tpg_np_new))
goto out;
+ } else {
+ tpg_np_new = iscsit_tpg_locate_child_np(tpg_np, type);
+ if (tpg_np_new) {
+ rc = iscsit_tpg_del_network_portal(tpg, tpg_np_new);
+ if (rc < 0)
+ goto out;
+ }
}
iscsit_put_tpg(tpg);
return count;
out:
iscsit_put_tpg(tpg);
- return -EINVAL;
+ return rc;
}
static ssize_t lio_target_np_iser_show(struct config_item *item, char *page)
{
- struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item);
- struct iscsi_tpg_np *tpg_np_iser;
- ssize_t rb;
-
- tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
- if (tpg_np_iser)
- rb = sprintf(page, "1\n");
- else
- rb = sprintf(page, "0\n");
-
- return rb;
+ return lio_target_np_driver_show(item, page, ISCSI_INFINIBAND);
}
static ssize_t lio_target_np_iser_store(struct config_item *item,
- const char *page, size_t count)
+ const char *page, size_t count)
{
- struct iscsi_tpg_np *tpg_np = to_iscsi_tpg_np(item);
- struct iscsi_np *np;
- struct iscsi_portal_group *tpg;
- struct iscsi_tpg_np *tpg_np_iser = NULL;
- char *endptr;
- u32 op;
- int rc = 0;
-
- op = simple_strtoul(page, &endptr, 0);
- if ((op != 1) && (op != 0)) {
- pr_err("Illegal value for tpg_enable: %u\n", op);
- return -EINVAL;
- }
- np = tpg_np->tpg_np;
- if (!np) {
- pr_err("Unable to locate struct iscsi_np from"
- " struct iscsi_tpg_np\n");
- return -EINVAL;
- }
-
- tpg = tpg_np->tpg;
- if (iscsit_get_tpg(tpg) < 0)
- return -EINVAL;
-
- if (op) {
- rc = request_module("ib_isert");
- if (rc != 0) {
- pr_warn("Unable to request_module for ib_isert\n");
- rc = 0;
- }
-
- tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
- tpg_np, ISCSI_INFINIBAND);
- if (IS_ERR(tpg_np_iser)) {
- rc = PTR_ERR(tpg_np_iser);
- goto out;
- }
- } else {
- tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
- if (tpg_np_iser) {
- rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser);
- if (rc < 0)
- goto out;
- }
- }
+ return lio_target_np_driver_store(item, page, count,
+ ISCSI_INFINIBAND, "ib_isert");
+}
+CONFIGFS_ATTR(lio_target_np_, iser);
- iscsit_put_tpg(tpg);
- return count;
-out:
- iscsit_put_tpg(tpg);
- return rc;
+static ssize_t lio_target_np_cxgbit_show(struct config_item *item, char *page)
+{
+ return lio_target_np_driver_show(item, page, ISCSI_CXGBIT);
}
-CONFIGFS_ATTR(lio_target_np_, sctp);
-CONFIGFS_ATTR(lio_target_np_, iser);
+static ssize_t lio_target_np_cxgbit_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ return lio_target_np_driver_store(item, page, count,
+ ISCSI_CXGBIT, "cxgbit");
+}
+CONFIGFS_ATTR(lio_target_np_, cxgbit);
static struct configfs_attribute *lio_target_portal_attrs[] = {
- &lio_target_np_attr_sctp,
&lio_target_np_attr_iser,
+ &lio_target_np_attr_cxgbit,
NULL,
};
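
The hunks above collapse the per-transport configfs attributes into one generic show/store pair keyed by transport type and kernel module name; each transport then needs only a two-line wrapper. A minimal standalone C sketch of that shape (names and types here are illustrative, not the kernel API):

        #include <stdio.h>

        enum transport { T_ISER, T_CXGBIT };

        /* generic helper: parse "0"/"1", then add or remove the portal */
        static int np_driver_store(const char *page, enum transport t,
                                   const char *mod_name)
        {
                unsigned int op;

                if (sscanf(page, "%u", &op) != 1 || op > 1)
                        return -1;
                printf("op=%u transport=%d module=%s\n", op, t, mod_name);
                return 0;
        }

        /* per-transport wrappers reduce to one call each */
        static int iser_store(const char *page)
        {
                return np_driver_store(page, T_ISER, "ib_isert");
        }

        static int cxgbit_store(const char *page)
        {
                return np_driver_store(page, T_CXGBIT, "cxgbit");
        }

        int main(void)
        {
                return iser_store("1") || cxgbit_store("0");
        }
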
@@ -1554,7 +1513,7 @@ static int lio_tpg_check_prot_fabric_only(
* This function calls iscsit_inc_session_usage_count() on the
* struct iscsi_session in question.
*/
-static int lio_tpg_shutdown_session(struct se_session *se_sess)
+static void lio_tpg_close_session(struct se_session *se_sess)
{
struct iscsi_session *sess = se_sess->fabric_sess_ptr;
struct se_portal_group *se_tpg = &sess->tpg->tpg_se_tpg;
@@ -1566,7 +1525,7 @@ static int lio_tpg_shutdown_session(struct se_session *se_sess)
(sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess->conn_lock);
spin_unlock_bh(&se_tpg->session_lock);
- return 0;
+ return;
}
atomic_set(&sess->session_reinstatement, 1);
spin_unlock(&sess->conn_lock);
@@ -1575,20 +1534,6 @@ static int lio_tpg_shutdown_session(struct se_session *se_sess)
spin_unlock_bh(&se_tpg->session_lock);
iscsit_stop_session(sess, 1, 1);
- return 1;
-}
-
-/*
- * Calls iscsit_dec_session_usage_count() as inverse of
- * lio_tpg_shutdown_session()
- */
-static void lio_tpg_close_session(struct se_session *se_sess)
-{
- struct iscsi_session *sess = se_sess->fabric_sess_ptr;
- /*
- * If the iSCSI Session for the iSCSI Initiator Node exists,
- * forcefully shutdown the iSCSI NEXUS.
- */
iscsit_close_session(sess);
}
@@ -1640,7 +1585,6 @@ const struct target_core_fabric_ops iscsi_ops = {
.tpg_get_inst_index = lio_tpg_get_inst_index,
.check_stop_free = lio_check_stop_free,
.release_cmd = lio_release_cmd,
- .shutdown_session = lio_tpg_shutdown_session,
.close_session = lio_tpg_close_session,
.sess_get_index = lio_sess_get_index,
.sess_get_initiator_sid = lio_sess_get_initiator_sid,
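
For orientation, the net effect of the three hunks above: the old int-returning lio_tpg_shutdown_session() and the separate lio_tpg_close_session() merge into a single void close_session() that returns early when teardown is already in progress. A hedged sketch of the resulting control flow (heavily simplified, not the kernel code):

        #include <stdbool.h>
        #include <stdio.h>

        struct session { bool time2retain_expired; };

        static void stop_session(struct session *s) { puts("stop"); }
        static void free_session(struct session *s) { puts("free"); }

        /* one void entry point; no separate shutdown/close split */
        static void close_session(struct session *s)
        {
                if (s->time2retain_expired)
                        return;                 /* teardown already running */
                stop_session(s);                /* was the old shutdown path */
                free_session(s);                /* tail of the old close path */
        }

        int main(void)
        {
                struct session s = { .time2retain_expired = false };

                close_session(&s);
                return 0;
        }
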
diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c
index fb3b52b124ac..647d4a5dca52 100644
--- a/drivers/target/iscsi/iscsi_target_datain_values.c
+++ b/drivers/target/iscsi/iscsi_target_datain_values.c
@@ -524,3 +524,4 @@ struct iscsi_datain_req *iscsit_get_datain_values(
return NULL;
}
+EXPORT_SYMBOL(iscsit_get_datain_values);
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 210f6e4830e3..b54e72c7ab0f 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -786,7 +786,7 @@ static void iscsit_handle_time2retain_timeout(unsigned long data)
}
spin_unlock_bh(&se_tpg->session_lock);
- target_put_session(sess->se_sess);
+ iscsit_close_session(sess);
}
void iscsit_start_time2retain_handler(struct iscsi_session *sess)
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 8436d56c5f0c..b5212f0f9571 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -228,7 +228,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
if (sess->session_state == TARG_SESS_STATE_FAILED) {
spin_unlock_bh(&sess->conn_lock);
iscsit_dec_session_usage_count(sess);
- target_put_session(sess->se_sess);
+ iscsit_close_session(sess);
return 0;
}
spin_unlock_bh(&sess->conn_lock);
@@ -236,7 +236,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
iscsit_stop_session(sess, 1, 1);
iscsit_dec_session_usage_count(sess);
- target_put_session(sess->se_sess);
+ iscsit_close_session(sess);
return 0;
}
@@ -258,7 +258,7 @@ static void iscsi_login_set_conn_values(
mutex_unlock(&auth_id_lock);
}
-static __printf(2, 3) int iscsi_change_param_sprintf(
+__printf(2, 3) int iscsi_change_param_sprintf(
struct iscsi_conn *conn,
const char *fmt, ...)
{
@@ -279,6 +279,7 @@ static __printf(2, 3) int iscsi_change_param_sprintf(
return 0;
}
+EXPORT_SYMBOL(iscsi_change_param_sprintf);
/*
* This is the leading connection of a new session,
@@ -1387,6 +1388,16 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
goto old_sess_out;
}
+ if (conn->conn_transport->iscsit_validate_params) {
+ ret = conn->conn_transport->iscsit_validate_params(conn);
+ if (ret < 0) {
+ if (zero_tsih)
+ goto new_sess_out;
+ else
+ goto old_sess_out;
+ }
+ }
+
ret = iscsi_target_start_negotiation(login, conn);
if (ret < 0)
goto new_sess_out;
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 9fc9117d0f22..89d34bd6d87f 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -269,6 +269,7 @@ int iscsi_target_check_login_request(
return 0;
}
+EXPORT_SYMBOL(iscsi_target_check_login_request);
static int iscsi_target_check_first_request(
struct iscsi_conn *conn,
@@ -1246,16 +1247,16 @@ int iscsi_target_start_negotiation(
{
int ret;
- ret = iscsi_target_do_login(conn, login);
- if (!ret) {
- if (conn->sock) {
- struct sock *sk = conn->sock->sk;
+ if (conn->sock) {
+ struct sock *sk = conn->sock->sk;
- write_lock_bh(&sk->sk_callback_lock);
- set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
- write_unlock_bh(&sk->sk_callback_lock);
- }
- } else if (ret < 0) {
+ write_lock_bh(&sk->sk_callback_lock);
+ set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
+ write_unlock_bh(&sk->sk_callback_lock);
+ }
+
+ ret = iscsi_target_do_login(conn, login);
+ if (ret < 0) {
cancel_delayed_work_sync(&conn->login_work);
cancel_delayed_work_sync(&conn->login_cleanup_work);
iscsi_target_restore_sock_callbacks(conn);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 3a1f9a7e6bb6..0efa80bb8962 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -680,6 +680,7 @@ struct iscsi_param *iscsi_find_param_from_key(
pr_err("Unable to locate key \"%s\".\n", key);
return NULL;
}
+EXPORT_SYMBOL(iscsi_find_param_from_key);
int iscsi_extract_key_value(char *textbuf, char **key, char **value)
{
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 57720385a751..1f38177207e0 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -514,6 +514,7 @@ void iscsit_add_cmd_to_immediate_queue(
wake_up(&conn->queues_wq);
}
+EXPORT_SYMBOL(iscsit_add_cmd_to_immediate_queue);
struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
{
@@ -725,6 +726,9 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
iscsit_remove_cmd_from_immediate_queue(cmd, conn);
iscsit_remove_cmd_from_response_queue(cmd, conn);
}
+
+ if (conn && conn->conn_transport->iscsit_release_cmd)
+ conn->conn_transport->iscsit_release_cmd(conn, cmd);
}
void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
@@ -773,6 +777,7 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
break;
}
}
+EXPORT_SYMBOL(iscsit_free_cmd);
int iscsit_check_session_usage_count(struct iscsi_session *sess)
{
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 0ad5ac541a7f..5091b31b3e56 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -601,16 +601,6 @@ static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
return tl_cmd->sc_cmd_state;
}
-static int tcm_loop_shutdown_session(struct se_session *se_sess)
-{
- return 0;
-}
-
-static void tcm_loop_close_session(struct se_session *se_sess)
-{
- return;
-};
-
static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
/*
@@ -1243,8 +1233,6 @@ static const struct target_core_fabric_ops loop_ops = {
.tpg_get_inst_index = tcm_loop_get_inst_index,
.check_stop_free = tcm_loop_check_stop_free,
.release_cmd = tcm_loop_release_cmd,
- .shutdown_session = tcm_loop_shutdown_session,
- .close_session = tcm_loop_close_session,
.sess_get_index = tcm_loop_sess_get_index,
.write_pending = tcm_loop_write_pending,
.write_pending_status = tcm_loop_write_pending_status,
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index c57e7884973d..58bb6ed18185 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1726,16 +1726,6 @@ static void sbp_release_cmd(struct se_cmd *se_cmd)
sbp_free_request(req);
}
-static int sbp_shutdown_session(struct se_session *se_sess)
-{
- return 0;
-}
-
-static void sbp_close_session(struct se_session *se_sess)
-{
- return;
-}
-
static u32 sbp_sess_get_index(struct se_session *se_sess)
{
return 0;
@@ -2349,8 +2339,6 @@ static const struct target_core_fabric_ops sbp_ops = {
.tpg_check_prod_mode_write_protect = sbp_check_false,
.tpg_get_inst_index = sbp_tpg_get_inst_index,
.release_cmd = sbp_release_cmd,
- .shutdown_session = sbp_shutdown_session,
- .close_session = sbp_close_session,
.sess_get_index = sbp_sess_get_index,
.write_pending = sbp_write_pending,
.write_pending_status = sbp_write_pending_status,
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 49aba4a31747..4c82bbe19003 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -932,7 +932,7 @@ static int core_alua_update_tpg_primary_metadata(
tg_pt_gp->tg_pt_gp_alua_access_status);
snprintf(path, ALUA_METADATA_PATH_LEN,
- "/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
+ "%s/alua/tpgs_%s/%s", db_root, &wwn->unit_serial[0],
config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
rc = core_alua_write_tpg_metadata(path, md_buf, len);
@@ -1275,8 +1275,8 @@ static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun)
atomic_read(&lun->lun_tg_pt_secondary_offline),
lun->lun_tg_pt_secondary_stat);
- snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%llu",
- se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
+ snprintf(path, ALUA_METADATA_PATH_LEN, "%s/alua/%s/%s/lun_%llu",
+ db_root, se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
lun->unpacked_lun);
rc = core_alua_write_tpg_metadata(path, md_buf, len);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index d498533f09ee..2001005bef45 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -99,6 +99,67 @@ static ssize_t target_core_item_version_show(struct config_item *item,
CONFIGFS_ATTR_RO(target_core_item_, version);
+char db_root[DB_ROOT_LEN] = DB_ROOT_DEFAULT;
+static char db_root_stage[DB_ROOT_LEN];
+
+static ssize_t target_core_item_dbroot_show(struct config_item *item,
+ char *page)
+{
+ return sprintf(page, "%s\n", db_root);
+}
+
+static ssize_t target_core_item_dbroot_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ ssize_t read_bytes;
+ struct file *fp;
+
+ mutex_lock(&g_tf_lock);
+ if (!list_empty(&g_tf_list)) {
+ mutex_unlock(&g_tf_lock);
+ pr_err("db_root: cannot be changed: target drivers registered");
+ return -EINVAL;
+ }
+
+ if (count > (DB_ROOT_LEN - 1)) {
+ mutex_unlock(&g_tf_lock);
+ pr_err("db_root: count %d exceeds DB_ROOT_LEN-1: %u\n",
+ (int)count, DB_ROOT_LEN - 1);
+ return -EINVAL;
+ }
+
+ read_bytes = snprintf(db_root_stage, DB_ROOT_LEN, "%s", page);
+ if (!read_bytes) {
+ mutex_unlock(&g_tf_lock);
+ return -EINVAL;
+ }
+ if (db_root_stage[read_bytes - 1] == '\n')
+ db_root_stage[read_bytes - 1] = '\0';
+
+ /* validate new db root before accepting it */
+ fp = filp_open(db_root_stage, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ mutex_unlock(&g_tf_lock);
+ pr_err("db_root: cannot open: %s\n", db_root_stage);
+ return -EINVAL;
+ }
+ if (!S_ISDIR(fp->f_inode->i_mode)) {
+ filp_close(fp, 0);
+ mutex_unlock(&g_tf_lock);
+ pr_err("db_root: not a directory: %s\n", db_root_stage);
+ return -EINVAL;
+ }
+ filp_close(fp, 0);
+
+ strncpy(db_root, db_root_stage, read_bytes);
+
+ mutex_unlock(&g_tf_lock);
+
+ return read_bytes;
+}
+
+CONFIGFS_ATTR(target_core_item_, dbroot);
+
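
The dbroot store handler stages the new path, strips a trailing newline, and accepts it only when it names an existing directory and no fabric drivers are registered. A self-contained userspace model of that validation, assuming stat() in place of the kernel's filp_open() and ignoring the g_tf_lock serialization (illustrative only):

        #include <stdio.h>
        #include <string.h>
        #include <sys/stat.h>

        #define DB_ROOT_LEN 4096

        static char db_root[DB_ROOT_LEN] = "/var/target";

        static int db_root_store(const char *page)
        {
                char stage[DB_ROOT_LEN];
                struct stat st;
                size_t n = snprintf(stage, sizeof(stage), "%s", page);

                if (!n || n >= sizeof(stage))
                        return -1;              /* empty or too long */
                if (stage[n - 1] == '\n')
                        stage[n - 1] = '\0';    /* strip trailing newline */
                if (stat(stage, &st) || !S_ISDIR(st.st_mode))
                        return -1;              /* not an existing directory */
                snprintf(db_root, sizeof(db_root), "%s", stage);
                return 0;
        }

        int main(void)
        {
                printf("accepted=%d db_root=%s\n",
                       db_root_store("/tmp\n") == 0, db_root);
                return 0;
        }
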
static struct target_fabric_configfs *target_core_get_fabric(
const char *name)
{
@@ -239,6 +300,7 @@ static struct configfs_group_operations target_core_fabric_group_ops = {
*/
static struct configfs_attribute *target_core_fabric_item_attrs[] = {
&target_core_item_attr_version,
+ &target_core_item_attr_dbroot,
NULL,
};
@@ -323,14 +385,6 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
pr_err("Missing tfo->release_cmd()\n");
return -EINVAL;
}
- if (!tfo->shutdown_session) {
- pr_err("Missing tfo->shutdown_session()\n");
- return -EINVAL;
- }
- if (!tfo->close_session) {
- pr_err("Missing tfo->close_session()\n");
- return -EINVAL;
- }
if (!tfo->sess_get_index) {
pr_err("Missing tfo->sess_get_index()\n");
return -EINVAL;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 86b4a8375628..fc91e85f54ba 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -155,4 +155,10 @@ void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
/* target_core_xcopy.c */
extern struct se_portal_group xcopy_pt_tpg;
+/* target_core_configfs.c */
+#define DB_ROOT_LEN 4096
+#define DB_ROOT_DEFAULT "/var/target"
+
+extern char db_root[];
+
#endif /* TARGET_CORE_INTERNAL_H */
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index b1795735eafc..47463c99c318 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1985,7 +1985,7 @@ static int __core_scsi3_write_aptpl_to_file(
return -EMSGSIZE;
}
- snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
+ snprintf(path, 512, "%s/pr/aptpl_%s", db_root, &wwn->unit_serial[0]);
file = filp_open(path, flags, 0600);
if (IS_ERR(file)) {
pr_err("filp_open(%s) for APTPL metadata"
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 47a833f3a145..24b36fd785f1 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -403,7 +403,6 @@ static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
struct se_device *se_dev = cmd->se_dev;
struct rd_dev *dev = RD_DEV(se_dev);
struct rd_dev_sg_table *prot_table;
- bool need_to_release = false;
struct scatterlist *prot_sg;
u32 sectors = cmd->data_length / se_dev->dev_attrib.block_size;
u32 prot_offset, prot_page;
@@ -432,9 +431,6 @@ static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
if (!rc)
sbc_dif_copy_prot(cmd, sectors, is_read, prot_sg, prot_offset);
- if (need_to_release)
- kfree(prot_sg);
-
return rc;
}
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index ddf046080dc3..d99752c6cd60 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -336,44 +336,39 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
return acl;
}
-void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
+static void target_shutdown_sessions(struct se_node_acl *acl)
{
- struct se_portal_group *tpg = acl->se_tpg;
- LIST_HEAD(sess_list);
- struct se_session *sess, *sess_tmp;
+ struct se_session *sess;
unsigned long flags;
- int rc;
-
- mutex_lock(&tpg->acl_node_mutex);
- if (acl->dynamic_node_acl) {
- acl->dynamic_node_acl = 0;
- }
- list_del(&acl->acl_list);
- mutex_unlock(&tpg->acl_node_mutex);
+restart:
spin_lock_irqsave(&acl->nacl_sess_lock, flags);
- acl->acl_stop = 1;
-
- list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
- sess_acl_list) {
- if (sess->sess_tearing_down != 0)
+ list_for_each_entry(sess, &acl->acl_sess_list, sess_acl_list) {
+ if (sess->sess_tearing_down)
continue;
- if (!target_get_session(sess))
- continue;
- list_move(&sess->sess_acl_list, &sess_list);
+ list_del_init(&sess->sess_acl_list);
+ spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
+
+ if (acl->se_tpg->se_tpg_tfo->close_session)
+ acl->se_tpg->se_tpg_tfo->close_session(sess);
+ goto restart;
}
spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
+}
- list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
- list_del(&sess->sess_acl_list);
+void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
+{
+ struct se_portal_group *tpg = acl->se_tpg;
+
+ mutex_lock(&tpg->acl_node_mutex);
+ if (acl->dynamic_node_acl)
+ acl->dynamic_node_acl = 0;
+ list_del(&acl->acl_list);
+ mutex_unlock(&tpg->acl_node_mutex);
+
+ target_shutdown_sessions(acl);
- rc = tpg->se_tpg_tfo->shutdown_session(sess);
- target_put_session(sess);
- if (!rc)
- continue;
- target_put_session(sess);
- }
target_put_nacl(acl);
/*
* Wait for last target_put_nacl() to complete in target_complete_nacl()
@@ -400,11 +395,7 @@ int core_tpg_set_initiator_node_queue_depth(
struct se_node_acl *acl,
u32 queue_depth)
{
- LIST_HEAD(sess_list);
struct se_portal_group *tpg = acl->se_tpg;
- struct se_session *sess, *sess_tmp;
- unsigned long flags;
- int rc;
/*
* User has requested to change the queue depth for a Initiator Node.
@@ -413,30 +404,10 @@ int core_tpg_set_initiator_node_queue_depth(
*/
target_set_nacl_queue_depth(tpg, acl, queue_depth);
- spin_lock_irqsave(&acl->nacl_sess_lock, flags);
- list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
- sess_acl_list) {
- if (sess->sess_tearing_down != 0)
- continue;
- if (!target_get_session(sess))
- continue;
- spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
-
- /*
- * Finally call tpg->se_tpg_tfo->close_session() to force session
- * reinstatement to occur if there is an active session for the
- * $FABRIC_MOD Initiator Node in question.
- */
- rc = tpg->se_tpg_tfo->shutdown_session(sess);
- target_put_session(sess);
- if (!rc) {
- spin_lock_irqsave(&acl->nacl_sess_lock, flags);
- continue;
- }
- target_put_session(sess);
- spin_lock_irqsave(&acl->nacl_sess_lock, flags);
- }
- spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
+ /*
+ * Shutdown all pending sessions to force session reinstatement.
+ */
+ target_shutdown_sessions(acl);
pr_debug("Successfully changed queue depth to: %d for Initiator"
" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 590384a2bf8b..5ab3967dda43 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -239,7 +239,6 @@ struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
INIT_LIST_HEAD(&se_sess->sess_cmd_list);
INIT_LIST_HEAD(&se_sess->sess_wait_list);
spin_lock_init(&se_sess->sess_cmd_lock);
- kref_init(&se_sess->sess_kref);
se_sess->sup_prot_ops = sup_prot_ops;
return se_sess;
@@ -430,27 +429,6 @@ target_alloc_session(struct se_portal_group *tpg,
}
EXPORT_SYMBOL(target_alloc_session);
-static void target_release_session(struct kref *kref)
-{
- struct se_session *se_sess = container_of(kref,
- struct se_session, sess_kref);
- struct se_portal_group *se_tpg = se_sess->se_tpg;
-
- se_tpg->se_tpg_tfo->close_session(se_sess);
-}
-
-int target_get_session(struct se_session *se_sess)
-{
- return kref_get_unless_zero(&se_sess->sess_kref);
-}
-EXPORT_SYMBOL(target_get_session);
-
-void target_put_session(struct se_session *se_sess)
-{
- kref_put(&se_sess->sess_kref, target_release_session);
-}
-EXPORT_SYMBOL(target_put_session);
-
ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
struct se_session *se_sess;
@@ -499,8 +477,8 @@ void transport_deregister_session_configfs(struct se_session *se_sess)
se_nacl = se_sess->se_node_acl;
if (se_nacl) {
spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
- if (se_nacl->acl_stop == 0)
- list_del(&se_sess->sess_acl_list);
+ if (!list_empty(&se_sess->sess_acl_list))
+ list_del_init(&se_sess->sess_acl_list);
/*
* If the session list is empty, then clear the pointer.
* Otherwise, set the struct se_session pointer from the tail
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index c30003bd4ff0..e28209b99b59 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -139,7 +139,6 @@ extern unsigned int ft_debug_logging;
* Session ops.
*/
void ft_sess_put(struct ft_sess *);
-int ft_sess_shutdown(struct se_session *);
void ft_sess_close(struct se_session *);
u32 ft_sess_get_index(struct se_session *);
u32 ft_sess_get_port_name(struct se_session *, unsigned char *, u32);
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 4d375e95841b..42ee91123dca 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -442,7 +442,6 @@ static const struct target_core_fabric_ops ft_fabric_ops = {
.tpg_get_inst_index = ft_tpg_get_inst_index,
.check_stop_free = ft_check_stop_free,
.release_cmd = ft_release_cmd,
- .shutdown_session = ft_sess_shutdown,
.close_session = ft_sess_close,
.sess_get_index = ft_sess_get_index,
.sess_get_initiator_sid = NULL,
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index d0c3e1894c61..f5186a744399 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -303,18 +303,6 @@ static void ft_sess_delete_all(struct ft_tport *tport)
*/
/*
- * Determine whether session is allowed to be shutdown in the current context.
- * Returns non-zero if the session should be shutdown.
- */
-int ft_sess_shutdown(struct se_session *se_sess)
-{
- struct ft_sess *sess = se_sess->fabric_sess_ptr;
-
- pr_debug("port_id %x\n", sess->port_id);
- return 1;
-}
-
-/*
* Remove session and send PRLO.
* This is called when the ACL is being deleted or queue depth is changing.
*/
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index d89d60c8b6cf..2d702ca6556f 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -260,16 +260,6 @@ config ARMADA_THERMAL
Enable this option if you want to have support for thermal management
controller present in Armada 370 and Armada XP SoC.
-config TEGRA_SOCTHERM
- tristate "Tegra SOCTHERM thermal management"
- depends on ARCH_TEGRA
- help
- Enable this option for integrated thermal management support on NVIDIA
- Tegra124 systems-on-chip. The driver supports four thermal zones
- (CPU, GPU, MEM, PLLX). Cooling devices can be bound to the thermal
- zones to manage temperatures. This option is also required for the
- emergency thermal reset (thermtrip) feature to function.
-
config DB8500_CPUFREQ_COOLING
tristate "DB8500 cpufreq cooling"
depends on ARCH_U8500 || COMPILE_TEST
@@ -377,6 +367,17 @@ depends on ARCH_STI && OF
source "drivers/thermal/st/Kconfig"
endmenu
+config TANGO_THERMAL
+ tristate "Tango thermal management"
+ depends on ARCH_TANGO || COMPILE_TEST
+ help
+ Enable the Tango thermal driver, which supports the primitive
+ temperature sensor embedded in Tango chips since the SMP8758.
+ This sensor only generates a 1-bit signal to indicate whether
+ the die temperature exceeds a programmable threshold.
+
+source "drivers/thermal/tegra/Kconfig"
+
config QCOM_SPMI_TEMP_ALARM
tristate "Qualcomm SPMI PMIC Temperature Alarm"
depends on OF && SPMI && IIO
@@ -388,4 +389,14 @@ config QCOM_SPMI_TEMP_ALARM
real time die temperature if an ADC is present or an estimate of the
temperature based upon the over temperature stage value.
+config GENERIC_ADC_THERMAL
+ tristate "Generic ADC based thermal sensor"
+ depends on IIO
+ help
+ This enables a thermal sysfs driver for a temperature sensor
+ connected to a general-purpose ADC. The ADC channel is read via
+ the IIO framework and the channel information is provided to
+ this driver, which reports the temperature by reading the ADC
+ channel and converting the value via a lookup table.
+
endif
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 8e9cbc3b5679..10b07c14f8a9 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -35,6 +35,7 @@ obj-y += samsung/
obj-$(CONFIG_DOVE_THERMAL) += dove_thermal.o
obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o
obj-$(CONFIG_ARMADA_THERMAL) += armada_thermal.o
+obj-$(CONFIG_TANGO_THERMAL) += tango_thermal.o
obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o
obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o
obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
@@ -46,6 +47,7 @@ obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/
obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
obj-$(CONFIG_ST_THERMAL) += st/
-obj-$(CONFIG_TEGRA_SOCTHERM) += tegra_soctherm.o
+obj-$(CONFIG_TEGRA_SOCTHERM) += tegra/
obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o
+obj-$(CONFIG_GENERIC_ADC_THERMAL) += thermal-generic-adc.o
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index 70836c5b89bc..fc52016d4e85 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -29,7 +29,13 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
struct thermal_instance *instance;
tz->ops->get_trip_temp(tz, trip, &trip_temp);
- tz->ops->get_trip_hyst(tz, trip, &trip_hyst);
+
+ if (!tz->ops->get_trip_hyst) {
+ pr_warn_once("Undefined get_trip_hyst for thermal zone %s - "
+ "running with default hysteresis zero\n", tz->type);
+ trip_hyst = 0;
+ } else
+ tz->ops->get_trip_hyst(tz, trip, &trip_hyst);
dev_dbg(&tz->device, "Trip%d[temp=%d]:temp=%d:hyst=%d\n",
trip, trip_temp, tz->temperature,
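
The fallback above lets the bang-bang governor run with zero hysteresis when a zone provides no get_trip_hyst() callback instead of dereferencing a NULL op. For intuition, a tiny model of bang-bang control with hysteresis (the temperatures and trip values are made up):

        #include <stdio.h>

        /* on below: switch on at the trip point, off below trip - hyst */
        static int update_target(int on, int temp, int trip, int hyst)
        {
                if (!on)
                        return temp >= trip;
                return !(temp <= trip - hyst);
        }

        int main(void)
        {
                int on = 0;
                const int trip = 75000, hyst = 2000;    /* millicelsius */
                const int temps[] = { 70000, 76000, 74000, 72000 };

                for (int i = 0; i < 4; i++) {
                        on = update_target(on, temps[i], trip, hyst);
                        printf("T=%d fan=%s\n", temps[i], on ? "on" : "off");
                }
                return 0;
        }
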
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 5e820b541506..97fad8f51e1c 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -160,7 +160,7 @@ static int hisi_thermal_get_temp(void *_sensor, int *temp)
struct hisi_thermal_sensor *sensor = _sensor;
struct hisi_thermal_data *data = sensor->thermal;
- int sensor_id = 0, i;
+ int sensor_id = -1, i;
long max_temp = 0;
*temp = hisi_thermal_get_sensor_temp(data, sensor);
@@ -168,12 +168,19 @@ static int hisi_thermal_get_temp(void *_sensor, int *temp)
sensor->sensor_temp = *temp;
for (i = 0; i < HISI_MAX_SENSORS; i++) {
+ if (!data->sensors[i].tzd)
+ continue;
+
if (data->sensors[i].sensor_temp >= max_temp) {
max_temp = data->sensors[i].sensor_temp;
sensor_id = i;
}
}
+ /* If no sensor has been enabled, skip binding the irq */
+ if (sensor_id == -1)
+ return 0;
+
mutex_lock(&data->thermal_lock);
data->irq_bind_sensor = sensor_id;
mutex_unlock(&data->thermal_lock);
@@ -226,8 +233,12 @@ static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev)
sensor->thres_temp / 1000);
mutex_unlock(&data->thermal_lock);
- for (i = 0; i < HISI_MAX_SENSORS; i++)
+ for (i = 0; i < HISI_MAX_SENSORS; i++) {
+ if (!data->sensors[i].tzd)
+ continue;
+
thermal_zone_device_update(data->sensors[i].tzd);
+ }
return IRQ_HANDLED;
}
@@ -243,10 +254,11 @@ static int hisi_thermal_register_sensor(struct platform_device *pdev,
sensor->id = index;
sensor->thermal = data;
- sensor->tzd = thermal_zone_of_sensor_register(&pdev->dev, sensor->id,
- sensor, &hisi_of_thermal_ops);
+ sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev,
+ sensor->id, sensor, &hisi_of_thermal_ops);
if (IS_ERR(sensor->tzd)) {
ret = PTR_ERR(sensor->tzd);
+ sensor->tzd = NULL;
dev_err(&pdev->dev, "failed to register sensor id %d: %d\n",
sensor->id, ret);
return ret;
@@ -331,28 +343,21 @@ static int hisi_thermal_probe(struct platform_device *pdev)
return ret;
}
+ hisi_thermal_enable_bind_irq_sensor(data);
+ irq_get_irqchip_state(data->irq, IRQCHIP_STATE_MASKED,
+ &data->irq_enabled);
+
for (i = 0; i < HISI_MAX_SENSORS; ++i) {
ret = hisi_thermal_register_sensor(pdev, data,
&data->sensors[i], i);
- if (ret) {
+ if (ret)
dev_err(&pdev->dev,
"failed to register thermal sensor: %d\n", ret);
- goto err_get_sensor_data;
- }
+ else
+ hisi_thermal_toggle_sensor(&data->sensors[i], true);
}
- hisi_thermal_enable_bind_irq_sensor(data);
- data->irq_enabled = true;
-
- for (i = 0; i < HISI_MAX_SENSORS; i++)
- hisi_thermal_toggle_sensor(&data->sensors[i], true);
-
return 0;
-
-err_get_sensor_data:
- clk_disable_unprepare(data->clk);
-
- return ret;
}
static int hisi_thermal_remove(struct platform_device *pdev)
@@ -363,8 +368,10 @@ static int hisi_thermal_remove(struct platform_device *pdev)
for (i = 0; i < HISI_MAX_SENSORS; i++) {
struct hisi_thermal_sensor *sensor = &data->sensors[i];
+ if (!sensor->tzd)
+ continue;
+
hisi_thermal_toggle_sensor(sensor, false);
- thermal_zone_of_sensor_unregister(&pdev->dev, sensor->tzd);
}
hisi_thermal_disable_sensor(data);
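
The hisi hunks guard every per-sensor loop on tzd being non-NULL, and hisi_thermal_get_temp() now starts from sensor_id = -1 so a probe where no sensor registered no longer binds the IRQ to sensor 0 by accident. A standalone model of the fixed selection loop (structure mirrors the hunk; names are illustrative):

        #include <stdio.h>

        #define MAX_SENSORS 4

        struct sensor { int registered; long temp; };

        static int hottest_sensor(const struct sensor *s)
        {
                int id = -1;
                long max = 0;

                for (int i = 0; i < MAX_SENSORS; i++) {
                        if (!s[i].registered)
                                continue;       /* tzd == NULL in the driver */
                        if (s[i].temp >= max) {
                                max = s[i].temp;
                                id = i;
                        }
                }
                return id;                      /* -1: nothing registered */
        }

        int main(void)
        {
                struct sensor s[MAX_SENSORS] = {
                        {0, 0}, {1, 41000}, {1, 52000}, {0, 0},
                };

                printf("hottest=%d\n", hottest_sensor(s));
                return 0;
        }
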
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
index 36fa724a36c8..42c1ac057bad 100644
--- a/drivers/thermal/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -198,49 +198,33 @@ static struct thermal_zone_device_ops proc_thermal_local_ops = {
.get_temp = proc_thermal_get_zone_temp,
};
-static int proc_thermal_add(struct device *dev,
- struct proc_thermal_device **priv)
+static int proc_thermal_read_ppcc(struct proc_thermal_device *proc_priv)
{
- struct proc_thermal_device *proc_priv;
- struct acpi_device *adev;
+ int i;
acpi_status status;
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *elements, *ppcc;
union acpi_object *p;
- unsigned long long tmp;
- struct thermal_zone_device_ops *ops = NULL;
- int i;
- int ret;
-
- adev = ACPI_COMPANION(dev);
- if (!adev)
- return -ENODEV;
+ int ret = 0;
- status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf);
+ status = acpi_evaluate_object(proc_priv->adev->handle, "PPCC",
+ NULL, &buf);
if (ACPI_FAILURE(status))
return -ENODEV;
p = buf.pointer;
if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
- dev_err(dev, "Invalid PPCC data\n");
+ dev_err(proc_priv->dev, "Invalid PPCC data\n");
ret = -EFAULT;
goto free_buffer;
}
+
if (!p->package.count) {
- dev_err(dev, "Invalid PPCC package size\n");
+ dev_err(proc_priv->dev, "Invalid PPCC package size\n");
ret = -EFAULT;
goto free_buffer;
}
- proc_priv = devm_kzalloc(dev, sizeof(*proc_priv), GFP_KERNEL);
- if (!proc_priv) {
- ret = -ENOMEM;
- goto free_buffer;
- }
-
- proc_priv->dev = dev;
- proc_priv->adev = adev;
-
for (i = 0; i < min((int)p->package.count - 1, 2); ++i) {
elements = &(p->package.elements[i+1]);
if (elements->type != ACPI_TYPE_PACKAGE ||
@@ -257,12 +241,62 @@ static int proc_thermal_add(struct device *dev,
proc_priv->power_limits[i].step_uw = ppcc[5].integer.value;
}
+free_buffer:
+ kfree(buf.pointer);
+
+ return ret;
+}
+
+#define PROC_POWER_CAPABILITY_CHANGED 0x83
+static void proc_thermal_notify(acpi_handle handle, u32 event, void *data)
+{
+ struct proc_thermal_device *proc_priv = data;
+
+ if (!proc_priv)
+ return;
+
+ switch (event) {
+ case PROC_POWER_CAPABILITY_CHANGED:
+ proc_thermal_read_ppcc(proc_priv);
+ int340x_thermal_zone_device_update(proc_priv->int340x_zone);
+ break;
+ default:
+ dev_err(proc_priv->dev, "Unsupported event [0x%x]\n", event);
+ break;
+ }
+}
+
+
+static int proc_thermal_add(struct device *dev,
+ struct proc_thermal_device **priv)
+{
+ struct proc_thermal_device *proc_priv;
+ struct acpi_device *adev;
+ acpi_status status;
+ unsigned long long tmp;
+ struct thermal_zone_device_ops *ops = NULL;
+ int ret;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return -ENODEV;
+
+ proc_priv = devm_kzalloc(dev, sizeof(*proc_priv), GFP_KERNEL);
+ if (!proc_priv)
+ return -ENOMEM;
+
+ proc_priv->dev = dev;
+ proc_priv->adev = adev;
*priv = proc_priv;
- ret = sysfs_create_group(&dev->kobj,
- &power_limit_attribute_group);
+ ret = proc_thermal_read_ppcc(proc_priv);
+ if (!ret) {
+ ret = sysfs_create_group(&dev->kobj,
+ &power_limit_attribute_group);
+
+ }
if (ret)
- goto free_buffer;
+ return ret;
status = acpi_evaluate_integer(adev->handle, "_TMP", NULL, &tmp);
if (ACPI_FAILURE(status)) {
@@ -274,20 +308,32 @@ static int proc_thermal_add(struct device *dev,
proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops);
if (IS_ERR(proc_priv->int340x_zone)) {
- sysfs_remove_group(&proc_priv->dev->kobj,
- &power_limit_attribute_group);
ret = PTR_ERR(proc_priv->int340x_zone);
+ goto remove_group;
} else
ret = 0;
-free_buffer:
- kfree(buf.pointer);
+ ret = acpi_install_notify_handler(adev->handle, ACPI_DEVICE_NOTIFY,
+ proc_thermal_notify,
+ (void *)proc_priv);
+ if (ret)
+ goto remove_zone;
+
+ return 0;
+
+remove_zone:
+ int340x_thermal_zone_remove(proc_priv->int340x_zone);
+remove_group:
+ sysfs_remove_group(&proc_priv->dev->kobj,
+ &power_limit_attribute_group);
return ret;
}
static void proc_thermal_remove(struct proc_thermal_device *proc_priv)
{
+ acpi_remove_notify_handler(proc_priv->adev->handle,
+ ACPI_DEVICE_NOTIFY, proc_thermal_notify);
int340x_thermal_zone_remove(proc_priv->int340x_zone);
sysfs_remove_group(&proc_priv->dev->kobj,
&power_limit_attribute_group);
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index 6c79588251d5..015ce2eb6eb7 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -510,12 +510,6 @@ static int start_power_clamp(void)
unsigned long cpu;
struct task_struct *thread;
- /* check if pkg cstate counter is completely 0, abort in this case */
- if (!has_pkg_state_counter()) {
- pr_err("pkg cstate counter not functional, abort\n");
- return -EINVAL;
- }
-
set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1);
/* prevent cpu hotplug */
get_online_cpus();
@@ -672,35 +666,11 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
.set_cur_state = powerclamp_set_cur_state,
};
-/* runs on Nehalem and later */
static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = {
- { X86_VENDOR_INTEL, 6, 0x1a},
- { X86_VENDOR_INTEL, 6, 0x1c},
- { X86_VENDOR_INTEL, 6, 0x1e},
- { X86_VENDOR_INTEL, 6, 0x1f},
- { X86_VENDOR_INTEL, 6, 0x25},
- { X86_VENDOR_INTEL, 6, 0x26},
- { X86_VENDOR_INTEL, 6, 0x2a},
- { X86_VENDOR_INTEL, 6, 0x2c},
- { X86_VENDOR_INTEL, 6, 0x2d},
- { X86_VENDOR_INTEL, 6, 0x2e},
- { X86_VENDOR_INTEL, 6, 0x2f},
- { X86_VENDOR_INTEL, 6, 0x37},
- { X86_VENDOR_INTEL, 6, 0x3a},
- { X86_VENDOR_INTEL, 6, 0x3c},
- { X86_VENDOR_INTEL, 6, 0x3d},
- { X86_VENDOR_INTEL, 6, 0x3e},
- { X86_VENDOR_INTEL, 6, 0x3f},
- { X86_VENDOR_INTEL, 6, 0x45},
- { X86_VENDOR_INTEL, 6, 0x46},
- { X86_VENDOR_INTEL, 6, 0x47},
- { X86_VENDOR_INTEL, 6, 0x4c},
- { X86_VENDOR_INTEL, 6, 0x4d},
- { X86_VENDOR_INTEL, 6, 0x4e},
- { X86_VENDOR_INTEL, 6, 0x4f},
- { X86_VENDOR_INTEL, 6, 0x56},
- { X86_VENDOR_INTEL, 6, 0x57},
- { X86_VENDOR_INTEL, 6, 0x5e},
+ { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
+ { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_ARAT },
+ { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_NONSTOP_TSC },
+ { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_CONSTANT_TSC},
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
@@ -712,11 +682,12 @@ static int __init powerclamp_probe(void)
boot_cpu_data.x86, boot_cpu_data.x86_model);
return -ENODEV;
}
- if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC) ||
- !boot_cpu_has(X86_FEATURE_CONSTANT_TSC) ||
- !boot_cpu_has(X86_FEATURE_MWAIT) ||
- !boot_cpu_has(X86_FEATURE_ARAT))
+
+ /* The goal of idle time alignment is to reach a package C-state. */
+ if (!has_pkg_state_counter()) {
+ pr_info("No package C-state available\n");
return -ENODEV;
+ }
/* find the deepest mwait value */
find_target_mwait();
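
The powerclamp table above switches from enumerating model numbers to one entry per CPU feature; x86_match_cpu() succeeds when any single entry matches, and the probe then uses has_pkg_state_counter() as the functional gate. A rough model of that any-entry-matches semantics (a sketch, not the real x86 match API):

        #include <stdio.h>

        enum {
                F_MWAIT        = 1 << 0,
                F_ARAT         = 1 << 1,
                F_NONSTOP_TSC  = 1 << 2,
                F_CONSTANT_TSC = 1 << 3,
        };

        /* one table entry per feature; a CPU matches if any entry matches */
        static const unsigned int entries[] = {
                F_MWAIT, F_ARAT, F_NONSTOP_TSC, F_CONSTANT_TSC,
        };

        static int table_matches(unsigned int cpu_features)
        {
                for (unsigned int i = 0;
                     i < sizeof(entries) / sizeof(entries[0]); i++)
                        if (cpu_features & entries[i])
                                return 1;
                return 0;
        }

        int main(void)
        {
                printf("matches=%d\n", table_matches(F_MWAIT));
                return 0;
        }
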
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index 507632b9648e..262ab0a2266f 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -144,7 +144,6 @@ struct mtk_thermal {
s32 o_slope;
s32 vts[MT8173_NUM_SENSORS];
- struct thermal_zone_device *tzd;
};
struct mtk_thermal_bank_cfg {
@@ -572,16 +571,11 @@ static int mtk_thermal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mt);
- mt->tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, mt,
- &mtk_thermal_ops);
- if (IS_ERR(mt->tzd))
- goto err_register;
+ devm_thermal_zone_of_sensor_register(&pdev->dev, 0, mt,
+ &mtk_thermal_ops);
return 0;
-err_register:
- clk_disable_unprepare(mt->clk_peri_therm);
-
err_disable_clk_auxadc:
clk_disable_unprepare(mt->clk_auxadc);
@@ -592,8 +586,6 @@ static int mtk_thermal_remove(struct platform_device *pdev)
{
struct mtk_thermal *mt = platform_get_drvdata(pdev);
- thermal_zone_of_sensor_unregister(&pdev->dev, mt->tzd);
-
clk_disable_unprepare(mt->clk_peri_therm);
clk_disable_unprepare(mt->clk_auxadc);
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index d8ec44b194d6..b8e509c60848 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -331,6 +331,14 @@ static int of_thermal_set_trip_temp(struct thermal_zone_device *tz, int trip,
if (trip >= data->ntrips || trip < 0)
return -EDOM;
+ if (data->ops->set_trip_temp) {
+ int ret;
+
+ ret = data->ops->set_trip_temp(data->sensor_data, trip, temp);
+ if (ret)
+ return ret;
+ }
+
/* thermal framework should take care of data->mask & (1 << trip) */
data->trips[trip].temperature = temp;
@@ -906,7 +914,7 @@ finish:
return tz;
free_tbps:
- for (i = 0; i < tz->num_tbps; i++)
+ for (i = i - 1; i >= 0; i--)
of_node_put(tz->tbps[i].cooling_device);
kfree(tz->tbps);
free_trips:
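
The free_tbps fix above matters because the old loop ran i from 0 to num_tbps and dropped references that were never taken when registration failed partway through; the fixed loop unwinds only indices i-1 down to 0. A minimal model of that unwind pattern (the failing index is invented for the demo):

        #include <stdio.h>

        #define N 4

        static int acquire(int i) { return i != 2; }    /* index 2 fails */
        static void release(int i) { printf("release %d\n", i); }

        static int acquire_all(void)
        {
                int i;

                for (i = 0; i < N; i++)
                        if (!acquire(i))
                                goto unwind;
                return 0;
        unwind:
                for (i = i - 1; i >= 0; i--)    /* matches the fixed loop */
                        release(i);
                return -1;
        }

        int main(void) { return acquire_all(); }
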
diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c
index b677aada5b52..f8a3c60bef94 100644
--- a/drivers/thermal/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom-spmi-temp-alarm.c
@@ -260,7 +260,7 @@ static int qpnp_tm_probe(struct platform_device *pdev)
if (ret < 0)
goto fail;
- chip->tz_dev = thermal_zone_of_sensor_register(&pdev->dev, 0, chip,
+ chip->tz_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, chip,
&qpnp_tm_sensor_ops);
if (IS_ERR(chip->tz_dev)) {
dev_err(&pdev->dev, "failed to register sensor\n");
@@ -281,7 +281,6 @@ static int qpnp_tm_remove(struct platform_device *pdev)
{
struct qpnp_tm_chip *chip = dev_get_drvdata(&pdev->dev);
- thermal_zone_of_sensor_unregister(&pdev->dev, chip->tz_dev);
if (!IS_ERR(chip->adc))
iio_channel_release(chip->adc);
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 82daba09e150..71a339271fa5 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -492,7 +492,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
goto error_unregister;
if (of_data == USE_OF_THERMAL)
- priv->zone = thermal_zone_of_sensor_register(
+ priv->zone = devm_thermal_zone_of_sensor_register(
dev, i, priv,
&rcar_thermal_zone_of_ops);
else
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 233a564442a0..5d491f16a866 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -1,7 +1,5 @@
/*
- * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
- *
- * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
+ * Copyright (c) 2014-2016, Fuzhou Rockchip Electronics Co., Ltd
* Caesar Wang <wxt@rock-chips.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -23,8 +21,10 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/thermal.h>
+#include <linux/mfd/syscon.h>
#include <linux/pinctrl/consumer.h>
/**
@@ -73,7 +73,7 @@ enum adc_sort_mode {
#define SOC_MAX_SENSORS 2
/**
- * struct chip_tsadc_table: hold information about chip-specific differences
+ * struct chip_tsadc_table - hold information about chip-specific differences
* @id: conversion table
* @length: size of conversion table
* @data_mask: mask to apply on data inputs
@@ -86,6 +86,20 @@ struct chip_tsadc_table {
enum adc_sort_mode mode;
};
+/**
+ * struct rockchip_tsadc_chip - hold the private data of tsadc chip
+ * @chn_id[SOC_MAX_SENSORS]: the sensor id of the chip corresponding to the channel
+ * @chn_num: the channel number of tsadc chip
+ * @tshut_temp: the hardware-controlled shutdown temperature value
+ * @tshut_mode: the hardware-controlled shutdown mode (0:CRU 1:GPIO)
+ * @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH)
+ * @initialize: SoC-specific tsadc controller initialization method
+ * @irq_ack: clear the interrupt
+ * @get_temp: get the temperature
+ * @set_tshut_temp: set the hardware-controlled shutdown temperature
+ * @set_tshut_mode: set the hardware-controlled shutdown mode
+ * @table: the chip-specific conversion table
+ */
struct rockchip_tsadc_chip {
/* The sensor id of chip correspond to the ADC channel */
int chn_id[SOC_MAX_SENSORS];
@@ -97,7 +111,8 @@ struct rockchip_tsadc_chip {
enum tshut_polarity tshut_polarity;
/* Chip-wide methods */
- void (*initialize)(void __iomem *reg, enum tshut_polarity p);
+ void (*initialize)(struct regmap *grf,
+ void __iomem *reg, enum tshut_polarity p);
void (*irq_ack)(void __iomem *reg);
void (*control)(void __iomem *reg, bool on);
@@ -112,12 +127,32 @@ struct rockchip_tsadc_chip {
struct chip_tsadc_table table;
};
+/**
+ * struct rockchip_thermal_sensor - hold the information of thermal sensor
+ * @thermal: pointer to the platform/configuration data
+ * @tzd: pointer to a thermal zone
+ * @id: identifier of the thermal sensor
+ */
struct rockchip_thermal_sensor {
struct rockchip_thermal_data *thermal;
struct thermal_zone_device *tzd;
int id;
};
+/**
+ * struct rockchip_thermal_data - hold the private data of thermal driver
+ * @chip: pointer to the platform/configuration data
+ * @pdev: platform device of thermal
+ * @reset: the reset controller of tsadc
+ * @sensors[SOC_MAX_SENSORS]: the thermal sensor array
+ * @clk: the controller clock, divided from the external 24MHz clock
+ * @pclk: the advanced peripherals bus clock
+ * @grf: the general register file, used for static configuration by software
+ * @regs: the base address of tsadc controller
+ * @tshut_temp: the hardware-controlled shutdown temperature value
+ * @tshut_mode: the hardware-controlled shutdown mode (0:CRU 1:GPIO)
+ * @tshut_polarity: the hardware-controlled active polarity (0:LOW 1:HIGH)
+ */
struct rockchip_thermal_data {
const struct rockchip_tsadc_chip *chip;
struct platform_device *pdev;
@@ -128,6 +163,7 @@ struct rockchip_thermal_data {
struct clk *clk;
struct clk *pclk;
+ struct regmap *grf;
void __iomem *regs;
int tshut_temp;
@@ -142,6 +178,7 @@ struct rockchip_thermal_data {
* TSADCV3_* are used for newer SoCs than RK3288. (e.g: RK3228, RK3399)
*
*/
+#define TSADCV2_USER_CON 0x00
#define TSADCV2_AUTO_CON 0x04
#define TSADCV2_INT_EN 0x08
#define TSADCV2_INT_PD 0x0c
@@ -155,12 +192,7 @@ struct rockchip_thermal_data {
#define TSADCV2_AUTO_EN BIT(0)
#define TSADCV2_AUTO_SRC_EN(chn) BIT(4 + (chn))
#define TSADCV2_AUTO_TSHUT_POLARITY_HIGH BIT(8)
-/**
- * TSADCV1_AUTO_Q_SEL_EN:
- * whether select (1024 - tsadc_q) as output
- * 1'b0:use tsadc_q as output(temperature-code is rising sequence)
- * 1'b1:use(1024 - tsadc_q) as output (temperature-code is falling sequence)
- */
+
#define TSADCV3_AUTO_Q_SEL_EN BIT(1)
#define TSADCV2_INT_SRC_EN(chn) BIT(chn)
@@ -177,19 +209,32 @@ struct rockchip_thermal_data {
#define TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT 4
#define TSADCV2_AUTO_PERIOD_TIME 250 /* msec */
#define TSADCV2_AUTO_PERIOD_HT_TIME 50 /* msec */
+#define TSADCV2_USER_INTER_PD_SOC 0x340 /* 13 clocks */
-struct tsadc_table {
- u32 code;
- int temp;
-};
+#define GRF_SARADC_TESTBIT 0x0e644
+#define GRF_TSADC_TESTBIT_L 0x0e648
+#define GRF_TSADC_TESTBIT_H 0x0e64c
+
+#define GRF_TSADC_TSEN_PD_ON (0x30003 << 0)
+#define GRF_TSADC_TSEN_PD_OFF (0x30000 << 0)
+#define GRF_SARADC_TESTBIT_ON (0x10001 << 2)
+#define GRF_TSADC_TESTBIT_H_ON (0x10001 << 2)
/**
+ * struct tsadc_table - code to temperature conversion table
+ * @code: the value of adc channel
+ * @temp: the temperature
* Note:
- * Code to Temperature mapping of the Temperature sensor is a piece wise linear
+ * code-to-temperature mapping of the temperature sensor is a piecewise linear
* curve. Any temperature or code falling between two given entries can be
* linearly interpolated.
- * Code to Temperature mapping should be updated based on sillcon results.
+ * Code to Temperature mapping should be updated based on manufacturer results.
*/
+struct tsadc_table {
+ u32 code;
+ int temp;
+};
+
static const struct tsadc_table rk3228_code_table[] = {
{0, -40000},
{588, -40000},
@@ -308,40 +353,40 @@ static const struct tsadc_table rk3368_code_table[] = {
static const struct tsadc_table rk3399_code_table[] = {
{0, -40000},
- {593, -40000},
- {598, -35000},
- {603, -30000},
- {609, -25000},
- {614, -20000},
- {619, -15000},
- {625, -10000},
- {630, -5000},
- {635, 0},
- {641, 5000},
- {646, 10000},
- {651, 15000},
- {657, 20000},
- {662, 25000},
- {667, 30000},
- {673, 35000},
- {678, 40000},
- {684, 45000},
- {689, 50000},
- {694, 55000},
- {700, 60000},
- {705, 65000},
- {711, 70000},
- {716, 75000},
- {722, 80000},
- {727, 85000},
- {733, 90000},
- {738, 95000},
- {743, 100000},
- {749, 105000},
- {754, 110000},
- {760, 115000},
- {765, 120000},
- {771, 125000},
+ {402, -40000},
+ {410, -35000},
+ {419, -30000},
+ {427, -25000},
+ {436, -20000},
+ {444, -15000},
+ {453, -10000},
+ {461, -5000},
+ {470, 0},
+ {478, 5000},
+ {487, 10000},
+ {496, 15000},
+ {504, 20000},
+ {513, 25000},
+ {521, 30000},
+ {530, 35000},
+ {538, 40000},
+ {547, 45000},
+ {555, 50000},
+ {564, 55000},
+ {573, 60000},
+ {581, 65000},
+ {590, 70000},
+ {599, 75000},
+ {607, 80000},
+ {616, 85000},
+ {624, 90000},
+ {633, 95000},
+ {642, 100000},
+ {650, 105000},
+ {659, 110000},
+ {668, 115000},
+ {677, 120000},
+ {685, 125000},
{TSADCV3_DATA_MASK, 125000},
};
@@ -405,8 +450,8 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
return -EAGAIN; /* Incorrect reading */
while (low <= high) {
- if (code >= table.id[mid - 1].code &&
- code < table.id[mid].code)
+ if (code <= table.id[mid].code &&
+ code > table.id[mid - 1].code)
break;
else if (code > table.id[mid].code)
low = mid + 1;
@@ -449,7 +494,7 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
* If the temperature is higher than COMP_INT or COMP_SHUT for
* "debounce" times, TSADC controller will generate interrupt or TSHUT.
*/
-static void rk_tsadcv2_initialize(void __iomem *regs,
+static void rk_tsadcv2_initialize(struct regmap *grf, void __iomem *regs,
enum tshut_polarity tshut_polarity)
{
if (tshut_polarity == TSHUT_HIGH_ACTIVE)
@@ -466,6 +511,62 @@ static void rk_tsadcv2_initialize(void __iomem *regs,
regs + TSADCV2_AUTO_PERIOD_HT);
writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
+
+ if (IS_ERR(grf)) {
+ pr_warn("%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+}
+
+/**
+ * rk_tsadcv3_initialize - initialize the TSADC controller.
+ *
+ * (1) The tsadc control power sequence.
+ *
+ * (2) Set TSADCV2_AUTO_PERIOD:
+ * Configure the interleave between every two accessing of
+ * TSADC in normal operation.
+ *
+ * (3) Set TSADCV2_AUTO_PERIOD_HT:
+ * Configure the interleave between every two accessing of
+ * TSADC after the temperature is higher than COM_SHUT or COM_INT.
+ *
+ * (4) Set TSADCV2_HIGHT_INT_DEBOUNCE and TSADCV2_HIGHT_TSHUT_DEBOUNCE:
+ * If the temperature is higher than COMP_INT or COMP_SHUT for
+ * "debounce" times, TSADC controller will generate interrupt or TSHUT.
+ */
+static void rk_tsadcv3_initialize(struct regmap *grf, void __iomem *regs,
+ enum tshut_polarity tshut_polarity)
+{
+ /* The tsadc control power sequence */
+ if (IS_ERR(grf)) {
+ /* Set interleave value to work around an IC time sync issue */
+ writel_relaxed(TSADCV2_USER_INTER_PD_SOC, regs +
+ TSADCV2_USER_CON);
+ } else {
+ regmap_write(grf, GRF_TSADC_TESTBIT_L, GRF_TSADC_TSEN_PD_ON);
+ mdelay(10);
+ regmap_write(grf, GRF_TSADC_TESTBIT_L, GRF_TSADC_TSEN_PD_OFF);
+ usleep_range(15, 100); /* The spec note says at least 15 us */
+ regmap_write(grf, GRF_SARADC_TESTBIT, GRF_SARADC_TESTBIT_ON);
+ regmap_write(grf, GRF_TSADC_TESTBIT_H, GRF_TSADC_TESTBIT_H_ON);
+ usleep_range(90, 200); /* The spec note says at least 90 us */
+ }
+
+ if (tshut_polarity == TSHUT_HIGH_ACTIVE)
+ writel_relaxed(0U | TSADCV2_AUTO_TSHUT_POLARITY_HIGH,
+ regs + TSADCV2_AUTO_CON);
+ else
+ writel_relaxed(0U & ~TSADCV2_AUTO_TSHUT_POLARITY_HIGH,
+ regs + TSADCV2_AUTO_CON);
+
+ writel_relaxed(TSADCV2_AUTO_PERIOD_TIME, regs + TSADCV2_AUTO_PERIOD);
+ writel_relaxed(TSADCV2_HIGHT_INT_DEBOUNCE_COUNT,
+ regs + TSADCV2_HIGHT_INT_DEBOUNCE);
+ writel_relaxed(TSADCV2_AUTO_PERIOD_HT_TIME,
+ regs + TSADCV2_AUTO_PERIOD_HT);
+ writel_relaxed(TSADCV2_HIGHT_TSHUT_DEBOUNCE_COUNT,
+ regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
}
static void rk_tsadcv2_irq_ack(void __iomem *regs)
@@ -498,10 +599,11 @@ static void rk_tsadcv2_control(void __iomem *regs, bool enable)
}
/**
- * @rk_tsadcv3_control:
- * TSADC controller works at auto mode, and some SoCs need set the tsadc_q_sel
- * bit on TSADCV2_AUTO_CON[1]. The (1024 - tsadc_q) as output adc value if
- * setting this bit to enable.
+ * rk_tsadcv3_control - enable or disable the tsadc controller.
+ *
+ * NOTE: The TSADC controller works in auto mode, and some SoCs need the
+ * tsadc_q_sel bit set on TSADCV2_AUTO_CON[1]. When this bit is enabled,
+ * (1024 - tsadc_q) is output as the adc value.
*/
static void rk_tsadcv3_control(void __iomem *regs, bool enable)
{
@@ -603,6 +705,30 @@ static const struct rockchip_tsadc_chip rk3288_tsadc_data = {
},
};
+static const struct rockchip_tsadc_chip rk3366_tsadc_data = {
+ .chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
+ .chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */
+ .chn_num = 2, /* two channels for tsadc */
+
+ .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO to the PMIC */
+ .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
+ .tshut_temp = 95000,
+
+ .initialize = rk_tsadcv3_initialize,
+ .irq_ack = rk_tsadcv3_irq_ack,
+ .control = rk_tsadcv3_control,
+ .get_temp = rk_tsadcv2_get_temp,
+ .set_tshut_temp = rk_tsadcv2_tshut_temp,
+ .set_tshut_mode = rk_tsadcv2_tshut_mode,
+
+ .table = {
+ .id = rk3228_code_table,
+ .length = ARRAY_SIZE(rk3228_code_table),
+ .data_mask = TSADCV3_DATA_MASK,
+ .mode = ADC_INCREMENT,
+ },
+};
+
static const struct rockchip_tsadc_chip rk3368_tsadc_data = {
.chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
.chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */
@@ -636,7 +762,7 @@ static const struct rockchip_tsadc_chip rk3399_tsadc_data = {
.tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
.tshut_temp = 95000,
- .initialize = rk_tsadcv2_initialize,
+ .initialize = rk_tsadcv3_initialize,
.irq_ack = rk_tsadcv3_irq_ack,
.control = rk_tsadcv3_control,
.get_temp = rk_tsadcv2_get_temp,
@@ -661,6 +787,10 @@ static const struct of_device_id of_rockchip_thermal_match[] = {
.data = (void *)&rk3288_tsadc_data,
},
{
+ .compatible = "rockchip,rk3366-tsadc",
+ .data = (void *)&rk3366_tsadc_data,
+ },
+ {
.compatible = "rockchip,rk3368-tsadc",
.data = (void *)&rk3368_tsadc_data,
},
@@ -768,6 +898,11 @@ static int rockchip_configure_from_dt(struct device *dev,
return -EINVAL;
}
+ /* The tsadc won't handle the error here, since some SoCs don't
+ * need this property.
+ */
+ thermal->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+
return 0;
}
@@ -786,8 +921,8 @@ rockchip_thermal_register_sensor(struct platform_device *pdev,
sensor->thermal = thermal;
sensor->id = id;
- sensor->tzd = thermal_zone_of_sensor_register(&pdev->dev, id, sensor,
- &rockchip_of_thermal_ops);
+ sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, id,
+ sensor, &rockchip_of_thermal_ops);
if (IS_ERR(sensor->tzd)) {
error = PTR_ERR(sensor->tzd);
dev_err(&pdev->dev, "failed to register sensor %d: %d\n",
@@ -815,7 +950,7 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
const struct of_device_id *match;
struct resource *res;
int irq;
- int i, j;
+ int i;
int error;
match = of_match_node(of_rockchip_thermal_match, np);
@@ -888,7 +1023,8 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
goto err_disable_pclk;
}
- thermal->chip->initialize(thermal->regs, thermal->tshut_polarity);
+ thermal->chip->initialize(thermal->grf, thermal->regs,
+ thermal->tshut_polarity);
for (i = 0; i < thermal->chip->chn_num; i++) {
error = rockchip_thermal_register_sensor(pdev, thermal,
@@ -898,9 +1034,6 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"failed to register sensor[%d] : error = %d\n",
i, error);
- for (j = 0; j < i; j++)
- thermal_zone_of_sensor_unregister(&pdev->dev,
- thermal->sensors[j].tzd);
goto err_disable_pclk;
}
}
@@ -912,7 +1045,7 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
if (error) {
dev_err(&pdev->dev,
"failed to request tsadc irq: %d\n", error);
- goto err_unregister_sensor;
+ goto err_disable_pclk;
}
thermal->chip->control(thermal->regs, true);
@@ -924,11 +1057,6 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
return 0;
-err_unregister_sensor:
- while (i--)
- thermal_zone_of_sensor_unregister(&pdev->dev,
- thermal->sensors[i].tzd);
-
err_disable_pclk:
clk_disable_unprepare(thermal->pclk);
err_disable_clk:
@@ -946,7 +1074,6 @@ static int rockchip_thermal_remove(struct platform_device *pdev)
struct rockchip_thermal_sensor *sensor = &thermal->sensors[i];
rockchip_thermal_toggle_sensor(sensor, false);
- thermal_zone_of_sensor_unregister(&pdev->dev, sensor->tzd);
}
thermal->chip->control(thermal->regs, false);
@@ -988,12 +1115,15 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev)
return error;
error = clk_enable(thermal->pclk);
- if (error)
+ if (error) {
+ clk_disable(thermal->clk);
return error;
+ }
rockchip_thermal_reset_controller(thermal->reset);
- thermal->chip->initialize(thermal->regs, thermal->tshut_polarity);
+ thermal->chip->initialize(thermal->grf, thermal->regs,
+ thermal->tshut_polarity);
for (i = 0; i < thermal->chip->chn_num; i++) {
int id = thermal->sensors[i].id;
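
Two of the rockchip hunks are easy to miss: the rk3399 conversion table is replaced wholesale, and the binary-search bounds in rk_tsadcv2_code_to_temp() flip from [id[mid-1].code, id[mid].code) to (id[mid-1].code, id[mid].code] so that a code equal to a table entry resolves to the correct interval. A standalone model of the fixed search plus linear interpolation, using a shortened ADC_INCREMENT table (values abbreviated from the rk3399 table above):

        #include <stdio.h>

        struct entry { unsigned int code; int temp; }; /* temp in mC */

        static const struct entry table[] = {
                {402, -40000}, {470, 0}, {538, 40000},
                {607, 80000}, {685, 125000},
        };

        static int code_to_temp(unsigned int code, int *temp)
        {
                int low = 1, high = sizeof(table) / sizeof(table[0]) - 1;
                int mid = (low + high) / 2;

                if (code <= table[0].code || code > table[high].code)
                        return -1;              /* out of range */

                while (low <= high) {
                        /* the fixed boundary test from the hunk above */
                        if (code <= table[mid].code &&
                            code > table[mid - 1].code)
                                break;
                        else if (code > table[mid].code)
                                low = mid + 1;
                        else
                                high = mid - 1;
                        mid = (low + high) / 2;
                }

                /* interpolate between the two bracketing entries */
                *temp = table[mid - 1].temp +
                        (long)(table[mid].temp - table[mid - 1].temp) *
                        (code - table[mid - 1].code) /
                        (table[mid].code - table[mid - 1].code);
                return 0;
        }

        int main(void)
        {
                int t;

                if (!code_to_temp(500, &t))
                        printf("%d mC\n", t);
                return 0;
        }
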
diff --git a/drivers/thermal/tango_thermal.c b/drivers/thermal/tango_thermal.c
new file mode 100644
index 000000000000..70e0d9f406e9
--- /dev/null
+++ b/drivers/thermal/tango_thermal.c
@@ -0,0 +1,109 @@
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/thermal.h>
+#include <linux/platform_device.h>
+
+/*
+ * According to a data sheet draft, "this temperature sensor uses a bandgap
+ * type of circuit to compare a voltage which has a negative temperature
+ * coefficient with a voltage that is proportional to absolute temperature.
+ * A resistor bank allows 41 different temperature thresholds to be selected
+ * and the logic output will then indicate whether the actual die temperature
+ * lies above or below the selected threshold."
+ */
+
+#define TEMPSI_CMD 0
+#define TEMPSI_RES 4
+#define TEMPSI_CFG 8
+
+#define CMD_OFF 0
+#define CMD_ON 1
+#define CMD_READ 2
+
+#define IDX_MIN 15
+#define IDX_MAX 40
+
+struct tango_thermal_priv {
+ void __iomem *base;
+ int thresh_idx;
+};
+
+static bool temp_above_thresh(void __iomem *base, int thresh_idx)
+{
+ writel(CMD_READ | thresh_idx << 8, base + TEMPSI_CMD);
+ usleep_range(10, 20);
+ writel(CMD_READ | thresh_idx << 8, base + TEMPSI_CMD);
+
+ return readl(base + TEMPSI_RES);
+}
+
+static int tango_get_temp(void *arg, int *res)
+{
+ struct tango_thermal_priv *priv = arg;
+ int idx = priv->thresh_idx;
+
+ if (temp_above_thresh(priv->base, idx)) {
+ /* Search upward by incrementing thresh_idx */
+ while (idx < IDX_MAX && temp_above_thresh(priv->base, ++idx))
+ cpu_relax();
+ idx = idx - 1; /* always return lower bound */
+ } else {
+ /* Search downward by decrementing thresh_idx */
+ while (idx > IDX_MIN && !temp_above_thresh(priv->base, --idx))
+ cpu_relax();
+ }
+
+ *res = (idx * 9 / 2 - 38) * 1000; /* millidegrees Celsius */
+ priv->thresh_idx = idx;
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops ops = {
+ .get_temp = tango_get_temp,
+};
+
+static int tango_thermal_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct tango_thermal_priv *priv;
+ struct thermal_zone_device *tzdev;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->thresh_idx = IDX_MIN;
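+	/* clear the configuration register and power the sensor on */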
+ writel(0, priv->base + TEMPSI_CFG);
+ writel(CMD_ON, priv->base + TEMPSI_CMD);
+
+ tzdev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, priv, &ops);
+ return PTR_ERR_OR_ZERO(tzdev);
+}
+
+static const struct of_device_id tango_sensor_ids[] = {
+ {
+ .compatible = "sigma,smp8758-thermal",
+ },
+ { /* sentinel */ }
+};
+
+static struct platform_driver tango_thermal_driver = {
+ .probe = tango_thermal_probe,
+ .driver = {
+ .name = "tango-thermal",
+ .of_match_table = tango_sensor_ids,
+ },
+};
+
+module_platform_driver(tango_thermal_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sigma Designs");
+MODULE_DESCRIPTION("Tango temperature sensor");
diff --git a/drivers/thermal/tegra/Kconfig b/drivers/thermal/tegra/Kconfig
new file mode 100644
index 000000000000..cec586ec7e4b
--- /dev/null
+++ b/drivers/thermal/tegra/Kconfig
@@ -0,0 +1,13 @@
+menu "NVIDIA Tegra thermal drivers"
+depends on ARCH_TEGRA
+
+config TEGRA_SOCTHERM
+ tristate "Tegra SOCTHERM thermal management"
+ help
+ Enable this option for integrated thermal management support on NVIDIA
+ Tegra systems-on-chip. The driver supports four thermal zones
+ (CPU, GPU, MEM, PLLX). Cooling devices can be bound to the thermal
+ zones to manage temperatures. This option is also required for the
+ emergency thermal reset (thermtrip) feature to function.
+
+endmenu
diff --git a/drivers/thermal/tegra/Makefile b/drivers/thermal/tegra/Makefile
new file mode 100644
index 000000000000..1ce1af2cf0f5
--- /dev/null
+++ b/drivers/thermal/tegra/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_TEGRA_SOCTHERM) += tegra-soctherm.o
+
+tegra-soctherm-y := soctherm.o soctherm-fuse.o
+tegra-soctherm-$(CONFIG_ARCH_TEGRA_124_SOC) += tegra124-soctherm.o
+tegra-soctherm-$(CONFIG_ARCH_TEGRA_132_SOC) += tegra132-soctherm.o
+tegra-soctherm-$(CONFIG_ARCH_TEGRA_210_SOC) += tegra210-soctherm.o
diff --git a/drivers/thermal/tegra/soctherm-fuse.c b/drivers/thermal/tegra/soctherm-fuse.c
new file mode 100644
index 000000000000..29963180c453
--- /dev/null
+++ b/drivers/thermal/tegra/soctherm-fuse.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <soc/tegra/fuse.h>
+
+#include "soctherm.h"
+
+#define NOMINAL_CALIB_FT 105
+#define NOMINAL_CALIB_CP 25
+
+#define FUSE_TSENSOR_CALIB_CP_TS_BASE_MASK 0x1fff
+#define FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK (0x1fff << 13)
+#define FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT 13
+
+#define FUSE_TSENSOR_COMMON 0x180
+
+/*
+ * Tegra210: Layout of bits in FUSE_TSENSOR_COMMON:
+ * 3 2 1 0
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | BASE_FT | BASE_CP | SHFT_FT | SHIFT_CP |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Tegra12x, etc:
+ * In chips prior to Tegra210, this fuse was incorrectly sized as 26 bits,
+ * and didn't hold SHIFT_CP in [31:26]. Therefore these missing six bits
+ * were obtained via the FUSE_SPARE_REALIGNMENT_REG register [5:0].
+ *
+ * FUSE_TSENSOR_COMMON:
+ * 3 2 1 0
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |-----------| SHFT_FT | BASE_FT | BASE_CP |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * FUSE_SPARE_REALIGNMENT_REG:
+ * 3 2 1 0
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |---------------------------------------------------| SHIFT_CP |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+#define CALIB_COEFFICIENT 1000000LL
+
+/**
+ * div64_s64_precise() - wrapper for div64_s64()
+ * @a: the dividend
+ * @b: the divisor
+ *
+ * Implements division with more accurate rounding than plain truncation
+ * by scaling the dividend up by 16 bits before dividing, so that the
+ * intermediate quotient carries 16 fractional bits of precision.
+ *
+ * Return: the quotient of a / b.
+ */
+static s64 div64_s64_precise(s64 a, s32 b)
+{
+ s64 r, al;
+
+ /* Scale up for increased precision division */
+ al = a << 16;
+
+ r = div64_s64(al * 2 + 1, 2 * b);
+ return r >> 16;
+}
+
+int tegra_calc_shared_calib(const struct tegra_soctherm_fuse *tfuse,
+ struct tsensor_shared_calib *shared)
+{
+ u32 val;
+ s32 shifted_cp, shifted_ft;
+ int err;
+
+ err = tegra_fuse_readl(FUSE_TSENSOR_COMMON, &val);
+ if (err)
+ return err;
+
+ shared->base_cp = (val & tfuse->fuse_base_cp_mask) >>
+ tfuse->fuse_base_cp_shift;
+ shared->base_ft = (val & tfuse->fuse_base_ft_mask) >>
+ tfuse->fuse_base_ft_shift;
+
+ shifted_ft = (val & tfuse->fuse_shift_ft_mask) >>
+ tfuse->fuse_shift_ft_shift;
+ shifted_ft = sign_extend32(shifted_ft, 4);
+
+ if (tfuse->fuse_spare_realignment) {
+ err = tegra_fuse_readl(tfuse->fuse_spare_realignment, &val);
+ if (err)
+ return err;
+ }
+
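+	/*
+	 * Without a spare realignment register (Tegra210), SHIFT_CP sits
+	 * in bits [5:0] of FUSE_TSENSOR_COMMON itself, so 'val' still
+	 * holds the right word here.
+	 */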
+ shifted_cp = sign_extend32(val, 5);
+
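+	/* actual_temp_{cp,ft} end up in units of 0.5 degrees Celsius */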
+ shared->actual_temp_cp = 2 * NOMINAL_CALIB_CP + shifted_cp;
+ shared->actual_temp_ft = 2 * NOMINAL_CALIB_FT + shifted_ft;
+
+ return 0;
+}
+
+int tegra_calc_tsensor_calib(const struct tegra_tsensor *sensor,
+ const struct tsensor_shared_calib *shared,
+ u32 *calibration)
+{
+ const struct tegra_tsensor_group *sensor_group;
+ u32 val, calib;
+ s32 actual_tsensor_ft, actual_tsensor_cp;
+ s32 delta_sens, delta_temp;
+ s32 mult, div;
+ s16 therma, thermb;
+ s64 temp;
+ int err;
+
+ sensor_group = sensor->group;
+
+ err = tegra_fuse_readl(sensor->calib_fuse_offset, &val);
+ if (err)
+ return err;
+
+ actual_tsensor_cp = (shared->base_cp * 64) + sign_extend32(val, 12);
+ val = (val & FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK) >>
+ FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT;
+ actual_tsensor_ft = (shared->base_ft * 32) + sign_extend32(val, 12);
+
+ delta_sens = actual_tsensor_ft - actual_tsensor_cp;
+ delta_temp = shared->actual_temp_ft - shared->actual_temp_cp;
+
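+	/*
+	 * Fit the line "temp = therma * counts + thermb" through the two
+	 * calibration points (CP and FT); mult/div compensate for pdiv and
+	 * tsample differing between ATE calibration and runtime.
+	 */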
+ mult = sensor_group->pdiv * sensor->config->tsample_ate;
+ div = sensor->config->tsample * sensor_group->pdiv_ate;
+
+ temp = (s64)delta_temp * (1LL << 13) * mult;
+ therma = div64_s64_precise(temp, (s64)delta_sens * div);
+
+ temp = ((s64)actual_tsensor_ft * shared->actual_temp_cp) -
+ ((s64)actual_tsensor_cp * shared->actual_temp_ft);
+ thermb = div64_s64_precise(temp, delta_sens);
+
+ temp = (s64)therma * sensor->fuse_corr_alpha;
+ therma = div64_s64_precise(temp, CALIB_COEFFICIENT);
+
+ temp = (s64)thermb * sensor->fuse_corr_alpha + sensor->fuse_corr_beta;
+ thermb = div64_s64_precise(temp, CALIB_COEFFICIENT);
+
+ calib = ((u16)therma << SENSOR_CONFIG2_THERMA_SHIFT) |
+ ((u16)thermb << SENSOR_CONFIG2_THERMB_SHIFT);
+
+ *calibration = calib;
+
+ return 0;
+}
+
+MODULE_AUTHOR("Wei Ni <wni@nvidia.com>");
+MODULE_DESCRIPTION("Tegra SOCTHERM fuse management");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
new file mode 100644
index 000000000000..b8651726201e
--- /dev/null
+++ b/drivers/thermal/tegra/soctherm.c
@@ -0,0 +1,685 @@
+/*
+ * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author:
+ * Mikko Perttunen <mperttunen@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/thermal.h>
+
+#include <dt-bindings/thermal/tegra124-soctherm.h>
+
+#include "soctherm.h"
+
+#define SENSOR_CONFIG0 0
+#define SENSOR_CONFIG0_STOP BIT(0)
+#define SENSOR_CONFIG0_CPTR_OVER BIT(2)
+#define SENSOR_CONFIG0_OVER BIT(3)
+#define SENSOR_CONFIG0_TCALC_OVER BIT(4)
+#define SENSOR_CONFIG0_TALL_MASK (0xfffff << 8)
+#define SENSOR_CONFIG0_TALL_SHIFT 8
+
+#define SENSOR_CONFIG1 4
+#define SENSOR_CONFIG1_TSAMPLE_MASK 0x3ff
+#define SENSOR_CONFIG1_TSAMPLE_SHIFT 0
+#define SENSOR_CONFIG1_TIDDQ_EN_MASK (0x3f << 15)
+#define SENSOR_CONFIG1_TIDDQ_EN_SHIFT 15
+#define SENSOR_CONFIG1_TEN_COUNT_MASK (0x3f << 24)
+#define SENSOR_CONFIG1_TEN_COUNT_SHIFT 24
+#define SENSOR_CONFIG1_TEMP_ENABLE BIT(31)
+
+/*
+ * SENSOR_CONFIG2 is defined in soctherm.h
+ * because it is also used by soctherm-fuse.c
+ */
+
+#define SENSOR_STATUS0 0xc
+#define SENSOR_STATUS0_VALID_MASK BIT(31)
+#define SENSOR_STATUS0_CAPTURE_MASK 0xffff
+
+#define SENSOR_STATUS1 0x10
+#define SENSOR_STATUS1_TEMP_VALID_MASK BIT(31)
+#define SENSOR_STATUS1_TEMP_MASK 0xffff
+
+#define READBACK_VALUE_MASK 0xff00
+#define READBACK_VALUE_SHIFT 8
+#define READBACK_ADD_HALF BIT(7)
+#define READBACK_NEGATE BIT(0)
+
+/* get val from register(r) mask bits(m) */
+#define REG_GET_MASK(r, m) (((r) & (m)) >> (ffs(m) - 1))
+/* set val(v) to mask bits(m) of register(r) */
+#define REG_SET_MASK(r, m, v) (((r) & ~(m)) | \
+ (((v) & (m >> (ffs(m) - 1))) << (ffs(m) - 1)))
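+/*
+ * e.g. REG_GET_MASK(0x00ab0000, 0x00ff0000) == 0xab,
+ * REG_SET_MASK(0, 0x00ff0000, 0xab) == 0x00ab0000
+ */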
+
+static const int min_low_temp = -127000;
+static const int max_high_temp = 127000;
+
+struct tegra_thermctl_zone {
+ void __iomem *reg;
+ struct device *dev;
+ struct thermal_zone_device *tz;
+ const struct tegra_tsensor_group *sg;
+};
+
+struct tegra_soctherm {
+ struct reset_control *reset;
+ struct clk *clock_tsensor;
+ struct clk *clock_soctherm;
+ void __iomem *regs;
+ struct thermal_zone_device **thermctl_tzs;
+
+ u32 *calib;
+ struct tegra_soctherm_soc *soc;
+
+ struct dentry *debugfs_dir;
+};
+
+static void enable_tsensor(struct tegra_soctherm *tegra, unsigned int i)
+{
+ const struct tegra_tsensor *sensor = &tegra->soc->tsensors[i];
+ void __iomem *base = tegra->regs + sensor->base;
+ unsigned int val;
+
+ val = sensor->config->tall << SENSOR_CONFIG0_TALL_SHIFT;
+ writel(val, base + SENSOR_CONFIG0);
+
+ val = (sensor->config->tsample - 1) << SENSOR_CONFIG1_TSAMPLE_SHIFT;
+ val |= sensor->config->tiddq_en << SENSOR_CONFIG1_TIDDQ_EN_SHIFT;
+ val |= sensor->config->ten_count << SENSOR_CONFIG1_TEN_COUNT_SHIFT;
+ val |= SENSOR_CONFIG1_TEMP_ENABLE;
+ writel(val, base + SENSOR_CONFIG1);
+
+ writel(tegra->calib[i], base + SENSOR_CONFIG2);
+}
+
+/*
+ * Translate from soctherm readback format to millicelsius.
+ * The soctherm readback format in bits is as follows:
+ * TTTTTTTT H______N
+ * where T's contain the temperature in Celsius,
+ * H denotes an addition of 0.5 Celsius and N denotes negation
+ * of the final value.
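+ * For example, a value of 0x2380 decodes to 35 C + 0.5 C = 35500.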
+ */
+static int translate_temp(u16 val)
+{
+ int t;
+
+ t = ((val & READBACK_VALUE_MASK) >> READBACK_VALUE_SHIFT) * 1000;
+ if (val & READBACK_ADD_HALF)
+ t += 500;
+ if (val & READBACK_NEGATE)
+ t *= -1;
+
+ return t;
+}
+
+static int tegra_thermctl_get_temp(void *data, int *out_temp)
+{
+ struct tegra_thermctl_zone *zone = data;
+ u32 val;
+
+ val = readl(zone->reg);
+ val = REG_GET_MASK(val, zone->sg->sensor_temp_mask);
+ *out_temp = translate_temp(val);
+
+ return 0;
+}
+
+static int
+thermtrip_program(struct device *dev, const struct tegra_tsensor_group *sg,
+ int trip_temp);
+
+static int tegra_thermctl_set_trip_temp(void *data, int trip, int temp)
+{
+ struct tegra_thermctl_zone *zone = data;
+ struct thermal_zone_device *tz = zone->tz;
+ const struct tegra_tsensor_group *sg = zone->sg;
+ struct device *dev = zone->dev;
+ enum thermal_trip_type type;
+ int ret;
+
+ if (!tz)
+ return -EINVAL;
+
+ ret = tz->ops->get_trip_type(tz, trip, &type);
+ if (ret)
+ return ret;
+
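+	/* only the critical trip is propagated to the THERMTRIP hardware */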
+ if (type != THERMAL_TRIP_CRITICAL)
+ return 0;
+
+ return thermtrip_program(dev, sg, temp);
+}
+
+static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = {
+ .get_temp = tegra_thermctl_get_temp,
+ .set_trip_temp = tegra_thermctl_set_trip_temp,
+};
+
+/**
+ * enforce_temp_range() - check and enforce temperature range [min, max]
+ * @dev: struct device * of the SOC_THERM instance
+ * @trip_temp: the trip temperature to check
+ *
+ * Checks and enforces the permitted temperature range that the SOC_THERM
+ * HW can support, capping out-of-range values and logging the adjustment.
+ *
+ * Return: The clamped temperature in millicelsius.
+ */
+static int enforce_temp_range(struct device *dev, int trip_temp)
+{
+ int temp;
+
+ temp = clamp_val(trip_temp, min_low_temp, max_high_temp);
+ if (temp != trip_temp)
+ dev_info(dev, "soctherm: trip temperature %d forced to %d\n",
+ trip_temp, temp);
+ return temp;
+}
+
+/**
+ * thermtrip_program() - Configures the hardware to shut down the
+ * system if a given sensor group reaches a given temperature
+ * @dev: ptr to the struct device for the SOC_THERM IP block
+ * @sg: pointer to the sensor group to set the thermtrip temperature for
+ * @trip_temp: the temperature in millicelsius to trigger the thermal trip at
+ *
+ * Sets the thermal trip threshold of the given sensor group to be the
+ * @trip_temp. If this threshold is crossed, the hardware will shut
+ * down.
+ *
+ * Note that, although @trip_temp is specified in millicelsius, the
+ * hardware is programmed in degrees Celsius.
+ *
+ * Return: 0 upon success, or %-EINVAL upon failure.
+ */
+static int thermtrip_program(struct device *dev,
+ const struct tegra_tsensor_group *sg,
+ int trip_temp)
+{
+ struct tegra_soctherm *ts = dev_get_drvdata(dev);
+ int temp;
+ u32 r;
+
+ if (!sg || !sg->thermtrip_threshold_mask)
+ return -EINVAL;
+
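+	/* the threshold field is programmed in units of thresh_grain mC */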
+ temp = enforce_temp_range(dev, trip_temp) / ts->soc->thresh_grain;
+
+ r = readl(ts->regs + THERMCTL_THERMTRIP_CTL);
+ r = REG_SET_MASK(r, sg->thermtrip_threshold_mask, temp);
+ r = REG_SET_MASK(r, sg->thermtrip_enable_mask, 1);
+ r = REG_SET_MASK(r, sg->thermtrip_any_en_mask, 0);
+ writel(r, ts->regs + THERMCTL_THERMTRIP_CTL);
+
+ return 0;
+}
+
+/**
+ * tegra_soctherm_set_hwtrips() - set HW trip point from DT data
+ * @dev: struct device * of the SOC_THERM instance
+ * @sg: pointer to the sensor group to program the trip point for
+ * @tz: struct thermal_zone_device * to take the critical trip from
+ *
+ * Configure the SOC_THERM HW trip points, setting the "THERMTRIP"
+ * trip point using the "critical" type trip_temp from the thermal
+ * zone.
+ * Once configured, THERMTRIP will take action when the given
+ * SoC thermal sensor group reaches that temperature.
+ *
+ * Return: 0 upon success, or a negative error code on failure.
+ * "Success" does not mean that the trip was enabled; it could also
+ * mean that no node was found in DT.
+ * THERMTRIP has been enabled successfully when a message similar to
+ * this one appears on the serial console:
+ * "thermtrip: will shut down when sensor group XXX reaches YYYYYY mC"
+ */
+static int tegra_soctherm_set_hwtrips(struct device *dev,
+ const struct tegra_tsensor_group *sg,
+ struct thermal_zone_device *tz)
+{
+ int temperature;
+ int ret;
+
+ ret = tz->ops->get_crit_temp(tz, &temperature);
+ if (ret) {
+ dev_warn(dev, "thermtrip: %s: missing critical temperature\n",
+ sg->name);
+ return ret;
+ }
+
+ ret = thermtrip_program(dev, sg, temperature);
+ if (ret) {
+ dev_err(dev, "thermtrip: %s: error during enable\n",
+ sg->name);
+ return ret;
+ }
+
+ dev_info(dev,
+ "thermtrip: will shut down when %s reaches %d mC\n",
+ sg->name, temperature);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int regs_show(struct seq_file *s, void *data)
+{
+ struct platform_device *pdev = s->private;
+ struct tegra_soctherm *ts = platform_get_drvdata(pdev);
+ const struct tegra_tsensor *tsensors = ts->soc->tsensors;
+ const struct tegra_tsensor_group **ttgs = ts->soc->ttgs;
+ u32 r, state;
+ int i;
+
+ seq_puts(s, "-----TSENSE (convert HW)-----\n");
+
+ for (i = 0; i < ts->soc->num_tsensors; i++) {
+ r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG1);
+ state = REG_GET_MASK(r, SENSOR_CONFIG1_TEMP_ENABLE);
+
+ seq_printf(s, "%s: ", tsensors[i].name);
+ seq_printf(s, "En(%d) ", state);
+
+ if (!state) {
+ seq_puts(s, "\n");
+ continue;
+ }
+
+ state = REG_GET_MASK(r, SENSOR_CONFIG1_TIDDQ_EN_MASK);
+ seq_printf(s, "tiddq(%d) ", state);
+ state = REG_GET_MASK(r, SENSOR_CONFIG1_TEN_COUNT_MASK);
+ seq_printf(s, "ten_count(%d) ", state);
+ state = REG_GET_MASK(r, SENSOR_CONFIG1_TSAMPLE_MASK);
+ seq_printf(s, "tsample(%d) ", state + 1);
+
+ r = readl(ts->regs + tsensors[i].base + SENSOR_STATUS1);
+ state = REG_GET_MASK(r, SENSOR_STATUS1_TEMP_VALID_MASK);
+ seq_printf(s, "Temp(%d/", state);
+ state = REG_GET_MASK(r, SENSOR_STATUS1_TEMP_MASK);
+ seq_printf(s, "%d) ", translate_temp(state));
+
+ r = readl(ts->regs + tsensors[i].base + SENSOR_STATUS0);
+ state = REG_GET_MASK(r, SENSOR_STATUS0_VALID_MASK);
+ seq_printf(s, "Capture(%d/", state);
+ state = REG_GET_MASK(r, SENSOR_STATUS0_CAPTURE_MASK);
+ seq_printf(s, "%d) ", state);
+
+ r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG0);
+ state = REG_GET_MASK(r, SENSOR_CONFIG0_STOP);
+ seq_printf(s, "Stop(%d) ", state);
+ state = REG_GET_MASK(r, SENSOR_CONFIG0_TALL_MASK);
+ seq_printf(s, "Tall(%d) ", state);
+ state = REG_GET_MASK(r, SENSOR_CONFIG0_TCALC_OVER);
+ seq_printf(s, "Over(%d/", state);
+ state = REG_GET_MASK(r, SENSOR_CONFIG0_OVER);
+ seq_printf(s, "%d/", state);
+ state = REG_GET_MASK(r, SENSOR_CONFIG0_CPTR_OVER);
+ seq_printf(s, "%d) ", state);
+
+ r = readl(ts->regs + tsensors[i].base + SENSOR_CONFIG2);
+ state = REG_GET_MASK(r, SENSOR_CONFIG2_THERMA_MASK);
+ seq_printf(s, "Therm_A/B(%d/", state);
+ state = REG_GET_MASK(r, SENSOR_CONFIG2_THERMB_MASK);
+ seq_printf(s, "%d)\n", (s16)state);
+ }
+
+ r = readl(ts->regs + SENSOR_PDIV);
+ seq_printf(s, "PDIV: 0x%x\n", r);
+
+ r = readl(ts->regs + SENSOR_HOTSPOT_OFF);
+ seq_printf(s, "HOTSPOT: 0x%x\n", r);
+
+ seq_puts(s, "\n");
+ seq_puts(s, "-----SOC_THERM-----\n");
+
+ r = readl(ts->regs + SENSOR_TEMP1);
+ state = REG_GET_MASK(r, SENSOR_TEMP1_CPU_TEMP_MASK);
+ seq_printf(s, "Temperatures: CPU(%d) ", translate_temp(state));
+ state = REG_GET_MASK(r, SENSOR_TEMP1_GPU_TEMP_MASK);
+ seq_printf(s, " GPU(%d) ", translate_temp(state));
+ r = readl(ts->regs + SENSOR_TEMP2);
+ state = REG_GET_MASK(r, SENSOR_TEMP2_PLLX_TEMP_MASK);
+ seq_printf(s, " PLLX(%d) ", translate_temp(state));
+ state = REG_GET_MASK(r, SENSOR_TEMP2_MEM_TEMP_MASK);
+ seq_printf(s, " MEM(%d)\n", translate_temp(state));
+
+ r = readl(ts->regs + THERMCTL_THERMTRIP_CTL);
+ state = REG_GET_MASK(r, ttgs[0]->thermtrip_any_en_mask);
+ seq_printf(s, "Thermtrip Any En(%d)\n", state);
+ for (i = 0; i < ts->soc->num_ttgs; i++) {
+ state = REG_GET_MASK(r, ttgs[i]->thermtrip_enable_mask);
+ seq_printf(s, " %s En(%d) ", ttgs[i]->name, state);
+ state = REG_GET_MASK(r, ttgs[i]->thermtrip_threshold_mask);
+ state *= ts->soc->thresh_grain;
+ seq_printf(s, "Thresh(%d)\n", state);
+ }
+
+ return 0;
+}
+
+static int regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, regs_show, inode->i_private);
+}
+
+static const struct file_operations regs_fops = {
+ .open = regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void soctherm_debug_init(struct platform_device *pdev)
+{
+ struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
+ struct dentry *root, *file;
+
+ root = debugfs_create_dir("soctherm", NULL);
+ if (!root) {
+ dev_err(&pdev->dev, "failed to create debugfs directory\n");
+ return;
+ }
+
+ tegra->debugfs_dir = root;
+
+ file = debugfs_create_file("reg_contents", 0644, root,
+ pdev, &regs_fops);
+ if (!file) {
+ dev_err(&pdev->dev, "failed to create debugfs file\n");
+ debugfs_remove_recursive(tegra->debugfs_dir);
+ tegra->debugfs_dir = NULL;
+ }
+}
+#else
+static inline void soctherm_debug_init(struct platform_device *pdev) {}
+#endif
+
+static int soctherm_clk_enable(struct platform_device *pdev, bool enable)
+{
+ struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
+ int err;
+
+ if (!tegra->clock_soctherm || !tegra->clock_tsensor)
+ return -EINVAL;
+
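+	/* hold the block in reset while the clocks are gated or ungated */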
+ reset_control_assert(tegra->reset);
+
+ if (enable) {
+ err = clk_prepare_enable(tegra->clock_soctherm);
+ if (err) {
+ reset_control_deassert(tegra->reset);
+ return err;
+ }
+
+ err = clk_prepare_enable(tegra->clock_tsensor);
+ if (err) {
+ clk_disable_unprepare(tegra->clock_soctherm);
+ reset_control_deassert(tegra->reset);
+ return err;
+ }
+ } else {
+ clk_disable_unprepare(tegra->clock_tsensor);
+ clk_disable_unprepare(tegra->clock_soctherm);
+ }
+
+ reset_control_deassert(tegra->reset);
+
+ return 0;
+}
+
+static void soctherm_init(struct platform_device *pdev)
+{
+ struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
+ const struct tegra_tsensor_group **ttgs = tegra->soc->ttgs;
+ int i;
+ u32 pdiv, hotspot;
+
+ /* Initialize raw sensors */
+ for (i = 0; i < tegra->soc->num_tsensors; ++i)
+ enable_tsensor(tegra, i);
+
+ /* program pdiv and hotspot offsets per THERM */
+ pdiv = readl(tegra->regs + SENSOR_PDIV);
+ hotspot = readl(tegra->regs + SENSOR_HOTSPOT_OFF);
+ for (i = 0; i < tegra->soc->num_ttgs; ++i) {
+ pdiv = REG_SET_MASK(pdiv, ttgs[i]->pdiv_mask,
+ ttgs[i]->pdiv);
+		/* hotspot offsets are measured from PLLX, so skip PLLX itself */
+ if (ttgs[i]->id == TEGRA124_SOCTHERM_SENSOR_PLLX)
+ continue;
+ hotspot = REG_SET_MASK(hotspot,
+ ttgs[i]->pllx_hotspot_mask,
+ ttgs[i]->pllx_hotspot_diff);
+ }
+ writel(pdiv, tegra->regs + SENSOR_PDIV);
+ writel(hotspot, tegra->regs + SENSOR_HOTSPOT_OFF);
+}
+
+static const struct of_device_id tegra_soctherm_of_match[] = {
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+ {
+ .compatible = "nvidia,tegra124-soctherm",
+ .data = &tegra124_soctherm,
+ },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_132_SOC
+ {
+ .compatible = "nvidia,tegra132-soctherm",
+ .data = &tegra132_soctherm,
+ },
+#endif
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+ {
+ .compatible = "nvidia,tegra210-soctherm",
+ .data = &tegra210_soctherm,
+ },
+#endif
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_soctherm_of_match);
+
+static int tegra_soctherm_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct tegra_soctherm *tegra;
+ struct thermal_zone_device *z;
+ struct tsensor_shared_calib shared_calib;
+ struct resource *res;
+ struct tegra_soctherm_soc *soc;
+ unsigned int i;
+ int err;
+
+ match = of_match_node(tegra_soctherm_of_match, pdev->dev.of_node);
+ if (!match)
+ return -ENODEV;
+
+ soc = (struct tegra_soctherm_soc *)match->data;
+ if (soc->num_ttgs > TEGRA124_SOCTHERM_SENSOR_NUM)
+ return -EINVAL;
+
+ tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
+ if (!tegra)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, tegra);
+
+ tegra->soc = soc;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tegra->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tegra->regs))
+ return PTR_ERR(tegra->regs);
+
+ tegra->reset = devm_reset_control_get(&pdev->dev, "soctherm");
+ if (IS_ERR(tegra->reset)) {
+ dev_err(&pdev->dev, "can't get soctherm reset\n");
+ return PTR_ERR(tegra->reset);
+ }
+
+ tegra->clock_tsensor = devm_clk_get(&pdev->dev, "tsensor");
+ if (IS_ERR(tegra->clock_tsensor)) {
+ dev_err(&pdev->dev, "can't get tsensor clock\n");
+ return PTR_ERR(tegra->clock_tsensor);
+ }
+
+ tegra->clock_soctherm = devm_clk_get(&pdev->dev, "soctherm");
+ if (IS_ERR(tegra->clock_soctherm)) {
+ dev_err(&pdev->dev, "can't get soctherm clock\n");
+ return PTR_ERR(tegra->clock_soctherm);
+ }
+
+ tegra->calib = devm_kzalloc(&pdev->dev,
+ sizeof(u32) * soc->num_tsensors,
+ GFP_KERNEL);
+ if (!tegra->calib)
+ return -ENOMEM;
+
+ /* calculate shared calibration data */
+ err = tegra_calc_shared_calib(soc->tfuse, &shared_calib);
+ if (err)
+ return err;
+
+	/* calculate per-tsensor calibration data */
+ for (i = 0; i < soc->num_tsensors; ++i) {
+ err = tegra_calc_tsensor_calib(&soc->tsensors[i],
+ &shared_calib,
+ &tegra->calib[i]);
+ if (err)
+ return err;
+ }
+
+ tegra->thermctl_tzs = devm_kzalloc(&pdev->dev,
+					   sizeof(*tegra->thermctl_tzs) *
+					   soc->num_ttgs,
+ GFP_KERNEL);
+ if (!tegra->thermctl_tzs)
+ return -ENOMEM;
+
+ err = soctherm_clk_enable(pdev, true);
+ if (err)
+ return err;
+
+ soctherm_init(pdev);
+
+ for (i = 0; i < soc->num_ttgs; ++i) {
+ struct tegra_thermctl_zone *zone =
+ devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL);
+ if (!zone) {
+ err = -ENOMEM;
+ goto disable_clocks;
+ }
+
+ zone->reg = tegra->regs + soc->ttgs[i]->sensor_temp_offset;
+ zone->dev = &pdev->dev;
+ zone->sg = soc->ttgs[i];
+
+ z = devm_thermal_zone_of_sensor_register(&pdev->dev,
+ soc->ttgs[i]->id, zone,
+ &tegra_of_thermal_ops);
+ if (IS_ERR(z)) {
+ err = PTR_ERR(z);
+ dev_err(&pdev->dev, "failed to register sensor: %d\n",
+ err);
+ goto disable_clocks;
+ }
+
+ zone->tz = z;
+ tegra->thermctl_tzs[soc->ttgs[i]->id] = z;
+
+ /* Configure hw trip points */
+ tegra_soctherm_set_hwtrips(&pdev->dev, soc->ttgs[i], z);
+ }
+
+ soctherm_debug_init(pdev);
+
+ return 0;
+
+disable_clocks:
+ soctherm_clk_enable(pdev, false);
+
+ return err;
+}
+
+static int tegra_soctherm_remove(struct platform_device *pdev)
+{
+ struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(tegra->debugfs_dir);
+
+ soctherm_clk_enable(pdev, false);
+
+ return 0;
+}
+
+static int __maybe_unused soctherm_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ soctherm_clk_enable(pdev, false);
+
+ return 0;
+}
+
+static int __maybe_unused soctherm_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
+ struct tegra_soctherm_soc *soc = tegra->soc;
+ int err, i;
+
+ err = soctherm_clk_enable(pdev, true);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Resume failed: enable clocks failed\n");
+ return err;
+ }
+
+ soctherm_init(pdev);
+
+ for (i = 0; i < soc->num_ttgs; ++i) {
+ struct thermal_zone_device *tz;
+
+ tz = tegra->thermctl_tzs[soc->ttgs[i]->id];
+ tegra_soctherm_set_hwtrips(dev, soc->ttgs[i], tz);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tegra_soctherm_pm, soctherm_suspend, soctherm_resume);
+
+static struct platform_driver tegra_soctherm_driver = {
+ .probe = tegra_soctherm_probe,
+ .remove = tegra_soctherm_remove,
+ .driver = {
+ .name = "tegra_soctherm",
+ .pm = &tegra_soctherm_pm,
+ .of_match_table = tegra_soctherm_of_match,
+ },
+};
+module_platform_driver(tegra_soctherm_driver);
+
+MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra SOCTHERM thermal management driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/tegra/soctherm.h b/drivers/thermal/tegra/soctherm.h
new file mode 100644
index 000000000000..28e18ec4b4c3
--- /dev/null
+++ b/drivers/thermal/tegra/soctherm.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DRIVERS_THERMAL_TEGRA_SOCTHERM_H
+#define __DRIVERS_THERMAL_TEGRA_SOCTHERM_H
+
+#define SENSOR_CONFIG2 8
+#define SENSOR_CONFIG2_THERMA_MASK (0xffff << 16)
+#define SENSOR_CONFIG2_THERMA_SHIFT 16
+#define SENSOR_CONFIG2_THERMB_MASK 0xffff
+#define SENSOR_CONFIG2_THERMB_SHIFT 0
+
+#define THERMCTL_THERMTRIP_CTL 0x80
+/* bit definitions live in the per-SoC device files (tegra*-soctherm.c) */
+
+#define SENSOR_PDIV 0x1c0
+#define SENSOR_PDIV_CPU_MASK (0xf << 12)
+#define SENSOR_PDIV_GPU_MASK (0xf << 8)
+#define SENSOR_PDIV_MEM_MASK (0xf << 4)
+#define SENSOR_PDIV_PLLX_MASK (0xf << 0)
+
+#define SENSOR_HOTSPOT_OFF 0x1c4
+#define SENSOR_HOTSPOT_CPU_MASK (0xff << 16)
+#define SENSOR_HOTSPOT_GPU_MASK (0xff << 8)
+#define SENSOR_HOTSPOT_MEM_MASK (0xff << 0)
+
+#define SENSOR_TEMP1 0x1c8
+#define SENSOR_TEMP1_CPU_TEMP_MASK (0xffff << 16)
+#define SENSOR_TEMP1_GPU_TEMP_MASK 0xffff
+#define SENSOR_TEMP2 0x1cc
+#define SENSOR_TEMP2_MEM_TEMP_MASK (0xffff << 16)
+#define SENSOR_TEMP2_PLLX_TEMP_MASK 0xffff
+
+/**
+ * struct tegra_tsensor_group - SOC_THERM sensor group data
+ * @name: short name of the temperature sensor group
+ * @id: numeric ID of the temperature sensor group
+ * @sensor_temp_offset: offset of the SENSOR_TEMP* register
+ * @sensor_temp_mask: bit mask for this sensor group in SENSOR_TEMP* register
+ * @pdiv: the sensor count post-divider to use during runtime
+ * @pdiv_ate: the sensor count post-divider used during automated test
+ * @pdiv_mask: register bitfield mask for the PDIV field for this sensor
+ * @pllx_hotspot_diff: hotspot offset from the PLLX sensor, must be 0 for
+ *                     the PLLX sensor group
+ * @pllx_hotspot_mask: register bitfield mask for the HOTSPOT field
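+ * @thermtrip_enable_mask: bitfield enabling thermtrip for this sensor group
+ * @thermtrip_any_en_mask: bitfield for the "any sensor" thermtrip enable
+ * @thermtrip_threshold_mask: bitfield holding this group's thermtrip threshold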
+ */
+struct tegra_tsensor_group {
+ const char *name;
+ u8 id;
+ u16 sensor_temp_offset;
+ u32 sensor_temp_mask;
+ u32 pdiv, pdiv_ate, pdiv_mask;
+ u32 pllx_hotspot_diff, pllx_hotspot_mask;
+ u32 thermtrip_enable_mask;
+ u32 thermtrip_any_en_mask;
+ u32 thermtrip_threshold_mask;
+};
+
+struct tegra_tsensor_configuration {
+ u32 tall, tiddq_en, ten_count, pdiv, pdiv_ate, tsample, tsample_ate;
+};
+
+struct tegra_tsensor {
+ const char *name;
+ const u32 base;
+ const struct tegra_tsensor_configuration *config;
+ const u32 calib_fuse_offset;
+ /*
+ * Correction values used to modify values read from
+ * calibration fuses
+ */
+ const s32 fuse_corr_alpha, fuse_corr_beta;
+ const struct tegra_tsensor_group *group;
+};
+
+struct tegra_soctherm_fuse {
+ u32 fuse_base_cp_mask, fuse_base_cp_shift;
+ u32 fuse_base_ft_mask, fuse_base_ft_shift;
+ u32 fuse_shift_ft_mask, fuse_shift_ft_shift;
+ u32 fuse_spare_realignment;
+};
+
+struct tsensor_shared_calib {
+ u32 base_cp, base_ft;
+ u32 actual_temp_cp, actual_temp_ft;
+};
+
+struct tegra_soctherm_soc {
+ const struct tegra_tsensor *tsensors;
+ const unsigned int num_tsensors;
+ const struct tegra_tsensor_group **ttgs;
+ const unsigned int num_ttgs;
+ const struct tegra_soctherm_fuse *tfuse;
+ const int thresh_grain;
+};
+
+int tegra_calc_shared_calib(const struct tegra_soctherm_fuse *tfuse,
+ struct tsensor_shared_calib *shared);
+int tegra_calc_tsensor_calib(const struct tegra_tsensor *sensor,
+ const struct tsensor_shared_calib *shared,
+ u32 *calib);
+
+#ifdef CONFIG_ARCH_TEGRA_124_SOC
+extern const struct tegra_soctherm_soc tegra124_soctherm;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_132_SOC
+extern const struct tegra_soctherm_soc tegra132_soctherm;
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_210_SOC
+extern const struct tegra_soctherm_soc tegra210_soctherm;
+#endif
+
+#endif
+
diff --git a/drivers/thermal/tegra/tegra124-soctherm.c b/drivers/thermal/tegra/tegra124-soctherm.c
new file mode 100644
index 000000000000..beb9d36b9c8a
--- /dev/null
+++ b/drivers/thermal/tegra/tegra124-soctherm.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/thermal/tegra124-soctherm.h>
+
+#include "soctherm.h"
+
+#define TEGRA124_THERMTRIP_ANY_EN_MASK (0x1 << 28)
+#define TEGRA124_THERMTRIP_MEM_EN_MASK (0x1 << 27)
+#define TEGRA124_THERMTRIP_GPU_EN_MASK (0x1 << 26)
+#define TEGRA124_THERMTRIP_CPU_EN_MASK (0x1 << 25)
+#define TEGRA124_THERMTRIP_TSENSE_EN_MASK (0x1 << 24)
+#define TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK (0xff << 16)
+#define TEGRA124_THERMTRIP_CPU_THRESH_MASK (0xff << 8)
+#define TEGRA124_THERMTRIP_TSENSE_THRESH_MASK 0xff
+
+#define TEGRA124_THRESH_GRAIN 1000
+
+static const struct tegra_tsensor_configuration tegra124_tsensor_config = {
+ .tall = 16300,
+ .tiddq_en = 1,
+ .ten_count = 1,
+ .tsample = 120,
+ .tsample_ate = 480,
+};
+
+static const struct tegra_tsensor_group tegra124_tsensor_group_cpu = {
+ .id = TEGRA124_SOCTHERM_SENSOR_CPU,
+ .name = "cpu",
+ .sensor_temp_offset = SENSOR_TEMP1,
+ .sensor_temp_mask = SENSOR_TEMP1_CPU_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_CPU_MASK,
+ .pllx_hotspot_diff = 10,
+ .pllx_hotspot_mask = SENSOR_HOTSPOT_CPU_MASK,
+ .thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA124_THERMTRIP_CPU_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA124_THERMTRIP_CPU_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra124_tsensor_group_gpu = {
+ .id = TEGRA124_SOCTHERM_SENSOR_GPU,
+ .name = "gpu",
+ .sensor_temp_offset = SENSOR_TEMP1,
+ .sensor_temp_mask = SENSOR_TEMP1_GPU_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_GPU_MASK,
+ .pllx_hotspot_diff = 5,
+ .pllx_hotspot_mask = SENSOR_HOTSPOT_GPU_MASK,
+ .thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA124_THERMTRIP_GPU_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra124_tsensor_group_pll = {
+ .id = TEGRA124_SOCTHERM_SENSOR_PLLX,
+ .name = "pll",
+ .sensor_temp_offset = SENSOR_TEMP2,
+ .sensor_temp_mask = SENSOR_TEMP2_PLLX_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_PLLX_MASK,
+ .thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA124_THERMTRIP_TSENSE_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA124_THERMTRIP_TSENSE_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra124_tsensor_group_mem = {
+ .id = TEGRA124_SOCTHERM_SENSOR_MEM,
+ .name = "mem",
+ .sensor_temp_offset = SENSOR_TEMP2,
+ .sensor_temp_mask = SENSOR_TEMP2_MEM_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_MEM_MASK,
+ .pllx_hotspot_diff = 0,
+ .pllx_hotspot_mask = SENSOR_HOTSPOT_MEM_MASK,
+ .thermtrip_any_en_mask = TEGRA124_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA124_THERMTRIP_MEM_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA124_THERMTRIP_GPUMEM_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group *tegra124_tsensor_groups[] = {
+ &tegra124_tsensor_group_cpu,
+ &tegra124_tsensor_group_gpu,
+ &tegra124_tsensor_group_pll,
+ &tegra124_tsensor_group_mem,
+};
+
+static const struct tegra_tsensor tegra124_tsensors[] = {
+ {
+ .name = "cpu0",
+ .base = 0xc0,
+ .config = &tegra124_tsensor_config,
+ .calib_fuse_offset = 0x098,
+ .fuse_corr_alpha = 1135400,
+ .fuse_corr_beta = -6266900,
+ .group = &tegra124_tsensor_group_cpu,
+ }, {
+ .name = "cpu1",
+ .base = 0xe0,
+ .config = &tegra124_tsensor_config,
+ .calib_fuse_offset = 0x084,
+ .fuse_corr_alpha = 1122220,
+ .fuse_corr_beta = -5700700,
+ .group = &tegra124_tsensor_group_cpu,
+ }, {
+ .name = "cpu2",
+ .base = 0x100,
+ .config = &tegra124_tsensor_config,
+ .calib_fuse_offset = 0x088,
+ .fuse_corr_alpha = 1127000,
+ .fuse_corr_beta = -6768200,
+ .group = &tegra124_tsensor_group_cpu,
+ }, {
+ .name = "cpu3",
+ .base = 0x120,
+ .config = &tegra124_tsensor_config,
+ .calib_fuse_offset = 0x12c,
+ .fuse_corr_alpha = 1110900,
+ .fuse_corr_beta = -6232000,
+ .group = &tegra124_tsensor_group_cpu,
+ }, {
+ .name = "mem0",
+ .base = 0x140,
+ .config = &tegra124_tsensor_config,
+ .calib_fuse_offset = 0x158,
+ .fuse_corr_alpha = 1122300,
+ .fuse_corr_beta = -5936400,
+ .group = &tegra124_tsensor_group_mem,
+ }, {
+ .name = "mem1",
+ .base = 0x160,
+ .config = &tegra124_tsensor_config,
+ .calib_fuse_offset = 0x15c,
+ .fuse_corr_alpha = 1145700,
+ .fuse_corr_beta = -7124600,
+ .group = &tegra124_tsensor_group_mem,
+ }, {
+ .name = "gpu",
+ .base = 0x180,
+ .config = &tegra124_tsensor_config,
+ .calib_fuse_offset = 0x154,
+ .fuse_corr_alpha = 1120100,
+ .fuse_corr_beta = -6000500,
+ .group = &tegra124_tsensor_group_gpu,
+ }, {
+ .name = "pllx",
+ .base = 0x1a0,
+ .config = &tegra124_tsensor_config,
+ .calib_fuse_offset = 0x160,
+ .fuse_corr_alpha = 1106500,
+ .fuse_corr_beta = -6729300,
+ .group = &tegra124_tsensor_group_pll,
+ },
+};
+
+/*
+ * Mask/shift bits in FUSE_TSENSOR_COMMON and
+ * FUSE_SPARE_REALIGNMENT_REG, which are described in
+ * soctherm-fuse.c
+ */
+static const struct tegra_soctherm_fuse tegra124_soctherm_fuse = {
+ .fuse_base_cp_mask = 0x3ff,
+ .fuse_base_cp_shift = 0,
+ .fuse_base_ft_mask = 0x7ff << 10,
+ .fuse_base_ft_shift = 10,
+ .fuse_shift_ft_mask = 0x1f << 21,
+ .fuse_shift_ft_shift = 21,
+ .fuse_spare_realignment = 0x1fc,
+};
+
+const struct tegra_soctherm_soc tegra124_soctherm = {
+ .tsensors = tegra124_tsensors,
+ .num_tsensors = ARRAY_SIZE(tegra124_tsensors),
+ .ttgs = tegra124_tsensor_groups,
+ .num_ttgs = ARRAY_SIZE(tegra124_tsensor_groups),
+ .tfuse = &tegra124_soctherm_fuse,
+ .thresh_grain = TEGRA124_THRESH_GRAIN,
+};
diff --git a/drivers/thermal/tegra/tegra132-soctherm.c b/drivers/thermal/tegra/tegra132-soctherm.c
new file mode 100644
index 000000000000..e2aa84e1b307
--- /dev/null
+++ b/drivers/thermal/tegra/tegra132-soctherm.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/thermal/tegra124-soctherm.h>
+
+#include "soctherm.h"
+
+#define TEGRA132_THERMTRIP_ANY_EN_MASK (0x1 << 28)
+#define TEGRA132_THERMTRIP_MEM_EN_MASK (0x1 << 27)
+#define TEGRA132_THERMTRIP_GPU_EN_MASK (0x1 << 26)
+#define TEGRA132_THERMTRIP_CPU_EN_MASK (0x1 << 25)
+#define TEGRA132_THERMTRIP_TSENSE_EN_MASK (0x1 << 24)
+#define TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK (0xff << 16)
+#define TEGRA132_THERMTRIP_CPU_THRESH_MASK (0xff << 8)
+#define TEGRA132_THERMTRIP_TSENSE_THRESH_MASK 0xff
+
+#define TEGRA132_THRESH_GRAIN 1000
+
+static const struct tegra_tsensor_configuration tegra132_tsensor_config = {
+ .tall = 16300,
+ .tiddq_en = 1,
+ .ten_count = 1,
+ .tsample = 120,
+ .tsample_ate = 480,
+};
+
+static const struct tegra_tsensor_group tegra132_tsensor_group_cpu = {
+ .id = TEGRA124_SOCTHERM_SENSOR_CPU,
+ .name = "cpu",
+ .sensor_temp_offset = SENSOR_TEMP1,
+ .sensor_temp_mask = SENSOR_TEMP1_CPU_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_CPU_MASK,
+ .pllx_hotspot_diff = 10,
+ .pllx_hotspot_mask = SENSOR_HOTSPOT_CPU_MASK,
+ .thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA132_THERMTRIP_CPU_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA132_THERMTRIP_CPU_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra132_tsensor_group_gpu = {
+ .id = TEGRA124_SOCTHERM_SENSOR_GPU,
+ .name = "gpu",
+ .sensor_temp_offset = SENSOR_TEMP1,
+ .sensor_temp_mask = SENSOR_TEMP1_GPU_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_GPU_MASK,
+ .pllx_hotspot_diff = 5,
+ .pllx_hotspot_mask = SENSOR_HOTSPOT_GPU_MASK,
+ .thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA132_THERMTRIP_GPU_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra132_tsensor_group_pll = {
+ .id = TEGRA124_SOCTHERM_SENSOR_PLLX,
+ .name = "pll",
+ .sensor_temp_offset = SENSOR_TEMP2,
+ .sensor_temp_mask = SENSOR_TEMP2_PLLX_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_PLLX_MASK,
+ .thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA132_THERMTRIP_TSENSE_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA132_THERMTRIP_TSENSE_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra132_tsensor_group_mem = {
+ .id = TEGRA124_SOCTHERM_SENSOR_MEM,
+ .name = "mem",
+ .sensor_temp_offset = SENSOR_TEMP2,
+ .sensor_temp_mask = SENSOR_TEMP2_MEM_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_MEM_MASK,
+ .pllx_hotspot_diff = 0,
+ .pllx_hotspot_mask = SENSOR_HOTSPOT_MEM_MASK,
+ .thermtrip_any_en_mask = TEGRA132_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA132_THERMTRIP_MEM_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA132_THERMTRIP_GPUMEM_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group *tegra132_tsensor_groups[] = {
+ &tegra132_tsensor_group_cpu,
+ &tegra132_tsensor_group_gpu,
+ &tegra132_tsensor_group_pll,
+ &tegra132_tsensor_group_mem,
+};
+
+static const struct tegra_tsensor tegra132_tsensors[] = {
+ {
+ .name = "cpu0",
+ .base = 0xc0,
+ .config = &tegra132_tsensor_config,
+ .calib_fuse_offset = 0x098,
+ .fuse_corr_alpha = 1126600,
+ .fuse_corr_beta = -9433500,
+ .group = &tegra132_tsensor_group_cpu,
+ }, {
+ .name = "cpu1",
+ .base = 0xe0,
+ .config = &tegra132_tsensor_config,
+ .calib_fuse_offset = 0x084,
+ .fuse_corr_alpha = 1110800,
+ .fuse_corr_beta = -7383000,
+ .group = &tegra132_tsensor_group_cpu,
+ }, {
+ .name = "cpu2",
+ .base = 0x100,
+ .config = &tegra132_tsensor_config,
+ .calib_fuse_offset = 0x088,
+ .fuse_corr_alpha = 1113800,
+ .fuse_corr_beta = -6215200,
+ .group = &tegra132_tsensor_group_cpu,
+ }, {
+ .name = "cpu3",
+ .base = 0x120,
+ .config = &tegra132_tsensor_config,
+ .calib_fuse_offset = 0x12c,
+ .fuse_corr_alpha = 1129600,
+ .fuse_corr_beta = -8196100,
+ .group = &tegra132_tsensor_group_cpu,
+ }, {
+ .name = "mem0",
+ .base = 0x140,
+ .config = &tegra132_tsensor_config,
+ .calib_fuse_offset = 0x158,
+ .fuse_corr_alpha = 1132900,
+ .fuse_corr_beta = -6755300,
+ .group = &tegra132_tsensor_group_mem,
+ }, {
+ .name = "mem1",
+ .base = 0x160,
+ .config = &tegra132_tsensor_config,
+ .calib_fuse_offset = 0x15c,
+ .fuse_corr_alpha = 1142300,
+ .fuse_corr_beta = -7374200,
+ .group = &tegra132_tsensor_group_mem,
+ }, {
+ .name = "gpu",
+ .base = 0x180,
+ .config = &tegra132_tsensor_config,
+ .calib_fuse_offset = 0x154,
+ .fuse_corr_alpha = 1125100,
+ .fuse_corr_beta = -6350400,
+ .group = &tegra132_tsensor_group_gpu,
+ }, {
+ .name = "pllx",
+ .base = 0x1a0,
+ .config = &tegra132_tsensor_config,
+ .calib_fuse_offset = 0x160,
+ .fuse_corr_alpha = 1118100,
+ .fuse_corr_beta = -8208800,
+ .group = &tegra132_tsensor_group_pll,
+ },
+};
+
+/*
+ * Mask/shift bits in FUSE_TSENSOR_COMMON and
+ * FUSE_SPARE_REALIGNMENT_REG, which are described in
+ * soctherm-fuse.c
+ */
+static const struct tegra_soctherm_fuse tegra132_soctherm_fuse = {
+ .fuse_base_cp_mask = 0x3ff,
+ .fuse_base_cp_shift = 0,
+ .fuse_base_ft_mask = 0x7ff << 10,
+ .fuse_base_ft_shift = 10,
+ .fuse_shift_ft_mask = 0x1f << 21,
+ .fuse_shift_ft_shift = 21,
+ .fuse_spare_realignment = 0x1fc,
+};
+
+const struct tegra_soctherm_soc tegra132_soctherm = {
+ .tsensors = tegra132_tsensors,
+ .num_tsensors = ARRAY_SIZE(tegra132_tsensors),
+ .ttgs = tegra132_tsensor_groups,
+ .num_ttgs = ARRAY_SIZE(tegra132_tsensor_groups),
+ .tfuse = &tegra132_soctherm_fuse,
+ .thresh_grain = TEGRA132_THRESH_GRAIN,
+};
diff --git a/drivers/thermal/tegra/tegra210-soctherm.c b/drivers/thermal/tegra/tegra210-soctherm.c
new file mode 100644
index 000000000000..19cc0ab66f0e
--- /dev/null
+++ b/drivers/thermal/tegra/tegra210-soctherm.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <soc/tegra/fuse.h>
+
+#include <dt-bindings/thermal/tegra124-soctherm.h>
+
+#include "soctherm.h"
+
+#define TEGRA210_THERMTRIP_ANY_EN_MASK (0x1 << 31)
+#define TEGRA210_THERMTRIP_MEM_EN_MASK (0x1 << 30)
+#define TEGRA210_THERMTRIP_GPU_EN_MASK (0x1 << 29)
+#define TEGRA210_THERMTRIP_CPU_EN_MASK (0x1 << 28)
+#define TEGRA210_THERMTRIP_TSENSE_EN_MASK (0x1 << 27)
+#define TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK (0x1ff << 18)
+#define TEGRA210_THERMTRIP_CPU_THRESH_MASK (0x1ff << 9)
+#define TEGRA210_THERMTRIP_TSENSE_THRESH_MASK 0x1ff
+
+#define TEGRA210_THRESH_GRAIN 500
+
+static const struct tegra_tsensor_configuration tegra210_tsensor_config = {
+ .tall = 16300,
+ .tiddq_en = 1,
+ .ten_count = 1,
+ .tsample = 120,
+ .tsample_ate = 480,
+};
+
+static const struct tegra_tsensor_group tegra210_tsensor_group_cpu = {
+ .id = TEGRA124_SOCTHERM_SENSOR_CPU,
+ .name = "cpu",
+ .sensor_temp_offset = SENSOR_TEMP1,
+ .sensor_temp_mask = SENSOR_TEMP1_CPU_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_CPU_MASK,
+ .pllx_hotspot_diff = 10,
+ .pllx_hotspot_mask = SENSOR_HOTSPOT_CPU_MASK,
+ .thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA210_THERMTRIP_CPU_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA210_THERMTRIP_CPU_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra210_tsensor_group_gpu = {
+ .id = TEGRA124_SOCTHERM_SENSOR_GPU,
+ .name = "gpu",
+ .sensor_temp_offset = SENSOR_TEMP1,
+ .sensor_temp_mask = SENSOR_TEMP1_GPU_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_GPU_MASK,
+ .pllx_hotspot_diff = 5,
+ .pllx_hotspot_mask = SENSOR_HOTSPOT_GPU_MASK,
+ .thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA210_THERMTRIP_GPU_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra210_tsensor_group_pll = {
+ .id = TEGRA124_SOCTHERM_SENSOR_PLLX,
+ .name = "pll",
+ .sensor_temp_offset = SENSOR_TEMP2,
+ .sensor_temp_mask = SENSOR_TEMP2_PLLX_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_PLLX_MASK,
+ .thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA210_THERMTRIP_TSENSE_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA210_THERMTRIP_TSENSE_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group tegra210_tsensor_group_mem = {
+ .id = TEGRA124_SOCTHERM_SENSOR_MEM,
+ .name = "mem",
+ .sensor_temp_offset = SENSOR_TEMP2,
+ .sensor_temp_mask = SENSOR_TEMP2_MEM_TEMP_MASK,
+ .pdiv = 8,
+ .pdiv_ate = 8,
+ .pdiv_mask = SENSOR_PDIV_MEM_MASK,
+ .pllx_hotspot_diff = 0,
+ .pllx_hotspot_mask = SENSOR_HOTSPOT_MEM_MASK,
+ .thermtrip_any_en_mask = TEGRA210_THERMTRIP_ANY_EN_MASK,
+ .thermtrip_enable_mask = TEGRA210_THERMTRIP_MEM_EN_MASK,
+ .thermtrip_threshold_mask = TEGRA210_THERMTRIP_GPUMEM_THRESH_MASK,
+};
+
+static const struct tegra_tsensor_group *tegra210_tsensor_groups[] = {
+ &tegra210_tsensor_group_cpu,
+ &tegra210_tsensor_group_gpu,
+ &tegra210_tsensor_group_pll,
+ &tegra210_tsensor_group_mem,
+};
+
+static const struct tegra_tsensor tegra210_tsensors[] = {
+ {
+ .name = "cpu0",
+ .base = 0xc0,
+ .config = &tegra210_tsensor_config,
+ .calib_fuse_offset = 0x098,
+ .fuse_corr_alpha = 1085000,
+ .fuse_corr_beta = 3244200,
+ .group = &tegra210_tsensor_group_cpu,
+ }, {
+ .name = "cpu1",
+ .base = 0xe0,
+ .config = &tegra210_tsensor_config,
+ .calib_fuse_offset = 0x084,
+ .fuse_corr_alpha = 1126200,
+ .fuse_corr_beta = -67500,
+ .group = &tegra210_tsensor_group_cpu,
+ }, {
+ .name = "cpu2",
+ .base = 0x100,
+ .config = &tegra210_tsensor_config,
+ .calib_fuse_offset = 0x088,
+ .fuse_corr_alpha = 1098400,
+ .fuse_corr_beta = 2251100,
+ .group = &tegra210_tsensor_group_cpu,
+ }, {
+ .name = "cpu3",
+ .base = 0x120,
+ .config = &tegra210_tsensor_config,
+ .calib_fuse_offset = 0x12c,
+ .fuse_corr_alpha = 1108000,
+ .fuse_corr_beta = 602700,
+ .group = &tegra210_tsensor_group_cpu,
+ }, {
+ .name = "mem0",
+ .base = 0x140,
+ .config = &tegra210_tsensor_config,
+ .calib_fuse_offset = 0x158,
+ .fuse_corr_alpha = 1069200,
+ .fuse_corr_beta = 3549900,
+ .group = &tegra210_tsensor_group_mem,
+ }, {
+ .name = "mem1",
+ .base = 0x160,
+ .config = &tegra210_tsensor_config,
+ .calib_fuse_offset = 0x15c,
+ .fuse_corr_alpha = 1173700,
+ .fuse_corr_beta = -6263600,
+ .group = &tegra210_tsensor_group_mem,
+ }, {
+ .name = "gpu",
+ .base = 0x180,
+ .config = &tegra210_tsensor_config,
+ .calib_fuse_offset = 0x154,
+ .fuse_corr_alpha = 1074300,
+ .fuse_corr_beta = 2734900,
+ .group = &tegra210_tsensor_group_gpu,
+ }, {
+ .name = "pllx",
+ .base = 0x1a0,
+ .config = &tegra210_tsensor_config,
+ .calib_fuse_offset = 0x160,
+ .fuse_corr_alpha = 1039700,
+ .fuse_corr_beta = 6829100,
+ .group = &tegra210_tsensor_group_pll,
+ },
+};
+
+/*
+ * Mask/shift bits in FUSE_TSENSOR_COMMON, which is described in
+ * soctherm-fuse.c; Tegra210 does not use a spare realignment register
+ */
+static const struct tegra_soctherm_fuse tegra210_soctherm_fuse = {
+ .fuse_base_cp_mask = 0x3ff << 11,
+ .fuse_base_cp_shift = 11,
+ .fuse_base_ft_mask = 0x7ff << 21,
+ .fuse_base_ft_shift = 21,
+ .fuse_shift_ft_mask = 0x1f << 6,
+ .fuse_shift_ft_shift = 6,
+ .fuse_spare_realignment = 0,
+};
+
+const struct tegra_soctherm_soc tegra210_soctherm = {
+ .tsensors = tegra210_tsensors,
+ .num_tsensors = ARRAY_SIZE(tegra210_tsensors),
+ .ttgs = tegra210_tsensor_groups,
+ .num_ttgs = ARRAY_SIZE(tegra210_tsensor_groups),
+ .tfuse = &tegra210_soctherm_fuse,
+ .thresh_grain = TEGRA210_THRESH_GRAIN,
+};
diff --git a/drivers/thermal/tegra_soctherm.c b/drivers/thermal/tegra_soctherm.c
deleted file mode 100644
index 136975220c92..000000000000
--- a/drivers/thermal/tegra_soctherm.c
+++ /dev/null
@@ -1,476 +0,0 @@
-/*
- * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
- *
- * Author:
- * Mikko Perttunen <mperttunen@nvidia.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/bitops.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-#include <linux/reset.h>
-#include <linux/thermal.h>
-
-#include <soc/tegra/fuse.h>
-
-#define SENSOR_CONFIG0 0
-#define SENSOR_CONFIG0_STOP BIT(0)
-#define SENSOR_CONFIG0_TALL_SHIFT 8
-#define SENSOR_CONFIG0_TCALC_OVER BIT(4)
-#define SENSOR_CONFIG0_OVER BIT(3)
-#define SENSOR_CONFIG0_CPTR_OVER BIT(2)
-
-#define SENSOR_CONFIG1 4
-#define SENSOR_CONFIG1_TSAMPLE_SHIFT 0
-#define SENSOR_CONFIG1_TIDDQ_EN_SHIFT 15
-#define SENSOR_CONFIG1_TEN_COUNT_SHIFT 24
-#define SENSOR_CONFIG1_TEMP_ENABLE BIT(31)
-
-#define SENSOR_CONFIG2 8
-#define SENSOR_CONFIG2_THERMA_SHIFT 16
-#define SENSOR_CONFIG2_THERMB_SHIFT 0
-
-#define SENSOR_PDIV 0x1c0
-#define SENSOR_PDIV_T124 0x8888
-#define SENSOR_HOTSPOT_OFF 0x1c4
-#define SENSOR_HOTSPOT_OFF_T124 0x00060600
-#define SENSOR_TEMP1 0x1c8
-#define SENSOR_TEMP2 0x1cc
-
-#define SENSOR_TEMP_MASK 0xffff
-#define READBACK_VALUE_MASK 0xff00
-#define READBACK_VALUE_SHIFT 8
-#define READBACK_ADD_HALF BIT(7)
-#define READBACK_NEGATE BIT(0)
-
-#define FUSE_TSENSOR8_CALIB 0x180
-#define FUSE_SPARE_REALIGNMENT_REG_0 0x1fc
-
-#define FUSE_TSENSOR_CALIB_CP_TS_BASE_MASK 0x1fff
-#define FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK (0x1fff << 13)
-#define FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT 13
-
-#define FUSE_TSENSOR8_CALIB_CP_TS_BASE_MASK 0x3ff
-#define FUSE_TSENSOR8_CALIB_FT_TS_BASE_MASK (0x7ff << 10)
-#define FUSE_TSENSOR8_CALIB_FT_TS_BASE_SHIFT 10
-
-#define FUSE_SPARE_REALIGNMENT_REG_SHIFT_CP_MASK 0x3f
-#define FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_MASK (0x1f << 21)
-#define FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_SHIFT 21
-
-#define NOMINAL_CALIB_FT_T124 105
-#define NOMINAL_CALIB_CP_T124 25
-
-struct tegra_tsensor_configuration {
- u32 tall, tsample, tiddq_en, ten_count, pdiv, tsample_ate, pdiv_ate;
-};
-
-struct tegra_tsensor {
- const struct tegra_tsensor_configuration *config;
- u32 base, calib_fuse_offset;
- /* Correction values used to modify values read from calibration fuses */
- s32 fuse_corr_alpha, fuse_corr_beta;
-};
-
-struct tegra_thermctl_zone {
- void __iomem *reg;
- unsigned int shift;
-};
-
-static const struct tegra_tsensor_configuration t124_tsensor_config = {
- .tall = 16300,
- .tsample = 120,
- .tiddq_en = 1,
- .ten_count = 1,
- .pdiv = 8,
- .tsample_ate = 480,
- .pdiv_ate = 8
-};
-
-static const struct tegra_tsensor t124_tsensors[] = {
- {
- .config = &t124_tsensor_config,
- .base = 0xc0,
- .calib_fuse_offset = 0x098,
- .fuse_corr_alpha = 1135400,
- .fuse_corr_beta = -6266900,
- },
- {
- .config = &t124_tsensor_config,
- .base = 0xe0,
- .calib_fuse_offset = 0x084,
- .fuse_corr_alpha = 1122220,
- .fuse_corr_beta = -5700700,
- },
- {
- .config = &t124_tsensor_config,
- .base = 0x100,
- .calib_fuse_offset = 0x088,
- .fuse_corr_alpha = 1127000,
- .fuse_corr_beta = -6768200,
- },
- {
- .config = &t124_tsensor_config,
- .base = 0x120,
- .calib_fuse_offset = 0x12c,
- .fuse_corr_alpha = 1110900,
- .fuse_corr_beta = -6232000,
- },
- {
- .config = &t124_tsensor_config,
- .base = 0x140,
- .calib_fuse_offset = 0x158,
- .fuse_corr_alpha = 1122300,
- .fuse_corr_beta = -5936400,
- },
- {
- .config = &t124_tsensor_config,
- .base = 0x160,
- .calib_fuse_offset = 0x15c,
- .fuse_corr_alpha = 1145700,
- .fuse_corr_beta = -7124600,
- },
- {
- .config = &t124_tsensor_config,
- .base = 0x180,
- .calib_fuse_offset = 0x154,
- .fuse_corr_alpha = 1120100,
- .fuse_corr_beta = -6000500,
- },
- {
- .config = &t124_tsensor_config,
- .base = 0x1a0,
- .calib_fuse_offset = 0x160,
- .fuse_corr_alpha = 1106500,
- .fuse_corr_beta = -6729300,
- },
-};
-
-struct tegra_soctherm {
- struct reset_control *reset;
- struct clk *clock_tsensor;
- struct clk *clock_soctherm;
- void __iomem *regs;
-
- struct thermal_zone_device *thermctl_tzs[4];
-};
-
-struct tsensor_shared_calibration {
- u32 base_cp, base_ft;
- u32 actual_temp_cp, actual_temp_ft;
-};
-
-static int calculate_shared_calibration(struct tsensor_shared_calibration *r)
-{
- u32 val, shifted_cp, shifted_ft;
- int err;
-
- err = tegra_fuse_readl(FUSE_TSENSOR8_CALIB, &val);
- if (err)
- return err;
- r->base_cp = val & FUSE_TSENSOR8_CALIB_CP_TS_BASE_MASK;
- r->base_ft = (val & FUSE_TSENSOR8_CALIB_FT_TS_BASE_MASK)
- >> FUSE_TSENSOR8_CALIB_FT_TS_BASE_SHIFT;
- val = ((val & FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_MASK)
- >> FUSE_SPARE_REALIGNMENT_REG_SHIFT_FT_SHIFT);
- shifted_ft = sign_extend32(val, 4);
-
- err = tegra_fuse_readl(FUSE_SPARE_REALIGNMENT_REG_0, &val);
- if (err)
- return err;
- shifted_cp = sign_extend32(val, 5);
-
- r->actual_temp_cp = 2 * NOMINAL_CALIB_CP_T124 + shifted_cp;
- r->actual_temp_ft = 2 * NOMINAL_CALIB_FT_T124 + shifted_ft;
-
- return 0;
-}
-
-static s64 div64_s64_precise(s64 a, s64 b)
-{
- s64 r, al;
-
- /* Scale up for increased precision division */
- al = a << 16;
-
- r = div64_s64(al * 2 + 1, 2 * b);
- return r >> 16;
-}
-
-static int
-calculate_tsensor_calibration(const struct tegra_tsensor *sensor,
- const struct tsensor_shared_calibration *shared,
- u32 *calib)
-{
- u32 val;
- s32 actual_tsensor_ft, actual_tsensor_cp, delta_sens, delta_temp,
- mult, div;
- s16 therma, thermb;
- s64 tmp;
- int err;
-
- err = tegra_fuse_readl(sensor->calib_fuse_offset, &val);
- if (err)
- return err;
-
- actual_tsensor_cp = (shared->base_cp * 64) + sign_extend32(val, 12);
- val = (val & FUSE_TSENSOR_CALIB_FT_TS_BASE_MASK)
- >> FUSE_TSENSOR_CALIB_FT_TS_BASE_SHIFT;
- actual_tsensor_ft = (shared->base_ft * 32) + sign_extend32(val, 12);
-
- delta_sens = actual_tsensor_ft - actual_tsensor_cp;
- delta_temp = shared->actual_temp_ft - shared->actual_temp_cp;
-
- mult = sensor->config->pdiv * sensor->config->tsample_ate;
- div = sensor->config->tsample * sensor->config->pdiv_ate;
-
- therma = div64_s64_precise((s64) delta_temp * (1LL << 13) * mult,
- (s64) delta_sens * div);
-
- tmp = (s64)actual_tsensor_ft * shared->actual_temp_cp -
- (s64)actual_tsensor_cp * shared->actual_temp_ft;
- thermb = div64_s64_precise(tmp, (s64)delta_sens);
-
- therma = div64_s64_precise((s64)therma * sensor->fuse_corr_alpha,
- (s64)1000000LL);
- thermb = div64_s64_precise((s64)thermb * sensor->fuse_corr_alpha +
- sensor->fuse_corr_beta, (s64)1000000LL);
-
- *calib = ((u16)therma << SENSOR_CONFIG2_THERMA_SHIFT) |
- ((u16)thermb << SENSOR_CONFIG2_THERMB_SHIFT);
-
- return 0;
-}
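Informally, what the fit above produces (a summary, not patch text):

/*
 * The sensor reading is converted as roughly
 *
 *     T = therma * counts + thermb
 *
 * with (therma, thermb) describing the line through the two fuse
 * calibration points (cp near 25C, ft near 105C). therma carries 2^13
 * fractional scaling, mult/div compensate for the different ATE
 * sampling parameters, and fuse_corr_alpha/fuse_corr_beta apply a
 * per-sensor linear correction in parts-per-million terms (an alpha
 * of ~1120000 is a gain of ~1.12).
 */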
-
-static int enable_tsensor(struct tegra_soctherm *tegra,
- const struct tegra_tsensor *sensor,
- const struct tsensor_shared_calibration *shared)
-{
- void __iomem *base = tegra->regs + sensor->base;
- unsigned int val;
- u32 calib;
- int err;
-
- err = calculate_tsensor_calibration(sensor, shared, &calib);
- if (err)
- return err;
-
- val = sensor->config->tall << SENSOR_CONFIG0_TALL_SHIFT;
- writel(val, base + SENSOR_CONFIG0);
-
- val = (sensor->config->tsample - 1) << SENSOR_CONFIG1_TSAMPLE_SHIFT;
- val |= sensor->config->tiddq_en << SENSOR_CONFIG1_TIDDQ_EN_SHIFT;
- val |= sensor->config->ten_count << SENSOR_CONFIG1_TEN_COUNT_SHIFT;
- val |= SENSOR_CONFIG1_TEMP_ENABLE;
- writel(val, base + SENSOR_CONFIG1);
-
- writel(calib, base + SENSOR_CONFIG2);
-
- return 0;
-}
-
-/*
- * Translate from soctherm readback format to millicelsius.
- * The soctherm readback format in bits is as follows:
- * TTTTTTTT H______N
- * where T's contain the temperature in Celsius,
- * H denotes an addition of 0.5 Celsius and N denotes negation
- * of the final value.
- */
-static int translate_temp(u16 val)
-{
- long t;
-
- t = ((val & READBACK_VALUE_MASK) >> READBACK_VALUE_SHIFT) * 1000;
- if (val & READBACK_ADD_HALF)
- t += 500;
- if (val & READBACK_NEGATE)
- t *= -1;
-
- return t;
-}
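A worked decode, assuming the layout in the comment above (temperature in bits 15:8, the half-degree flag at bit 7, the negate flag at bit 0):

/*
 * translate_temp(0x1d80): 0x1d = 29 -> 29 * 1000 + 500 =  29500 mC
 * translate_temp(0x1d81): same, with N set            -> -29500 mC
 */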
-
-static int tegra_thermctl_get_temp(void *data, int *out_temp)
-{
- struct tegra_thermctl_zone *zone = data;
- u32 val;
-
- val = (readl(zone->reg) >> zone->shift) & SENSOR_TEMP_MASK;
- *out_temp = translate_temp(val);
-
- return 0;
-}
-
-static const struct thermal_zone_of_device_ops tegra_of_thermal_ops = {
- .get_temp = tegra_thermctl_get_temp,
-};
-
-static const struct of_device_id tegra_soctherm_of_match[] = {
- { .compatible = "nvidia,tegra124-soctherm" },
- { },
-};
-MODULE_DEVICE_TABLE(of, tegra_soctherm_of_match);
-
-struct thermctl_zone_desc {
- unsigned int offset;
- unsigned int shift;
-};
-
-static const struct thermctl_zone_desc t124_thermctl_temp_zones[] = {
- { SENSOR_TEMP1, 16 },
- { SENSOR_TEMP2, 16 },
- { SENSOR_TEMP1, 0 },
- { SENSOR_TEMP2, 0 }
-};
-
-static int tegra_soctherm_probe(struct platform_device *pdev)
-{
- struct tegra_soctherm *tegra;
- struct thermal_zone_device *tz;
- struct tsensor_shared_calibration shared_calib;
- struct resource *res;
- unsigned int i;
- int err;
-
- const struct tegra_tsensor *tsensors = t124_tsensors;
-
- tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
- if (!tegra)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tegra->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(tegra->regs))
- return PTR_ERR(tegra->regs);
-
- tegra->reset = devm_reset_control_get(&pdev->dev, "soctherm");
- if (IS_ERR(tegra->reset)) {
- dev_err(&pdev->dev, "can't get soctherm reset\n");
- return PTR_ERR(tegra->reset);
- }
-
- tegra->clock_tsensor = devm_clk_get(&pdev->dev, "tsensor");
- if (IS_ERR(tegra->clock_tsensor)) {
- dev_err(&pdev->dev, "can't get tsensor clock\n");
- return PTR_ERR(tegra->clock_tsensor);
- }
-
- tegra->clock_soctherm = devm_clk_get(&pdev->dev, "soctherm");
- if (IS_ERR(tegra->clock_soctherm)) {
- dev_err(&pdev->dev, "can't get soctherm clock\n");
- return PTR_ERR(tegra->clock_soctherm);
- }
-
- reset_control_assert(tegra->reset);
-
- err = clk_prepare_enable(tegra->clock_soctherm);
- if (err)
- return err;
-
- err = clk_prepare_enable(tegra->clock_tsensor);
- if (err) {
- clk_disable_unprepare(tegra->clock_soctherm);
- return err;
- }
-
- reset_control_deassert(tegra->reset);
-
- /* Initialize raw sensors */
-
- err = calculate_shared_calibration(&shared_calib);
- if (err)
- goto disable_clocks;
-
- for (i = 0; i < ARRAY_SIZE(t124_tsensors); ++i) {
- err = enable_tsensor(tegra, tsensors + i, &shared_calib);
- if (err)
- goto disable_clocks;
- }
-
- writel(SENSOR_PDIV_T124, tegra->regs + SENSOR_PDIV);
- writel(SENSOR_HOTSPOT_OFF_T124, tegra->regs + SENSOR_HOTSPOT_OFF);
-
- /* Initialize thermctl sensors */
-
- for (i = 0; i < ARRAY_SIZE(tegra->thermctl_tzs); ++i) {
- struct tegra_thermctl_zone *zone =
- devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL);
- if (!zone) {
- err = -ENOMEM;
- goto unregister_tzs;
- }
-
- zone->reg = tegra->regs + t124_thermctl_temp_zones[i].offset;
- zone->shift = t124_thermctl_temp_zones[i].shift;
-
- tz = thermal_zone_of_sensor_register(&pdev->dev, i, zone,
- &tegra_of_thermal_ops);
- if (IS_ERR(tz)) {
- err = PTR_ERR(tz);
- dev_err(&pdev->dev, "failed to register sensor: %d\n",
- err);
- goto unregister_tzs;
- }
-
- tegra->thermctl_tzs[i] = tz;
- }
-
- return 0;
-
-unregister_tzs:
- while (i--)
- thermal_zone_of_sensor_unregister(&pdev->dev,
- tegra->thermctl_tzs[i]);
-
-disable_clocks:
- clk_disable_unprepare(tegra->clock_tsensor);
- clk_disable_unprepare(tegra->clock_soctherm);
-
- return err;
-}
-
-static int tegra_soctherm_remove(struct platform_device *pdev)
-{
- struct tegra_soctherm *tegra = platform_get_drvdata(pdev);
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(tegra->thermctl_tzs); ++i) {
- thermal_zone_of_sensor_unregister(&pdev->dev,
- tegra->thermctl_tzs[i]);
- }
-
- clk_disable_unprepare(tegra->clock_tsensor);
- clk_disable_unprepare(tegra->clock_soctherm);
-
- return 0;
-}
-
-static struct platform_driver tegra_soctherm_driver = {
- .probe = tegra_soctherm_probe,
- .remove = tegra_soctherm_remove,
- .driver = {
- .name = "tegra-soctherm",
- .of_match_table = tegra_soctherm_of_match,
- },
-};
-module_platform_driver(tegra_soctherm_driver);
-
-MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
-MODULE_DESCRIPTION("NVIDIA Tegra SOCTHERM thermal management driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
new file mode 100644
index 000000000000..73f55d6a1721
--- /dev/null
+++ b/drivers/thermal/thermal-generic-adc.c
@@ -0,0 +1,182 @@
+/*
+ * Generic ADC thermal driver
+ *
+ * Copyright (C) 2016 NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/iio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+struct gadc_thermal_info {
+ struct device *dev;
+ struct thermal_zone_device *tz_dev;
+ struct iio_channel *channel;
+ s32 *lookup_table;
+ int nlookup_table;
+};
+
+static int gadc_thermal_adc_to_temp(struct gadc_thermal_info *gti, int val)
+{
+ int temp, adc_hi, adc_lo;
+ int i;
+
+ for (i = 0; i < gti->nlookup_table; i++) {
+ if (val >= gti->lookup_table[2 * i + 1])
+ break;
+ }
+
+ if (i == 0) {
+ temp = gti->lookup_table[0];
+ } else if (i >= (gti->nlookup_table - 1)) {
+ temp = gti->lookup_table[2 * (gti->nlookup_table - 1)];
+ } else {
+ adc_hi = gti->lookup_table[2 * i - 1];
+ adc_lo = gti->lookup_table[2 * i + 1];
+ temp = gti->lookup_table[2 * i];
+ temp -= ((val - adc_lo) * 1000) / (adc_hi - adc_lo);
+ }
+
+ return temp;
+}
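A worked pass through the interpolation, using a hypothetical flattened {temperature, ADC} table in millicelsius, ordered by rising temperature and falling ADC code:

/*
 * table: ... 24000, 630,  25000, 620,  26000, 610,  27000, 600 ...
 *
 * val = 615 stops the scan at the {26000, 610} pair, giving
 *   adc_hi = 620, adc_lo = 610, temp = 26000
 *   temp -= (615 - 610) * 1000 / (620 - 610) -> 26000 - 500 = 25500
 *
 * Note the hard-coded 1000 assumes adjacent entries are exactly 1000
 * units of temperature apart; a table with a different step size
 * would interpolate incorrectly.
 */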
+
+static int gadc_thermal_get_temp(void *data, int *temp)
+{
+ struct gadc_thermal_info *gti = data;
+ int val;
+ int ret;
+
+ ret = iio_read_channel_processed(gti->channel, &val);
+ if (ret < 0) {
+ dev_err(gti->dev, "IIO channel read failed %d\n", ret);
+ return ret;
+ }
+ *temp = gadc_thermal_adc_to_temp(gti, val);
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops gadc_thermal_ops = {
+ .get_temp = gadc_thermal_get_temp,
+};
+
+static int gadc_thermal_read_linear_lookup_table(struct device *dev,
+ struct gadc_thermal_info *gti)
+{
+ struct device_node *np = dev->of_node;
+ int ntable;
+ int ret;
+
+ ntable = of_property_count_elems_of_size(np, "temperature-lookup-table",
+ sizeof(u32));
+ if (ntable < 0) {
+ dev_err(dev, "Lookup table is not provided\n");
+ return ntable;
+ }
+
+ if (ntable % 2) {
+ dev_err(dev, "Pair of temperature vs ADC read value missing\n");
+ return -EINVAL;
+ }
+
+ gti->lookup_table = devm_kzalloc(dev, sizeof(*gti->lookup_table) *
+ ntable, GFP_KERNEL);
+ if (!gti->lookup_table)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(np, "temperature-lookup-table",
+ (u32 *)gti->lookup_table, ntable);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read temperature lookup table: %d\n",
+ ret);
+ return ret;
+ }
+
+ gti->nlookup_table = ntable / 2;
+
+ return 0;
+}
+
+static int gadc_thermal_probe(struct platform_device *pdev)
+{
+ struct gadc_thermal_info *gti;
+ int ret;
+
+ if (!pdev->dev.of_node) {
+ dev_err(&pdev->dev, "Only DT based supported\n");
+ return -ENODEV;
+ }
+
+ gti = devm_kzalloc(&pdev->dev, sizeof(*gti), GFP_KERNEL);
+ if (!gti)
+ return -ENOMEM;
+
+ ret = gadc_thermal_read_linear_lookup_table(&pdev->dev, gti);
+ if (ret < 0)
+ return ret;
+
+ gti->dev = &pdev->dev;
+ platform_set_drvdata(pdev, gti);
+
+ gti->channel = iio_channel_get(&pdev->dev, "sensor-channel");
+ if (IS_ERR(gti->channel)) {
+ ret = PTR_ERR(gti->channel);
+ dev_err(&pdev->dev, "IIO channel not found: %d\n", ret);
+ return ret;
+ }
+
+ gti->tz_dev = thermal_zone_of_sensor_register(&pdev->dev, 0,
+ gti, &gadc_thermal_ops);
+ if (IS_ERR(gti->tz_dev)) {
+ ret = PTR_ERR(gti->tz_dev);
+ dev_err(&pdev->dev, "Thermal zone sensor register failed: %d\n",
+ ret);
+ goto sensor_fail;
+ }
+
+ return 0;
+
+sensor_fail:
+ iio_channel_release(gti->channel);
+
+ return ret;
+}
+
+static int gadc_thermal_remove(struct platform_device *pdev)
+{
+ struct gadc_thermal_info *gti = platform_get_drvdata(pdev);
+
+ thermal_zone_of_sensor_unregister(&pdev->dev, gti->tz_dev);
+ iio_channel_release(gti->channel);
+
+ return 0;
+}
+
+static const struct of_device_id of_adc_thermal_match[] = {
+ { .compatible = "generic-adc-thermal", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_adc_thermal_match);
+
+static struct platform_driver gadc_thermal_driver = {
+ .driver = {
+ .name = "generic-adc-thermal",
+ .of_match_table = of_adc_thermal_match,
+ },
+ .probe = gadc_thermal_probe,
+ .remove = gadc_thermal_remove,
+};
+
+module_platform_driver(gadc_thermal_driver);
+
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_DESCRIPTION("Generic ADC thermal driver using IIO framework with DT");
+MODULE_LICENSE("GPL v2");
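For context, a hypothetical devicetree node that would bind against this driver, using only the properties the probe path above reads (the channel index and table values are illustrative):

/*
 * thermal-sensor {
 *	compatible = "generic-adc-thermal";
 *	#thermal-sensor-cells = <0>;
 *	io-channels = <&adc 1>;
 *	io-channel-names = "sensor-channel";
 *	temperature-lookup-table = <(-40000) 940
 *				      25000  720
 *				     125000  100>;
 * };
 */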
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index b213a1222295..15c0a9ac2209 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -337,7 +337,7 @@ int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id,
return -EINVAL;
/* in case this is specified by DT */
- data->ti_thermal = thermal_zone_of_sensor_register(bgp->dev, id,
+ data->ti_thermal = devm_thermal_zone_of_sensor_register(bgp->dev, id,
data, &ti_of_thermal_ops);
if (IS_ERR(data->ti_thermal)) {
/* Create thermal zone */
@@ -368,9 +368,6 @@ int ti_thermal_remove_sensor(struct ti_bandgap *bgp, int id)
if (data && data->ti_thermal) {
if (data->our_zone)
thermal_zone_device_unregister(data->ti_thermal);
- else
- thermal_zone_of_sensor_unregister(bgp->dev,
- data->ti_thermal);
}
return 0;
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 7fc919f7da4d..97f0a2bd93ed 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -555,7 +555,7 @@ static int pkg_temp_thermal_cpu_callback(struct notifier_block *nfb,
{
unsigned int cpu = (unsigned long) hcpu;
- switch (action) {
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
case CPU_DOWN_FAILED:
get_core_online(cpu);
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index a2aa655f56c4..1b7331e40d79 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2360,7 +2360,7 @@ static int pl011_probe_dt_alias(int index, struct device *dev)
return ret;
ret = of_alias_get_id(np, "serial");
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
seen_dev_without_alias = true;
ret = index;
} else {
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 18971063f95f..699447aa8b43 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -654,7 +654,7 @@ static int sprd_probe_dt_alias(int index, struct device *dev)
return ret;
ret = of_alias_get_id(np, "serial");
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
ret = index;
else if (ret >= ARRAY_SIZE(sprd_port) || sprd_port[ret] != NULL) {
dev_warn(dev, "requested serial port %d not available.\n", ret);
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index 2ace0295408e..35fe3c80cfc0 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1290,15 +1290,6 @@ static void usbg_release_cmd(struct se_cmd *se_cmd)
percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}
-static int usbg_shutdown_session(struct se_session *se_sess)
-{
- return 0;
-}
-
-static void usbg_close_session(struct se_session *se_sess)
-{
-}
-
static u32 usbg_sess_get_index(struct se_session *se_sess)
{
return 0;
@@ -1735,8 +1726,6 @@ static const struct target_core_fabric_ops usbg_ops = {
.tpg_check_prod_mode_write_protect = usbg_check_false,
.tpg_get_inst_index = usbg_tpg_get_inst_index,
.release_cmd = usbg_release_cmd,
- .shutdown_session = usbg_shutdown_session,
- .close_session = usbg_close_session,
.sess_get_index = usbg_sess_get_index,
.sess_get_initiator_sid = NULL,
.write_pending = usbg_send_write_request,
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 0e6fd556c982..9d6320e8ff3e 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -333,16 +333,6 @@ static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}
-static int vhost_scsi_shutdown_session(struct se_session *se_sess)
-{
- return 0;
-}
-
-static void vhost_scsi_close_session(struct se_session *se_sess)
-{
- return;
-}
-
static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
{
return 0;
@@ -2114,8 +2104,6 @@ static struct target_core_fabric_ops vhost_scsi_ops = {
.tpg_get_inst_index = vhost_scsi_tpg_get_inst_index,
.release_cmd = vhost_scsi_release_cmd,
.check_stop_free = vhost_scsi_check_stop_free,
- .shutdown_session = vhost_scsi_shutdown_session,
- .close_session = vhost_scsi_close_session,
.sess_get_index = vhost_scsi_sess_get_index,
.sess_get_initiator_sid = NULL,
.write_pending = vhost_scsi_write_pending,
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index d8d583d32a37..c229b1a0d13b 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -713,7 +713,7 @@ static int da8xx_fb_config_clk_divider(struct da8xx_fb_par *par,
if (par->lcdc_clk_rate != lcdc_clk_rate) {
ret = clk_set_rate(par->lcdc_clk, lcdc_clk_rate);
- if (IS_ERR_VALUE(ret)) {
+ if (ret) {
dev_err(par->dev,
"unable to set clock rate at %u\n",
lcdc_clk_rate);
@@ -784,7 +784,7 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
int ret = 0;
ret = da8xx_fb_calc_config_clk_divider(par, panel);
- if (IS_ERR_VALUE(ret)) {
+ if (ret) {
dev_err(par->dev, "unable to configure clock\n");
return ret;
}
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index ff932624eaad..d6950e0802b7 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1399,15 +1399,6 @@ static void scsiback_release_cmd(struct se_cmd *se_cmd)
percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}
-static int scsiback_shutdown_session(struct se_session *se_sess)
-{
- return 0;
-}
-
-static void scsiback_close_session(struct se_session *se_sess)
-{
-}
-
static u32 scsiback_sess_get_index(struct se_session *se_sess)
{
return 0;
@@ -1841,8 +1832,6 @@ static const struct target_core_fabric_ops scsiback_ops = {
.tpg_get_inst_index = scsiback_tpg_get_inst_index,
.check_stop_free = scsiback_check_stop_free,
.release_cmd = scsiback_release_cmd,
- .shutdown_session = scsiback_shutdown_session,
- .close_session = scsiback_close_session,
.sess_get_index = scsiback_sess_get_index,
.sess_get_initiator_sid = NULL,
.write_pending = scsiback_write_pending,
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index eb3589edf485..0576eaeb60b9 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -239,13 +239,13 @@ static int v9fs_xattr_get_acl(const struct xattr_handler *handler,
}
static int v9fs_xattr_set_acl(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+ struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
int retval;
struct posix_acl *acl;
struct v9fs_session_info *v9ses;
- struct inode *inode = d_inode(dentry);
v9ses = v9fs_dentry2v9ses(dentry);
/*
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index 18c62bae9591..a6bd349bab23 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -147,8 +147,9 @@ static int v9fs_xattr_handler_get(const struct xattr_handler *handler,
}
static int v9fs_xattr_handler_set(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+ struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
const char *full_name = xattr_full_name(handler, name);
diff --git a/fs/Kconfig b/fs/Kconfig
index 6725f59c18e6..b8fcb416be72 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -52,6 +52,7 @@ config FS_DAX_PMD
depends on FS_DAX
depends on ZONE_DEVICE
depends on TRANSPARENT_HUGEPAGE
+ depends on BROKEN
endif # BLOCK
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 2a6713b6b9f4..d6384863192c 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -528,7 +528,7 @@ affs_remount(struct super_block *sb, int *flags, char *data)
char *prefix = NULL;
new_opts = kstrdup(data, GFP_KERNEL);
- if (!new_opts)
+ if (data && !new_opts)
return -ENOMEM;
pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data);
@@ -546,7 +546,8 @@ affs_remount(struct super_block *sb, int *flags, char *data)
}
flush_delayed_work(&sbi->sb_work);
- replace_mount_options(sb, new_opts);
+ if (new_opts)
+ replace_mount_options(sb, new_opts);
sbi->s_flags = mount_flags;
sbi->s_mode = mode;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 65de439bdc4f..14d506efd1aa 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -643,10 +643,6 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
return 0;
result = generic_file_write_iter(iocb, from);
- if (IS_ERR_VALUE(result)) {
- _leave(" = %zd", result);
- return result;
- }
_leave(" = %zd", result);
return result;
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 72e35b721608..3ba385eaa26e 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -100,8 +100,8 @@ static int bad_inode_setattr(struct dentry *direntry, struct iattr *attrs)
return -EIO;
}
-static int bad_inode_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+static int bad_inode_setxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value, size_t size, int flags)
{
return -EIO;
}
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index 2fab9f130e51..ae1b5404fced 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -127,12 +127,8 @@ static int set_brk(unsigned long start, unsigned long end)
{
start = PAGE_ALIGN(start);
end = PAGE_ALIGN(end);
- if (end > start) {
- unsigned long addr;
- addr = vm_brk(start, end - start);
- if (BAD_ADDR(addr))
- return addr;
- }
+ if (end > start)
+ return vm_brk(start, end - start);
return 0;
}
@@ -275,7 +271,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
map_size = ex.a_text+ex.a_data;
#endif
error = vm_brk(text_addr & PAGE_MASK, map_size);
- if (error != (text_addr & PAGE_MASK))
+ if (error)
return error;
error = read_code(bprm->file, text_addr, pos,
@@ -298,7 +294,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) {
error = vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
- if (IS_ERR_VALUE(error))
+ if (error)
return error;
read_code(bprm->file, N_TXTADDR(ex), fd_offset,
@@ -382,7 +378,7 @@ static int load_aout_library(struct file *file)
file);
}
retval = vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
- if (IS_ERR_VALUE(retval))
+ if (retval)
goto out;
read_code(file, start_addr, N_TXTOFF(ex),
@@ -402,9 +398,8 @@ static int load_aout_library(struct file *file)
len = PAGE_ALIGN(ex.a_text + ex.a_data);
bss = ex.a_text + ex.a_data + ex.a_bss;
if (bss > len) {
- error = vm_brk(start_addr + len, bss - len);
- retval = error;
- if (error != start_addr + len)
+ retval = vm_brk(start_addr + len, bss - len);
+ if (retval)
goto out;
}
retval = 0;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 938fc4ede764..e158b22ef32f 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -96,10 +96,9 @@ static int set_brk(unsigned long start, unsigned long end)
start = ELF_PAGEALIGN(start);
end = ELF_PAGEALIGN(end);
if (end > start) {
- unsigned long addr;
- addr = vm_brk(start, end - start);
- if (BAD_ADDR(addr))
- return addr;
+ int error = vm_brk(start, end - start);
+ if (error)
+ return error;
}
current->mm->start_brk = current->mm->brk = end;
return 0;
@@ -629,7 +628,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
/* Map the last of the bss segment */
error = vm_brk(elf_bss, last_bss - elf_bss);
- if (BAD_ADDR(error))
+ if (error)
goto out;
}
@@ -1178,7 +1177,7 @@ static int load_elf_library(struct file *file)
bss = eppnt->p_memsz + eppnt->p_vaddr;
if (bss > len) {
error = vm_brk(len, bss - len);
- if (BAD_ADDR(error))
+ if (error)
goto out_free_ph;
}
error = 0;
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index f723cd3a455c..caf9e39bb82b 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -337,7 +337,7 @@ calc_reloc(unsigned long r, struct lib_info *p, int curid, int internalp)
"(%d != %d)", (unsigned) r, curid, id);
goto failed;
} else if ( ! p->lib_list[id].loaded &&
- IS_ERR_VALUE(load_flat_shared_library(id, p))) {
+ load_flat_shared_library(id, p) < 0) {
printk("BINFMT_FLAT: failed to load library %d", id);
goto failed;
}
@@ -837,7 +837,7 @@ static int load_flat_shared_library(int id, struct lib_info *libs)
res = prepare_binprm(&bprm);
- if (!IS_ERR_VALUE(res))
+ if (!res)
res = load_flat_file(&bprm, libs, id, NULL);
abort_creds(bprm.cred);
@@ -883,7 +883,7 @@ static int load_flat_binary(struct linux_binprm * bprm)
stack_len += FLAT_STACK_ALIGN - 1; /* reserve for upcoming alignment */
res = load_flat_file(bprm, &libinfo, 0, &stack_len);
- if (IS_ERR_VALUE(res))
+ if (res < 0)
return res;
/* Update data segment pointers for all libraries */
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 1089dbf25925..71ccab1d22c6 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -51,6 +51,18 @@ struct block_device *I_BDEV(struct inode *inode)
}
EXPORT_SYMBOL(I_BDEV);
+void __vfs_msg(struct super_block *sb, const char *prefix, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ printk_ratelimited("%sVFS (%s): %pV\n", prefix, sb->s_id, &vaf);
+ va_end(args);
+}
+
static void bdev_write_inode(struct block_device *bdev)
{
struct inode *inode = bdev->bd_inode;
@@ -489,7 +501,7 @@ long bdev_direct_access(struct block_device *bdev, struct blk_dax_ctl *dax)
sector += get_start_sect(bdev);
if (sector % (PAGE_SIZE / 512))
return -EINVAL;
- avail = ops->direct_access(bdev, sector, &dax->addr, &dax->pfn);
+ avail = ops->direct_access(bdev, sector, &dax->addr, &dax->pfn, size);
if (!avail)
return -ERANGE;
if (avail > 0 && avail & ~PAGE_MASK)
@@ -498,6 +510,75 @@ long bdev_direct_access(struct block_device *bdev, struct blk_dax_ctl *dax)
}
EXPORT_SYMBOL_GPL(bdev_direct_access);
+/**
+ * bdev_dax_supported() - Check if the device supports dax for filesystem
+ * @sb: The superblock of the device
+ * @blocksize: The block size of the device
+ *
+ * This is a library function for filesystems to check if the block device
+ * can be mounted with dax option.
+ *
+ * Return: negative errno if unsupported, 0 if supported.
+ */
+int bdev_dax_supported(struct super_block *sb, int blocksize)
+{
+ struct blk_dax_ctl dax = {
+ .sector = 0,
+ .size = PAGE_SIZE,
+ };
+ int err;
+
+ if (blocksize != PAGE_SIZE) {
+ vfs_msg(sb, KERN_ERR, "error: unsupported blocksize for dax");
+ return -EINVAL;
+ }
+
+ err = bdev_direct_access(sb->s_bdev, &dax);
+ if (err < 0) {
+ switch (err) {
+ case -EOPNOTSUPP:
+ vfs_msg(sb, KERN_ERR,
+ "error: device does not support dax");
+ break;
+ case -EINVAL:
+ vfs_msg(sb, KERN_ERR,
+ "error: unaligned partition for dax");
+ break;
+ default:
+ vfs_msg(sb, KERN_ERR,
+ "error: dax access failed (%d)", err);
+ }
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bdev_dax_supported);
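An illustrative mount-time caller of the new helper (test_opt() and the option name are placeholders, not a call site added by this series):

	if (test_opt(sb, DAX)) {
		err = bdev_dax_supported(sb, sb->s_blocksize);
		if (err)
			return err;	/* reject -o dax up front */
	}

This lets a filesystem validate the dax mount option once, up front, instead of discovering an unsuitable device on every access path.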
+
+/**
+ * bdev_dax_capable() - Return if the raw device is capable for dax
+ * @bdev: The device for raw block device access
+ */
+bool bdev_dax_capable(struct block_device *bdev)
+{
+ struct blk_dax_ctl dax = {
+ .size = PAGE_SIZE,
+ };
+
+ if (!IS_ENABLED(CONFIG_FS_DAX))
+ return false;
+
+ dax.sector = 0;
+ if (bdev_direct_access(bdev, &dax) < 0)
+ return false;
+
+ dax.sector = bdev->bd_part->nr_sects - (PAGE_SIZE / 512);
+ if (bdev_direct_access(bdev, &dax) < 0)
+ return false;
+
+ return true;
+}
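A note on the two probes above:

/*
 * The probes cover both ends of the device: sector 0 checks that the
 * partition start is page-aligned and reachable via ->direct_access()
 * (bdev_direct_access() returns -EINVAL for unaligned sectors), and
 * nr_sects - (PAGE_SIZE / 512) checks the same for the last page,
 * subsuming the explicit alignment tests of the blkdev_dax_capable()
 * helper removed further down in this diff.
 */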
+
/*
* pseudo-fs
*/
@@ -1160,33 +1241,6 @@ void bd_set_size(struct block_device *bdev, loff_t size)
}
EXPORT_SYMBOL(bd_set_size);
-static bool blkdev_dax_capable(struct block_device *bdev)
-{
- struct gendisk *disk = bdev->bd_disk;
-
- if (!disk->fops->direct_access || !IS_ENABLED(CONFIG_FS_DAX))
- return false;
-
- /*
- * If the partition is not aligned on a page boundary, we can't
- * do dax I/O to it.
- */
- if ((bdev->bd_part->start_sect % (PAGE_SIZE / 512))
- || (bdev->bd_part->nr_sects % (PAGE_SIZE / 512)))
- return false;
-
- /*
- * If the device has known bad blocks, force all I/O through the
- * driver / page cache.
- *
- * TODO: support finer grained dax error handling
- */
- if (disk->bb && disk->bb->count)
- return false;
-
- return true;
-}
-
static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
/*
@@ -1266,7 +1320,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
if (!ret) {
bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
- if (!blkdev_dax_capable(bdev))
+ if (!bdev_dax_capable(bdev))
bdev->bd_inode->i_flags &= ~S_DAX;
}
@@ -1303,7 +1357,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
goto out_clear;
}
bd_set_size(bdev, (loff_t)bdev->bd_part->nr_sects << 9);
- if (!blkdev_dax_capable(bdev))
+ if (!bdev_dax_capable(bdev))
bdev->bd_inode->i_flags &= ~S_DAX;
}
} else {
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index d3090187fd76..8bb3509099e8 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1939,7 +1939,7 @@ static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
* from ipath->fspath->val[i].
* when it returns, there are ipath->fspath->elem_cnt number of paths available
* in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
- * number of missed paths in recored in ipath->fspath->elem_missed, otherwise,
+ * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
* it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
* have been needed to return all paths.
*/
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 1da5753d886d..4919aedb5fc1 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -313,7 +313,7 @@ struct btrfs_dio_private {
struct bio *dio_bio;
/*
- * The original bio may be splited to several sub-bios, this is
+ * The original bio may be split to several sub-bios, this is
* done during endio of sub-bios
*/
int (*subio_endio)(struct inode *, struct btrfs_io_bio *, int);
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 516e19d1d202..b677a6ea6001 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -1939,7 +1939,7 @@ again:
/*
* Clear all references of this block. Do not free
* the block itself even if is not referenced anymore
- * because it still carries valueable information
+ * because it still carries valuable information
* like whether it was ever written and IO completed.
*/
list_for_each_entry_safe(l, tmp, &block->ref_to_list,
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index decd0a3f5d61..427c36b430a6 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -156,7 +156,7 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
/*
* RCU really hurts here, we could free up the root node because
- * it was cow'ed but we may not get the new root node yet so do
+ * it was COWed but we may not get the new root node yet so do
* the inc_not_zero dance and if it doesn't work then
* synchronize_rcu and try again.
*/
@@ -955,7 +955,7 @@ int btrfs_block_can_be_shared(struct btrfs_root *root,
struct extent_buffer *buf)
{
/*
- * Tree blocks not in refernece counted trees and tree roots
+ * Tree blocks not in reference counted trees and tree roots
* are never shared. If a block was allocated after the last
* snapshot and the block was not allocated by tree relocation,
* we know the block is not shared.
@@ -1270,7 +1270,7 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
/*
* tm is a pointer to the first operation to rewind within eb. then, all
- * previous operations will be rewinded (until we reach something older than
+ * previous operations will be rewound (until we reach something older than
* time_seq).
*/
static void
@@ -1345,7 +1345,7 @@ __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
}
/*
- * Called with eb read locked. If the buffer cannot be rewinded, the same buffer
+ * Called with eb read locked. If the buffer cannot be rewound, the same buffer
* is returned. If rewind operations happen, a fresh buffer is returned. The
* returned buffer is always read-locked. If the returned buffer is not the
* input buffer, the lock on the input buffer is released and the input buffer
@@ -1516,7 +1516,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
* 3) the root is not forced COW.
*
* What is forced COW:
- * when we create snapshot during commiting the transaction,
+ * when we create snapshot during committing the transaction,
* after we've finished coping src root, we must COW the shared
* block to ensure the metadata consistency.
*/
@@ -1531,7 +1531,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
/*
* cows a single block, see __btrfs_cow_block for the real work.
- * This version of it has extra checks so that a block isn't cow'd more than
+ * This version of it has extra checks so that a block isn't COWed more than
* once per transaction, as long as it hasn't been written yet
*/
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
@@ -2986,7 +2986,7 @@ again:
btrfs_unlock_up_safe(p, level + 1);
/*
- * Since we can unwind eb's we want to do a real search every
+ * Since we can unwind ebs we want to do a real search every
* time.
*/
prev_cmp = -1;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index ddcc58f03c79..101c3cfd3f7c 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -89,7 +89,7 @@ static const int btrfs_csum_sizes[] = { 4 };
/* four bytes for CRC32 */
#define BTRFS_EMPTY_DIR_SIZE 0
-/* spefic to btrfs_map_block(), therefore not in include/linux/blk_types.h */
+/* specific to btrfs_map_block(), therefore not in include/linux/blk_types.h */
#define REQ_GET_READ_MIRRORS (1 << 30)
/* ioprio of readahead is set to idle */
@@ -431,7 +431,7 @@ struct btrfs_space_info {
* bytes_pinned does not reflect the bytes that will be pinned once the
* delayed refs are flushed, so this counter is inc'ed every time we
* call btrfs_free_extent so it is a realtime count of what will be
- * freed once the transaction is committed. It will be zero'ed every
+ * freed once the transaction is committed. It will be zeroed every
* time the transaction commits.
*/
struct percpu_counter total_bytes_pinned;
@@ -1401,7 +1401,7 @@ static inline void btrfs_init_map_token (struct btrfs_map_token *token)
token->kaddr = NULL;
}
-/* some macros to generate set/get funcs for the struct fields. This
+/* some macros to generate set/get functions for the struct fields. This
* assumes there is a lefoo_to_cpu for every type, so lets make a simple
* one for u8:
*/
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index c24b653c7343..5fca9534a271 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -188,7 +188,7 @@ struct btrfs_delayed_ref_root {
/*
* To make qgroup to skip given root.
- * This is for snapshot, as btrfs_qgroup_inherit() will manully
+ * This is for snapshot, as btrfs_qgroup_inherit() will manually
* modify counters for snapshot and its source, so we should skip
* the snapshot in new_root/old_roots or it will get calculated twice
*/
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 85f12e6e28d2..63ef9cdf0144 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -450,7 +450,7 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_root *root,
}
/*
- * blocked until all flighting bios are finished.
+ * blocked until all in-flight bios operations are finished.
*/
static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info)
{
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 91d123938cef..6628fca9f4ed 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -384,7 +384,7 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
/*
* Things reading via commit roots that don't have normal protection,
* like send, can have a really old block in cache that may point at a
- * block that has been free'd and re-allocated. So don't clear uptodate
+ * block that has been freed and re-allocated. So don't clear uptodate
* if we find an eb that is under IO (dirty/writeback) because we could
* end up reading in the stale data and then writing it back out and
* making everybody very sad.
@@ -418,7 +418,7 @@ static int btrfs_check_super_csum(char *raw_disk_sb)
/*
* The super_block structure does not span the whole
* BTRFS_SUPER_INFO_SIZE range, we expect that the unused space
- * is filled with zeros and is included in the checkum.
+ * is filled with zeros and is included in the checksum.
*/
crc = btrfs_csum_data(raw_disk_sb + BTRFS_CSUM_SIZE,
crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
@@ -600,7 +600,7 @@ static noinline int check_leaf(struct btrfs_root *root,
/*
* Check to make sure that we don't point outside of the leaf,
- * just incase all the items are consistent to eachother, but
+ * just in case all the items are consistent to each other, but
* all point outside of the leaf.
*/
if (btrfs_item_end_nr(leaf, slot) >
@@ -3022,7 +3022,7 @@ retry_root_backup:
}
/*
- * Mount does not set all options immediatelly, we can do it now and do
+ * Mount does not set all options immediately, we can do it now and do
* not have to wait for transaction commit
*/
btrfs_apply_pending_changes(fs_info);
@@ -3255,7 +3255,7 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
btrfs_warn_rl_in_rcu(device->dev_root->fs_info,
"lost page write due to IO error on %s",
rcu_str_deref(device->name));
- /* note, we dont' set_buffer_write_io_error because we have
+ /* note, we don't set_buffer_write_io_error because we have
* our own ways of dealing with the IO errors
*/
clear_buffer_uptodate(bh);
@@ -4367,7 +4367,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
if (ret)
break;
- clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
+ clear_extent_bits(dirty_pages, start, end, mark);
while (start <= end) {
eb = btrfs_find_tree_block(root->fs_info, start);
start += root->nodesize;
@@ -4402,7 +4402,7 @@ again:
if (ret)
break;
- clear_extent_dirty(unpin, start, end, GFP_NOFS);
+ clear_extent_dirty(unpin, start, end);
btrfs_error_unpin_extent_range(root, start, end);
cond_resched();
}
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9424864fd01a..a400951e8678 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -231,9 +231,9 @@ static int add_excluded_extent(struct btrfs_root *root,
{
u64 end = start + num_bytes - 1;
set_extent_bits(&root->fs_info->freed_extents[0],
- start, end, EXTENT_UPTODATE, GFP_NOFS);
+ start, end, EXTENT_UPTODATE);
set_extent_bits(&root->fs_info->freed_extents[1],
- start, end, EXTENT_UPTODATE, GFP_NOFS);
+ start, end, EXTENT_UPTODATE);
return 0;
}
@@ -246,9 +246,9 @@ static void free_excluded_extents(struct btrfs_root *root,
end = start + cache->key.offset - 1;
clear_extent_bits(&root->fs_info->freed_extents[0],
- start, end, EXTENT_UPTODATE, GFP_NOFS);
+ start, end, EXTENT_UPTODATE);
clear_extent_bits(&root->fs_info->freed_extents[1],
- start, end, EXTENT_UPTODATE, GFP_NOFS);
+ start, end, EXTENT_UPTODATE);
}
static int exclude_super_stripes(struct btrfs_root *root,
@@ -980,7 +980,7 @@ out_free:
* event that tree block loses its owner tree's reference and do the
* back refs conversion.
*
- * When a tree block is COW'd through a tree, there are four cases:
+ * When a tree block is COWed through a tree, there are four cases:
*
* The reference count of the block is one and the tree is the block's
* owner tree. Nothing to do in this case.
@@ -2595,7 +2595,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
}
/*
- * Need to drop our head ref lock and re-aqcuire the
+ * Need to drop our head ref lock and re-acquire the
* delayed ref lock and then re-check to make sure
* nobody got added.
*/
@@ -2747,7 +2747,7 @@ static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
/*
* We don't ever fill up leaves all the way so multiply by 2 just to be
- * closer to what we're really going to want to ouse.
+ * closer to what we're really going to want to use.
*/
return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
}
@@ -2851,7 +2851,7 @@ static void delayed_ref_async_start(struct btrfs_work *work)
}
/*
- * trans->sync means that when we call end_transaciton, we won't
+ * trans->sync means that when we call end_transaction, we won't
* wait on delayed refs
*/
trans->sync = true;
@@ -4296,7 +4296,7 @@ void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
* Called if we need to clear a data reservation for this inode
* Normally in a error case.
*
- * This one will handle the per-indoe data rsv map for accurate reserved
+ * This one will handle the per-inode data rsv map for accurate reserved
* space framework.
*/
void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
@@ -4967,7 +4967,7 @@ void btrfs_init_async_reclaim_work(struct work_struct *work)
* @orig_bytes - the number of bytes we want
* @flush - whether or not we can flush to make our reservation
*
- * This will reserve orgi_bytes number of bytes from the space info associated
+ * This will reserve orig_bytes number of bytes from the space info associated
* with the block_rsv. If there is not enough space it will make an attempt to
* flush out space to make room. It will do this by flushing delalloc if
* possible or committing the transaction. If flush is 0 then no attempts to
@@ -5572,7 +5572,7 @@ void btrfs_orphan_release_metadata(struct inode *inode)
* common file/directory operations, they change two fs/file trees
* and root tree, the number of items that the qgroup reserves is
* different with the free space reservation. So we can not use
- * the space reseravtion mechanism in start_transaction().
+ * the space reservation mechanism in start_transaction().
*/
int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
struct btrfs_block_rsv *rsv,
@@ -5621,7 +5621,7 @@ void btrfs_subvolume_release_metadata(struct btrfs_root *root,
/**
* drop_outstanding_extent - drop an outstanding extent
* @inode: the inode we're dropping the extent for
- * @num_bytes: the number of bytes we're relaseing.
+ * @num_bytes: the number of bytes we're releasing.
*
* This is called when we are freeing up an outstanding extent, either called
* after an error or after an extent is written. This will return the number of
@@ -5647,7 +5647,7 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
drop_inode_space = 1;
/*
- * If we have more or the same amount of outsanding extents than we have
+ * If we have more or the same amount of outstanding extents than we have
* reserved then we need to leave the reserved extents count alone.
*/
if (BTRFS_I(inode)->outstanding_extents >=
@@ -5661,8 +5661,8 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
}
/**
- * calc_csum_metadata_size - return the amount of metada space that must be
- * reserved/free'd for the given bytes.
+ * calc_csum_metadata_size - return the amount of metadata space that must be
+ * reserved/freed for the given bytes.
* @inode: the inode we're manipulating
* @num_bytes: the number of bytes in question
* @reserve: 1 if we are reserving space, 0 if we are freeing space
@@ -5814,7 +5814,7 @@ out_fail:
/*
* This is tricky, but first we need to figure out how much we
- * free'd from any free-ers that occurred during this
+ * freed from any free-ers that occurred during this
* reservation, so we reset ->csum_bytes to the csum_bytes
* before we dropped our lock, and then call the free for the
* number of bytes that were freed while we were trying our
@@ -5836,7 +5836,7 @@ out_fail:
/*
* Now reset ->csum_bytes to what it should be. If bytes is
- * more than to_free then we would have free'd more space had we
+ * more than to_free then we would have freed more space had we
* not had an artificially high ->csum_bytes, so we need to free
* the remainder. If bytes is the same or less then we don't
* need to do anything, the other free-ers did the correct
@@ -6515,7 +6515,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
ret = btrfs_discard_extent(root, start,
end + 1 - start, NULL);
- clear_extent_dirty(unpin, start, end, GFP_NOFS);
+ clear_extent_dirty(unpin, start, end);
unpin_extent_range(root, start, end, true);
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
cond_resched();
@@ -7578,7 +7578,7 @@ loop:
if (loop == LOOP_CACHING_NOWAIT) {
/*
* We want to skip the LOOP_CACHING_WAIT step if we
- * don't have any unached bgs and we've alrelady done a
+ * don't have any uncached bgs and we've already done a
* full search through.
*/
if (orig_have_caching_bg || !full_search)
@@ -7982,7 +7982,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
/*
* Mixed block groups will exclude before processing the log so we only
- * need to do the exlude dance if this fs isn't mixed.
+ * need to do the exclude dance if this fs isn't mixed.
*/
if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
@@ -8032,7 +8032,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
buf->start + buf->len - 1, GFP_NOFS);
else
set_extent_new(&root->dirty_log_pages, buf->start,
- buf->start + buf->len - 1, GFP_NOFS);
+ buf->start + buf->len - 1);
} else {
buf->log_index = -1;
set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
@@ -9426,7 +9426,7 @@ u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
u64 free_bytes = 0;
int factor;
- /* It's df, we don't care if it's racey */
+ /* It's df, we don't care if it's racy */
if (list_empty(&sinfo->ro_bgs))
return 0;
@@ -10635,14 +10635,14 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
*/
mutex_lock(&fs_info->unused_bg_unpin_mutex);
ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
- EXTENT_DIRTY, GFP_NOFS);
+ EXTENT_DIRTY);
if (ret) {
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
btrfs_dec_block_group_ro(root, block_group);
goto end_trans;
}
ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
- EXTENT_DIRTY, GFP_NOFS);
+ EXTENT_DIRTY);
if (ret) {
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
btrfs_dec_block_group_ro(root, block_group);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 2f83448d34fe..3cd57825c75f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -726,14 +726,6 @@ next:
start = last_end + 1;
if (start <= end && state && !need_resched())
goto hit_next;
- goto search_again;
-
-out:
- spin_unlock(&tree->lock);
- if (prealloc)
- free_extent_state(prealloc);
-
- return 0;
search_again:
if (start > end)
@@ -742,6 +734,14 @@ search_again:
if (gfpflags_allow_blocking(mask))
cond_resched();
goto again;
+
+out:
+ spin_unlock(&tree->lock);
+ if (prealloc)
+ free_extent_state(prealloc);
+
+ return 0;
+
}
static void wait_on_state(struct extent_io_tree *tree,
@@ -873,8 +873,14 @@ __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
bits |= EXTENT_FIRST_DELALLOC;
again:
if (!prealloc && gfpflags_allow_blocking(mask)) {
+ /*
+ * Don't care for allocation failure here because we might end
+ * up not needing the pre-allocated extent state at all, which
+ * is the case if we only have in the tree extent states that
+ * cover our input range and don't cover any other range.
+ * If we end up needing a new extent state we allocate it later.
+ */
prealloc = alloc_extent_state(mask);
- BUG_ON(!prealloc);
}
spin_lock(&tree->lock);
@@ -1037,7 +1043,13 @@ hit_next:
goto out;
}
- goto search_again;
+search_again:
+ if (start > end)
+ goto out;
+ spin_unlock(&tree->lock);
+ if (gfpflags_allow_blocking(mask))
+ cond_resched();
+ goto again;
out:
spin_unlock(&tree->lock);
@@ -1046,13 +1058,6 @@ out:
return err;
-search_again:
- if (start > end)
- goto out;
- spin_unlock(&tree->lock);
- if (gfpflags_allow_blocking(mask))
- cond_resched();
- goto again;
}
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
@@ -1073,17 +1078,18 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
* @bits: the bits to set in this range
* @clear_bits: the bits to clear in this range
* @cached_state: state that we're going to cache
- * @mask: the allocation mask
*
* This will go through and set bits for the given range. If any states exist
* already in this range they are set with the given bit and cleared of the
* clear_bits. This is only meant to be used by things that are mergeable, ie
* converting from say DELALLOC to DIRTY. This is not meant to be used with
* boundary bits like LOCK.
+ *
+ * All allocations are done with GFP_NOFS.
*/
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, unsigned clear_bits,
- struct extent_state **cached_state, gfp_t mask)
+ struct extent_state **cached_state)
{
struct extent_state *state;
struct extent_state *prealloc = NULL;
@@ -1098,7 +1104,7 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
btrfs_debug_check_extent_io_range(tree, start, end);
again:
- if (!prealloc && gfpflags_allow_blocking(mask)) {
+ if (!prealloc) {
/*
* Best effort, don't worry if extent state allocation fails
* here for the first iteration. We might have a cached state
@@ -1106,7 +1112,7 @@ again:
* extent state allocations are needed. We'll only know this
* after locking the tree.
*/
- prealloc = alloc_extent_state(mask);
+ prealloc = alloc_extent_state(GFP_NOFS);
if (!prealloc && !first_iteration)
return -ENOMEM;
}
@@ -1263,7 +1269,13 @@ hit_next:
goto out;
}
- goto search_again;
+search_again:
+ if (start > end)
+ goto out;
+ spin_unlock(&tree->lock);
+ cond_resched();
+ first_iteration = false;
+ goto again;
out:
spin_unlock(&tree->lock);
@@ -1271,21 +1283,11 @@ out:
free_extent_state(prealloc);
return err;
-
-search_again:
- if (start > end)
- goto out;
- spin_unlock(&tree->lock);
- if (gfpflags_allow_blocking(mask))
- cond_resched();
- first_iteration = false;
- goto again;
}
/* wrappers around set/clear extent bit */
int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, gfp_t mask,
- struct extent_changeset *changeset)
+ unsigned bits, struct extent_changeset *changeset)
{
/*
* We don't support EXTENT_LOCKED yet, as current changeset will
@@ -1295,7 +1297,7 @@ int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
*/
BUG_ON(bits & EXTENT_LOCKED);
- return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, mask,
+ return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS,
changeset);
}
@@ -1308,8 +1310,7 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
}
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, gfp_t mask,
- struct extent_changeset *changeset)
+ unsigned bits, struct extent_changeset *changeset)
{
/*
* Don't support EXTENT_LOCKED case, same reason as
@@ -1317,7 +1318,7 @@ int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
*/
BUG_ON(bits & EXTENT_LOCKED);
- return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask,
+ return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS,
changeset);
}
@@ -1975,13 +1976,13 @@ int free_io_failure(struct inode *inode, struct io_failure_record *rec)
set_state_failrec(failure_tree, rec->start, NULL);
ret = clear_extent_bits(failure_tree, rec->start,
rec->start + rec->len - 1,
- EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
+ EXTENT_LOCKED | EXTENT_DIRTY);
if (ret)
err = ret;
ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
rec->start + rec->len - 1,
- EXTENT_DAMAGED, GFP_NOFS);
+ EXTENT_DAMAGED);
if (ret && !err)
err = ret;
@@ -2232,13 +2233,12 @@ int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
/* set the bits in the private failure tree */
ret = set_extent_bits(failure_tree, start, end,
- EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
+ EXTENT_LOCKED | EXTENT_DIRTY);
if (ret >= 0)
ret = set_state_failrec(failure_tree, start, failrec);
/* set the bits in the inode's tree */
if (ret >= 0)
- ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
- GFP_NOFS);
+ ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED);
if (ret < 0) {
kfree(failrec);
return ret;
@@ -4389,8 +4389,12 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (ret < 0) {
btrfs_free_path(path);
return ret;
+ } else {
+ WARN_ON(!ret);
+ if (ret == 1)
+ ret = 0;
}
- WARN_ON(!ret);
+
path->slots[0]--;
btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
found_type = found_key.type;
@@ -4601,7 +4605,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
if (mapped)
spin_unlock(&page->mapping->private_lock);
- /* One for when we alloced the page */
+ /* One for when we allocated the page */
put_page(page);
} while (index != 0);
}
@@ -5761,7 +5765,7 @@ int try_release_extent_buffer(struct page *page)
struct extent_buffer *eb;
/*
- * We need to make sure noboody is attaching this page to an eb right
+ * We need to make sure nobody is attaching this page to an eb right
* now.
*/
spin_lock(&page->mapping->private_lock);
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 981f402bf754..1baf19c9b79d 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -220,8 +220,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, int filled,
struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, gfp_t mask,
- struct extent_changeset *changeset);
+ unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, int wake, int delete,
struct extent_state **cached, gfp_t mask);
@@ -240,27 +239,27 @@ static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
}
static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
- u64 end, unsigned bits, gfp_t mask)
+ u64 end, unsigned bits)
{
int wake = 0;
if (bits & EXTENT_LOCKED)
wake = 1;
- return clear_extent_bit(tree, start, end, bits, wake, 0, NULL, mask);
+ return clear_extent_bit(tree, start, end, bits, wake, 0, NULL,
+ GFP_NOFS);
}
int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
- unsigned bits, gfp_t mask,
- struct extent_changeset *changeset);
+ unsigned bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, u64 *failed_start,
struct extent_state **cached_state, gfp_t mask);
static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
- u64 end, unsigned bits, gfp_t mask)
+ u64 end, unsigned bits)
{
- return set_extent_bit(tree, start, end, bits, NULL, NULL, mask);
+ return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
}
static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
@@ -278,37 +277,38 @@ static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
}
static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
- u64 end, gfp_t mask)
+ u64 end)
{
return clear_extent_bit(tree, start, end,
EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
+ EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
}
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, unsigned clear_bits,
- struct extent_state **cached_state, gfp_t mask);
+ struct extent_state **cached_state);
static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached_state, gfp_t mask)
+ u64 end, struct extent_state **cached_state)
{
return set_extent_bit(tree, start, end,
EXTENT_DELALLOC | EXTENT_UPTODATE,
- NULL, cached_state, mask);
+ NULL, cached_state, GFP_NOFS);
}
static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached_state, gfp_t mask)
+ u64 end, struct extent_state **cached_state)
{
return set_extent_bit(tree, start, end,
EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
- NULL, cached_state, mask);
+ NULL, cached_state, GFP_NOFS);
}
static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
- u64 end, gfp_t mask)
+ u64 end)
{
- return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL, mask);
+ return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
+ GFP_NOFS);
}
static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 318b048eb254..e0715fcfb11e 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -62,7 +62,7 @@ struct extent_map *alloc_extent_map(void)
/**
* free_extent_map - drop reference count of an extent_map
- * @em: extent map being releasead
+ * @em: extent map being released
*
* Drops the reference out on @em by one and free the structure
* if the reference count hits zero.
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 7a7d6e253cfc..62a81ee13a5f 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -248,7 +248,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
BTRFS_DATA_RELOC_TREE_OBJECTID) {
set_extent_bits(io_tree, offset,
offset + root->sectorsize - 1,
- EXTENT_NODATASUM, GFP_NOFS);
+ EXTENT_NODATASUM);
} else {
btrfs_info(BTRFS_I(inode)->root->fs_info,
"no csum found for inode %llu start %llu",
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c98805c35bab..e0c9bd3fb02d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1596,6 +1596,13 @@ again:
copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
+ num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+ reserve_bytes);
+ dirty_sectors = round_up(copied + sector_offset,
+ root->sectorsize);
+ dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+ dirty_sectors);
+
/*
* if we have trouble faulting in the pages, fall
* back to one page at a time
@@ -1605,6 +1612,7 @@ again:
if (copied == 0) {
force_page_uptodate = true;
+ dirty_sectors = 0;
dirty_pages = 0;
} else {
force_page_uptodate = false;
@@ -1615,20 +1623,19 @@ again:
/*
 * If we had a short copy we need to release the excess delalloc
* bytes we reserved. We need to increment outstanding_extents
- * because btrfs_delalloc_release_space will decrement it, but
+ * because btrfs_delalloc_release_space and
+ * btrfs_delalloc_release_metadata will decrement it, but
* we still have an outstanding extent for the chunk we actually
* managed to copy.
*/
- num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
- reserve_bytes);
- dirty_sectors = round_up(copied + sector_offset,
- root->sectorsize);
- dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
- dirty_sectors);
-
if (num_sectors > dirty_sectors) {
- release_bytes = (write_bytes - copied)
- & ~((u64)root->sectorsize - 1);
+ /*
+ * we round down because we don't want to count
+ * any partial blocks actually sent through the
+ * IO machines
+ */
+ release_bytes = round_down(release_bytes - copied,
+ root->sectorsize);
if (copied > 0) {
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++;
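
To see the reordered short-copy accounting above with concrete numbers, here is a small standalone model; the sector size and byte counts are made up, BTRFS_BYTES_TO_BLKS is modelled as a plain divide, and reserve_bytes stands in for the release_bytes value the real code starts from:

    #include <stdio.h>
    #include <stdint.h>

    #define SECTORSIZE 4096ULL      /* illustrative, not read from fs_info */

    static uint64_t round_up_u64(uint64_t x, uint64_t a)   { return (x + a - 1) / a * a; }
    static uint64_t round_down_u64(uint64_t x, uint64_t a) { return x / a * a; }

    int main(void)
    {
            uint64_t reserve_bytes = 16384;  /* 4 sectors reserved up front */
            uint64_t copied = 5000;          /* short copy: ~1.2 sectors landed */
            uint64_t sector_offset = 0;

            uint64_t num_sectors = reserve_bytes / SECTORSIZE;
            uint64_t dirty_sectors =
                    round_up_u64(copied + sector_offset, SECTORSIZE) / SECTORSIZE;

            printf("num_sectors=%llu dirty_sectors=%llu\n",
                   (unsigned long long)num_sectors,
                   (unsigned long long)dirty_sectors);

            if (num_sectors > dirty_sectors) {
                    /* round down: a partial tail block still goes through the
                     * IO machinery, so its reservation must not be released */
                    uint64_t release = round_down_u64(reserve_bytes - copied,
                                                      SECTORSIZE);
                    printf("release %llu of %llu reserved bytes\n",
                           (unsigned long long)release,
                           (unsigned long long)reserve_bytes);
            }
            return 0;
    }
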
@@ -2022,7 +2029,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
BTRFS_I(inode)->last_trans
<= root->fs_info->last_trans_committed)) {
/*
- * We'v had everything committed since the last time we were
+ * We've had everything committed since the last time we were
* modified so clear this flag in case it was set for whatever
* reason, it's no longer relevant.
*/
@@ -2370,7 +2377,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
/* Check the aligned pages after the first unaligned page,
* if offset != orig_start, which means the first unaligned page
- * including serveral following pages are already in holes,
+ * including several following pages are already in holes,
* the extra check can be skipped */
if (offset == orig_start) {
/* after truncate page, check hole again */
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 5e6062c26129..c6dc1183f542 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1983,7 +1983,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
/*
* If this block group has some small extents we don't want to
* use up all of our free slots in the cache with them, we want
- * to reserve them to larger extents, however if we have plent
+ * to reserve them to larger extents, however if we have plenty
 * of cache left then go ahead and add them, no sense in adding
* the overhead of a bitmap if we don't have to.
*/
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 33178c490ace..3af651c2bbc7 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -123,7 +123,7 @@ int btrfs_return_cluster_to_free_space(
int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
u64 *trimmed, u64 start, u64 end, u64 minlen);
-/* Support functions for runnint our sanity tests */
+/* Support functions for running our sanity tests */
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
u64 offset, u64 bytes, bool bitmap);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 91419ef79b00..270499598ed4 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -455,7 +455,7 @@ again:
/*
* skip compression for a small file range(<=blocksize) that
- * isn't an inline extent, since it dosen't save disk space at all.
+ * isn't an inline extent, since it doesn't save disk space at all.
*/
if (total_compressed <= blocksize &&
(start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
@@ -1978,7 +1978,7 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
{
WARN_ON((end & (PAGE_SIZE - 1)) == 0);
return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
- cached_state, GFP_NOFS);
+ cached_state);
}
/* see btrfs_writepage_start_hook for details on why this is required */
@@ -3119,8 +3119,7 @@ static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
- clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
- GFP_NOFS);
+ clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM);
return 0;
}
@@ -3722,7 +3721,7 @@ cache_index:
* and doesn't have an inode ref with the name "bar" anymore.
*
* Setting last_unlink_trans to last_trans is a pessimistic approach,
- * but it guarantees correctness at the expense of ocassional full
+ * but it guarantees correctness at the expense of occasional full
* transaction commits on fsync if our inode is a directory, or if our
* inode is not a directory, logging its parent unnecessarily.
*/
@@ -4978,7 +4977,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
* be instantly completed which will give us extents that need
* to be truncated. If we fail to get an orphan inode down we
* could have left over extents that were never meant to live,
- * so we need to garuntee from this point on that everything
+ * so we need to guarantee from this point on that everything
* will be consistent.
*/
ret = btrfs_orphan_add(trans, inode);
@@ -5248,7 +5247,7 @@ void btrfs_evict_inode(struct inode *inode)
}
/*
- * We can't just steal from the global reserve, we need tomake
+ * We can't just steal from the global reserve, we need to make
* sure there is room to do it, if not we need to commit and try
* again.
*/
@@ -7433,7 +7432,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
cached_state);
/*
* We're concerned with the entire range that we're going to be
- * doing DIO to, so we need to make sure theres no ordered
+ * doing DIO to, so we need to make sure there's no ordered
* extents in this range.
*/
ordered = btrfs_lookup_ordered_range(inode, lockstart,
@@ -7595,7 +7594,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
if (current->journal_info) {
/*
* Need to pull our outstanding extents and set journal_info to NULL so
- * that anything that needs to check if there's a transction doesn't get
+ * that anything that needs to check if there's a transaction doesn't get
* confused.
*/
dio_data = current->journal_info;
@@ -7628,7 +7627,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
* decompress it, so there will be buffering required no matter what we
* do, so go ahead and fallback to buffered.
*
- * We return -ENOTBLK because thats what makes DIO go ahead and go back
+ * We return -ENOTBLK because that's what makes DIO go ahead and go back
* to buffered IO. Don't blame me, this is the price we pay for using
* the generic code.
*/
@@ -9041,7 +9040,7 @@ static int btrfs_truncate(struct inode *inode)
return ret;
/*
- * Yes ladies and gentelment, this is indeed ugly. The fact is we have
+ * Yes ladies and gentlemen, this is indeed ugly. The fact is we have
* 3 things going on here
*
* 1) We need to reserve space for our orphan item and the space to
@@ -9055,15 +9054,15 @@ static int btrfs_truncate(struct inode *inode)
* space reserved in case it uses space during the truncate (thank you
* very much snapshotting).
*
- * And we need these to all be seperate. The fact is we can use alot of
+ * And we need these to all be separate. The fact is we can use a lot of
* space doing the truncate, and we have no earthly idea how much space
- * we will use, so we need the truncate reservation to be seperate so it
+ * we will use, so we need the truncate reservation to be separate so it
* doesn't end up using space reserved for updating the inode or
* removing the orphan item. We also need to be able to stop the
* transaction and start a new one, which means we need to be able to
 * update the inode several times, and we have no way of knowing how
* many times that will be, so we can't just reserve 1 item for the
- * entirety of the opration, so that has to be done seperately as well.
+ * entirety of the operation, so that has to be done separately as well.
* Then there is the orphan item, which does indeed need to be held on
* to for the whole operation, and we need nobody to touch this reserved
* space except the orphan code.
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 4e700694b741..05173563e4a6 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -296,7 +296,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
}
} else {
/*
- * Revert back under same assuptions as above
+ * Revert back under same assumptions as above
*/
if (S_ISREG(mode)) {
if (inode->i_size == 0)
@@ -465,7 +465,7 @@ static noinline int create_subvol(struct inode *dir,
/*
* Don't create subvolume whose level is not zero. Or qgroup will be
- * screwed up since it assume subvolme qgroup's level to be 0.
+ * screwed up since it assumes subvolume qgroup's level to be 0.
*/
if (btrfs_qgroup_level(objectid)) {
ret = -ENOSPC;
@@ -780,7 +780,7 @@ free_pending:
* a. be owner of dir, or
* b. be owner of victim, or
* c. have CAP_FOWNER capability
- * 6. If the victim is append-only or immutable we can't do antyhing with
+ * 6. If the victim is append-only or immutable we can't do anything with
* links pointing to it.
* 7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
* 8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
@@ -846,11 +846,9 @@ static noinline int btrfs_mksubvol(struct path *parent,
struct dentry *dentry;
int error;
- inode_lock_nested(dir, I_MUTEX_PARENT);
- // XXX: should've been
- // mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
- // if (error == -EINTR)
- // return error;
+ error = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
+ if (error == -EINTR)
+ return error;
dentry = lookup_one_len(name, parent->dentry, namelen);
error = PTR_ERR(dentry);
@@ -1239,7 +1237,7 @@ again:
set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1,
- &cached_state, GFP_NOFS);
+ &cached_state);
unlock_extent_cached(&BTRFS_I(inode)->io_tree,
page_start, page_end - 1, &cached_state,
@@ -2377,11 +2375,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
goto out;
- inode_lock_nested(dir, I_MUTEX_PARENT);
- // XXX: should've been
- // err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
- // if (err == -EINTR)
- // goto out_drop_write;
+ err = down_write_killable_nested(&dir->i_rwsem, I_MUTEX_PARENT);
+ if (err == -EINTR)
+ goto out_drop_write;
dentry = lookup_one_len(vol_args->name, parent, namelen);
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
@@ -2571,7 +2567,7 @@ out_dput:
dput(dentry);
out_unlock_dir:
inode_unlock(dir);
-//out_drop_write:
+out_drop_write:
mnt_drop_write_file(file);
out:
kfree(vol_args);
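
Both ioctl paths above replace an unconditional inode_lock_nested() with a killable acquire that can return -EINTR, so a task hit by a fatal signal unwinds instead of blocking. A userspace stand-in for that control flow; acquire_killable() is a stub, not the kernel's down_write_killable_nested():

    #include <stdio.h>
    #include <errno.h>

    static int acquire_killable(int simulate_signal)
    {
            return simulate_signal ? -EINTR : 0;  /* stub for the kernel call */
    }

    static int do_subvol_op(int simulate_signal)
    {
            int err = acquire_killable(simulate_signal);
            if (err == -EINTR)
                    return err;       /* fatal signal: give up cleanly */
            /* ... do the work under the lock, then release it ... */
            puts("lock taken, doing work");
            return 0;
    }

    int main(void)
    {
            printf("normal: %d\n", do_subvol_op(0));
            printf("killed: %d\n", do_subvol_op(1));
            return 0;
    }
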
@@ -4654,7 +4650,7 @@ again:
}
/*
- * mut. excl. ops lock is locked. Three possibilites:
+ * mut. excl. ops lock is locked. Three possibilities:
* (1) some other op is running
* (2) balance is running
* (3) balance is paused -- special case (think resume)
@@ -5571,7 +5567,7 @@ long btrfs_ioctl(struct file *file, unsigned int
ret = btrfs_sync_fs(file_inode(file)->i_sb, 1);
/*
* The transaction thread may want to do more work,
- * namely it pokes the cleaner ktread that will start
+ * namely it pokes the cleaner kthread that will start
* processing uncleaned subvols.
*/
wake_up_process(root->fs_info->transaction_kthread);
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 8ef12623d65c..2049c9be85ee 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -58,7 +58,7 @@ struct btrfs_ordered_sum {
#define BTRFS_ORDERED_COMPRESSED 3 /* writing a zlib compressed extent */
-#define BTRFS_ORDERED_PREALLOC 4 /* set when writing to prealloced extent */
+#define BTRFS_ORDERED_PREALLOC 4 /* set when writing to preallocated extent */
#define BTRFS_ORDERED_DIRECT 5 /* set when we're doing DIO with this extent */
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 9e119552ed32..9d4c05b14f6e 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -85,7 +85,7 @@ struct btrfs_qgroup {
/*
* temp variables for accounting operations
- * Refer to qgroup_shared_accouting() for details.
+ * Refer to qgroup_shared_accounting() for details.
*/
u64 old_refcnt;
u64 new_refcnt;
@@ -499,7 +499,7 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
}
/*
* we call btrfs_free_qgroup_config() when umounting
- * filesystem and disabling quota, so we set qgroup_ulit
+ * filesystem and disabling quota, so we set qgroup_ulist
* to be null here to avoid double free.
*/
ulist_free(fs_info->qgroup_ulist);
@@ -1036,7 +1036,7 @@ static void qgroup_dirty(struct btrfs_fs_info *fs_info,
/*
* The easy accounting, if we are adding/removing the only ref for an extent
- * then this qgroup and all of the parent qgroups get their refrence and
+ * then this qgroup and all of the parent qgroups get their reference and
* exclusive counts adjusted.
*
* Caller should hold fs_info->qgroup_lock.
@@ -1436,7 +1436,7 @@ int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
/*
* No need to do lock, since this function will only be called in
- * btrfs_commmit_transaction().
+ * btrfs_commit_transaction().
*/
node = rb_first(&delayed_refs->dirty_extent_root);
while (node) {
@@ -1557,7 +1557,7 @@ static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
* A: cur_old_roots < nr_old_roots (not exclusive before)
* !A: cur_old_roots == nr_old_roots (possible exclusive before)
* B: cur_new_roots < nr_new_roots (not exclusive now)
- * !B: cur_new_roots == nr_new_roots (possible exclsuive now)
+ * !B: cur_new_roots == nr_new_roots (possible exclusive now)
*
* Results:
* +: Possible sharing -> exclusive -: Possible exclusive -> sharing
@@ -1851,7 +1851,7 @@ out:
}
/*
- * Copy the acounting information between qgroups. This is necessary
+ * Copy the accounting information between qgroups. This is necessary
* when a snapshot or a subvolume is created. Throwing an error will
* cause a transaction abort so we take extra care here to only error
* when a readonly fs is a reasonable outcome.
@@ -2340,7 +2340,7 @@ out:
mutex_unlock(&fs_info->qgroup_rescan_lock);
/*
- * only update status, since the previous part has alreay updated the
+ * only update status, since the previous part has already updated the
* qgroup info.
*/
trans = btrfs_start_transaction(fs_info->quota_root, 1);
@@ -2542,8 +2542,7 @@ int btrfs_qgroup_reserve_data(struct inode *inode, u64 start, u64 len)
changeset.bytes_changed = 0;
changeset.range_changed = ulist_alloc(GFP_NOFS);
ret = set_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
- start + len -1, EXTENT_QGROUP_RESERVED, GFP_NOFS,
- &changeset);
+ start + len -1, EXTENT_QGROUP_RESERVED, &changeset);
trace_btrfs_qgroup_reserve_data(inode, start, len,
changeset.bytes_changed,
QGROUP_RESERVE);
@@ -2580,8 +2579,7 @@ static int __btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len,
return -ENOMEM;
ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, start,
- start + len -1, EXTENT_QGROUP_RESERVED, GFP_NOFS,
- &changeset);
+ start + len -1, EXTENT_QGROUP_RESERVED, &changeset);
if (ret < 0)
goto out;
@@ -2672,7 +2670,7 @@ void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
}
/*
- * Check qgroup reserved space leaking, normally at destory inode
+ * Check qgroup reserved space leaking, normally at destroy inode
* time
*/
void btrfs_qgroup_check_reserved_leak(struct inode *inode)
@@ -2688,7 +2686,7 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode)
return;
ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
- EXTENT_QGROUP_RESERVED, GFP_NOFS, &changeset);
+ EXTENT_QGROUP_RESERVED, &changeset);
WARN_ON(ret < 0);
if (WARN_ON(changeset.bytes_changed)) {
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 0b7792e02dd5..f8b6d411a034 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -576,7 +576,7 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
* we can't merge with cached rbios, since the
* idea is that when we merge the destination
* rbio is going to run our IO for us. We can
- * steal from cached rbio's though, other functions
+ * steal from cached rbios though, other functions
* handle that.
*/
if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
@@ -2368,7 +2368,7 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
}
- /* Check scrubbing pairty and repair it */
+ /* Check scrubbing parity and repair it */
p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
parity = kmap(p);
if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
@@ -2493,7 +2493,7 @@ static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
/*
* Here means we got one corrupted data stripe and one
* corrupted parity on RAID6, if the corrupted parity
- * is scrubbing parity, luckly, use the other one to repair
+ * is scrubbing parity, luckily, use the other one to repair
* the data, or we can not repair the data stripe.
*/
if (failp != rbio->scrubp)
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 1cfd35cfac76..0477dca154ed 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -668,8 +668,8 @@ int find_inline_backref(struct extent_buffer *leaf, int slot,
* roots of b-trees that reference the tree block.
*
* the basic idea of this function is check backrefs of a given block
- * to find upper level blocks that refernece the block, and then check
- * bakcrefs of these upper level blocks recursively. the recursion stop
+ * to find upper level blocks that reference the block, and then check
+ * backrefs of these upper level blocks recursively. the recursion stop
* when tree root is reached or backrefs for the block is cached.
*
* NOTE: if we find backrefs for a block are cached, we know backrefs
@@ -1160,7 +1160,7 @@ out:
if (!RB_EMPTY_NODE(&upper->rb_node))
continue;
- /* Add this guy's upper edges to the list to proces */
+ /* Add this guy's upper edges to the list to process */
list_for_each_entry(edge, &upper->upper, list[LOWER])
list_add_tail(&edge->list[UPPER], &list);
if (list_empty(&upper->upper))
@@ -2396,7 +2396,7 @@ again:
}
/*
- * we keep the old last snapshod transid in rtranid when we
+	 * we keep the old last snapshot transid in rtransid when we
* created the relocation tree.
*/
last_snap = btrfs_root_rtransid(&reloc_root->root_item);
@@ -2616,7 +2616,7 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
* only one thread can access block_rsv at this point,
* so we don't need hold lock to protect block_rsv.
* we expand more reservation size here to allow enough
- * space for relocation and we will return eailer in
+ * space for relocation and we will return earlier in
* enospc case.
*/
rc->block_rsv->size = tmp + rc->extent_root->nodesize *
@@ -2814,7 +2814,7 @@ static void mark_block_processed(struct reloc_control *rc,
u64 bytenr, u32 blocksize)
{
set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
- EXTENT_DIRTY, GFP_NOFS);
+ EXTENT_DIRTY);
}
static void __mark_block_processed(struct reloc_control *rc,
@@ -3182,7 +3182,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
page_start + offset == cluster->boundary[nr]) {
set_extent_bits(&BTRFS_I(inode)->io_tree,
page_start, page_end,
- EXTENT_BOUNDARY, GFP_NOFS);
+ EXTENT_BOUNDARY);
nr++;
}
@@ -4059,8 +4059,7 @@ restart:
}
btrfs_release_path(path);
- clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
- GFP_NOFS);
+ clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY);
if (trans) {
btrfs_end_transaction_throttle(trans, rc->extent_root);
@@ -4591,7 +4590,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
/*
* called before creating snapshot. it calculates metadata reservation
- * requried for relocating tree blocks in the snapshot
+ * required for relocating tree blocks in the snapshot
*/
void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
u64 *bytes_to_reserve)
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index b2b14e7115f1..f1c30861d062 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -71,9 +71,9 @@ static void btrfs_read_root_item(struct extent_buffer *eb, int slot,
* search_key: the key to search
* path: the path we search
* root_item: the root item of the tree we look for
- * root_key: the reak key of the tree we look for
+ * root_key: the root key of the tree we look for
*
- * If ->offset of 'seach_key' is -1ULL, it means we are not sure the offset
+ * If ->offset of 'search_key' is -1ULL, it means we are not sure the offset
* of the search key, just lookup the root with the highest offset for a
* given objectid.
*
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index fa35cdc46494..46d847f66e4b 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -745,7 +745,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
* sure we read the bad mirror.
*/
ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
- EXTENT_DAMAGED, GFP_NOFS);
+ EXTENT_DAMAGED);
if (ret) {
/* set_extent_bits should give proper error */
WARN_ON(ret > 0);
@@ -763,7 +763,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
end, EXTENT_DAMAGED, 0, NULL);
if (!corrected)
clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
- EXTENT_DAMAGED, GFP_NOFS);
+ EXTENT_DAMAGED);
}
out:
@@ -1044,7 +1044,7 @@ nodatasum_case:
/*
* !is_metadata and !have_csum, this means that the data
- * might not be COW'ed, that it might be modified
+ * might not be COWed, that it might be modified
* concurrently. The general strategy to work on the
* commit root does not help in the case when COW is not
* used.
@@ -1125,7 +1125,7 @@ nodatasum_case:
* the 2nd page of mirror #1 faces I/O errors, and the 2nd page
* of mirror #2 is readable but the final checksum test fails,
* then the 2nd page of mirror #3 could be tried, whether now
- * the final checksum succeedes. But this would be a rare
+ * the final checksum succeeds. But this would be a rare
* exception and is therefore not implemented. At least it is
* avoided that the good copy is overwritten.
* A more useful improvement would be to pick the sectors
@@ -2181,7 +2181,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
u64 length = sblock->page_count * PAGE_SIZE;
u64 logical = sblock->pagev[0]->logical;
- struct btrfs_bio *bbio;
+ struct btrfs_bio *bbio = NULL;
struct bio *bio;
struct btrfs_raid_bio *rbio;
int ret;
@@ -2982,6 +2982,7 @@ again:
extent_len);
mapped_length = extent_len;
+ bbio = NULL;
ret = btrfs_map_block(fs_info, READ, extent_logical,
&mapped_length, &bbio, 0);
if (!ret) {
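
Both scrub.c fixes above set bbio to NULL before btrfs_map_block(). One plausible reading of the hazard, modelled with illustrative names below, is a pointer left stale or uninitialized by a failed lookup reaching a cleanup path that frees any non-NULL value:

    #include <stdio.h>
    #include <stdlib.h>

    struct bbio { int n; };

    /* on failure the out-pointer is not touched, as with some kernel lookups */
    static int map_block(struct bbio **out, int fail)
    {
            if (fail)
                    return -1;
            *out = malloc(sizeof(**out));
            return *out ? 0 : -1;
    }

    int main(void)
    {
            struct bbio *bbio = NULL;        /* initialized: first error is safe */

            for (int i = 0; i < 3; i++) {
                    bbio = NULL;             /* reset before reuse each iteration */
                    if (map_block(&bbio, i == 1) == 0)
                            printf("iter %d: mapped\n", i);
                    else
                            printf("iter %d: map failed\n", i);
                    free(bbio);              /* free(NULL) is a harmless no-op */
            }
            return 0;
    }
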
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 6a8c86074aa4..b71dd298385c 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -1831,7 +1831,7 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
/*
* If we have a parent root we need to verify that the parent dir was
- * not delted and then re-created, if it was then we have no overwrite
+ * not deleted and then re-created, if it was then we have no overwrite
* and we can just unlink this entry.
*/
if (sctx->parent_root) {
@@ -4192,9 +4192,9 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key,
return -ENOMEM;
/*
- * This hack is needed because empty acl's are stored as zero byte
+ * This hack is needed because empty acls are stored as zero byte
* data in xattrs. Problem with that is, that receiving these zero byte
- * acl's will fail later. To fix this, we send a dummy acl list that
+ * acls will fail later. To fix this, we send a dummy acl list that
* only contains the version number and no entries.
*/
if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index e05619f241be..875c757e73e2 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -36,7 +36,7 @@ static inline void put_unaligned_le8(u8 val, void *p)
*
* The end result is that anyone who #includes ctree.h gets a
* declaration for the btrfs_set_foo functions and btrfs_foo functions,
- * which are wappers of btrfs_set_token_#bits functions and
+ * which are wrappers of btrfs_set_token_#bits functions and
* btrfs_get_token_#bits functions, which are defined in this file.
*
* These setget functions do all the extent_buffer related mapping
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index bf71071ab6f6..4e59a91a11e0 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -112,7 +112,7 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
* Note that a running device replace operation is not
* canceled here although there is no way to update
* the progress. It would add the risk of a deadlock,
- * therefore the canceling is ommited. The only penalty
+ * therefore the canceling is omitted. The only penalty
* is that some I/O remains active until the procedure
* completes. The next time when the filesystem is
* mounted writeable again, the device replace
@@ -1877,7 +1877,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
int ret;
/*
- * We aren't under the device list lock, so this is racey-ish, but good
+ * We aren't under the device list lock, so this is racy-ish, but good
* enough for our purposes.
*/
nr_devices = fs_info->fs_devices->open_devices;
@@ -1896,7 +1896,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
if (!devices_info)
return -ENOMEM;
- /* calc min stripe number for data space alloction */
+ /* calc min stripe number for data space allocation */
type = btrfs_get_alloc_profile(root, 1);
if (type & BTRFS_BLOCK_GROUP_RAID0) {
min_stripes = 2;
@@ -1932,7 +1932,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
avail_space *= BTRFS_STRIPE_LEN;
/*
- * In order to avoid overwritting the superblock on the drive,
+ * In order to avoid overwriting the superblock on the drive,
* btrfs starts at an offset of at least 1MB when doing chunk
* allocation.
*/
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c
index 70948b13bc81..55724607f79b 100644
--- a/fs/btrfs/tests/extent-io-tests.c
+++ b/fs/btrfs/tests/extent-io-tests.c
@@ -113,7 +113,7 @@ static int test_find_delalloc(void)
* |--- delalloc ---|
* |--- search ---|
*/
- set_extent_delalloc(&tmp, 0, 4095, NULL, GFP_KERNEL);
+ set_extent_delalloc(&tmp, 0, 4095, NULL);
start = 0;
end = 0;
found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -144,7 +144,7 @@ static int test_find_delalloc(void)
test_msg("Couldn't find the locked page\n");
goto out_bits;
}
- set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL, GFP_KERNEL);
+ set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL);
start = test_start;
end = 0;
found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -176,7 +176,7 @@ static int test_find_delalloc(void)
locked_page = find_lock_page(inode->i_mapping, test_start >>
PAGE_SHIFT);
if (!locked_page) {
- test_msg("Could'nt find the locked page\n");
+ test_msg("Couldn't find the locked page\n");
goto out_bits;
}
start = test_start;
@@ -199,7 +199,7 @@ static int test_find_delalloc(void)
*
* We are re-using our test_start from above since it works out well.
*/
- set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL, GFP_KERNEL);
+ set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL);
start = test_start;
end = 0;
found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -262,7 +262,7 @@ static int test_find_delalloc(void)
}
ret = 0;
out_bits:
- clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_KERNEL);
+ clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1);
out:
if (locked_page)
put_page(locked_page);
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index 514247515312..0eeb8f3d6b67 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -25,7 +25,7 @@
#define BITS_PER_BITMAP (PAGE_SIZE * 8)
/*
- * This test just does basic sanity checking, making sure we can add an exten
+ * This test just does basic sanity checking, making sure we can add an extent
* entry and remove space from either end and the middle, and make sure we can
* remove space that covers adjacent extent entries.
*/
@@ -396,8 +396,9 @@ static int check_cache_empty(struct btrfs_block_group_cache *cache)
* wasn't optimal as they could be spread all over the block group while under
* concurrency (extra overhead and fragmentation).
*
- * This stealing approach is benefical, since we always prefer to allocate from
- * extent entries, both for clustered and non-clustered allocation requests.
+ * This stealing approach is beneficial, since we always prefer to allocate
+ * from extent entries, both for clustered and non-clustered allocation
+ * requests.
*/
static int
test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache)
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 863a6a3af1f8..8a25fe8b7c45 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -264,7 +264,7 @@ static noinline int test_btrfs_get_extent(void)
/*
 * We will just free a dummy node if its ref count is 2 so we need an
- * extra ref so our searches don't accidently release our page.
+ * extra ref so our searches don't accidentally release our page.
*/
extent_buffer_get(root->node);
btrfs_set_header_nritems(root->node, 0);
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index 8ea5d34bc5a2..8aa4ded31326 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -234,7 +234,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
}
/*
- * Since the test trans doesn't havee the complicated delayed refs,
+ * Since the test trans doesn't have the complicated delayed refs,
* we can only call btrfs_qgroup_account_extent() directly to test
* quota.
*/
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 5b0b758a3f79..f6e24cb423ae 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -944,7 +944,7 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
err = convert_extent_bit(dirty_pages, start, end,
EXTENT_NEED_WAIT,
- mark, &cached_state, GFP_NOFS);
+ mark, &cached_state);
/*
* convert_extent_bit can return -ENOMEM, which is most of the
* time a temporary error. So when it happens, ignore the error
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 72be51f7ca2f..9fe0ec2bf0fe 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -144,7 +144,7 @@ struct btrfs_pending_snapshot {
/* block reservation for the operation */
struct btrfs_block_rsv block_rsv;
u64 qgroup_reserved;
- /* extra metadata reseration for relocation */
+ /* extra metadata reservation for relocation */
int error;
bool readonly;
struct list_head list;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 8aaca5c6af94..b7665af471d8 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2330,7 +2330,7 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
break;
/* for regular files, make sure corresponding
- * orhpan item exist. extents past the new EOF
+	 * orphan item exists. extents past the new EOF
* will be truncated later by orphan cleanup.
*/
if (S_ISREG(mode)) {
@@ -3001,7 +3001,7 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
break;
clear_extent_bits(&log->dirty_log_pages, start, end,
- EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
+ EXTENT_DIRTY | EXTENT_NEW);
}
/*
@@ -4914,7 +4914,7 @@ out_unlock:
* the actual unlink operation, so if we do this check before a concurrent task
* sets last_unlink_trans it means we've logged a consistent version/state of
* all the inode items, otherwise we are not sure and must do a transaction
- * commit (the concurrent task migth have only updated last_unlink_trans before
+ * commit (the concurrent task might have only updated last_unlink_trans before
* we logged the inode or it might have also done the unlink).
*/
static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
@@ -4973,7 +4973,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
while (1) {
/*
* If we are logging a directory then we start with our inode,
- * not our parents inode, so we need to skipp setting the
+ * not our parent's inode, so we need to skip setting the
* logged_trans so that further down in the log code we don't
* think this inode has already been logged.
*/
@@ -5357,7 +5357,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
log_dentries = true;
/*
- * On unlink we must make sure all our current and old parent directores
+ * On unlink we must make sure all our current and old parent directory
* inodes are fully logged. This is to prevent leaving dangling
* directory index entries in directories that were our parents but are
* not anymore. Not doing this results in old parent directory being
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
index 91feb2bdefee..b1434bb57e36 100644
--- a/fs/btrfs/ulist.c
+++ b/fs/btrfs/ulist.c
@@ -28,7 +28,7 @@
* }
* ulist_free(ulist);
*
- * This assumes the graph nodes are adressable by u64. This stems from the
+ * This assumes the graph nodes are addressable by u64. This stems from the
* usage for tree enumeration in btrfs, where the logical addresses are
* 64 bit.
*
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 2b88127bba5b..bdc62561ede8 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2190,7 +2190,7 @@ static int btrfs_prepare_sprout(struct btrfs_root *root)
}
/*
- * strore the expected generation for seed devices in device items.
+ * Store the expected generation for seed devices in device items.
*/
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
@@ -3387,7 +3387,7 @@ static int should_balance_chunk(struct btrfs_root *root,
} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
/*
* Same logic as the 'limit' filter; the minimum cannot be
- * determined here because we do not have the global informatoin
+ * determined here because we do not have the global information
* about the count of all chunks that satisfy the filters.
*/
if (bargs->limit_max == 0)
@@ -6076,7 +6076,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
{
atomic_inc(&bbio->error);
if (atomic_dec_and_test(&bbio->stripes_pending)) {
- /* Shoud be the original bio. */
+ /* Should be the original bio. */
WARN_ON(bio != bbio->orig_bio);
btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
@@ -6560,7 +6560,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
set_extent_buffer_uptodate(sb);
btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
/*
- * The sb extent buffer is artifical and just used to read the system array.
+ * The sb extent buffer is artificial and just used to read the system array.
 * set_extent_buffer_uptodate() call does not properly mark all its
* pages up-to-date when the page is larger: extent does not cover the
* whole page and consequently check_page_uptodate does not find all
@@ -6630,13 +6630,13 @@ int btrfs_read_sys_array(struct btrfs_root *root)
sb_array_offset += len;
cur_offset += len;
}
- free_extent_buffer(sb);
+ free_extent_buffer_stale(sb);
return ret;
out_short_read:
printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
len, cur_offset);
- free_extent_buffer(sb);
+ free_extent_buffer_stale(sb);
return -EIO;
}
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 3bfb252206c7..d1a177a3dbe8 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -380,23 +380,21 @@ static int btrfs_xattr_handler_get(const struct xattr_handler *handler,
}
static int btrfs_xattr_handler_set(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *buffer, size_t size,
- int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *buffer,
+ size_t size, int flags)
{
- struct inode *inode = d_inode(dentry);
-
name = xattr_full_name(handler, name);
return __btrfs_setxattr(NULL, inode, name, buffer, size, flags);
}
static int btrfs_xattr_handler_set_prop(const struct xattr_handler *handler,
- struct dentry *dentry,
+ struct dentry *unused, struct inode *inode,
const char *name, const void *value,
size_t size, int flags)
{
name = xattr_full_name(handler, name);
- return btrfs_set_prop(d_inode(dentry), name, value, size, flags);
+ return btrfs_set_prop(inode, name, value, size, flags);
}
static const struct xattr_handler btrfs_security_xattr_handler = {
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 43098cd9602b..eeb71e5de27a 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -257,12 +257,12 @@ static int ceph_readpage(struct file *filp, struct page *page)
/*
* Finish an async read(ahead) op.
*/
-static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
+static void finish_read(struct ceph_osd_request *req)
{
struct inode *inode = req->r_inode;
struct ceph_osd_data *osd_data;
- int rc = req->r_result;
- int bytes = le32_to_cpu(msg->hdr.data_len);
+ int rc = req->r_result <= 0 ? req->r_result : 0;
+ int bytes = req->r_result >= 0 ? req->r_result : 0;
int num_pages;
int i;
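
finish_read() above now derives both the error code and the byte count from the single r_result field instead of reading the reply message header. A small sketch of that convention, with stand-in names:

    #include <stdio.h>

    static void split_result(long r_result, int *rc, int *bytes)
    {
            *rc    = r_result <= 0 ? (int)r_result : 0;  /* negative errno or 0 */
            *bytes = r_result >= 0 ? (int)r_result : 0;  /* byte count or 0 */
    }

    int main(void)
    {
            int rc, bytes;

            split_result(8192, &rc, &bytes);
            printf("ok:    rc=%d bytes=%d\n", rc, bytes); /* rc=0 bytes=8192 */

            split_result(-5, &rc, &bytes);                /* an errno, say -EIO */
            printf("error: rc=%d bytes=%d\n", rc, bytes); /* rc=-5 bytes=0 */
            return 0;
    }
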
@@ -376,8 +376,6 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
req->r_callback = finish_read;
req->r_inode = inode;
- ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);
-
dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len);
ret = ceph_osdc_start_request(osdc, req, false);
if (ret < 0)
@@ -546,11 +544,21 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
truncate_seq, truncate_size,
&inode->i_mtime, &page, 1);
if (err < 0) {
- dout("writepage setting page/mapping error %d %p\n", err, page);
+ struct writeback_control tmp_wbc;
+ if (!wbc)
+ wbc = &tmp_wbc;
+ if (err == -ERESTARTSYS) {
+ /* killed by SIGKILL */
+ dout("writepage interrupted page %p\n", page);
+ redirty_page_for_writepage(wbc, page);
+ end_page_writeback(page);
+ goto out;
+ }
+ dout("writepage setting page/mapping error %d %p\n",
+ err, page);
SetPageError(page);
mapping_set_error(&inode->i_data, err);
- if (wbc)
- wbc->pages_skipped++;
+ wbc->pages_skipped++;
} else {
dout("writepage cleaned page %p\n", page);
err = 0; /* vfs expects us to return 0 */
@@ -571,12 +579,16 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
BUG_ON(!inode);
ihold(inode);
err = writepage_nounlock(page, wbc);
+ if (err == -ERESTARTSYS) {
+ /* direct memory reclaimer was killed by SIGKILL. return 0
+ * to prevent caller from setting mapping/page error */
+ err = 0;
+ }
unlock_page(page);
iput(inode);
return err;
}
-
/*
* lame release_pages helper. release_pages() isn't exported to
* modules.
@@ -600,8 +612,7 @@ static void ceph_release_pages(struct page **pages, int num)
* If we get an error, set the mapping error bit, but not the individual
* page error bits.
*/
-static void writepages_finish(struct ceph_osd_request *req,
- struct ceph_msg *msg)
+static void writepages_finish(struct ceph_osd_request *req)
{
struct inode *inode = req->r_inode;
struct ceph_inode_info *ci = ceph_inode(inode);
@@ -615,7 +626,6 @@ static void writepages_finish(struct ceph_osd_request *req,
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
bool remove_page;
-
dout("writepages_finish %p rc %d\n", inode, rc);
if (rc < 0)
mapping_set_error(mapping, rc);
@@ -650,6 +660,9 @@ static void writepages_finish(struct ceph_osd_request *req,
clear_bdi_congested(&fsc->backing_dev_info,
BLK_RW_ASYNC);
+ if (rc < 0)
+ SetPageError(page);
+
ceph_put_snap_context(page_snap_context(page));
page->private = 0;
ClearPagePrivate(page);
@@ -718,8 +731,11 @@ static int ceph_writepages_start(struct address_space *mapping,
(wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
- pr_warn("writepage_start %p on forced umount\n", inode);
- truncate_pagecache(inode, 0);
+ if (ci->i_wrbuffer_ref > 0) {
+ pr_warn_ratelimited(
+ "writepage_start %p %lld forced umount\n",
+ inode, ceph_ino(inode));
+ }
mapping_set_error(mapping, -EIO);
return -EIO; /* we're in a forced umount, don't write! */
}
@@ -1063,10 +1079,7 @@ new_request:
pages = NULL;
}
- vino = ceph_vino(inode);
- ceph_osdc_build_request(req, offset, snapc, vino.snap,
- &inode->i_mtime);
-
+ req->r_mtime = inode->i_mtime;
rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
BUG_ON(rc);
req = NULL;
@@ -1099,8 +1112,7 @@ release_pvec_pages:
mapping->writeback_index = index;
out:
- if (req)
- ceph_osdc_put_request(req);
+ ceph_osdc_put_request(req);
ceph_put_snap_context(snapc);
dout("writepages done, rc = %d\n", rc);
return rc;
@@ -1134,6 +1146,7 @@ static int ceph_update_writeable_page(struct file *file,
struct page *page)
{
struct inode *inode = file_inode(file);
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
loff_t page_off = pos & PAGE_MASK;
int pos_in_page = pos & ~PAGE_MASK;
@@ -1142,6 +1155,12 @@ static int ceph_update_writeable_page(struct file *file,
int r;
struct ceph_snap_context *snapc, *oldest;
+ if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
+ dout(" page %p forced umount\n", page);
+ unlock_page(page);
+ return -EIO;
+ }
+
retry_locked:
/* writepages currently holds page lock, but if we change that later, */
wait_on_page_writeback(page);
@@ -1165,7 +1184,7 @@ retry_locked:
snapc = ceph_get_snap_context(snapc);
unlock_page(page);
ceph_queue_writeback(inode);
- r = wait_event_interruptible(ci->i_cap_wq,
+ r = wait_event_killable(ci->i_cap_wq,
context_is_writeable_or_written(inode, snapc));
ceph_put_snap_context(snapc);
if (r == -ERESTARTSYS)
@@ -1311,6 +1330,17 @@ const struct address_space_operations ceph_aops = {
.direct_IO = ceph_direct_io,
};
+static void ceph_block_sigs(sigset_t *oldset)
+{
+ sigset_t mask;
+ siginitsetinv(&mask, sigmask(SIGKILL));
+ sigprocmask(SIG_BLOCK, &mask, oldset);
+}
+
+static void ceph_restore_sigs(sigset_t *oldset)
+{
+ sigprocmask(SIG_SETMASK, oldset, NULL);
+}
/*
* vm ops
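
ceph_block_sigs() above masks every signal except SIGKILL, so the fault handlers that follow stay killable without being interruptible. A userspace analogue; sigfillset() plus sigdelset() stand in for the kernel's siginitsetinv(sigmask(SIGKILL)):

    #include <signal.h>
    #include <stdio.h>

    static void block_sigs(sigset_t *oldset)
    {
            sigset_t mask;

            sigfillset(&mask);               /* everything ... */
            sigdelset(&mask, SIGKILL);       /* ... except SIGKILL */
            sigprocmask(SIG_BLOCK, &mask, oldset);
    }

    static void restore_sigs(const sigset_t *oldset)
    {
            sigprocmask(SIG_SETMASK, oldset, NULL);
    }

    int main(void)
    {
            sigset_t oldset;

            block_sigs(&oldset);
            puts("interruptible waits here only see SIGKILL");
            restore_sigs(&oldset);
            return 0;
    }
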
@@ -1323,6 +1353,9 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct page *pinned_page = NULL;
loff_t off = vmf->pgoff << PAGE_SHIFT;
int want, got, ret;
+ sigset_t oldset;
+
+ ceph_block_sigs(&oldset);
dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n",
inode, ceph_vinop(inode), off, (size_t)PAGE_SIZE);
@@ -1330,17 +1363,12 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
else
want = CEPH_CAP_FILE_CACHE;
- while (1) {
- got = 0;
- ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want,
- -1, &got, &pinned_page);
- if (ret == 0)
- break;
- if (ret != -ERESTARTSYS) {
- WARN_ON(1);
- return VM_FAULT_SIGBUS;
- }
- }
+
+ got = 0;
+ ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
+ if (ret < 0)
+ goto out_restore;
+
dout("filemap_fault %p %llu~%zd got cap refs on %s\n",
inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got));
@@ -1357,7 +1385,7 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ceph_put_cap_refs(ci, got);
if (ret != -EAGAIN)
- return ret;
+ goto out_restore;
/* read inline data */
if (off >= PAGE_SIZE) {
@@ -1371,15 +1399,18 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
~__GFP_FS));
if (!page) {
ret = VM_FAULT_OOM;
- goto out;
+ goto out_inline;
}
ret1 = __ceph_do_getattr(inode, page,
CEPH_STAT_CAP_INLINE_DATA, true);
if (ret1 < 0 || off >= i_size_read(inode)) {
unlock_page(page);
put_page(page);
- ret = VM_FAULT_SIGBUS;
- goto out;
+ if (ret1 < 0)
+ ret = ret1;
+ else
+ ret = VM_FAULT_SIGBUS;
+ goto out_inline;
}
if (ret1 < PAGE_SIZE)
zero_user_segment(page, ret1, PAGE_SIZE);
@@ -1388,10 +1419,15 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
SetPageUptodate(page);
vmf->page = page;
ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
+out_inline:
+ dout("filemap_fault %p %llu~%zd read inline data ret %d\n",
+ inode, off, (size_t)PAGE_SIZE, ret);
}
-out:
- dout("filemap_fault %p %llu~%zd read inline data ret %d\n",
- inode, off, (size_t)PAGE_SIZE, ret);
+out_restore:
+ ceph_restore_sigs(&oldset);
+ if (ret < 0)
+ ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+
return ret;
}
@@ -1409,10 +1445,13 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
loff_t size = i_size_read(inode);
size_t len;
int want, got, ret;
+ sigset_t oldset;
prealloc_cf = ceph_alloc_cap_flush();
if (!prealloc_cf)
- return VM_FAULT_SIGBUS;
+ return VM_FAULT_OOM;
+
+ ceph_block_sigs(&oldset);
if (ci->i_inline_version != CEPH_INLINE_NONE) {
struct page *locked_page = NULL;
@@ -1423,10 +1462,8 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = ceph_uninline_data(vma->vm_file, locked_page);
if (locked_page)
unlock_page(locked_page);
- if (ret < 0) {
- ret = VM_FAULT_SIGBUS;
+ if (ret < 0)
goto out_free;
- }
}
if (off + PAGE_SIZE <= size)
@@ -1440,45 +1477,36 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
else
want = CEPH_CAP_FILE_BUFFER;
- while (1) {
- got = 0;
- ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, off + len,
- &got, NULL);
- if (ret == 0)
- break;
- if (ret != -ERESTARTSYS) {
- WARN_ON(1);
- ret = VM_FAULT_SIGBUS;
- goto out_free;
- }
- }
+
+ got = 0;
+ ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, off + len,
+ &got, NULL);
+ if (ret < 0)
+ goto out_free;
+
dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
inode, off, len, ceph_cap_string(got));
/* Update time before taking page lock */
file_update_time(vma->vm_file);
- lock_page(page);
+ do {
+ lock_page(page);
- ret = VM_FAULT_NOPAGE;
- if ((off > size) ||
- (page->mapping != inode->i_mapping)) {
- unlock_page(page);
- goto out;
- }
+ if ((off > size) || (page->mapping != inode->i_mapping)) {
+ unlock_page(page);
+ ret = VM_FAULT_NOPAGE;
+ break;
+ }
+
+ ret = ceph_update_writeable_page(vma->vm_file, off, len, page);
+ if (ret >= 0) {
+ /* success. we'll keep the page locked. */
+ set_page_dirty(page);
+ ret = VM_FAULT_LOCKED;
+ }
+ } while (ret == -EAGAIN);
- ret = ceph_update_writeable_page(vma->vm_file, off, len, page);
- if (ret >= 0) {
- /* success. we'll keep the page locked. */
- set_page_dirty(page);
- ret = VM_FAULT_LOCKED;
- } else {
- if (ret == -ENOMEM)
- ret = VM_FAULT_OOM;
- else
- ret = VM_FAULT_SIGBUS;
- }
-out:
if (ret == VM_FAULT_LOCKED ||
ci->i_inline_version != CEPH_INLINE_NONE) {
int dirty;
@@ -1495,8 +1523,10 @@ out:
inode, off, len, ceph_cap_string(got), ret);
ceph_put_cap_refs(ci, got);
out_free:
+ ceph_restore_sigs(&oldset);
ceph_free_cap_flush(prealloc_cf);
-
+ if (ret < 0)
+ ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
return ret;
}
@@ -1614,7 +1644,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
goto out;
}
- ceph_osdc_build_request(req, 0, NULL, CEPH_NOSNAP, &inode->i_mtime);
+ req->r_mtime = inode->i_mtime;
err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
if (!err)
err = ceph_osdc_wait_request(&fsc->client->osdc, req);
@@ -1657,7 +1687,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
goto out_put;
}
- ceph_osdc_build_request(req, 0, NULL, CEPH_NOSNAP, &inode->i_mtime);
+ req->r_mtime = inode->i_mtime;
err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
if (!err)
err = ceph_osdc_wait_request(&fsc->client->osdc, req);
@@ -1758,9 +1788,11 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
rd_req->r_flags = CEPH_OSD_FLAG_READ;
osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
rd_req->r_base_oloc.pool = pool;
- snprintf(rd_req->r_base_oid.name, sizeof(rd_req->r_base_oid.name),
- "%llx.00000000", ci->i_vino.ino);
- rd_req->r_base_oid.name_len = strlen(rd_req->r_base_oid.name);
+ ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);
+
+ err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
+ if (err)
+ goto out_unlock;
wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
1, false, GFP_NOFS);
@@ -1769,11 +1801,14 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
goto out_unlock;
}
- wr_req->r_flags = CEPH_OSD_FLAG_WRITE |
- CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK;
+ wr_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ACK;
osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
- wr_req->r_base_oloc.pool = pool;
- wr_req->r_base_oid = rd_req->r_base_oid;
+ ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc);
+ ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid);
+
+ err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS);
+ if (err)
+ goto out_unlock;
/* one page should be large enough for STAT data */
pages = ceph_alloc_page_vector(1, GFP_KERNEL);
@@ -1784,12 +1819,9 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
0, false, true);
- ceph_osdc_build_request(rd_req, 0, NULL, CEPH_NOSNAP,
- &ci->vfs_inode.i_mtime);
err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);
- ceph_osdc_build_request(wr_req, 0, NULL, CEPH_NOSNAP,
- &ci->vfs_inode.i_mtime);
+ wr_req->r_mtime = ci->vfs_inode.i_mtime;
err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);
if (!err)
@@ -1823,10 +1855,8 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
out_unlock:
up_write(&mdsc->pool_perm_rwsem);
- if (rd_req)
- ceph_osdc_put_request(rd_req);
- if (wr_req)
- ceph_osdc_put_request(wr_req);
+ ceph_osdc_put_request(rd_req);
+ ceph_osdc_put_request(wr_req);
out:
if (!err)
err = have;
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index a351480dbabc..c052b5bf219b 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -236,7 +236,7 @@ static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int
unlock_page(page);
}
-static inline int cache_valid(struct ceph_inode_info *ci)
+static inline bool cache_valid(struct ceph_inode_info *ci)
{
return ((ceph_caps_issued(ci) & CEPH_CAP_FILE_CACHE) &&
(ci->i_fscache_gen == ci->i_rdcache_gen));
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index cfaeef18cbca..c17b5d76d75e 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1656,7 +1656,7 @@ retry_locked:
*/
if ((!is_delayed || mdsc->stopping) &&
!S_ISDIR(inode->i_mode) && /* ignore readdir cache */
- ci->i_wrbuffer_ref == 0 && /* no dirty pages... */
+ !(ci->i_wb_ref || ci->i_wrbuffer_ref) && /* no dirty pages... */
inode->i_data.nrpages && /* have cached pages */
(revoking & (CEPH_CAP_FILE_CACHE|
CEPH_CAP_FILE_LAZYIO)) && /* or revoking cache */
@@ -1698,8 +1698,8 @@ retry_locked:
revoking = cap->implemented & ~cap->issued;
dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n",
- cap->mds, cap, ceph_cap_string(cap->issued),
- ceph_cap_string(cap_used),
+ cap->mds, cap, ceph_cap_string(cap_used),
+ ceph_cap_string(cap->issued),
ceph_cap_string(cap->implemented),
ceph_cap_string(revoking));
@@ -2317,7 +2317,7 @@ again:
/* make sure file is actually open */
file_wanted = __ceph_caps_file_wanted(ci);
- if ((file_wanted & need) == 0) {
+ if ((file_wanted & need) != need) {
dout("try_get_cap_refs need %s file_wanted %s, EBADF\n",
ceph_cap_string(need), ceph_cap_string(file_wanted));
*err = -EBADF;
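
The one-line change above tightens a bitmask test: (file_wanted & need) == 0 only flagged an error when every needed capability bit was missing, while (file_wanted & need) != need flags it as soon as any bit is. With placeholder bit values:

    #include <stdio.h>

    #define CAP_RD 0x1               /* illustrative values only */
    #define CAP_WR 0x2

    int main(void)
    {
            int wanted = CAP_RD;             /* file open for read only */
            int need   = CAP_RD | CAP_WR;    /* caller needs read and write */

            printf("old test flags error: %d\n", (wanted & need) == 0);    /* 0: missed */
            printf("new test flags error: %d\n", (wanted & need) != need); /* 1: caught */
            return 0;
    }
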
@@ -2412,12 +2412,26 @@ again:
goto out_unlock;
}
- if (!__ceph_is_any_caps(ci) &&
- ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
- dout("get_cap_refs %p forced umount\n", inode);
- *err = -EIO;
- ret = 1;
- goto out_unlock;
+ if (ci->i_ceph_flags & CEPH_I_CAP_DROPPED) {
+ int mds_wanted;
+ if (ACCESS_ONCE(mdsc->fsc->mount_state) ==
+ CEPH_MOUNT_SHUTDOWN) {
+ dout("get_cap_refs %p forced umount\n", inode);
+ *err = -EIO;
+ ret = 1;
+ goto out_unlock;
+ }
+ mds_wanted = __ceph_caps_mds_wanted(ci);
+ if ((mds_wanted & need) != need) {
+ dout("get_cap_refs %p caps were dropped"
+ " (session killed?)\n", inode);
+ *err = -ESTALE;
+ ret = 1;
+ goto out_unlock;
+ }
+ if ((mds_wanted & file_wanted) ==
+ (file_wanted & (CEPH_CAP_FILE_RD|CEPH_CAP_FILE_WR)))
+ ci->i_ceph_flags &= ~CEPH_I_CAP_DROPPED;
}
dout("get_cap_refs %p have %s needed %s\n", inode,
@@ -2487,7 +2501,7 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
if (err == -EAGAIN)
continue;
if (err < 0)
- return err;
+ ret = err;
} else {
ret = wait_event_interruptible(ci->i_cap_wq,
try_get_cap_refs(ci, need, want, endoff,
@@ -2496,8 +2510,15 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
continue;
if (err < 0)
ret = err;
- if (ret < 0)
- return ret;
+ }
+ if (ret < 0) {
+ if (err == -ESTALE) {
+ /* session was killed, try renew caps */
+ ret = ceph_renew_caps(&ci->vfs_inode);
+ if (ret == 0)
+ continue;
+ }
+ return ret;
}
if (ci->i_inline_version != CEPH_INLINE_NONE &&
@@ -2807,7 +2828,7 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
if (!S_ISDIR(inode->i_mode) && /* don't invalidate readdir cache */
((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
(newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
- !ci->i_wrbuffer_ref) {
+ !(ci->i_wrbuffer_ref || ci->i_wb_ref)) {
if (try_nonblocking_invalidate(inode)) {
/* there were locked pages.. invalidate later
in a separate thread. */
@@ -3226,6 +3247,8 @@ retry:
if (target < 0) {
__ceph_remove_cap(cap, false);
+ if (!ci->i_auth_cap)
+ ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;
goto out_unlock;
}
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 31f831471ed2..39ff678e567f 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -109,7 +109,7 @@ static int mdsc_show(struct seq_file *s, void *p)
path ? path : "");
spin_unlock(&req->r_old_dentry->d_lock);
kfree(path);
- } else if (req->r_path2) {
+ } else if (req->r_path2 && req->r_op != CEPH_MDS_OP_SYMLINK) {
if (req->r_ino2.ino)
seq_printf(s, " #%llx/%s", req->r_ino2.ino,
req->r_path2);
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 3ab1192d2029..6e0fedf6713b 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -70,16 +70,42 @@ out_unlock:
}
/*
- * for readdir, we encode the directory frag and offset within that
- * frag into f_pos.
+ * for f_pos for readdir:
+ * - hash order:
+ * (0xff << 52) | ((24 bits hash) << 28) |
+ * (the nth entry has hash collision);
+ * - frag+name order;
+ * ((frag value) << 28) | (the nth entry in frag);
*/
+#define OFFSET_BITS 28
+#define OFFSET_MASK ((1 << OFFSET_BITS) - 1)
+#define HASH_ORDER (0xffull << (OFFSET_BITS + 24))
+loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order)
+{
+ loff_t fpos = ((loff_t)high << 28) | (loff_t)off;
+ if (hash_order)
+ fpos |= HASH_ORDER;
+ return fpos;
+}
+
+static bool is_hash_order(loff_t p)
+{
+ return (p & HASH_ORDER) == HASH_ORDER;
+}
+
static unsigned fpos_frag(loff_t p)
{
- return p >> 32;
+ return p >> OFFSET_BITS;
}
+
+static unsigned fpos_hash(loff_t p)
+{
+ return ceph_frag_value(fpos_frag(p));
+}
+
static unsigned fpos_off(loff_t p)
{
- return p & 0xffffffff;
+ return p & OFFSET_MASK;
}
static int fpos_cmp(loff_t l, loff_t r)
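
A worked example of the f_pos layout documented above. OFFSET_BITS, OFFSET_MASK and HASH_ORDER are copied from the hunk; the sample hash and entry index are made up, and loff_t is modelled as a plain long long:

    #include <stdio.h>
    #include <stdbool.h>

    #define OFFSET_BITS 28
    #define OFFSET_MASK ((1 << OFFSET_BITS) - 1)
    #define HASH_ORDER  (0xffull << (OFFSET_BITS + 24))

    static long long make_fpos(unsigned high, unsigned off, bool hash_order)
    {
            long long fpos = ((long long)high << OFFSET_BITS) | (long long)off;

            if (hash_order)
                    fpos |= HASH_ORDER;
            return fpos;
    }

    int main(void)
    {
            long long p = make_fpos(0xabcdef, 7, true); /* 24-bit hash, 8th entry */

            printf("fpos       = %#llx\n", (unsigned long long)p);
            printf("hash_order = %d\n", (p & HASH_ORDER) == HASH_ORDER);
            printf("hash       = %#llx\n",
                   (unsigned long long)((p >> OFFSET_BITS) & 0xffffff));
            printf("off        = %lld\n", p & OFFSET_MASK);
            return 0;
    }
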
@@ -111,6 +137,50 @@ static int note_last_dentry(struct ceph_file_info *fi, const char *name,
return 0;
}
+
+static struct dentry *
+__dcache_find_get_entry(struct dentry *parent, u64 idx,
+ struct ceph_readdir_cache_control *cache_ctl)
+{
+ struct inode *dir = d_inode(parent);
+ struct dentry *dentry;
+ unsigned idx_mask = (PAGE_SIZE / sizeof(struct dentry *)) - 1;
+ loff_t ptr_pos = idx * sizeof(struct dentry *);
+ pgoff_t ptr_pgoff = ptr_pos >> PAGE_SHIFT;
+
+ if (ptr_pos >= i_size_read(dir))
+ return NULL;
+
+ if (!cache_ctl->page || ptr_pgoff != page_index(cache_ctl->page)) {
+ ceph_readdir_cache_release(cache_ctl);
+ cache_ctl->page = find_lock_page(&dir->i_data, ptr_pgoff);
+ if (!cache_ctl->page) {
+ dout(" page %lu not found\n", ptr_pgoff);
+ return ERR_PTR(-EAGAIN);
+ }
+ /* reading/filling the cache are serialized by
+ i_mutex, no need to use page lock */
+ unlock_page(cache_ctl->page);
+ cache_ctl->dentries = kmap(cache_ctl->page);
+ }
+
+ cache_ctl->index = idx & idx_mask;
+
+ rcu_read_lock();
+ spin_lock(&parent->d_lock);
+ /* check i_size again here, because empty directory can be
+ * marked as complete while not holding the i_mutex. */
+ if (ceph_dir_is_complete_ordered(dir) && ptr_pos < i_size_read(dir))
+ dentry = cache_ctl->dentries[cache_ctl->index];
+ else
+ dentry = NULL;
+ spin_unlock(&parent->d_lock);
+ if (dentry && !lockref_get_not_dead(&dentry->d_lockref))
+ dentry = NULL;
+ rcu_read_unlock();
+ return dentry ? : ERR_PTR(-EAGAIN);
+}
+
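
For reference, the idx-to-page/slot arithmetic in __dcache_find_get_entry() is just the readdir cache layout (one struct dentry * per slot of the directory's page cache) replayed below, assuming 4 KiB pages and 8-byte pointers:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE  4096u            /* assumed */
    #define PAGE_SHIFT 12
    #define PTR_SIZE   sizeof(void *)   /* 8 on 64-bit */

    int main(void)
    {
        uint64_t idx = 1000;                              /* cache index of the entry */
        uint64_t ptr_pos = idx * PTR_SIZE;                /* byte offset in the cache */
        uint64_t pgoff = ptr_pos >> PAGE_SHIFT;           /* which page to kmap */
        uint64_t slot = idx & (PAGE_SIZE / PTR_SIZE - 1); /* index within that page */

        printf("idx %llu -> page %llu slot %llu\n",
               (unsigned long long)idx, (unsigned long long)pgoff,
               (unsigned long long)slot);                 /* idx 1000 -> page 1 slot 488 */
        return 0;
    }
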
/*
* When possible, we try to satisfy a readdir by peeking at the
* dcache. We make this work by carefully ordering dentries on
@@ -130,75 +200,68 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
struct inode *dir = d_inode(parent);
struct dentry *dentry, *last = NULL;
struct ceph_dentry_info *di;
- unsigned nsize = PAGE_SIZE / sizeof(struct dentry *);
- int err = 0;
- loff_t ptr_pos = 0;
struct ceph_readdir_cache_control cache_ctl = {};
+ u64 idx = 0;
+ int err = 0;
- dout("__dcache_readdir %p v%u at %llu\n", dir, shared_gen, ctx->pos);
+ dout("__dcache_readdir %p v%u at %llx\n", dir, shared_gen, ctx->pos);
+
+ /* search start position */
+ if (ctx->pos > 2) {
+ u64 count = div_u64(i_size_read(dir), sizeof(struct dentry *));
+ while (count > 0) {
+ u64 step = count >> 1;
+ dentry = __dcache_find_get_entry(parent, idx + step,
+ &cache_ctl);
+ if (!dentry) {
+ /* fall back to linear search */
+ idx = 0;
+ break;
+ }
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+ goto out;
+ }
+ di = ceph_dentry(dentry);
+ spin_lock(&dentry->d_lock);
+ if (fpos_cmp(di->offset, ctx->pos) < 0) {
+ idx += step + 1;
+ count -= step + 1;
+ } else {
+ count = step;
+ }
+ spin_unlock(&dentry->d_lock);
+ dput(dentry);
+ }
- /* we can calculate cache index for the first dirfrag */
- if (ceph_frag_is_leftmost(fpos_frag(ctx->pos))) {
- cache_ctl.index = fpos_off(ctx->pos) - 2;
- BUG_ON(cache_ctl.index < 0);
- ptr_pos = cache_ctl.index * sizeof(struct dentry *);
+ dout("__dcache_readdir %p cache idx %llu\n", dir, idx);
}
- while (true) {
- pgoff_t pgoff;
- bool emit_dentry;
- if (ptr_pos >= i_size_read(dir)) {
+ for (;;) {
+ bool emit_dentry = false;
+ dentry = __dcache_find_get_entry(parent, idx++, &cache_ctl);
+ if (!dentry) {
fi->flags |= CEPH_F_ATEND;
err = 0;
break;
}
-
- err = -EAGAIN;
- pgoff = ptr_pos >> PAGE_SHIFT;
- if (!cache_ctl.page || pgoff != page_index(cache_ctl.page)) {
- ceph_readdir_cache_release(&cache_ctl);
- cache_ctl.page = find_lock_page(&dir->i_data, pgoff);
- if (!cache_ctl.page) {
- dout(" page %lu not found\n", pgoff);
- break;
- }
- /* reading/filling the cache are serialized by
- * i_mutex, no need to use page lock */
- unlock_page(cache_ctl.page);
- cache_ctl.dentries = kmap(cache_ctl.page);
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+ goto out;
}
- rcu_read_lock();
- spin_lock(&parent->d_lock);
- /* check i_size again here, because empty directory can be
- * marked as complete while not holding the i_mutex. */
- if (ceph_dir_is_complete_ordered(dir) &&
- ptr_pos < i_size_read(dir))
- dentry = cache_ctl.dentries[cache_ctl.index % nsize];
- else
- dentry = NULL;
- spin_unlock(&parent->d_lock);
- if (dentry && !lockref_get_not_dead(&dentry->d_lockref))
- dentry = NULL;
- rcu_read_unlock();
- if (!dentry)
- break;
-
- emit_dentry = false;
di = ceph_dentry(dentry);
spin_lock(&dentry->d_lock);
if (di->lease_shared_gen == shared_gen &&
d_really_is_positive(dentry) &&
- ceph_snap(d_inode(dentry)) != CEPH_SNAPDIR &&
- ceph_ino(d_inode(dentry)) != CEPH_INO_CEPH &&
fpos_cmp(ctx->pos, di->offset) <= 0) {
emit_dentry = true;
}
spin_unlock(&dentry->d_lock);
if (emit_dentry) {
- dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos,
+ dout(" %llx dentry %p %pd %p\n", di->offset,
dentry, dentry, d_inode(dentry));
ctx->pos = di->offset;
if (!dir_emit(ctx, dentry->d_name.name,
@@ -218,10 +281,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
} else {
dput(dentry);
}
-
- cache_ctl.index++;
- ptr_pos += sizeof(struct dentry *);
}
+out:
ceph_readdir_cache_release(&cache_ctl);
if (last) {
int ret;
@@ -235,6 +296,16 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
return err;
}
+static bool need_send_readdir(struct ceph_file_info *fi, loff_t pos)
+{
+ if (!fi->last_readdir)
+ return true;
+ if (is_hash_order(pos))
+ return !ceph_frag_contains_value(fi->frag, fpos_hash(pos));
+ else
+ return fi->frag != fpos_frag(pos);
+}
+
static int ceph_readdir(struct file *file, struct dir_context *ctx)
{
struct ceph_file_info *fi = file->private_data;
@@ -242,13 +313,12 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_mds_client *mdsc = fsc->mdsc;
- unsigned frag = fpos_frag(ctx->pos);
- int off = fpos_off(ctx->pos);
+ int i;
int err;
u32 ftype;
struct ceph_mds_reply_info_parsed *rinfo;
- dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
+ dout("readdir %p file %p pos %llx\n", inode, file, ctx->pos);
if (fi->flags & CEPH_F_ATEND)
return 0;
@@ -260,7 +330,6 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
inode->i_mode >> 12))
return 0;
ctx->pos = 1;
- off = 1;
}
if (ctx->pos == 1) {
ino_t ino = parent_ino(file->f_path.dentry);
@@ -270,7 +339,6 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
inode->i_mode >> 12))
return 0;
ctx->pos = 2;
- off = 2;
}
/* can we use the dcache? */
@@ -285,8 +353,6 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
err = __dcache_readdir(file, ctx, shared_gen);
if (err != -EAGAIN)
return err;
- frag = fpos_frag(ctx->pos);
- off = fpos_off(ctx->pos);
} else {
spin_unlock(&ci->i_ceph_lock);
}
@@ -294,8 +360,9 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
/* proceed with a normal readdir */
more:
/* do we have the correct frag content buffered? */
- if (fi->frag != frag || fi->last_readdir == NULL) {
+ if (need_send_readdir(fi, ctx->pos)) {
struct ceph_mds_request *req;
+ unsigned frag;
int op = ceph_snap(inode) == CEPH_SNAPDIR ?
CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;
@@ -305,6 +372,13 @@ more:
fi->last_readdir = NULL;
}
+ if (is_hash_order(ctx->pos)) {
+ frag = ceph_choose_frag(ci, fpos_hash(ctx->pos),
+ NULL, NULL);
+ } else {
+ frag = fpos_frag(ctx->pos);
+ }
+
dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
ceph_vinop(inode), frag, fi->last_name);
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
@@ -331,6 +405,8 @@ more:
req->r_readdir_cache_idx = fi->readdir_cache_idx;
req->r_readdir_offset = fi->next_offset;
req->r_args.readdir.frag = cpu_to_le32(frag);
+ req->r_args.readdir.flags =
+ cpu_to_le16(CEPH_READDIR_REPLY_BITFLAGS);
req->r_inode = inode;
ihold(inode);
@@ -340,22 +416,26 @@ more:
ceph_mdsc_put_request(req);
return err;
}
- dout("readdir got and parsed readdir result=%d"
- " on frag %x, end=%d, complete=%d\n", err, frag,
+ dout("readdir got and parsed readdir result=%d on "
+ "frag %x, end=%d, complete=%d, hash_order=%d\n",
+ err, frag,
(int)req->r_reply_info.dir_end,
- (int)req->r_reply_info.dir_complete);
-
+ (int)req->r_reply_info.dir_complete,
+ (int)req->r_reply_info.hash_order);
- /* note next offset and last dentry name */
rinfo = &req->r_reply_info;
if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
frag = le32_to_cpu(rinfo->dir_dir->frag);
- off = req->r_readdir_offset;
- fi->next_offset = off;
+ if (!rinfo->hash_order) {
+ fi->next_offset = req->r_readdir_offset;
+ /* adjust ctx->pos to beginning of frag */
+ ctx->pos = ceph_make_fpos(frag,
+ fi->next_offset,
+ false);
+ }
}
fi->frag = frag;
- fi->offset = fi->next_offset;
fi->last_readdir = req;
if (req->r_did_prepopulate) {
@@ -363,7 +443,8 @@ more:
if (fi->readdir_cache_idx < 0) {
/* preclude from marking dir ordered */
fi->dir_ordered_count = 0;
- } else if (ceph_frag_is_leftmost(frag) && off == 2) {
+ } else if (ceph_frag_is_leftmost(frag) &&
+ fi->next_offset == 2) {
/* note dir version at start of readdir so
* we can tell if any dentries get dropped */
fi->dir_release_count = req->r_dir_release_cnt;
@@ -377,65 +458,87 @@ more:
fi->dir_release_count = 0;
}
- if (req->r_reply_info.dir_end) {
- kfree(fi->last_name);
- fi->last_name = NULL;
- if (ceph_frag_is_rightmost(frag))
- fi->next_offset = 2;
- else
- fi->next_offset = 0;
- } else {
- err = note_last_dentry(fi,
- rinfo->dir_dname[rinfo->dir_nr-1],
- rinfo->dir_dname_len[rinfo->dir_nr-1],
- fi->next_offset + rinfo->dir_nr);
+ /* note next offset and last dentry name */
+ if (rinfo->dir_nr > 0) {
+ struct ceph_mds_reply_dir_entry *rde =
+ rinfo->dir_entries + (rinfo->dir_nr-1);
+ unsigned next_offset = req->r_reply_info.dir_end ?
+ 2 : (fpos_off(rde->offset) + 1);
+ err = note_last_dentry(fi, rde->name, rde->name_len,
+ next_offset);
if (err)
return err;
+ } else if (req->r_reply_info.dir_end) {
+ fi->next_offset = 2;
+ /* keep last name */
}
}
rinfo = &fi->last_readdir->r_reply_info;
- dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
- rinfo->dir_nr, off, fi->offset);
-
- ctx->pos = ceph_make_fpos(frag, off);
- while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
- struct ceph_mds_reply_inode *in =
- rinfo->dir_in[off - fi->offset].in;
+ dout("readdir frag %x num %d pos %llx chunk first %llx\n",
+ fi->frag, rinfo->dir_nr, ctx->pos,
+ rinfo->dir_nr ? rinfo->dir_entries[0].offset : 0LL);
+
+ i = 0;
+ /* search start position */
+ if (rinfo->dir_nr > 0) {
+ int step, nr = rinfo->dir_nr;
+ while (nr > 0) {
+ step = nr >> 1;
+ if (rinfo->dir_entries[i + step].offset < ctx->pos) {
+ i += step + 1;
+ nr -= step + 1;
+ } else {
+ nr = step;
+ }
+ }
+ }
+ for (; i < rinfo->dir_nr; i++) {
+ struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
struct ceph_vino vino;
ino_t ino;
- dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
- off, off - fi->offset, rinfo->dir_nr, ctx->pos,
- rinfo->dir_dname_len[off - fi->offset],
- rinfo->dir_dname[off - fi->offset], in);
- BUG_ON(!in);
- ftype = le32_to_cpu(in->mode) >> 12;
- vino.ino = le64_to_cpu(in->ino);
- vino.snap = le64_to_cpu(in->snapid);
+ BUG_ON(rde->offset < ctx->pos);
+
+ ctx->pos = rde->offset;
+ dout("readdir (%d/%d) -> %llx '%.*s' %p\n",
+ i, rinfo->dir_nr, ctx->pos,
+ rde->name_len, rde->name, &rde->inode.in);
+
+ BUG_ON(!rde->inode.in);
+ ftype = le32_to_cpu(rde->inode.in->mode) >> 12;
+ vino.ino = le64_to_cpu(rde->inode.in->ino);
+ vino.snap = le64_to_cpu(rde->inode.in->snapid);
ino = ceph_vino_to_ino(vino);
- if (!dir_emit(ctx,
- rinfo->dir_dname[off - fi->offset],
- rinfo->dir_dname_len[off - fi->offset],
- ceph_translate_ino(inode->i_sb, ino), ftype)) {
+
+ if (!dir_emit(ctx, rde->name, rde->name_len,
+ ceph_translate_ino(inode->i_sb, ino), ftype)) {
dout("filldir stopping us...\n");
return 0;
}
- off++;
ctx->pos++;
}
- if (fi->last_name) {
+ if (fi->next_offset > 2) {
ceph_mdsc_put_request(fi->last_readdir);
fi->last_readdir = NULL;
goto more;
}
/* more frags? */
- if (!ceph_frag_is_rightmost(frag)) {
- frag = ceph_frag_next(frag);
- off = 0;
- ctx->pos = ceph_make_fpos(frag, off);
+ if (!ceph_frag_is_rightmost(fi->frag)) {
+ unsigned frag = ceph_frag_next(fi->frag);
+ if (is_hash_order(ctx->pos)) {
+ loff_t new_pos = ceph_make_fpos(ceph_frag_value(frag),
+ fi->next_offset, true);
+ if (new_pos > ctx->pos)
+ ctx->pos = new_pos;
+ /* keep last_name */
+ } else {
+ ctx->pos = ceph_make_fpos(frag, fi->next_offset, false);
+ kfree(fi->last_name);
+ fi->last_name = NULL;
+ }
dout("readdir next frag is %x\n", frag);
goto more;
}
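
Both "search start position" loops in this patch, the one over the cached dentry pointers in __dcache_readdir() and the one over rinfo->dir_entries here, are the same lower-bound binary search: find the first element whose offset is not less than ctx->pos. A generic userspace sketch of that invariant:

    #include <assert.h>
    #include <stddef.h>

    /* return index of first element >= key, or n if none (lower bound) */
    static size_t lower_bound(const long long *offsets, size_t n, long long key)
    {
        size_t i = 0;
        while (n > 0) {
            size_t step = n >> 1;
            if (offsets[i + step] < key) {
                i += step + 1;
                n -= step + 1;
            } else {
                n = step;
            }
        }
        return i;
    }

    int main(void)
    {
        long long off[] = { 2, 5, 9, 9, 14 };
        assert(lower_bound(off, 5, 9) == 2);   /* first entry at or after 9 */
        assert(lower_bound(off, 5, 15) == 5);  /* past the end */
        return 0;
    }
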
@@ -467,7 +570,7 @@ more:
return 0;
}
-static void reset_readdir(struct ceph_file_info *fi, unsigned frag)
+static void reset_readdir(struct ceph_file_info *fi)
{
if (fi->last_readdir) {
ceph_mdsc_put_request(fi->last_readdir);
@@ -477,18 +580,38 @@ static void reset_readdir(struct ceph_file_info *fi, unsigned frag)
fi->last_name = NULL;
fi->dir_release_count = 0;
fi->readdir_cache_idx = -1;
- if (ceph_frag_is_leftmost(frag))
- fi->next_offset = 2; /* compensate for . and .. */
- else
- fi->next_offset = 0;
+ fi->next_offset = 2; /* compensate for . and .. */
fi->flags &= ~CEPH_F_ATEND;
}
+/*
+ * Discard buffered readdir content on seekdir(0), on a seek to a new
+ * frag, or on a seek prior to the current chunk.
+ */
+static bool need_reset_readdir(struct ceph_file_info *fi, loff_t new_pos)
+{
+ struct ceph_mds_reply_info_parsed *rinfo;
+ loff_t chunk_offset;
+ if (new_pos == 0)
+ return true;
+ if (is_hash_order(new_pos)) {
+ /* no need to reset last_name for a forward seek when
+  * dentries are sorted in hash order */
+ } else if (fi->frag != fpos_frag(new_pos)) {
+ return true;
+ }
+ rinfo = fi->last_readdir ? &fi->last_readdir->r_reply_info : NULL;
+ if (!rinfo || !rinfo->dir_nr)
+ return true;
+ chunk_offset = rinfo->dir_entries[0].offset;
+ return new_pos < chunk_offset ||
+ is_hash_order(new_pos) != is_hash_order(chunk_offset);
+}
+
static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
struct ceph_file_info *fi = file->private_data;
struct inode *inode = file->f_mapping->host;
- loff_t old_offset = ceph_make_fpos(fi->frag, fi->next_offset);
loff_t retval;
inode_lock(inode);
@@ -505,25 +628,22 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
}
if (offset >= 0) {
+ if (need_reset_readdir(fi, offset)) {
+ dout("dir_llseek dropping %p content\n", file);
+ reset_readdir(fi);
+ } else if (is_hash_order(offset) && offset > file->f_pos) {
+ /* for a hash offset, we don't know whether a forward seek
+  * stays within the same frag */
+ fi->dir_release_count = 0;
+ fi->readdir_cache_idx = -1;
+ }
+
if (offset != file->f_pos) {
file->f_pos = offset;
file->f_version = 0;
fi->flags &= ~CEPH_F_ATEND;
}
retval = offset;
-
- if (offset == 0 ||
- fpos_frag(offset) != fi->frag ||
- fpos_off(offset) < fi->offset) {
- /* discard buffered readdir content on seekdir(0), or
- * seek to new frag, or seek prior to current chunk */
- dout("dir_llseek dropping %p content\n", file);
- reset_readdir(fi, fpos_frag(offset));
- } else if (fpos_cmp(offset, old_offset) > 0) {
- /* reset dir_release_count if we did a forward seek */
- fi->dir_release_count = 0;
- fi->readdir_cache_idx = -1;
- }
}
out:
inode_unlock(inode);
@@ -591,7 +711,7 @@ struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
return dentry;
}
-static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
+static bool is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
return ceph_ino(inode) == CEPH_INO_ROOT &&
strncmp(dentry->d_name.name, ".ceph", 5) == 0;
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 4f1dc7120916..a888df6f2d71 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -192,6 +192,59 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
}
/*
+ * try to renew caps after the session got killed.
+ */
+int ceph_renew_caps(struct inode *inode)
+{
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_mds_request *req;
+ int err, flags, wanted;
+
+ spin_lock(&ci->i_ceph_lock);
+ wanted = __ceph_caps_file_wanted(ci);
+ if (__ceph_is_any_real_caps(ci) &&
+ ((wanted & CEPH_CAP_ANY_WR) != 0 || ci->i_auth_cap)) {
+ int issued = __ceph_caps_issued(ci, NULL);
+ spin_unlock(&ci->i_ceph_lock);
+ dout("renew caps %p want %s issued %s updating mds_wanted\n",
+ inode, ceph_cap_string(wanted), ceph_cap_string(issued));
+ ceph_check_caps(ci, 0, NULL);
+ return 0;
+ }
+ spin_unlock(&ci->i_ceph_lock);
+
+ flags = 0;
+ if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
+ flags = O_RDWR;
+ else if (wanted & CEPH_CAP_FILE_RD)
+ flags = O_RDONLY;
+ else if (wanted & CEPH_CAP_FILE_WR)
+ flags = O_WRONLY;
+#ifdef O_LAZY
+ if (wanted & CEPH_CAP_FILE_LAZYIO)
+ flags |= O_LAZY;
+#endif
+
+ req = prepare_open_request(inode->i_sb, flags, 0);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto out;
+ }
+
+ req->r_inode = inode;
+ ihold(inode);
+ req->r_num_caps = 1;
+ req->r_fmode = -1;
+
+ err = ceph_mdsc_do_request(mdsc, NULL, req);
+ ceph_mdsc_put_request(req);
+out:
+ dout("renew caps %p open result=%d\n", inode, err);
+ return err < 0 ? err : 0;
+}
+
+/*
* If we already have the requisite capabilities, we can satisfy
* the open request locally (no need to request new caps from the
* MDS). We do, however, need to inform the MDS (asynchronously)
@@ -616,8 +669,7 @@ static void ceph_aio_complete(struct inode *inode,
kfree(aio_req);
}
-static void ceph_aio_complete_req(struct ceph_osd_request *req,
- struct ceph_msg *msg)
+static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
int rc = req->r_result;
struct inode *inode = req->r_inode;
@@ -714,14 +766,21 @@ static void ceph_aio_retry_work(struct work_struct *work)
req->r_flags = CEPH_OSD_FLAG_ORDERSNAP |
CEPH_OSD_FLAG_ONDISK |
CEPH_OSD_FLAG_WRITE;
- req->r_base_oloc = orig_req->r_base_oloc;
- req->r_base_oid = orig_req->r_base_oid;
+ ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
+ ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);
+
+ ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
+ if (ret) {
+ ceph_osdc_put_request(req);
+ req = orig_req;
+ goto out;
+ }
req->r_ops[0] = orig_req->r_ops[0];
osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
- ceph_osdc_build_request(req, req->r_ops[0].extent.offset,
- snapc, CEPH_NOSNAP, &aio_req->mtime);
+ req->r_mtime = aio_req->mtime;
+ req->r_data_offset = req->r_ops[0].extent.offset;
ceph_osdc_put_request(orig_req);
@@ -733,7 +792,7 @@ static void ceph_aio_retry_work(struct work_struct *work)
out:
if (ret < 0) {
req->r_result = ret;
- ceph_aio_complete_req(req, NULL);
+ ceph_aio_complete_req(req);
}
ceph_put_snap_context(snapc);
@@ -764,6 +823,8 @@ static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
list_add_tail(&req->r_unsafe_item,
&ci->i_unsafe_writes);
spin_unlock(&ci->i_unsafe_lock);
+
+ complete_all(&req->r_completion);
} else {
spin_lock(&ci->i_unsafe_lock);
list_del_init(&req->r_unsafe_item);
@@ -875,14 +936,12 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
(pos+len) | (PAGE_SIZE - 1));
osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
+ req->r_mtime = mtime;
}
-
osd_req_op_extent_osd_data_pages(req, 0, pages, len, start,
false, false);
- ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);
-
if (aio_req) {
aio_req->total_len += len;
aio_req->num_reqs++;
@@ -956,7 +1015,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
req, false);
if (ret < 0) {
req->r_result = ret;
- ceph_aio_complete_req(req, NULL);
+ ceph_aio_complete_req(req);
}
}
return -EIOCBQUEUED;
@@ -1067,9 +1126,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
false, true);
- /* BUG_ON(vino.snap != CEPH_NOSNAP); */
- ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);
-
+ req->r_mtime = mtime;
ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
if (!ret)
ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
@@ -1524,9 +1581,7 @@ static int ceph_zero_partial_object(struct inode *inode,
goto out;
}
- ceph_osdc_build_request(req, offset, NULL, ceph_vino(inode).snap,
- &inode->i_mtime);
-
+ req->r_mtime = inode->i_mtime;
ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
if (!ret) {
ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index e669cfa9d793..f059b5997072 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -11,6 +11,7 @@
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/random.h>
+#include <linux/sort.h>
#include "super.h"
#include "mds_client.h"
@@ -254,6 +255,9 @@ static int ceph_fill_dirfrag(struct inode *inode,
diri_auth = ci->i_auth_cap->mds;
spin_unlock(&ci->i_ceph_lock);
+ if (mds == -1) /* CDIR_AUTH_PARENT */
+ mds = diri_auth;
+
mutex_lock(&ci->i_fragtree_mutex);
if (ndist == 0 && mds == diri_auth) {
/* no delegation info needed. */
@@ -300,20 +304,38 @@ out:
return err;
}
+static int frag_tree_split_cmp(const void *l, const void *r)
+{
+ struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split *)l;
+ struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split *)r;
+ return ceph_frag_compare(le32_to_cpu(ls->frag), le32_to_cpu(rs->frag));
+}
+
+static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
+{
+ if (!frag)
+ return f == ceph_frag_make(0, 0);
+ if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
+ return false;
+ return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
+}
+
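
is_frag_child() leans on the frag encoding: the top 8 bits of a frag hold its split depth, the low 24 bits its value. A standalone sketch of the helpers it assumes (simplified mirrors of ceph_frag_make()/ceph_frag_contains_value(); not the kernel definitions):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t frag_bits(uint32_t f)  { return f >> 24; }
    static uint32_t frag_value(uint32_t f) { return f & 0xffffffu; }
    static uint32_t frag_mask(uint32_t f)
    {
        return (0xffffffu << (24 - frag_bits(f))) & 0xffffffu;
    }
    static uint32_t frag_make(uint32_t bits, uint32_t value)
    {
        return (bits << 24) | (value & (0xffffffu << (24 - bits)) & 0xffffffu);
    }
    static bool frag_contains_value(uint32_t f, uint32_t v)
    {
        return (v & frag_mask(f)) == frag_value(f);
    }

    int main(void)
    {
        uint32_t parent = frag_make(1, 0);        /* left half of the hash space */
        uint32_t child  = frag_make(2, 0x400000); /* its right child */

        /* the is_frag_child() test from the patch, replayed here */
        assert(frag_bits(child) == frag_bits(parent) + 1);
        assert(frag_contains_value(parent, frag_value(child)));
        assert(!frag_contains_value(parent, 0x800000)); /* right half: not a child */
        return 0;
    }
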
static int ceph_fill_fragtree(struct inode *inode,
struct ceph_frag_tree_head *fragtree,
struct ceph_mds_reply_dirfrag *dirinfo)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_inode_frag *frag;
+ struct ceph_inode_frag *frag, *prev_frag = NULL;
struct rb_node *rb_node;
- int i;
- u32 id, nsplits;
+ unsigned i, split_by, nsplits;
+ u32 id;
bool update = false;
mutex_lock(&ci->i_fragtree_mutex);
nsplits = le32_to_cpu(fragtree->nsplits);
- if (nsplits) {
+ if (nsplits != ci->i_fragtree_nsplits) {
+ update = true;
+ } else if (nsplits) {
i = prandom_u32() % nsplits;
id = le32_to_cpu(fragtree->splits[i].frag);
if (!__ceph_find_frag(ci, id))
@@ -332,10 +354,22 @@ static int ceph_fill_fragtree(struct inode *inode,
if (!update)
goto out_unlock;
+ if (nsplits > 1) {
+ sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
+ frag_tree_split_cmp, NULL);
+ }
+
dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
rb_node = rb_first(&ci->i_fragtree);
for (i = 0; i < nsplits; i++) {
id = le32_to_cpu(fragtree->splits[i].frag);
+ split_by = le32_to_cpu(fragtree->splits[i].by);
+ if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
+ pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
+ "frag %x split by %d\n", ceph_vinop(inode),
+ i, nsplits, id, split_by);
+ continue;
+ }
frag = NULL;
while (rb_node) {
frag = rb_entry(rb_node, struct ceph_inode_frag, node);
@@ -347,8 +381,14 @@ static int ceph_fill_fragtree(struct inode *inode,
break;
}
rb_node = rb_next(rb_node);
- rb_erase(&frag->node, &ci->i_fragtree);
- kfree(frag);
+ /* delete stale split/leaf node */
+ if (frag->split_by > 0 ||
+ !is_frag_child(frag->frag, prev_frag)) {
+ rb_erase(&frag->node, &ci->i_fragtree);
+ if (frag->split_by > 0)
+ ci->i_fragtree_nsplits--;
+ kfree(frag);
+ }
frag = NULL;
}
if (!frag) {
@@ -356,14 +396,23 @@ static int ceph_fill_fragtree(struct inode *inode,
if (IS_ERR(frag))
continue;
}
- frag->split_by = le32_to_cpu(fragtree->splits[i].by);
+ if (frag->split_by == 0)
+ ci->i_fragtree_nsplits++;
+ frag->split_by = split_by;
dout(" frag %x split by %d\n", frag->frag, frag->split_by);
+ prev_frag = frag;
}
while (rb_node) {
frag = rb_entry(rb_node, struct ceph_inode_frag, node);
rb_node = rb_next(rb_node);
- rb_erase(&frag->node, &ci->i_fragtree);
- kfree(frag);
+ /* delete stale split/leaf node */
+ if (frag->split_by > 0 ||
+ !is_frag_child(frag->frag, prev_frag)) {
+ rb_erase(&frag->node, &ci->i_fragtree);
+ if (frag->split_by > 0)
+ ci->i_fragtree_nsplits--;
+ kfree(frag);
+ }
}
out_unlock:
mutex_unlock(&ci->i_fragtree_mutex);
@@ -513,6 +562,7 @@ void ceph_destroy_inode(struct inode *inode)
rb_erase(n, &ci->i_fragtree);
kfree(frag);
}
+ ci->i_fragtree_nsplits = 0;
__ceph_destroy_xattrs(ci);
if (ci->i_xattrs.blob)
@@ -533,6 +583,11 @@ int ceph_drop_inode(struct inode *inode)
return 1;
}
+static inline blkcnt_t calc_inode_blocks(u64 size)
+{
+ return (size + (1<<9) - 1) >> 9;
+}
+
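
calc_inode_blocks() centralizes the 512-byte round-up that was previously open-coded in several places; a quick behavior check:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t calc_inode_blocks(uint64_t size)
    {
        return (size + (1 << 9) - 1) >> 9;   /* round up to 512-byte units */
    }

    int main(void)
    {
        assert(calc_inode_blocks(0) == 0);
        assert(calc_inode_blocks(1) == 1);
        assert(calc_inode_blocks(512) == 1);
        assert(calc_inode_blocks(513) == 2);
        return 0;
    }
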
/*
* Helpers to fill in size, ctime, mtime, and atime. We have to be
* careful because either the client or MDS may have more up to date
@@ -555,7 +610,7 @@ int ceph_fill_file_size(struct inode *inode, int issued,
size = 0;
}
i_size_write(inode, size);
- inode->i_blocks = (size + (1<<9) - 1) >> 9;
+ inode->i_blocks = calc_inode_blocks(size);
ci->i_reported_size = size;
if (truncate_seq != ci->i_truncate_seq) {
dout("truncate_seq %u -> %u\n",
@@ -814,9 +869,13 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
spin_unlock(&ci->i_ceph_lock);
- err = -EINVAL;
- if (WARN_ON(symlen != i_size_read(inode)))
- goto out;
+ if (symlen != i_size_read(inode)) {
+ pr_err("fill_inode %llx.%llx BAD symlink "
+ "size %lld\n", ceph_vinop(inode),
+ i_size_read(inode));
+ i_size_write(inode, symlen);
+ inode->i_blocks = calc_inode_blocks(symlen);
+ }
err = -ENOMEM;
sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
@@ -1309,12 +1368,13 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
int i, err = 0;
for (i = 0; i < rinfo->dir_nr; i++) {
+ struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
struct ceph_vino vino;
struct inode *in;
int rc;
- vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
- vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);
+ vino.ino = le64_to_cpu(rde->inode.in->ino);
+ vino.snap = le64_to_cpu(rde->inode.in->snapid);
in = ceph_get_inode(req->r_dentry->d_sb, vino);
if (IS_ERR(in)) {
@@ -1322,14 +1382,14 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
dout("new_inode badness got %d\n", err);
continue;
}
- rc = fill_inode(in, NULL, &rinfo->dir_in[i], NULL, session,
+ rc = fill_inode(in, NULL, &rde->inode, NULL, session,
req->r_request_started, -1,
&req->r_caps_reservation);
if (rc < 0) {
pr_err("fill_inode badness on %p got %d\n", in, rc);
err = rc;
- continue;
}
+ iput(in);
}
return err;
@@ -1387,6 +1447,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
struct ceph_mds_session *session)
{
struct dentry *parent = req->r_dentry;
+ struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
struct qstr dname;
struct dentry *dn;
@@ -1394,22 +1455,27 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
int err = 0, skipped = 0, ret, i;
struct inode *snapdir = NULL;
struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
- struct ceph_dentry_info *di;
u32 frag = le32_to_cpu(rhead->args.readdir.frag);
+ u32 last_hash = 0;
+ u32 fpos_offset;
struct ceph_readdir_cache_control cache_ctl = {};
if (req->r_aborted)
return readdir_prepopulate_inodes_only(req, session);
+ if (rinfo->hash_order && req->r_path2) {
+ last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
+ req->r_path2, strlen(req->r_path2));
+ last_hash = ceph_frag_value(last_hash);
+ }
+
if (rinfo->dir_dir &&
le32_to_cpu(rinfo->dir_dir->frag) != frag) {
dout("readdir_prepopulate got new frag %x -> %x\n",
frag, le32_to_cpu(rinfo->dir_dir->frag));
frag = le32_to_cpu(rinfo->dir_dir->frag);
- if (ceph_frag_is_leftmost(frag))
+ if (!rinfo->hash_order)
req->r_readdir_offset = 2;
- else
- req->r_readdir_offset = 0;
}
if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
@@ -1427,24 +1493,37 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2) {
/* note dir version at start of readdir so we can tell
* if any dentries get dropped */
- struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
req->r_dir_release_cnt = atomic64_read(&ci->i_release_count);
req->r_dir_ordered_cnt = atomic64_read(&ci->i_ordered_count);
req->r_readdir_cache_idx = 0;
}
cache_ctl.index = req->r_readdir_cache_idx;
+ fpos_offset = req->r_readdir_offset;
/* FIXME: release caps/leases if error occurs */
for (i = 0; i < rinfo->dir_nr; i++) {
+ struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
struct ceph_vino vino;
- dname.name = rinfo->dir_dname[i];
- dname.len = rinfo->dir_dname_len[i];
+ dname.name = rde->name;
+ dname.len = rde->name_len;
dname.hash = full_name_hash(dname.name, dname.len);
- vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
- vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);
+ vino.ino = le64_to_cpu(rde->inode.in->ino);
+ vino.snap = le64_to_cpu(rde->inode.in->snapid);
+
+ if (rinfo->hash_order) {
+ u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
+ rde->name, rde->name_len);
+ hash = ceph_frag_value(hash);
+ if (hash != last_hash)
+ fpos_offset = 2;
+ last_hash = hash;
+ rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
+ } else {
+ rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
+ }
retry_lookup:
dn = d_lookup(parent, &dname);
@@ -1490,7 +1569,7 @@ retry_lookup:
}
}
- ret = fill_inode(in, NULL, &rinfo->dir_in[i], NULL, session,
+ ret = fill_inode(in, NULL, &rde->inode, NULL, session,
req->r_request_started, -1,
&req->r_caps_reservation);
if (ret < 0) {
@@ -1523,11 +1602,9 @@ retry_lookup:
dn = realdn;
}
- di = dn->d_fsdata;
- di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);
+ ceph_dentry(dn)->offset = rde->offset;
- update_dentry_lease(dn, rinfo->dir_dlease[i],
- req->r_session,
+ update_dentry_lease(dn, rde->lease, req->r_session,
req->r_request_started);
if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
@@ -1562,7 +1639,7 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
spin_lock(&ci->i_ceph_lock);
dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
i_size_write(inode, size);
- inode->i_blocks = (size + (1 << 9) - 1) >> 9;
+ inode->i_blocks = calc_inode_blocks(size);
/* tell the MDS if we are approaching max_size */
if ((size << 1) >= ci->i_max_size &&
@@ -1624,10 +1701,21 @@ static void ceph_invalidate_work(struct work_struct *work)
struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
i_pg_inv_work);
struct inode *inode = &ci->vfs_inode;
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
u32 orig_gen;
int check = 0;
mutex_lock(&ci->i_truncate_mutex);
+
+ if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
+ pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n",
+ inode, ceph_ino(inode));
+ mapping_set_error(inode->i_mapping, -EIO);
+ truncate_pagecache(inode, 0);
+ mutex_unlock(&ci->i_truncate_mutex);
+ goto out;
+ }
+
spin_lock(&ci->i_ceph_lock);
dout("invalidate_pages %p gen %d revoking %d\n", inode,
ci->i_rdcache_gen, ci->i_rdcache_revoking);
@@ -1641,7 +1729,9 @@ static void ceph_invalidate_work(struct work_struct *work)
orig_gen = ci->i_rdcache_gen;
spin_unlock(&ci->i_ceph_lock);
- truncate_pagecache(inode, 0);
+ if (invalidate_inode_pages2(inode->i_mapping) < 0) {
+ pr_err("invalidate_pages %p fails\n", inode);
+ }
spin_lock(&ci->i_ceph_lock);
if (orig_gen == ci->i_rdcache_gen &&
@@ -1920,8 +2010,7 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
if ((issued & CEPH_CAP_FILE_EXCL) &&
attr->ia_size > inode->i_size) {
i_size_write(inode, attr->ia_size);
- inode->i_blocks =
- (attr->ia_size + (1 << 9) - 1) >> 9;
+ inode->i_blocks = calc_inode_blocks(attr->ia_size);
inode->i_ctime = attr->ia_ctime;
ci->i_reported_size = attr->ia_size;
dirtied |= CEPH_CAP_FILE_EXCL;
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index f851d8d70158..be6b1657b1af 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -193,12 +193,12 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
if (copy_from_user(&dl, arg, sizeof(dl)))
return -EFAULT;
- down_read(&osdc->map_sem);
+ down_read(&osdc->lock);
r = ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, len,
&dl.object_no, &dl.object_offset,
&olen);
if (r < 0) {
- up_read(&osdc->map_sem);
+ up_read(&osdc->lock);
return -EIO;
}
dl.file_offset -= dl.object_offset;
@@ -213,15 +213,15 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
ceph_ino(inode), dl.object_no);
oloc.pool = ceph_file_layout_pg_pool(ci->i_layout);
- ceph_oid_set_name(&oid, dl.object_name);
+ ceph_oid_printf(&oid, "%s", dl.object_name);
- r = ceph_oloc_oid_to_pg(osdc->osdmap, &oloc, &oid, &pgid);
+ r = ceph_object_locator_to_pg(osdc->osdmap, &oid, &oloc, &pgid);
if (r < 0) {
- up_read(&osdc->map_sem);
+ up_read(&osdc->lock);
return r;
}
- dl.osd = ceph_calc_pg_primary(osdc->osdmap, pgid);
+ dl.osd = ceph_pg_to_acting_primary(osdc->osdmap, &pgid);
if (dl.osd >= 0) {
struct ceph_entity_addr *a =
ceph_osd_addr(osdc->osdmap, dl.osd);
@@ -230,7 +230,7 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
} else {
memset(&dl.osd_addr, 0, sizeof(dl.osd_addr));
}
- up_read(&osdc->map_sem);
+ up_read(&osdc->lock);
/* send result back to user */
if (copy_to_user(arg, &dl, sizeof(dl)))
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 85b8517f17a0..2103b823bec0 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -181,17 +181,18 @@ static int parse_reply_info_dir(void **p, void *end,
ceph_decode_need(p, end, sizeof(num) + 2, bad);
num = ceph_decode_32(p);
- info->dir_end = ceph_decode_8(p);
- info->dir_complete = ceph_decode_8(p);
+ {
+ u16 flags = ceph_decode_16(p);
+ info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
+ info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
+ info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
+ }
if (num == 0)
goto done;
- BUG_ON(!info->dir_in);
- info->dir_dname = (void *)(info->dir_in + num);
- info->dir_dname_len = (void *)(info->dir_dname + num);
- info->dir_dlease = (void *)(info->dir_dname_len + num);
- if ((unsigned long)(info->dir_dlease + num) >
- (unsigned long)info->dir_in + info->dir_buf_size) {
+ BUG_ON(!info->dir_entries);
+ if ((unsigned long)(info->dir_entries + num) >
+ (unsigned long)info->dir_entries + info->dir_buf_size) {
pr_err("dir contents are larger than expected\n");
WARN_ON(1);
goto bad;
@@ -199,21 +200,23 @@ static int parse_reply_info_dir(void **p, void *end,
info->dir_nr = num;
while (num) {
+ struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
/* dentry */
ceph_decode_need(p, end, sizeof(u32)*2, bad);
- info->dir_dname_len[i] = ceph_decode_32(p);
- ceph_decode_need(p, end, info->dir_dname_len[i], bad);
- info->dir_dname[i] = *p;
- *p += info->dir_dname_len[i];
- dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i],
- info->dir_dname[i]);
- info->dir_dlease[i] = *p;
+ rde->name_len = ceph_decode_32(p);
+ ceph_decode_need(p, end, rde->name_len, bad);
+ rde->name = *p;
+ *p += rde->name_len;
+ dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);
+ rde->lease = *p;
*p += sizeof(struct ceph_mds_reply_lease);
/* inode */
- err = parse_reply_info_in(p, end, &info->dir_in[i], features);
+ err = parse_reply_info_in(p, end, &rde->inode, features);
if (err < 0)
goto out_bad;
+ /* ceph_readdir_prepopulate() will update it */
+ rde->offset = 0;
i++;
num--;
}
@@ -345,9 +348,9 @@ out_bad:
static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
- if (!info->dir_in)
+ if (!info->dir_entries)
return;
- free_pages((unsigned long)info->dir_in, get_order(info->dir_buf_size));
+ free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
}
@@ -567,51 +570,23 @@ void ceph_mdsc_release_request(struct kref *kref)
kfree(req);
}
+DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)
+
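
DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node) is the libceph helper macro that stamps out lookup_request()/insert_request()/erase_request() keyed on r_tid, replacing the open-coded walkers deleted below. The key-ordered lookup/insert shape is roughly the following userspace sketch, with a plain unbalanced BST standing in for the kernel rb-tree:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct node { uint64_t tid; struct node *left, *right; };

    static struct node *lookup_request(struct node *root, uint64_t tid)
    {
        while (root) {
            if (tid < root->tid)
                root = root->left;
            else if (tid > root->tid)
                root = root->right;
            else
                return root;
        }
        return NULL;
    }

    static void insert_request(struct node **link, struct node *new)
    {
        while (*link) {
            struct node *n = *link;
            link = new->tid < n->tid ? &n->left : &n->right;
            assert(new->tid != n->tid);      /* duplicate tids are a bug */
        }
        new->left = new->right = NULL;
        *link = new;
    }

    int main(void)
    {
        struct node a = { .tid = 10 }, b = { .tid = 7 }, *root = NULL;
        insert_request(&root, &a);
        insert_request(&root, &b);
        assert(lookup_request(root, 7) == &b);
        assert(lookup_request(root, 42) == NULL);
        return 0;
    }
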
/*
* lookup session, bump ref if found.
*
* called under mdsc->mutex.
*/
-static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc,
- u64 tid)
+static struct ceph_mds_request *
+lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
{
struct ceph_mds_request *req;
- struct rb_node *n = mdsc->request_tree.rb_node;
-
- while (n) {
- req = rb_entry(n, struct ceph_mds_request, r_node);
- if (tid < req->r_tid)
- n = n->rb_left;
- else if (tid > req->r_tid)
- n = n->rb_right;
- else {
- ceph_mdsc_get_request(req);
- return req;
- }
- }
- return NULL;
-}
-static void __insert_request(struct ceph_mds_client *mdsc,
- struct ceph_mds_request *new)
-{
- struct rb_node **p = &mdsc->request_tree.rb_node;
- struct rb_node *parent = NULL;
- struct ceph_mds_request *req = NULL;
+ req = lookup_request(&mdsc->request_tree, tid);
+ if (req)
+ ceph_mdsc_get_request(req);
- while (*p) {
- parent = *p;
- req = rb_entry(parent, struct ceph_mds_request, r_node);
- if (new->r_tid < req->r_tid)
- p = &(*p)->rb_left;
- else if (new->r_tid > req->r_tid)
- p = &(*p)->rb_right;
- else
- BUG();
- }
-
- rb_link_node(&new->r_node, parent, p);
- rb_insert_color(&new->r_node, &mdsc->request_tree);
+ return req;
}
/*
@@ -630,7 +605,7 @@ static void __register_request(struct ceph_mds_client *mdsc,
req->r_num_caps);
dout("__register_request %p tid %lld\n", req, req->r_tid);
ceph_mdsc_get_request(req);
- __insert_request(mdsc, req);
+ insert_request(&mdsc->request_tree, req);
req->r_uid = current_fsuid();
req->r_gid = current_fsgid();
@@ -663,8 +638,7 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
}
}
- rb_erase(&req->r_node, &mdsc->request_tree);
- RB_CLEAR_NODE(&req->r_node);
+ erase_request(&mdsc->request_tree, req);
if (req->r_unsafe_dir && req->r_got_unsafe) {
struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
@@ -868,12 +842,14 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
int metadata_bytes = 0;
int metadata_key_count = 0;
struct ceph_options *opt = mdsc->fsc->client->options;
+ struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
void *p;
const char* metadata[][2] = {
{"hostname", utsname()->nodename},
{"kernel_version", utsname()->release},
- {"entity_id", opt->name ? opt->name : ""},
+ {"entity_id", opt->name ? : ""},
+ {"root", fsopt->server_path ? : "/"},
{NULL, NULL}
};
@@ -1149,9 +1125,11 @@ out:
static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
void *arg)
{
+ struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
struct ceph_inode_info *ci = ceph_inode(inode);
LIST_HEAD(to_remove);
- int drop = 0;
+ bool drop = false;
+ bool invalidate = false;
dout("removing cap %p, ci is %p, inode is %p\n",
cap, ci, &ci->vfs_inode);
@@ -1159,8 +1137,13 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
__ceph_remove_cap(cap, false);
if (!ci->i_auth_cap) {
struct ceph_cap_flush *cf;
- struct ceph_mds_client *mdsc =
- ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+
+ ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;
+
+ if (ci->i_wrbuffer_ref > 0 &&
+ ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
+ invalidate = true;
while (true) {
struct rb_node *n = rb_first(&ci->i_cap_flush_tree);
@@ -1183,7 +1166,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
inode, ceph_ino(inode));
ci->i_dirty_caps = 0;
list_del_init(&ci->i_dirty_item);
- drop = 1;
+ drop = true;
}
if (!list_empty(&ci->i_flushing_item)) {
pr_warn_ratelimited(
@@ -1193,7 +1176,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
ci->i_flushing_caps = 0;
list_del_init(&ci->i_flushing_item);
mdsc->num_cap_flushing--;
- drop = 1;
+ drop = true;
}
spin_unlock(&mdsc->cap_dirty_lock);
@@ -1210,7 +1193,11 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
list_del(&cf->list);
ceph_free_cap_flush(cf);
}
- while (drop--)
+
+ wake_up_all(&ci->i_cap_wq);
+ if (invalidate)
+ ceph_queue_invalidate(inode);
+ if (drop)
iput(inode);
return 0;
}
@@ -1220,12 +1207,13 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
*/
static void remove_session_caps(struct ceph_mds_session *session)
{
+ struct ceph_fs_client *fsc = session->s_mdsc->fsc;
+ struct super_block *sb = fsc->sb;
dout("remove_session_caps on %p\n", session);
- iterate_session_caps(session, remove_session_caps_cb, NULL);
+ iterate_session_caps(session, remove_session_caps_cb, fsc);
spin_lock(&session->s_cap_lock);
if (session->s_nr_caps > 0) {
- struct super_block *sb = session->s_mdsc->fsc->sb;
struct inode *inode;
struct ceph_cap *cap, *prev = NULL;
struct ceph_vino vino;
@@ -1270,13 +1258,13 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
{
struct ceph_inode_info *ci = ceph_inode(inode);
- wake_up_all(&ci->i_cap_wq);
if (arg) {
spin_lock(&ci->i_ceph_lock);
ci->i_wanted_max_size = 0;
ci->i_requested_max_size = 0;
spin_unlock(&ci->i_ceph_lock);
}
+ wake_up_all(&ci->i_cap_wq);
return 0;
}
@@ -1671,8 +1659,7 @@ int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
struct ceph_inode_info *ci = ceph_inode(dir);
struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
- size_t size = sizeof(*rinfo->dir_in) + sizeof(*rinfo->dir_dname_len) +
- sizeof(*rinfo->dir_dname) + sizeof(*rinfo->dir_dlease);
+ size_t size = sizeof(struct ceph_mds_reply_dir_entry);
int order, num_entries;
spin_lock(&ci->i_ceph_lock);
@@ -1683,14 +1670,14 @@ int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
order = get_order(size * num_entries);
while (order >= 0) {
- rinfo->dir_in = (void*)__get_free_pages(GFP_KERNEL |
- __GFP_NOWARN,
- order);
- if (rinfo->dir_in)
+ rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
+ __GFP_NOWARN,
+ order);
+ if (rinfo->dir_entries)
break;
order--;
}
- if (!rinfo->dir_in)
+ if (!rinfo->dir_entries)
return -ENOMEM;
num_entries = (PAGE_SIZE << order) / size;
@@ -1722,6 +1709,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
INIT_LIST_HEAD(&req->r_unsafe_target_item);
req->r_fmode = -1;
kref_init(&req->r_kref);
+ RB_CLEAR_NODE(&req->r_node);
INIT_LIST_HEAD(&req->r_wait);
init_completion(&req->r_completion);
init_completion(&req->r_safe_completion);
@@ -2414,7 +2402,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
/* get request, session */
tid = le64_to_cpu(msg->hdr.tid);
mutex_lock(&mdsc->mutex);
- req = __lookup_request(mdsc, tid);
+ req = lookup_get_request(mdsc, tid);
if (!req) {
dout("handle_reply on unknown tid %llu\n", tid);
mutex_unlock(&mdsc->mutex);
@@ -2604,7 +2592,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
fwd_seq = ceph_decode_32(&p);
mutex_lock(&mdsc->mutex);
- req = __lookup_request(mdsc, tid);
+ req = lookup_get_request(mdsc, tid);
if (!req) {
dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
goto out; /* dup reply? */
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index ee69a537dba5..e7d38aac7109 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -47,6 +47,14 @@ struct ceph_mds_reply_info_in {
u32 pool_ns_len;
};
+struct ceph_mds_reply_dir_entry {
+ char *name;
+ u32 name_len;
+ struct ceph_mds_reply_lease *lease;
+ struct ceph_mds_reply_info_in inode;
+ loff_t offset;
+};
+
/*
* parsed info about an mds reply, including information about
* either: 1) the target inode and/or its parent directory and dentry,
@@ -73,11 +81,10 @@ struct ceph_mds_reply_info_parsed {
struct ceph_mds_reply_dirfrag *dir_dir;
size_t dir_buf_size;
int dir_nr;
- char **dir_dname;
- u32 *dir_dname_len;
- struct ceph_mds_reply_lease **dir_dlease;
- struct ceph_mds_reply_info_in *dir_in;
- u8 dir_complete, dir_end;
+ bool dir_complete;
+ bool dir_end;
+ bool hash_order;
+ struct ceph_mds_reply_dir_entry *dir_entries;
};
/* for create results */
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 261531e55e9d..8c3591a7fbae 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -54,16 +54,21 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
const void *start = *p;
int i, j, n;
int err = -EINVAL;
- u16 version;
+ u8 mdsmap_v, mdsmap_cv;
m = kzalloc(sizeof(*m), GFP_NOFS);
if (m == NULL)
return ERR_PTR(-ENOMEM);
- ceph_decode_16_safe(p, end, version, bad);
- if (version > 3) {
- pr_warn("got mdsmap version %d > 3, failing", version);
- goto bad;
+ ceph_decode_need(p, end, 1 + 1, bad);
+ mdsmap_v = ceph_decode_8(p);
+ mdsmap_cv = ceph_decode_8(p);
+ if (mdsmap_v >= 4) {
+ u32 mdsmap_len;
+ ceph_decode_32_safe(p, end, mdsmap_len, bad);
+ if (end < *p + mdsmap_len)
+ goto bad;
+ end = *p + mdsmap_len;
}
ceph_decode_need(p, end, 8*sizeof(u32) + sizeof(u64), bad);
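
The v/cv/len envelope decoded above is the new-style versioned ceph encoding: a struct version, a compat version, and a byte length that lets a client skip trailing fields it does not understand by jumping to the recorded end. A userspace sketch of the same bounded-decode pattern (hypothetical byte layout; decode_32 is little-endian like ceph_decode_32(), and an LE host is assumed for brevity):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    static uint32_t decode_32(const uint8_t **p)
    {
        uint32_t v;
        memcpy(&v, *p, sizeof(v));   /* wire format is little-endian */
        *p += sizeof(v);
        return v;
    }

    int main(void)
    {
        /* v=4, cv=1, len=8, then 8 payload bytes we only partially parse */
        const uint8_t buf[] = { 4, 1, 8, 0, 0, 0,
                                0xaa, 0xbb, 0xcc, 0xdd, 1, 2, 3, 4 };
        const uint8_t *p = buf, *end = buf + sizeof(buf), *blob_end = NULL;

        uint8_t v = *p++, cv = *p++;
        (void)cv;
        if (v >= 4) {
            uint32_t len = decode_32(&p);
            assert(p + len <= end);          /* bound the blob */
            blob_end = p + len;
        }
        p += 4;                              /* consume the fields we know */
        if (blob_end && p != blob_end) {
            assert(p < blob_end);            /* overrun would be corruption */
            p = blob_end;                    /* skip unknown trailing fields */
        }
        assert(p == buf + sizeof(buf));
        return 0;
    }
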
@@ -87,16 +92,29 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
u32 namelen;
s32 mds, inc, state;
u64 state_seq;
- u8 infoversion;
+ u8 info_v;
+ void *info_end = NULL;
struct ceph_entity_addr addr;
u32 num_export_targets;
void *pexport_targets = NULL;
struct ceph_timespec laggy_since;
struct ceph_mds_info *info;
- ceph_decode_need(p, end, sizeof(u64)*2 + 1 + sizeof(u32), bad);
+ ceph_decode_need(p, end, sizeof(u64) + 1, bad);
global_id = ceph_decode_64(p);
- infoversion = ceph_decode_8(p);
+ info_v = ceph_decode_8(p);
+ if (info_v >= 4) {
+ u32 info_len;
+ u8 info_cv;
+ ceph_decode_need(p, end, 1 + sizeof(u32), bad);
+ info_cv = ceph_decode_8(p);
+ info_len = ceph_decode_32(p);
+ info_end = *p + info_len;
+ if (info_end > end)
+ goto bad;
+ }
+
+ ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
*p += sizeof(u64);
namelen = ceph_decode_32(p); /* skip mds name */
*p += namelen;
@@ -115,7 +133,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
*p += sizeof(u32);
ceph_decode_32_safe(p, end, namelen, bad);
*p += namelen;
- if (infoversion >= 2) {
+ if (info_v >= 2) {
ceph_decode_32_safe(p, end, num_export_targets, bad);
pexport_targets = *p;
*p += num_export_targets * sizeof(u32);
@@ -123,6 +141,12 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
num_export_targets = 0;
}
+ if (info_end && *p != info_end) {
+ if (*p > info_end)
+ goto bad;
+ *p = info_end;
+ }
+
dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s\n",
i+1, n, global_id, mds, inc,
ceph_pr_addr(&addr.in_addr),
@@ -163,6 +187,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
m->m_cas_pg_pool = ceph_decode_64(p);
/* ok, we don't care about the rest. */
+ *p = end;
dout("mdsmap_decode success epoch %u\n", m->m_epoch);
return m;
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index f12d5e2955c2..91e02481ce06 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -108,6 +108,7 @@ static int ceph_sync_fs(struct super_block *sb, int wait)
* mount options
*/
enum {
+ Opt_mds_namespace,
Opt_wsize,
Opt_rsize,
Opt_rasize,
@@ -143,6 +144,7 @@ enum {
};
static match_table_t fsopt_tokens = {
+ {Opt_mds_namespace, "mds_namespace=%d"},
{Opt_wsize, "wsize=%d"},
{Opt_rsize, "rsize=%d"},
{Opt_rasize, "rasize=%d"},
@@ -212,6 +214,9 @@ static int parse_fsopt_token(char *c, void *private)
break;
/* misc */
+ case Opt_mds_namespace:
+ fsopt->mds_namespace = intval;
+ break;
case Opt_wsize:
fsopt->wsize = intval;
break;
@@ -297,6 +302,7 @@ static void destroy_mount_options(struct ceph_mount_options *args)
{
dout("destroy_mount_options %p\n", args);
kfree(args->snapdir_name);
+ kfree(args->server_path);
kfree(args);
}
@@ -328,14 +334,17 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
if (ret)
return ret;
+ ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
+ if (ret)
+ return ret;
+
return ceph_compare_options(new_opt, fsc->client);
}
static int parse_mount_options(struct ceph_mount_options **pfsopt,
struct ceph_options **popt,
int flags, char *options,
- const char *dev_name,
- const char **path)
+ const char *dev_name)
{
struct ceph_mount_options *fsopt;
const char *dev_name_end;
@@ -367,6 +376,7 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt,
fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
fsopt->congestion_kb = default_congestion_kb();
+ fsopt->mds_namespace = CEPH_FS_CLUSTER_ID_NONE;
/*
* Distinguish the server list from the path in "dev_name".
@@ -380,12 +390,13 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt,
*/
dev_name_end = strchr(dev_name, '/');
if (dev_name_end) {
- /* skip over leading '/' for path */
- *path = dev_name_end + 1;
+ fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
+ if (!fsopt->server_path) {
+ err = -ENOMEM;
+ goto out;
+ }
} else {
- /* path is empty */
dev_name_end = dev_name + strlen(dev_name);
- *path = dev_name_end;
}
err = -EINVAL;
dev_name_end--; /* back up to ':' separator */
@@ -395,7 +406,8 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt,
goto out;
}
dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
- dout("server path '%s'\n", *path);
+ if (fsopt->server_path)
+ dout("server path '%s'\n", fsopt->server_path);
*popt = ceph_parse_options(options, dev_name, dev_name_end,
parse_fsopt_token, (void *)fsopt);
@@ -457,6 +469,8 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
seq_puts(m, ",noacl");
#endif
+ if (fsopt->mds_namespace != CEPH_FS_CLUSTER_ID_NONE)
+ seq_printf(m, ",mds_namespace=%d", fsopt->mds_namespace);
if (fsopt->wsize)
seq_printf(m, ",wsize=%d", fsopt->wsize);
if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
@@ -511,9 +525,8 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
{
struct ceph_fs_client *fsc;
const u64 supported_features =
- CEPH_FEATURE_FLOCK |
- CEPH_FEATURE_DIRLAYOUTHASH |
- CEPH_FEATURE_MDS_INLINE_DATA;
+ CEPH_FEATURE_FLOCK | CEPH_FEATURE_DIRLAYOUTHASH |
+ CEPH_FEATURE_MDSENC | CEPH_FEATURE_MDS_INLINE_DATA;
const u64 required_features = 0;
int page_count;
size_t size;
@@ -530,6 +543,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
goto fail;
}
fsc->client->extra_mon_dispatch = extra_mon_dispatch;
+ fsc->client->monc.fs_cluster_id = fsopt->mds_namespace;
ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP, 0, true);
fsc->mount_options = fsopt;
@@ -785,8 +799,7 @@ out:
/*
* mount: join the ceph cluster, and open root directory.
*/
-static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
- const char *path)
+static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
{
int err;
unsigned long started = jiffies; /* note the start time */
@@ -815,11 +828,12 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
goto fail;
}
- if (path[0] == 0) {
+ if (!fsc->mount_options->server_path) {
root = fsc->sb->s_root;
dget(root);
} else {
- dout("mount opening base mountpoint\n");
+ const char *path = fsc->mount_options->server_path + 1;
+ dout("mount opening path %s\n", path);
root = open_root_dentry(fsc, path, started);
if (IS_ERR(root)) {
err = PTR_ERR(root);
@@ -935,7 +949,6 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
struct dentry *res;
int err;
int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
- const char *path = NULL;
struct ceph_mount_options *fsopt = NULL;
struct ceph_options *opt = NULL;
@@ -944,7 +957,7 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
#ifdef CONFIG_CEPH_FS_POSIX_ACL
flags |= MS_POSIXACL;
#endif
- err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path);
+ err = parse_mount_options(&fsopt, &opt, flags, data, dev_name);
if (err < 0) {
res = ERR_PTR(err);
goto out_final;
@@ -987,7 +1000,7 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
}
}
- res = ceph_real_mount(fsc, path);
+ res = ceph_real_mount(fsc);
if (IS_ERR(res))
goto out_splat;
dout("root %p inode %p ino %llx.%llx\n", res,
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 7b99eb756477..0130a8592191 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -62,6 +62,7 @@ struct ceph_mount_options {
int cap_release_safety;
int max_readdir; /* max readdir result (entires) */
int max_readdir_bytes; /* max readdir result (bytes) */
+ int mds_namespace;
/*
* everything above this point can be memcmp'd; everything below
@@ -69,6 +70,7 @@ struct ceph_mount_options {
*/
char *snapdir_name; /* default ".snap" */
+ char *server_path; /* default "/" */
};
struct ceph_fs_client {
@@ -295,6 +297,7 @@ struct ceph_inode_info {
u64 i_files, i_subdirs;
struct rb_root i_fragtree;
+ int i_fragtree_nsplits;
struct mutex i_fragtree_mutex;
struct ceph_inode_xattrs_info i_xattrs;
@@ -469,6 +472,7 @@ static inline struct inode *ceph_find_inode(struct super_block *sb,
#define CEPH_I_POOL_RD (1 << 5) /* can read from pool */
#define CEPH_I_POOL_WR (1 << 6) /* can write to pool */
#define CEPH_I_SEC_INITED (1 << 7) /* security initialized */
+#define CEPH_I_CAP_DROPPED (1 << 8) /* caps were forcibly dropped */
static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci,
long long release_count,
@@ -537,11 +541,6 @@ static inline struct ceph_dentry_info *ceph_dentry(struct dentry *dentry)
return (struct ceph_dentry_info *)dentry->d_fsdata;
}
-static inline loff_t ceph_make_fpos(unsigned frag, unsigned off)
-{
- return ((loff_t)frag << 32) | (loff_t)off;
-}
-
/*
* caps helpers
*/
@@ -632,7 +631,6 @@ struct ceph_file_info {
struct ceph_mds_request *last_readdir;
/* readdir: position within a frag */
- unsigned offset; /* offset of last chunk, adjusted for . and .. */
unsigned next_offset; /* offset of next chunk (last_name's + 1) */
char *last_name; /* last entry in previous chunk */
long long dir_release_count;
@@ -927,6 +925,7 @@ extern void ceph_pool_perm_destroy(struct ceph_mds_client* mdsc);
/* file.c */
extern const struct file_operations ceph_file_fops;
+extern int ceph_renew_caps(struct inode *inode);
extern int ceph_open(struct inode *inode, struct file *file);
extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned flags, umode_t mode,
@@ -942,6 +941,7 @@ extern const struct inode_operations ceph_snapdir_iops;
extern const struct dentry_operations ceph_dentry_ops, ceph_snap_dentry_ops,
ceph_snapdir_dentry_ops;
+extern loff_t ceph_make_fpos(unsigned high, unsigned off, bool hash_order);
extern int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry);
extern int ceph_handle_snapdir(struct ceph_mds_request *req,
struct dentry *dentry, int err);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 0d66722c6a52..4870b29df224 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -77,7 +77,7 @@ static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
char buf[128];
dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode);
- down_read(&osdc->map_sem);
+ down_read(&osdc->lock);
pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
if (pool_name) {
size_t len = strlen(pool_name);
@@ -109,7 +109,7 @@ static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
ret = -ERANGE;
}
}
- up_read(&osdc->map_sem);
+ up_read(&osdc->lock);
return ret;
}
@@ -143,13 +143,13 @@ static size_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
s64 pool = ceph_file_layout_pg_pool(ci->i_layout);
const char *pool_name;
- down_read(&osdc->map_sem);
+ down_read(&osdc->lock);
pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
if (pool_name)
ret = snprintf(val, size, "%s", pool_name);
else
ret = snprintf(val, size, "%lld", (unsigned long long)pool);
- up_read(&osdc->map_sem);
+ up_read(&osdc->lock);
return ret;
}
@@ -862,6 +862,7 @@ static int ceph_sync_setxattr(struct inode *inode, const char *name,
struct ceph_mds_request *req;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_pagelist *pagelist = NULL;
+ int op = CEPH_MDS_OP_SETXATTR;
int err;
if (size > 0) {
@@ -875,20 +876,21 @@ static int ceph_sync_setxattr(struct inode *inode, const char *name,
if (err)
goto out;
} else if (!value) {
- flags |= CEPH_XATTR_REMOVE;
+ if (flags & CEPH_XATTR_REPLACE)
+ op = CEPH_MDS_OP_RMXATTR;
+ else
+ flags |= CEPH_XATTR_REMOVE;
}
dout("setxattr value=%.*s\n", (int)size, value);
/* do request */
- req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
- USE_AUTH_MDS);
+ req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto out;
}
- req->r_args.setxattr.flags = cpu_to_le32(flags);
req->r_path2 = kstrdup(name, GFP_NOFS);
if (!req->r_path2) {
ceph_mdsc_put_request(req);
@@ -896,8 +898,11 @@ static int ceph_sync_setxattr(struct inode *inode, const char *name,
goto out;
}
- req->r_pagelist = pagelist;
- pagelist = NULL;
+ if (op == CEPH_MDS_OP_SETXATTR) {
+ req->r_args.setxattr.flags = cpu_to_le32(flags);
+ req->r_pagelist = pagelist;
+ pagelist = NULL;
+ }
req->r_inode = inode;
ihold(inode);
@@ -1051,12 +1056,13 @@ static int ceph_get_xattr_handler(const struct xattr_handler *handler,
}
static int ceph_set_xattr_handler(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
if (!ceph_is_valid_xattr(name))
return -EOPNOTSUPP;
- return __ceph_setxattr(d_inode(dentry), name, value, size, flags);
+ return __ceph_setxattr(inode, name, value, size, flags);
}
const struct xattr_handler ceph_other_xattr_handler = {
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index c8b77aa24a1d..5e23f64c0804 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -39,8 +39,9 @@
enum { XATTR_USER, XATTR_CIFS_ACL, XATTR_ACL_ACCESS, XATTR_ACL_DEFAULT };
static int cifs_xattr_set(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+ struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
int rc = -EOPNOTSUPP;
unsigned int xid;
@@ -99,12 +100,12 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
if (value &&
pTcon->ses->server->ops->set_acl)
rc = pTcon->ses->server->ops->set_acl(pacl,
- size, d_inode(dentry),
+ size, inode,
full_path, CIFS_ACL_DACL);
else
rc = -EOPNOTSUPP;
if (rc == 0) /* force revalidate of the inode */
- CIFS_I(d_inode(dentry))->time = 0;
+ CIFS_I(inode)->time = 0;
kfree(pacl);
}
#endif /* CONFIG_CIFS_ACL */
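The many xattr hunks in this series all converge on the same new ->set signature, which is worth seeing in isolation: handlers now receive the inode directly instead of deriving it from the dentry, so callers holding only an inode can still invoke them. A minimal compilable sketch with toy types (not the kernel headers):

#include <stdio.h>
#include <stddef.h>

struct dentry;
struct inode { int dummy; };

struct xattr_handler {
	int (*set)(const struct xattr_handler *handler,
		   struct dentry *unused, struct inode *inode,
		   const char *name, const void *value,
		   size_t size, int flags);
};

static int demo_set(const struct xattr_handler *handler,
		    struct dentry *unused, struct inode *inode,
		    const char *name, const void *value,
		    size_t size, int flags)
{
	/* a real handler would call into the fs with @inode here */
	printf("set %s (%zu bytes) on inode %p\n", name, size, (void *)inode);
	return 0;
}

int main(void)
{
	struct inode i;
	struct xattr_handler h = { .set = demo_set };

	return h.set(&h, NULL, &i, "user.demo", "v", 1, 0);
}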
diff --git a/fs/dax.c b/fs/dax.c
index 7d9df93b3a14..761495bf5eb9 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -32,14 +32,43 @@
#include <linux/pfn_t.h>
#include <linux/sizes.h>
-#define RADIX_DAX_MASK 0xf
-#define RADIX_DAX_SHIFT 4
-#define RADIX_DAX_PTE (0x4 | RADIX_TREE_EXCEPTIONAL_ENTRY)
-#define RADIX_DAX_PMD (0x8 | RADIX_TREE_EXCEPTIONAL_ENTRY)
-#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_MASK)
+/*
+ * We use the lowest available bit in an exceptional entry for locking, and
+ * the other two bits to determine the entry type. In total, 3 special bits.
+ */
+#define RADIX_DAX_SHIFT (RADIX_TREE_EXCEPTIONAL_SHIFT + 3)
+#define RADIX_DAX_PTE (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
+#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
+#define RADIX_DAX_TYPE_MASK (RADIX_DAX_PTE | RADIX_DAX_PMD)
+#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_TYPE_MASK)
#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT))
#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
- RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE)))
+ RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE) | \
+ RADIX_TREE_EXCEPTIONAL_ENTRY))
+
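The encoding these macros implement can be exercised standalone. A userspace sketch, assuming RADIX_TREE_EXCEPTIONAL_SHIFT == 2 and RADIX_TREE_EXCEPTIONAL_ENTRY == 2 as in this kernel, with the lock bit (named RADIX_DAX_ENTRY_LOCK elsewhere in the series) assumed to be the first bit above the exceptional marker:

#include <stdio.h>
#include <assert.h>

#define EXCEPTIONAL_SHIFT 2
#define EXCEPTIONAL_ENTRY 2UL                          /* bit 1: exceptional marker */
#define ENTRY_LOCK   (1UL << EXCEPTIONAL_SHIFT)        /* bit 2: lock */
#define DAX_PTE      (1UL << (EXCEPTIONAL_SHIFT + 1))  /* bit 3 */
#define DAX_PMD      (1UL << (EXCEPTIONAL_SHIFT + 2))  /* bit 4 */
#define DAX_SHIFT    (EXCEPTIONAL_SHIFT + 3)           /* sector starts at bit 5 */

static unsigned long dax_entry(unsigned long sector, int pmd)
{
	return (sector << DAX_SHIFT) | (pmd ? DAX_PMD : DAX_PTE) |
	       EXCEPTIONAL_ENTRY;
}

int main(void)
{
	unsigned long e = dax_entry(0x1234, 0);

	assert((e >> DAX_SHIFT) == 0x1234);    /* RADIX_DAX_SECTOR() analogue */
	assert(e & DAX_PTE);                   /* RADIX_DAX_TYPE() analogue */
	assert(!(e & ENTRY_LOCK));             /* entries start unlocked */
	e |= ENTRY_LOCK;                       /* lock_slot() analogue */
	e &= ~ENTRY_LOCK;                      /* unlock_slot() analogue */
	printf("entry=%#lx sector=%#lx\n", e, e >> DAX_SHIFT);
	return 0;
}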
+/* We choose 4096 entries - same as per-zone page wait tables */
+#define DAX_WAIT_TABLE_BITS 12
+#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
+
+wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
+
+static int __init init_dax_wait_table(void)
+{
+ int i;
+
+ for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
+ init_waitqueue_head(wait_table + i);
+ return 0;
+}
+fs_initcall(init_dax_wait_table);
+
+static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
+ pgoff_t index)
+{
+ unsigned long hash = hash_long((unsigned long)mapping ^ index,
+ DAX_WAIT_TABLE_BITS);
+ return wait_table + hash;
+}
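The bucket selection above boils down to hashing (mapping ^ index) down to DAX_WAIT_TABLE_BITS bits. A userspace sketch of that hash_long()-style reduction, using a representative 64-bit golden-ratio constant (not necessarily the kernel's):

#include <stdio.h>
#include <stdint.h>

#define WAIT_TABLE_BITS 12

static unsigned int hash_to_bucket(uint64_t val, unsigned int bits)
{
	/* multiplicative hash: multiply, keep the top @bits bits */
	return (unsigned int)((val * 0x61c8864680b583ebULL) >> (64 - bits));
}

int main(void)
{
	uint64_t mapping = 0xffff880012345680ULL;   /* hypothetical pointer value */
	uint64_t index = 42;

	printf("bucket %u of %u\n",
	       hash_to_bucket(mapping ^ index, WAIT_TABLE_BITS),
	       1u << WAIT_TABLE_BITS);
	return 0;
}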
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
@@ -87,50 +116,6 @@ struct page *read_dax_sector(struct block_device *bdev, sector_t n)
return page;
}
-/*
- * dax_clear_sectors() is called from within transaction context from XFS,
- * and hence this means the stack from this point must follow GFP_NOFS
- * semantics for all operations.
- */
-int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size)
-{
- struct blk_dax_ctl dax = {
- .sector = _sector,
- .size = _size,
- };
-
- might_sleep();
- do {
- long count, sz;
-
- count = dax_map_atomic(bdev, &dax);
- if (count < 0)
- return count;
- sz = min_t(long, count, SZ_128K);
- clear_pmem(dax.addr, sz);
- dax.size -= sz;
- dax.sector += sz / 512;
- dax_unmap_atomic(bdev, &dax);
- cond_resched();
- } while (dax.size);
-
- wmb_pmem();
- return 0;
-}
-EXPORT_SYMBOL_GPL(dax_clear_sectors);
-
-/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
-static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
- loff_t pos, loff_t end)
-{
- loff_t final = end - pos + first; /* The final byte of the buffer */
-
- if (first > 0)
- clear_pmem(addr, first);
- if (final < size)
- clear_pmem(addr + final, size - final);
-}
-
static bool buffer_written(struct buffer_head *bh)
{
return buffer_mapped(bh) && !buffer_unwritten(bh);
@@ -169,6 +154,9 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
struct blk_dax_ctl dax = {
.addr = (void __pmem *) ERR_PTR(-EIO),
};
+ unsigned blkbits = inode->i_blkbits;
+ sector_t file_blks = (i_size_read(inode) + (1 << blkbits) - 1)
+ >> blkbits;
if (rw == READ)
end = min(end, i_size_read(inode));
@@ -176,7 +164,6 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
while (pos < end) {
size_t len;
if (pos == max) {
- unsigned blkbits = inode->i_blkbits;
long page = pos >> PAGE_SHIFT;
sector_t block = page << (PAGE_SHIFT - blkbits);
unsigned first = pos - (block << blkbits);
@@ -192,6 +179,13 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
bh->b_size = 1 << blkbits;
bh_max = pos - first + bh->b_size;
bdev = bh->b_bdev;
+ /*
+ * We allow uninitialized buffers for writes
+ * beyond EOF as those cannot race with faults
+ */
+ WARN_ON_ONCE(
+ (buffer_new(bh) && block < file_blks) ||
+ (rw == WRITE && buffer_unwritten(bh)));
} else {
unsigned done = bh->b_size -
(bh_max - (pos - first));
@@ -211,11 +205,6 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
rc = map_len;
break;
}
- if (buffer_unwritten(bh) || buffer_new(bh)) {
- dax_new_buf(dax.addr, map_len, first,
- pos, end);
- need_wmb = true;
- }
dax.addr += first;
size = map_len - first;
}
@@ -276,15 +265,8 @@ ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
memset(&bh, 0, sizeof(bh));
bh.b_bdev = inode->i_sb->s_bdev;
- if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
- struct address_space *mapping = inode->i_mapping;
+ if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
inode_lock(inode);
- retval = filemap_write_and_wait_range(mapping, pos, end - 1);
- if (retval) {
- inode_unlock(inode);
- goto out;
- }
- }
/* Protects against truncate */
if (!(flags & DIO_SKIP_DIO_COUNT))
@@ -305,12 +287,268 @@ ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
if (!(flags & DIO_SKIP_DIO_COUNT))
inode_dio_end(inode);
- out:
return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
/*
+ * DAX radix tree locking
+ */
+struct exceptional_entry_key {
+ struct address_space *mapping;
+ unsigned long index;
+};
+
+struct wait_exceptional_entry_queue {
+ wait_queue_t wait;
+ struct exceptional_entry_key key;
+};
+
+static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
+ int sync, void *keyp)
+{
+ struct exceptional_entry_key *key = keyp;
+ struct wait_exceptional_entry_queue *ewait =
+ container_of(wait, struct wait_exceptional_entry_queue, wait);
+
+ if (key->mapping != ewait->key.mapping ||
+ key->index != ewait->key.index)
+ return 0;
+ return autoremove_wake_function(wait, mode, sync, NULL);
+}
+
+/*
+ * Check whether the given slot is locked. This function must be called
+ * with mapping->tree_lock held.
+ */
+static inline int slot_locked(struct address_space *mapping, void **slot)
+{
+ unsigned long entry = (unsigned long)
+ radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+ return entry & RADIX_DAX_ENTRY_LOCK;
+}
+
+/*
+ * Mark the given slot as locked. This function must be called with
+ * mapping->tree_lock held.
+ */
+static inline void *lock_slot(struct address_space *mapping, void **slot)
+{
+ unsigned long entry = (unsigned long)
+ radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+
+ entry |= RADIX_DAX_ENTRY_LOCK;
+ radix_tree_replace_slot(slot, (void *)entry);
+ return (void *)entry;
+}
+
+/*
+ * Mark the given slot as unlocked. This function must be called with
+ * mapping->tree_lock held.
+ */
+static inline void *unlock_slot(struct address_space *mapping, void **slot)
+{
+ unsigned long entry = (unsigned long)
+ radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
+
+ entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
+ radix_tree_replace_slot(slot, (void *)entry);
+ return (void *)entry;
+}
+
+/*
+ * Look up an entry in the radix tree, wait for it to become unlocked if it
+ * is an exceptional entry, and return it. The caller must call
+ * put_unlocked_mapping_entry() if it decided not to lock the entry, or
+ * put_locked_mapping_entry() if it locked the entry and now wants to
+ * unlock it.
+ *
+ * The function must be called with mapping->tree_lock held.
+ */
+static void *get_unlocked_mapping_entry(struct address_space *mapping,
+ pgoff_t index, void ***slotp)
+{
+ void *ret, **slot;
+ struct wait_exceptional_entry_queue ewait;
+ wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);
+
+ init_wait(&ewait.wait);
+ ewait.wait.func = wake_exceptional_entry_func;
+ ewait.key.mapping = mapping;
+ ewait.key.index = index;
+
+ for (;;) {
+ ret = __radix_tree_lookup(&mapping->page_tree, index, NULL,
+ &slot);
+ if (!ret || !radix_tree_exceptional_entry(ret) ||
+ !slot_locked(mapping, slot)) {
+ if (slotp)
+ *slotp = slot;
+ return ret;
+ }
+ prepare_to_wait_exclusive(wq, &ewait.wait,
+ TASK_UNINTERRUPTIBLE);
+ spin_unlock_irq(&mapping->tree_lock);
+ schedule();
+ finish_wait(wq, &ewait.wait);
+ spin_lock_irq(&mapping->tree_lock);
+ }
+}
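The loop in get_unlocked_mapping_entry() is the classic drop-lock, sleep, retake-lock pattern. A userspace analogue using a pthread mutex and condvar in place of tree_lock and the hashed waitqueue (all names illustrative, not kernel API; build with -lpthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
	bool locked;
	pthread_mutex_t tree_lock;
	pthread_cond_t wq;
};

/* returns with tree_lock held and e->locked == false, like
 * get_unlocked_mapping_entry() returning an unlocked slot */
static void get_unlocked(struct entry *e)
{
	pthread_mutex_lock(&e->tree_lock);
	while (e->locked)
		pthread_cond_wait(&e->wq, &e->tree_lock); /* drops + retakes lock */
}

static void *locker(void *arg)
{
	struct entry *e = arg;

	get_unlocked(e);            /* sleeps until main() unlocks */
	e->locked = true;           /* lock_slot() analogue */
	pthread_mutex_unlock(&e->tree_lock);
	return NULL;
}

int main(void)
{
	struct entry e = { .locked = true,
			   .tree_lock = PTHREAD_MUTEX_INITIALIZER,
			   .wq = PTHREAD_COND_INITIALIZER };
	pthread_t t;

	pthread_create(&t, NULL, locker, &e);
	pthread_mutex_lock(&e.tree_lock);
	e.locked = false;                 /* unlock_slot() analogue */
	pthread_cond_signal(&e.wq);       /* dax_wake_mapping_entry_waiter() analogue */
	pthread_mutex_unlock(&e.tree_lock);
	pthread_join(t, NULL);
	puts("entry handed off");
	return 0;
}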
+
+/*
+ * Find the radix tree entry at the given index. If it points to a page,
+ * return with the page locked. If it points to an exceptional entry, return
+ * with the radix tree entry locked. If the radix tree doesn't contain the
+ * given index, create an empty exceptional entry for the index and return
+ * with it locked.
+ *
+ * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
+ * persistent memory the benefit is doubtful. We can add that later if we can
+ * show it helps.
+ */
+static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index)
+{
+ void *ret, **slot;
+
+restart:
+ spin_lock_irq(&mapping->tree_lock);
+ ret = get_unlocked_mapping_entry(mapping, index, &slot);
+ /* No entry for given index? Make sure radix tree is big enough. */
+ if (!ret) {
+ int err;
+
+ spin_unlock_irq(&mapping->tree_lock);
+ err = radix_tree_preload(
+ mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
+ if (err)
+ return ERR_PTR(err);
+ ret = (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
+ RADIX_DAX_ENTRY_LOCK);
+ spin_lock_irq(&mapping->tree_lock);
+ err = radix_tree_insert(&mapping->page_tree, index, ret);
+ radix_tree_preload_end();
+ if (err) {
+ spin_unlock_irq(&mapping->tree_lock);
+ /* Someone already created the entry? */
+ if (err == -EEXIST)
+ goto restart;
+ return ERR_PTR(err);
+ }
+ /* Good, we have inserted empty locked entry into the tree. */
+ mapping->nrexceptional++;
+ spin_unlock_irq(&mapping->tree_lock);
+ return ret;
+ }
+ /* Normal page in radix tree? */
+ if (!radix_tree_exceptional_entry(ret)) {
+ struct page *page = ret;
+
+ get_page(page);
+ spin_unlock_irq(&mapping->tree_lock);
+ lock_page(page);
+ /* Page got truncated? Retry... */
+ if (unlikely(page->mapping != mapping)) {
+ unlock_page(page);
+ put_page(page);
+ goto restart;
+ }
+ return page;
+ }
+ ret = lock_slot(mapping, slot);
+ spin_unlock_irq(&mapping->tree_lock);
+ return ret;
+}
+
+void dax_wake_mapping_entry_waiter(struct address_space *mapping,
+ pgoff_t index, bool wake_all)
+{
+ wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);
+
+ /*
+ * Checking for locked entry and prepare_to_wait_exclusive() happens
+ * under mapping->tree_lock, ditto for entry handling in our callers.
+ * So at this point all tasks that could have seen our entry locked
+ * must be in the waitqueue and the following check will see them.
+ */
+ if (waitqueue_active(wq)) {
+ struct exceptional_entry_key key;
+
+ key.mapping = mapping;
+ key.index = index;
+ __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
+ }
+}
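The keyed wakeup above only rouses waiters whose (mapping, index) pair matches, so unrelated entries that hash to the same bucket stay asleep. A toy sketch of that filtering (illustrative types, not the kernel waitqueue API):

#include <stdio.h>

struct key { void *mapping; unsigned long index; };

struct waiter { struct key key; int woken; };

static void wake_bucket(struct waiter *w, int n, struct key *key)
{
	for (int i = 0; i < n; i++)
		if (w[i].key.mapping == key->mapping &&
		    w[i].key.index == key->index)
			w[i].woken = 1;   /* autoremove_wake_function() analogue */
}

int main(void)
{
	int m1, m2;   /* stand-ins for two address_space objects */
	struct waiter bucket[] = {
		{ { &m1, 7 }, 0 },
		{ { &m2, 7 }, 0 },    /* same index, different mapping */
	};
	struct key key = { &m1, 7 };

	wake_bucket(bucket, 2, &key);
	printf("waiter0=%d waiter1=%d\n", bucket[0].woken, bucket[1].woken);
	return 0;
}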
+
+void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
+{
+ void *ret, **slot;
+
+ spin_lock_irq(&mapping->tree_lock);
+ ret = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
+ if (WARN_ON_ONCE(!ret || !radix_tree_exceptional_entry(ret) ||
+ !slot_locked(mapping, slot))) {
+ spin_unlock_irq(&mapping->tree_lock);
+ return;
+ }
+ unlock_slot(mapping, slot);
+ spin_unlock_irq(&mapping->tree_lock);
+ dax_wake_mapping_entry_waiter(mapping, index, false);
+}
+
+static void put_locked_mapping_entry(struct address_space *mapping,
+ pgoff_t index, void *entry)
+{
+ if (!radix_tree_exceptional_entry(entry)) {
+ unlock_page(entry);
+ put_page(entry);
+ } else {
+ dax_unlock_mapping_entry(mapping, index);
+ }
+}
+
+/*
+ * Called when we are done with a radix tree entry we looked up via
+ * get_unlocked_mapping_entry() and which we didn't lock in the end.
+ */
+static void put_unlocked_mapping_entry(struct address_space *mapping,
+ pgoff_t index, void *entry)
+{
+ if (!radix_tree_exceptional_entry(entry))
+ return;
+
+ /* We have to wake up next waiter for the radix tree entry lock */
+ dax_wake_mapping_entry_waiter(mapping, index, false);
+}
+
+/*
+ * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
+ * entry to get unlocked before deleting it.
+ */
+int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
+{
+ void *entry;
+
+ spin_lock_irq(&mapping->tree_lock);
+ entry = get_unlocked_mapping_entry(mapping, index, NULL);
+ /*
+	 * This gets called from the truncate / punch_hole path. As such, the
+	 * caller must hold locks protecting against concurrent modifications
+	 * of the radix tree (usually the fs-private i_mmap_sem held for
+	 * writing). Since the caller has seen an exceptional entry for this
+	 * index, we had better find it at that index as well...
+ */
+ if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry))) {
+ spin_unlock_irq(&mapping->tree_lock);
+ return 0;
+ }
+ radix_tree_delete(&mapping->page_tree, index);
+ mapping->nrexceptional--;
+ spin_unlock_irq(&mapping->tree_lock);
+ dax_wake_mapping_entry_waiter(mapping, index, true);
+
+ return 1;
+}
+
+/*
* The user has performed a load from a hole in the file. Allocating
* a new page in the file would cause excessive storage usage for
* workloads with sparse files. We allocate a page cache page instead.
@@ -318,24 +556,24 @@ EXPORT_SYMBOL_GPL(dax_do_io);
* otherwise it will simply fall out of the page cache under memory
* pressure without ever having been dirtied.
*/
-static int dax_load_hole(struct address_space *mapping, struct page *page,
- struct vm_fault *vmf)
+static int dax_load_hole(struct address_space *mapping, void *entry,
+ struct vm_fault *vmf)
{
- unsigned long size;
- struct inode *inode = mapping->host;
- if (!page)
- page = find_or_create_page(mapping, vmf->pgoff,
- GFP_KERNEL | __GFP_ZERO);
- if (!page)
- return VM_FAULT_OOM;
- /* Recheck i_size under page lock to avoid truncate race */
- size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (vmf->pgoff >= size) {
- unlock_page(page);
- put_page(page);
- return VM_FAULT_SIGBUS;
+ struct page *page;
+
+ /* Hole page already exists? Return it... */
+ if (!radix_tree_exceptional_entry(entry)) {
+ vmf->page = entry;
+ return VM_FAULT_LOCKED;
}
+	/* This will replace the locked radix tree entry with a hole page */
+ page = find_or_create_page(mapping, vmf->pgoff,
+ vmf->gfp_mask | __GFP_ZERO);
+ if (!page) {
+ put_locked_mapping_entry(mapping, vmf->pgoff, entry);
+ return VM_FAULT_OOM;
+ }
vmf->page = page;
return VM_FAULT_LOCKED;
}
@@ -359,77 +597,72 @@ static int copy_user_bh(struct page *to, struct inode *inode,
return 0;
}
-#define NO_SECTOR -1
#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))
-static int dax_radix_entry(struct address_space *mapping, pgoff_t index,
- sector_t sector, bool pmd_entry, bool dirty)
+static void *dax_insert_mapping_entry(struct address_space *mapping,
+ struct vm_fault *vmf,
+ void *entry, sector_t sector)
{
struct radix_tree_root *page_tree = &mapping->page_tree;
- pgoff_t pmd_index = DAX_PMD_INDEX(index);
- int type, error = 0;
- void *entry;
+ int error = 0;
+ bool hole_fill = false;
+ void *new_entry;
+ pgoff_t index = vmf->pgoff;
- WARN_ON_ONCE(pmd_entry && !dirty);
- if (dirty)
+ if (vmf->flags & FAULT_FLAG_WRITE)
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
- spin_lock_irq(&mapping->tree_lock);
-
- entry = radix_tree_lookup(page_tree, pmd_index);
- if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD) {
- index = pmd_index;
- goto dirty;
+ /* Replacing hole page with block mapping? */
+ if (!radix_tree_exceptional_entry(entry)) {
+ hole_fill = true;
+ /*
+ * Unmap the page now before we remove it from page cache below.
+ * The page is locked so it cannot be faulted in again.
+ */
+ unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
+ PAGE_SIZE, 0);
+ error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
+ if (error)
+ return ERR_PTR(error);
}
- entry = radix_tree_lookup(page_tree, index);
- if (entry) {
- type = RADIX_DAX_TYPE(entry);
- if (WARN_ON_ONCE(type != RADIX_DAX_PTE &&
- type != RADIX_DAX_PMD)) {
- error = -EIO;
+ spin_lock_irq(&mapping->tree_lock);
+ new_entry = (void *)((unsigned long)RADIX_DAX_ENTRY(sector, false) |
+ RADIX_DAX_ENTRY_LOCK);
+ if (hole_fill) {
+ __delete_from_page_cache(entry, NULL);
+ /* Drop pagecache reference */
+ put_page(entry);
+ error = radix_tree_insert(page_tree, index, new_entry);
+ if (error) {
+ new_entry = ERR_PTR(error);
goto unlock;
}
+ mapping->nrexceptional++;
+ } else {
+ void **slot;
+ void *ret;
- if (!pmd_entry || type == RADIX_DAX_PMD)
- goto dirty;
-
- /*
- * We only insert dirty PMD entries into the radix tree. This
- * means we don't need to worry about removing a dirty PTE
- * entry and inserting a clean PMD entry, thus reducing the
- * range we would flush with a follow-up fsync/msync call.
- */
- radix_tree_delete(&mapping->page_tree, index);
- mapping->nrexceptional--;
- }
-
- if (sector == NO_SECTOR) {
- /*
- * This can happen during correct operation if our pfn_mkwrite
- * fault raced against a hole punch operation. If this
- * happens the pte that was hole punched will have been
- * unmapped and the radix tree entry will have been removed by
- * the time we are called, but the call will still happen. We
- * will return all the way up to wp_pfn_shared(), where the
- * pte_same() check will fail, eventually causing page fault
- * to be retried by the CPU.
- */
- goto unlock;
+ ret = __radix_tree_lookup(page_tree, index, NULL, &slot);
+ WARN_ON_ONCE(ret != entry);
+ radix_tree_replace_slot(slot, new_entry);
}
-
- error = radix_tree_insert(page_tree, index,
- RADIX_DAX_ENTRY(sector, pmd_entry));
- if (error)
- goto unlock;
-
- mapping->nrexceptional++;
- dirty:
- if (dirty)
+ if (vmf->flags & FAULT_FLAG_WRITE)
radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
unlock:
spin_unlock_irq(&mapping->tree_lock);
- return error;
+ if (hole_fill) {
+ radix_tree_preload_end();
+ /*
+		 * We don't need the hole page anymore; it has been replaced
+		 * with a locked radix tree entry now.
+ */
+ if (mapping->a_ops->freepage)
+ mapping->a_ops->freepage(entry);
+ unlock_page(entry);
+ put_page(entry);
+ }
+ return new_entry;
}
static int dax_writeback_one(struct block_device *bdev,
@@ -555,56 +788,29 @@ int dax_writeback_mapping_range(struct address_space *mapping,
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
-static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
+static int dax_insert_mapping(struct address_space *mapping,
+ struct buffer_head *bh, void **entryp,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
unsigned long vaddr = (unsigned long)vmf->virtual_address;
- struct address_space *mapping = inode->i_mapping;
struct block_device *bdev = bh->b_bdev;
struct blk_dax_ctl dax = {
- .sector = to_sector(bh, inode),
+ .sector = to_sector(bh, mapping->host),
.size = bh->b_size,
};
- pgoff_t size;
- int error;
-
- i_mmap_lock_read(mapping);
-
- /*
- * Check truncate didn't happen while we were allocating a block.
- * If it did, this block may or may not be still allocated to the
- * file. We can't tell the filesystem to free it because we can't
- * take i_mutex here. In the worst case, the file still has blocks
- * allocated past the end of the file.
- */
- size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (unlikely(vmf->pgoff >= size)) {
- error = -EIO;
- goto out;
- }
+ void *ret;
+ void *entry = *entryp;
- if (dax_map_atomic(bdev, &dax) < 0) {
- error = PTR_ERR(dax.addr);
- goto out;
- }
-
- if (buffer_unwritten(bh) || buffer_new(bh)) {
- clear_pmem(dax.addr, PAGE_SIZE);
- wmb_pmem();
- }
+ if (dax_map_atomic(bdev, &dax) < 0)
+ return PTR_ERR(dax.addr);
dax_unmap_atomic(bdev, &dax);
- error = dax_radix_entry(mapping, vmf->pgoff, dax.sector, false,
- vmf->flags & FAULT_FLAG_WRITE);
- if (error)
- goto out;
-
- error = vm_insert_mixed(vma, vaddr, dax.pfn);
+ ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+ *entryp = ret;
- out:
- i_mmap_unlock_read(mapping);
-
- return error;
+ return vm_insert_mixed(vma, vaddr, dax.pfn);
}
/**
@@ -612,24 +818,18 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
* @vma: The virtual memory area where the fault occurred
* @vmf: The description of the fault
* @get_block: The filesystem method used to translate file offsets to blocks
- * @complete_unwritten: The filesystem method used to convert unwritten blocks
- * to written so the data written to them is exposed. This is required for
- * required by write faults for filesystems that will return unwritten
- * extent mappings from @get_block, but it is optional for reads as
- * dax_insert_mapping() will always zero unwritten blocks. If the fs does
- * not support unwritten extents, the it should pass NULL.
*
* When a page fault occurs, filesystems may call this helper in their
* fault handler for DAX files. __dax_fault() assumes the caller has done all
* the necessary locking for the page fault to proceed successfully.
*/
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
- get_block_t get_block, dax_iodone_t complete_unwritten)
+ get_block_t get_block)
{
struct file *file = vma->vm_file;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
- struct page *page;
+ void *entry;
struct buffer_head bh;
unsigned long vaddr = (unsigned long)vmf->virtual_address;
unsigned blkbits = inode->i_blkbits;
@@ -638,6 +838,11 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
int error;
int major = 0;
+ /*
+	 * Check that the offset isn't beyond the end of file now. The caller
+	 * is supposed to hold locks serializing us with truncate / punch hole,
+	 * so this is a reliable test.
+ */
size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (vmf->pgoff >= size)
return VM_FAULT_SIGBUS;
@@ -647,49 +852,17 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
bh.b_bdev = inode->i_sb->s_bdev;
bh.b_size = PAGE_SIZE;
- repeat:
- page = find_get_page(mapping, vmf->pgoff);
- if (page) {
- if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
- put_page(page);
- return VM_FAULT_RETRY;
- }
- if (unlikely(page->mapping != mapping)) {
- unlock_page(page);
- put_page(page);
- goto repeat;
- }
- size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (unlikely(vmf->pgoff >= size)) {
- /*
- * We have a struct page covering a hole in the file
- * from a read fault and we've raced with a truncate
- */
- error = -EIO;
- goto unlock_page;
- }
+ entry = grab_mapping_entry(mapping, vmf->pgoff);
+ if (IS_ERR(entry)) {
+ error = PTR_ERR(entry);
+ goto out;
}
error = get_block(inode, block, &bh, 0);
if (!error && (bh.b_size < PAGE_SIZE))
error = -EIO; /* fs corruption? */
if (error)
- goto unlock_page;
-
- if (!buffer_mapped(&bh) && !vmf->cow_page) {
- if (vmf->flags & FAULT_FLAG_WRITE) {
- error = get_block(inode, block, &bh, 1);
- count_vm_event(PGMAJFAULT);
- mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
- major = VM_FAULT_MAJOR;
- if (!error && (bh.b_size < PAGE_SIZE))
- error = -EIO;
- if (error)
- goto unlock_page;
- } else {
- return dax_load_hole(mapping, page, vmf);
- }
- }
+ goto unlock_entry;
if (vmf->cow_page) {
struct page *new_page = vmf->cow_page;
@@ -698,53 +871,35 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
else
clear_user_highpage(new_page, vaddr);
if (error)
- goto unlock_page;
- vmf->page = page;
- if (!page) {
- i_mmap_lock_read(mapping);
- /* Check we didn't race with truncate */
- size = (i_size_read(inode) + PAGE_SIZE - 1) >>
- PAGE_SHIFT;
- if (vmf->pgoff >= size) {
- i_mmap_unlock_read(mapping);
- error = -EIO;
- goto out;
- }
+ goto unlock_entry;
+ if (!radix_tree_exceptional_entry(entry)) {
+ vmf->page = entry;
+ return VM_FAULT_LOCKED;
}
- return VM_FAULT_LOCKED;
+ vmf->entry = entry;
+ return VM_FAULT_DAX_LOCKED;
}
- /* Check we didn't race with a read fault installing a new page */
- if (!page && major)
- page = find_lock_page(mapping, vmf->pgoff);
-
- if (page) {
- unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
- PAGE_SIZE, 0);
- delete_from_page_cache(page);
- unlock_page(page);
- put_page(page);
- page = NULL;
- }
-
- /*
- * If we successfully insert the new mapping over an unwritten extent,
- * we need to ensure we convert the unwritten extent. If there is an
- * error inserting the mapping, the filesystem needs to leave it as
- * unwritten to prevent exposure of the stale underlying data to
- * userspace, but we still need to call the completion function so
- * the private resources on the mapping buffer can be released. We
- * indicate what the callback should do via the uptodate variable, same
- * as for normal BH based IO completions.
- */
- error = dax_insert_mapping(inode, &bh, vma, vmf);
- if (buffer_unwritten(&bh)) {
- if (complete_unwritten)
- complete_unwritten(&bh, !error);
- else
- WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
+ if (!buffer_mapped(&bh)) {
+ if (vmf->flags & FAULT_FLAG_WRITE) {
+ error = get_block(inode, block, &bh, 1);
+ count_vm_event(PGMAJFAULT);
+ mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+ major = VM_FAULT_MAJOR;
+ if (!error && (bh.b_size < PAGE_SIZE))
+ error = -EIO;
+ if (error)
+ goto unlock_entry;
+ } else {
+ return dax_load_hole(mapping, entry, vmf);
+ }
}
+ /* Filesystem should not return unwritten buffers to us! */
+ WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
+ error = dax_insert_mapping(mapping, &bh, &entry, vma, vmf);
+ unlock_entry:
+ put_locked_mapping_entry(mapping, vmf->pgoff, entry);
out:
if (error == -ENOMEM)
return VM_FAULT_OOM | major;
@@ -752,13 +907,6 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
if ((error < 0) && (error != -EBUSY))
return VM_FAULT_SIGBUS | major;
return VM_FAULT_NOPAGE | major;
-
- unlock_page:
- if (page) {
- unlock_page(page);
- put_page(page);
- }
- goto out;
}
EXPORT_SYMBOL(__dax_fault);
@@ -772,7 +920,7 @@ EXPORT_SYMBOL(__dax_fault);
* fault handler for DAX files.
*/
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
- get_block_t get_block, dax_iodone_t complete_unwritten)
+ get_block_t get_block)
{
int result;
struct super_block *sb = file_inode(vma->vm_file)->i_sb;
@@ -781,7 +929,7 @@ int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
sb_start_pagefault(sb);
file_update_time(vma->vm_file);
}
- result = __dax_fault(vma, vmf, get_block, complete_unwritten);
+ result = __dax_fault(vma, vmf, get_block);
if (vmf->flags & FAULT_FLAG_WRITE)
sb_end_pagefault(sb);
@@ -789,7 +937,7 @@ int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
}
EXPORT_SYMBOL_GPL(dax_fault);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
* The 'colour' (ie low bits) within a PMD of a page offset. This comes up
* more often than one might expect in the below function.
@@ -815,8 +963,7 @@ static void __dax_dbg(struct buffer_head *bh, unsigned long address,
#define dax_pmd_dbg(bh, address, reason) __dax_dbg(bh, address, reason, "dax_pmd")
int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
- pmd_t *pmd, unsigned int flags, get_block_t get_block,
- dax_iodone_t complete_unwritten)
+ pmd_t *pmd, unsigned int flags, get_block_t get_block)
{
struct file *file = vma->vm_file;
struct address_space *mapping = file->f_mapping;
@@ -828,7 +975,7 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
struct block_device *bdev;
pgoff_t size, pgoff;
sector_t block;
- int error, result = 0;
+ int result = 0;
bool alloc = false;
/* dax pmd mappings require pfn_t_devmap() */
@@ -875,6 +1022,7 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
if (get_block(inode, block, &bh, 1) != 0)
return VM_FAULT_SIGBUS;
alloc = true;
+ WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
}
bdev = bh.b_bdev;
@@ -900,26 +1048,7 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
truncate_pagecache_range(inode, lstart, lend);
}
- i_mmap_lock_read(mapping);
-
- /*
- * If a truncate happened while we were allocating blocks, we may
- * leave blocks allocated to the file that are beyond EOF. We can't
- * take i_mutex here, so just leave them hanging; they'll be freed
- * when the file is deleted.
- */
- size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (pgoff >= size) {
- result = VM_FAULT_SIGBUS;
- goto out;
- }
- if ((pgoff | PG_PMD_COLOUR) >= size) {
- dax_pmd_dbg(&bh, address,
- "offset + huge page size > file size");
- goto fallback;
- }
-
- if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
+ if (!write && !buffer_mapped(&bh)) {
spinlock_t *ptl;
pmd_t entry;
struct page *zero_page = get_huge_zero_page();
@@ -954,8 +1083,8 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
long length = dax_map_atomic(bdev, &dax);
if (length < 0) {
- result = VM_FAULT_SIGBUS;
- goto out;
+ dax_pmd_dbg(&bh, address, "dax-error fallback");
+ goto fallback;
}
if (length < PMD_SIZE) {
dax_pmd_dbg(&bh, address, "dax-length too small");
@@ -973,14 +1102,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
dax_pmd_dbg(&bh, address, "pfn not in memmap");
goto fallback;
}
-
- if (buffer_unwritten(&bh) || buffer_new(&bh)) {
- clear_pmem(dax.addr, PMD_SIZE);
- wmb_pmem();
- count_vm_event(PGMAJFAULT);
- mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
- result |= VM_FAULT_MAJOR;
- }
dax_unmap_atomic(bdev, &dax);
/*
@@ -999,13 +1120,10 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
* the write to insert a dirty entry.
*/
if (write) {
- error = dax_radix_entry(mapping, pgoff, dax.sector,
- true, true);
- if (error) {
- dax_pmd_dbg(&bh, address,
- "PMD radix insertion failed");
- goto fallback;
- }
+ /*
+		 * We should insert a radix-tree entry and dirty it here.
+ * For now this is broken...
+ */
}
dev_dbg(part_to_dev(bdev->bd_part),
@@ -1018,11 +1136,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
}
out:
- i_mmap_unlock_read(mapping);
-
- if (buffer_unwritten(&bh))
- complete_unwritten(&bh, !(result & VM_FAULT_ERROR));
-
return result;
fallback:
@@ -1042,8 +1155,7 @@ EXPORT_SYMBOL_GPL(__dax_pmd_fault);
* pmd_fault handler for DAX files.
*/
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
- pmd_t *pmd, unsigned int flags, get_block_t get_block,
- dax_iodone_t complete_unwritten)
+ pmd_t *pmd, unsigned int flags, get_block_t get_block)
{
int result;
struct super_block *sb = file_inode(vma->vm_file)->i_sb;
@@ -1052,8 +1164,7 @@ int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
sb_start_pagefault(sb);
file_update_time(vma->vm_file);
}
- result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
- complete_unwritten);
+ result = __dax_pmd_fault(vma, address, pmd, flags, get_block);
if (flags & FAULT_FLAG_WRITE)
sb_end_pagefault(sb);
@@ -1070,27 +1181,59 @@ EXPORT_SYMBOL_GPL(dax_pmd_fault);
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct file *file = vma->vm_file;
- int error;
-
- /*
- * We pass NO_SECTOR to dax_radix_entry() because we expect that a
- * RADIX_DAX_PTE entry already exists in the radix tree from a
- * previous call to __dax_fault(). We just want to look up that PTE
- * entry using vmf->pgoff and make sure the dirty tag is set. This
- * saves us from having to make a call to get_block() here to look
- * up the sector.
- */
- error = dax_radix_entry(file->f_mapping, vmf->pgoff, NO_SECTOR, false,
- true);
+ struct address_space *mapping = file->f_mapping;
+ void *entry;
+ pgoff_t index = vmf->pgoff;
- if (error == -ENOMEM)
- return VM_FAULT_OOM;
- if (error)
- return VM_FAULT_SIGBUS;
+ spin_lock_irq(&mapping->tree_lock);
+ entry = get_unlocked_mapping_entry(mapping, index, NULL);
+ if (!entry || !radix_tree_exceptional_entry(entry))
+ goto out;
+ radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
+ put_unlocked_mapping_entry(mapping, index, entry);
+out:
+ spin_unlock_irq(&mapping->tree_lock);
return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
+static bool dax_range_is_aligned(struct block_device *bdev,
+ unsigned int offset, unsigned int length)
+{
+ unsigned short sector_size = bdev_logical_block_size(bdev);
+
+ if (!IS_ALIGNED(offset, sector_size))
+ return false;
+ if (!IS_ALIGNED(length, sector_size))
+ return false;
+
+ return true;
+}
+
+int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
+ unsigned int offset, unsigned int length)
+{
+ struct blk_dax_ctl dax = {
+ .sector = sector,
+ .size = PAGE_SIZE,
+ };
+
+ if (dax_range_is_aligned(bdev, offset, length)) {
+ sector_t start_sector = dax.sector + (offset >> 9);
+
+ return blkdev_issue_zeroout(bdev, start_sector,
+ length >> 9, GFP_NOFS, true);
+ } else {
+ if (dax_map_atomic(bdev, &dax) < 0)
+ return PTR_ERR(dax.addr);
+ clear_pmem(dax.addr + offset, length);
+ wmb_pmem();
+ dax_unmap_atomic(bdev, &dax);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__dax_zero_page_range);
+
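__dax_zero_page_range() dispatches on sector alignment: aligned ranges go to the block layer's zeroout path, the rest are zeroed through the direct mapping, with length >> 9 converting bytes to 512-byte sectors. A userspace sketch of the dispatch, assuming a 512-byte logical block size:

#include <stdio.h>
#include <stdbool.h>

static bool range_is_aligned(unsigned int offset, unsigned int length,
			     unsigned int sector_size)
{
	return (offset % sector_size) == 0 && (length % sector_size) == 0;
}

int main(void)
{
	unsigned int sector_size = 512;    /* assumed logical block size */
	unsigned int cases[][2] = { { 0, 4096 }, { 512, 1024 }, { 100, 512 } };

	for (int i = 0; i < 3; i++) {
		unsigned int off = cases[i][0], len = cases[i][1];

		if (range_is_aligned(off, len, sector_size))
			printf("off=%u len=%u -> blkdev_issue_zeroout, %u sectors\n",
			       off, len, len >> 9);
		else
			printf("off=%u len=%u -> clear_pmem via direct mapping\n",
			       off, len);
	}
	return 0;
}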
/**
* dax_zero_page_range - zero a range within a page of a DAX file
* @inode: The file being truncated
@@ -1102,12 +1245,6 @@ EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
* page in a DAX file. This is intended for hole-punch operations. If
* you are truncating a file, the helper function dax_truncate_page() may be
* more convenient.
- *
- * We work in terms of PAGE_SIZE here for commonality with
- * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
- * took care of disposing of the unnecessary blocks. Even if the filesystem
- * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
- * since the file might be mmapped.
*/
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
get_block_t get_block)
@@ -1126,23 +1263,11 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
bh.b_bdev = inode->i_sb->s_bdev;
bh.b_size = PAGE_SIZE;
err = get_block(inode, index, &bh, 0);
- if (err < 0)
+ if (err < 0 || !buffer_written(&bh))
return err;
- if (buffer_written(&bh)) {
- struct block_device *bdev = bh.b_bdev;
- struct blk_dax_ctl dax = {
- .sector = to_sector(&bh, inode),
- .size = PAGE_SIZE,
- };
- if (dax_map_atomic(bdev, &dax) < 0)
- return PTR_ERR(dax.addr);
- clear_pmem(dax.addr + offset, length);
- wmb_pmem();
- dax_unmap_atomic(bdev, &dax);
- }
-
- return 0;
+ return __dax_zero_page_range(bh.b_bdev, to_sector(&bh, inode),
+ offset, length);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
@@ -1154,12 +1279,6 @@ EXPORT_SYMBOL_GPL(dax_zero_page_range);
*
* Similar to block_truncate_page(), this function can be called by a
* filesystem when it is truncating a DAX file to handle the partial page.
- *
- * We work in terms of PAGE_SIZE here for commonality with
- * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
- * took care of disposing of the unnecessary blocks. Even if the filesystem
- * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
- * since the file might be mmapped.
*/
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
diff --git a/fs/dcache.c b/fs/dcache.c
index c622872c12c5..ad4a542e9bab 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1670,8 +1670,7 @@ struct dentry *d_alloc_name(struct dentry *parent, const char *name)
struct qstr q;
q.name = name;
- q.len = strlen(name);
- q.hash = full_name_hash(q.name, q.len);
+ q.hash_len = hashlen_string(name);
return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);
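hashlen_string() returns the hash and the string length packed into a single 64-bit value, so one store fills both qstr fields at once. A sketch of that packing, with a stand-in hash function (djb2 here, purely illustrative, not the kernel's):

#include <stdio.h>
#include <stdint.h>

static uint64_t toy_hashlen_string(const char *name)
{
	uint32_t hash = 5381, len = 0;    /* djb2, stand-in for the real hash */

	for (; name[len]; len++)
		hash = hash * 33 + (unsigned char)name[len];
	/* hashlen_create() packing: length high, hash low */
	return ((uint64_t)len << 32) | hash;
}

int main(void)
{
	uint64_t hl = toy_hashlen_string("dentry-name");

	printf("len=%u hash=%#x\n", (uint32_t)(hl >> 32), (uint32_t)hl);
	return 0;
}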
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 3bf3f20f8ecc..f3b4408be590 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -628,11 +628,11 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
map_bh->b_size = fs_count << i_blkbits;
/*
- * For writes inside i_size on a DIO_SKIP_HOLES filesystem we
- * forbid block creations: only overwrites are permitted.
- * We will return early to the caller once we see an
- * unmapped buffer head returned, and the caller will fall
- * back to buffered I/O.
+ * For writes that could fill holes inside i_size on a
+ * DIO_SKIP_HOLES filesystem we forbid block creations: only
+ * overwrites are permitted. We will return early to the caller
+ * once we see an unmapped buffer head returned, and the caller
+ * will fall back to buffered I/O.
*
* Otherwise the decision is left to the get_blocks method,
* which may decide to handle it or also return an unmapped
@@ -640,8 +640,8 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
*/
create = dio->rw & WRITE;
if (dio->flags & DIO_SKIP_HOLES) {
- if (sdio->block_in_file < (i_size_read(dio->inode) >>
- sdio->blkbits))
+ if (fs_startblk <= ((i_size_read(dio->inode) - 1) >>
+ i_blkbits))
create = 0;
}
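The new check derives the last fs block lying inside i_size from (i_size - 1) >> i_blkbits, so a write into a partially used final block is still treated as a possible hole fill and block creation stays forbidden there. A worked example with assumed 4096-byte fs blocks:

#include <stdio.h>

int main(void)
{
	unsigned int i_blkbits = 12;     /* 4096-byte fs blocks */
	long long i_size = 4100;         /* EOF falls inside fs block 1 */

	/* last block inside i_size: (4100 - 1) >> 12 == 1 */
	for (long long fs_startblk = 0; fs_startblk < 3; fs_startblk++) {
		int create = !(fs_startblk <= ((i_size - 1) >> i_blkbits));

		printf("fs block %lld: create=%d\n", fs_startblk, create);
	}
	return 0;
}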
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index ebd40f46ed4c..0d8eb3455b34 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1141,12 +1141,13 @@ ecryptfs_write_metadata_to_contents(struct inode *ecryptfs_inode,
static int
ecryptfs_write_metadata_to_xattr(struct dentry *ecryptfs_dentry,
+ struct inode *ecryptfs_inode,
char *page_virt, size_t size)
{
int rc;
- rc = ecryptfs_setxattr(ecryptfs_dentry, ECRYPTFS_XATTR_NAME, page_virt,
- size, 0);
+ rc = ecryptfs_setxattr(ecryptfs_dentry, ecryptfs_inode,
+ ECRYPTFS_XATTR_NAME, page_virt, size, 0);
return rc;
}
@@ -1215,8 +1216,8 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry,
goto out_free;
}
if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
- rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, virt,
- size);
+ rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, ecryptfs_inode,
+ virt, size);
else
rc = ecryptfs_write_metadata_to_contents(ecryptfs_inode, virt,
virt_len);
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 3ec495db7e82..4ba1547bb9ad 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -609,8 +609,8 @@ ssize_t
ecryptfs_getxattr_lower(struct dentry *lower_dentry, struct inode *lower_inode,
const char *name, void *value, size_t size);
int
-ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
- size_t size, int flags);
+ecryptfs_setxattr(struct dentry *dentry, struct inode *inode, const char *name,
+ const void *value, size_t size, int flags);
int ecryptfs_read_xattr_region(char *page_virt, struct inode *ecryptfs_inode);
#ifdef CONFIG_ECRYPT_FS_MESSAGING
int ecryptfs_process_response(struct ecryptfs_daemon *daemon,
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 318b04689d76..9d153b6a1d72 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -1001,7 +1001,8 @@ static int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
}
int
-ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ecryptfs_setxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value,
size_t size, int flags)
{
int rc = 0;
@@ -1014,8 +1015,8 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
}
rc = vfs_setxattr(lower_dentry, name, value, size, flags);
- if (!rc && d_really_is_positive(dentry))
- fsstack_copy_attr_all(d_inode(dentry), d_inode(lower_dentry));
+ if (!rc && inode)
+ fsstack_copy_attr_all(inode, d_inode(lower_dentry));
out:
return rc;
}
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 148d11b514fb..9c3437c8a5b1 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -442,7 +442,8 @@ static int ecryptfs_write_inode_size_to_xattr(struct inode *ecryptfs_inode)
if (size < 0)
size = 8;
put_unaligned_be64(i_size_read(ecryptfs_inode), xattr_virt);
- rc = lower_inode->i_op->setxattr(lower_dentry, ECRYPTFS_XATTR_NAME,
+ rc = lower_inode->i_op->setxattr(lower_dentry, lower_inode,
+ ECRYPTFS_XATTR_NAME,
xattr_virt, size, 0);
inode_unlock(lower_inode);
if (rc)
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index c1400b109805..868c02317b05 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -51,7 +51,7 @@ static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
down_read(&ei->dax_sem);
- ret = __dax_fault(vma, vmf, ext2_get_block, NULL);
+ ret = __dax_fault(vma, vmf, ext2_get_block);
up_read(&ei->dax_sem);
if (vmf->flags & FAULT_FLAG_WRITE)
@@ -72,7 +72,7 @@ static int ext2_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
}
down_read(&ei->dax_sem);
- ret = __dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block, NULL);
+ ret = __dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block);
up_read(&ei->dax_sem);
if (flags & FAULT_FLAG_WRITE)
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index b675610391b8..fcbe58641e40 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -26,6 +26,7 @@
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
+#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
@@ -737,19 +738,18 @@ static int ext2_get_blocks(struct inode *inode,
* so that it's not found by another thread before it's
* initialised
*/
- err = dax_clear_sectors(inode->i_sb->s_bdev,
- le32_to_cpu(chain[depth-1].key) <<
- (inode->i_blkbits - 9),
- 1 << inode->i_blkbits);
+ err = sb_issue_zeroout(inode->i_sb,
+ le32_to_cpu(chain[depth-1].key), count,
+ GFP_NOFS);
if (err) {
mutex_unlock(&ei->truncate_mutex);
goto cleanup;
}
- }
+ } else
+ set_buffer_new(bh_result);
ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
mutex_unlock(&ei->truncate_mutex);
- set_buffer_new(bh_result);
got_it:
map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
if (count > blocks_to_boundary)
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index b78caf25f746..1d9379568aa8 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -922,16 +922,9 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
if (sbi->s_mount_opt & EXT2_MOUNT_DAX) {
- if (blocksize != PAGE_SIZE) {
- ext2_msg(sb, KERN_ERR,
- "error: unsupported blocksize for dax");
+ err = bdev_dax_supported(sb, blocksize);
+ if (err)
goto failed_mount;
- }
- if (!sb->s_bdev->bd_disk->fops->direct_access) {
- ext2_msg(sb, KERN_ERR,
- "error: device does not support dax");
- goto failed_mount;
- }
}
/* If the blocksize doesn't match, re-read the thing.. */
diff --git a/fs/ext2/xattr_security.c b/fs/ext2/xattr_security.c
index 7fd3b867ce65..7b9e9c1842d5 100644
--- a/fs/ext2/xattr_security.c
+++ b/fs/ext2/xattr_security.c
@@ -18,10 +18,11 @@ ext2_xattr_security_get(const struct xattr_handler *handler,
static int
ext2_xattr_security_set(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
- return ext2_xattr_set(d_inode(dentry), EXT2_XATTR_INDEX_SECURITY, name,
+ return ext2_xattr_set(inode, EXT2_XATTR_INDEX_SECURITY, name,
value, size, flags);
}
diff --git a/fs/ext2/xattr_trusted.c b/fs/ext2/xattr_trusted.c
index 0f85705ff519..65049b71af13 100644
--- a/fs/ext2/xattr_trusted.c
+++ b/fs/ext2/xattr_trusted.c
@@ -25,10 +25,11 @@ ext2_xattr_trusted_get(const struct xattr_handler *handler,
static int
ext2_xattr_trusted_set(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
- return ext2_xattr_set(d_inode(dentry), EXT2_XATTR_INDEX_TRUSTED, name,
+ return ext2_xattr_set(inode, EXT2_XATTR_INDEX_TRUSTED, name,
value, size, flags);
}
diff --git a/fs/ext2/xattr_user.c b/fs/ext2/xattr_user.c
index 1fafd27037cc..fb2f992ae763 100644
--- a/fs/ext2/xattr_user.c
+++ b/fs/ext2/xattr_user.c
@@ -29,13 +29,14 @@ ext2_xattr_user_get(const struct xattr_handler *handler,
static int
ext2_xattr_user_set(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
- if (!test_opt(dentry->d_sb, XATTR_USER))
+ if (!test_opt(inode->i_sb, XATTR_USER))
return -EOPNOTSUPP;
- return ext2_xattr_set(d_inode(dentry), EXT2_XATTR_INDEX_USER,
+ return ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER,
name, value, size, flags);
}
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index d478110c32a6..df44c877892a 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -202,7 +202,7 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (IS_ERR(handle))
result = VM_FAULT_SIGBUS;
else
- result = __dax_fault(vma, vmf, ext4_dax_get_block, NULL);
+ result = __dax_fault(vma, vmf, ext4_dax_get_block);
if (write) {
if (!IS_ERR(handle))
@@ -238,7 +238,7 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
result = VM_FAULT_SIGBUS;
else
result = __dax_pmd_fault(vma, addr, pmd, flags,
- ext4_dax_get_block, NULL);
+ ext4_dax_get_block);
if (write) {
if (!IS_ERR(handle))
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 20c5d52253b4..3822a5aedc61 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3417,16 +3417,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
- if (blocksize != PAGE_SIZE) {
- ext4_msg(sb, KERN_ERR,
- "error: unsupported blocksize for dax");
- goto failed_mount;
- }
- if (!sb->s_bdev->bd_disk->fops->direct_access) {
- ext4_msg(sb, KERN_ERR,
- "error: device does not support dax");
+ err = bdev_dax_supported(sb, blocksize);
+ if (err)
goto failed_mount;
- }
}
if (ext4_has_feature_encrypt(sb) && es->s_encryption_level) {
diff --git a/fs/ext4/xattr_security.c b/fs/ext4/xattr_security.c
index 123a7d010efe..a8921112030d 100644
--- a/fs/ext4/xattr_security.c
+++ b/fs/ext4/xattr_security.c
@@ -22,10 +22,11 @@ ext4_xattr_security_get(const struct xattr_handler *handler,
static int
ext4_xattr_security_set(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
- return ext4_xattr_set(d_inode(dentry), EXT4_XATTR_INDEX_SECURITY,
+ return ext4_xattr_set(inode, EXT4_XATTR_INDEX_SECURITY,
name, value, size, flags);
}
diff --git a/fs/ext4/xattr_trusted.c b/fs/ext4/xattr_trusted.c
index 60652fa24cbc..c7765c735714 100644
--- a/fs/ext4/xattr_trusted.c
+++ b/fs/ext4/xattr_trusted.c
@@ -29,10 +29,11 @@ ext4_xattr_trusted_get(const struct xattr_handler *handler,
static int
ext4_xattr_trusted_set(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
- return ext4_xattr_set(d_inode(dentry), EXT4_XATTR_INDEX_TRUSTED,
+ return ext4_xattr_set(inode, EXT4_XATTR_INDEX_TRUSTED,
name, value, size, flags);
}
diff --git a/fs/ext4/xattr_user.c b/fs/ext4/xattr_user.c
index 17a446ffecd3..ca20e423034b 100644
--- a/fs/ext4/xattr_user.c
+++ b/fs/ext4/xattr_user.c
@@ -30,12 +30,13 @@ ext4_xattr_user_get(const struct xattr_handler *handler,
static int
ext4_xattr_user_set(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
- if (!test_opt(dentry->d_sb, XATTR_USER))
+ if (!test_opt(inode->i_sb, XATTR_USER))
return -EOPNOTSUPP;
- return ext4_xattr_set(d_inode(dentry), EXT4_XATTR_INDEX_USER,
+ return ext4_xattr_set(inode, EXT4_XATTR_INDEX_USER,
name, value, size, flags);
}
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 00ea56797258..e3decae3acfb 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -50,10 +50,11 @@ static int f2fs_xattr_generic_get(const struct xattr_handler *handler,
}
static int f2fs_xattr_generic_set(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name, const void *value,
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *value,
size_t size, int flags)
{
- struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
switch (handler->flags) {
case F2FS_XATTR_INDEX_USER:
@@ -69,7 +70,7 @@ static int f2fs_xattr_generic_set(const struct xattr_handler *handler,
default:
return -EINVAL;
}
- return f2fs_setxattr(d_inode(dentry), handler->flags, name,
+ return f2fs_setxattr(inode, handler->flags, name,
value, size, NULL, flags);
}
@@ -95,11 +96,10 @@ static int f2fs_xattr_advise_get(const struct xattr_handler *handler,
}
static int f2fs_xattr_advise_set(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name, const void *value,
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *value,
size_t size, int flags)
{
- struct inode *inode = d_inode(dentry);
-
if (!inode_owner_or_capable(inode))
return -EPERM;
if (value == NULL)
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index b9419058108f..ccd4971cc6c1 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1719,10 +1719,10 @@ static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry,
return fuse_update_attributes(inode, stat, NULL, NULL);
}
-static int fuse_setxattr(struct dentry *entry, const char *name,
- const void *value, size_t size, int flags)
+static int fuse_setxattr(struct dentry *unused, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
- struct inode *inode = d_inode(entry);
struct fuse_conn *fc = get_fuse_conn(inode);
FUSE_ARGS(args);
struct fuse_setxattr_in inarg;
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 4a01f30e9995..271d93905bac 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -783,12 +783,15 @@ static int get_leaf_nr(struct gfs2_inode *dip, u32 index,
u64 *leaf_out)
{
__be64 *hash;
+ int error;
hash = gfs2_dir_get_hash_table(dip);
- if (IS_ERR(hash))
- return PTR_ERR(hash);
- *leaf_out = be64_to_cpu(*(hash + index));
- return 0;
+ error = PTR_ERR_OR_ZERO(hash);
+
+ if (!error)
+ *leaf_out = be64_to_cpu(*(hash + index));
+
+ return error;
}
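get_leaf_nr() now folds the IS_ERR()/PTR_ERR() pair into PTR_ERR_OR_ZERO(), which yields 0 for a valid pointer and the negative errno otherwise. A userspace sketch of that helper's semantics (stand-in constants, not the kernel's err.h):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline int is_err(const void *p)
{
	/* the top MAX_ERRNO addresses encode errnos */
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static inline long ptr_err_or_zero(const void *p)
{
	return is_err(p) ? (long)p : 0;
}

int main(void)
{
	long dummy;
	void *ok = &dummy, *bad = (void *)(long)-EIO;

	printf("ok -> %ld, bad -> %ld\n", ptr_err_or_zero(ok),
	       ptr_err_or_zero(bad));
	return 0;
}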
static int get_first_leaf(struct gfs2_inode *dip, u32 index,
@@ -798,7 +801,7 @@ static int get_first_leaf(struct gfs2_inode *dip, u32 index,
int error;
error = get_leaf_nr(dip, index, &leaf_no);
- if (!IS_ERR_VALUE(error))
+ if (!error)
error = get_leaf(dip, leaf_no, bh_out);
return error;
@@ -1014,7 +1017,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
index = name->hash >> (32 - dip->i_depth);
error = get_leaf_nr(dip, index, &leaf_no);
- if (IS_ERR_VALUE(error))
+ if (error)
return error;
/* Get the old leaf block */
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index f42ab53bd30d..3a2853504084 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -1251,10 +1251,10 @@ int __gfs2_xattr_set(struct inode *inode, const char *name,
}
static int gfs2_xattr_set(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
- struct inode *inode = d_inode(dentry);
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh;
int ret;
diff --git a/fs/hfs/attr.c b/fs/hfs/attr.c
index 064f92f17efc..d9a86919fdf6 100644
--- a/fs/hfs/attr.c
+++ b/fs/hfs/attr.c
@@ -13,10 +13,10 @@
#include "hfs_fs.h"
#include "btree.h"
-int hfs_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+int hfs_setxattr(struct dentry *unused, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
- struct inode *inode = d_inode(dentry);
struct hfs_find_data fd;
hfs_cat_rec rec;
struct hfs_cat_file *file;
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index fa3eed86837c..ee2f385811c8 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -212,7 +212,7 @@ extern void hfs_evict_inode(struct inode *);
extern void hfs_delete_inode(struct inode *);
/* attr.c */
-extern int hfs_setxattr(struct dentry *dentry, const char *name,
+extern int hfs_setxattr(struct dentry *dentry, struct inode *inode, const char *name,
const void *value, size_t size, int flags);
extern ssize_t hfs_getxattr(struct dentry *dentry, struct inode *inode,
const char *name, void *value, size_t size);
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index 4f118d282a7a..d37bb88dc746 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -424,7 +424,7 @@ static int copy_name(char *buffer, const char *xattr_name, int name_len)
return len;
}
-int hfsplus_setxattr(struct dentry *dentry, const char *name,
+int hfsplus_setxattr(struct inode *inode, const char *name,
const void *value, size_t size, int flags,
const char *prefix, size_t prefixlen)
{
@@ -437,8 +437,7 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
return -ENOMEM;
strcpy(xattr_name, prefix);
strcpy(xattr_name + prefixlen, name);
- res = __hfsplus_setxattr(d_inode(dentry), xattr_name, value, size,
- flags);
+ res = __hfsplus_setxattr(inode, xattr_name, value, size, flags);
kfree(xattr_name);
return res;
}
@@ -864,8 +863,9 @@ static int hfsplus_osx_getxattr(const struct xattr_handler *handler,
}
static int hfsplus_osx_setxattr(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *buffer, size_t size, int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *buffer,
+ size_t size, int flags)
{
/*
* Don't allow setting properly prefixed attributes
@@ -880,7 +880,7 @@ static int hfsplus_osx_setxattr(const struct xattr_handler *handler,
* creates), so we pass the name through unmodified (after
* ensuring it doesn't conflict with another namespace).
*/
- return __hfsplus_setxattr(d_inode(dentry), name, buffer, size, flags);
+ return __hfsplus_setxattr(inode, name, buffer, size, flags);
}
const struct xattr_handler hfsplus_xattr_osx_handler = {
diff --git a/fs/hfsplus/xattr.h b/fs/hfsplus/xattr.h
index d04ba6f58df2..68f6b539371f 100644
--- a/fs/hfsplus/xattr.h
+++ b/fs/hfsplus/xattr.h
@@ -21,7 +21,7 @@ extern const struct xattr_handler *hfsplus_xattr_handlers[];
int __hfsplus_setxattr(struct inode *inode, const char *name,
const void *value, size_t size, int flags);
-int hfsplus_setxattr(struct dentry *dentry, const char *name,
+int hfsplus_setxattr(struct inode *inode, const char *name,
const void *value, size_t size, int flags,
const char *prefix, size_t prefixlen);
diff --git a/fs/hfsplus/xattr_security.c b/fs/hfsplus/xattr_security.c
index ae2ca8c2e335..37b3efa733ef 100644
--- a/fs/hfsplus/xattr_security.c
+++ b/fs/hfsplus/xattr_security.c
@@ -23,10 +23,11 @@ static int hfsplus_security_getxattr(const struct xattr_handler *handler,
}
static int hfsplus_security_setxattr(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *buffer, size_t size, int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *buffer,
+ size_t size, int flags)
{
- return hfsplus_setxattr(dentry, name, buffer, size, flags,
+ return hfsplus_setxattr(inode, name, buffer, size, flags,
XATTR_SECURITY_PREFIX,
XATTR_SECURITY_PREFIX_LEN);
}
diff --git a/fs/hfsplus/xattr_trusted.c b/fs/hfsplus/xattr_trusted.c
index eae2947060aa..94519d6c627d 100644
--- a/fs/hfsplus/xattr_trusted.c
+++ b/fs/hfsplus/xattr_trusted.c
@@ -21,10 +21,11 @@ static int hfsplus_trusted_getxattr(const struct xattr_handler *handler,
}
static int hfsplus_trusted_setxattr(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *buffer, size_t size, int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *buffer,
+ size_t size, int flags)
{
- return hfsplus_setxattr(dentry, name, buffer, size, flags,
+ return hfsplus_setxattr(inode, name, buffer, size, flags,
XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
}
diff --git a/fs/hfsplus/xattr_user.c b/fs/hfsplus/xattr_user.c
index 3c9eec3e4c7b..fae6c0ea0030 100644
--- a/fs/hfsplus/xattr_user.c
+++ b/fs/hfsplus/xattr_user.c
@@ -21,10 +21,11 @@ static int hfsplus_user_getxattr(const struct xattr_handler *handler,
}
static int hfsplus_user_setxattr(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *buffer, size_t size, int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *buffer,
+ size_t size, int flags)
{
- return hfsplus_setxattr(dentry, name, buffer, size, flags,
+ return hfsplus_setxattr(inode, name, buffer, size, flags,
XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
}
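
Every filesystem touched in this series gets the same mechanical conversion: the xattr_handler ->set() method now receives the dentry and the inode as separate arguments, so handlers that only ever needed the inode can drop their d_inode(dentry) lookups. A minimal handler under the new signature looks roughly like this (all names outside the xattr_handler fields are placeholders):

	static int foo_setxattr(const struct xattr_handler *handler,
				struct dentry *unused, struct inode *inode,
				const char *name, const void *buffer,
				size_t size, int flags)
	{
		/* operate on the inode directly; the dentry is unused */
		return __foo_setxattr(inode, name, buffer, size, flags);
	}

	static const struct xattr_handler foo_xattr_handler = {
		.prefix = "user.",
		.set    = foo_setxattr,
	};
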
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 458cf463047b..82067ca22f2b 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -15,6 +15,7 @@
#include <linux/sched.h>
#include <linux/bitmap.h>
#include <linux/slab.h>
+#include <linux/seq_file.h>
/* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */
@@ -453,10 +454,6 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
int lowercase, eas, chk, errs, chkdsk, timeshift;
int o;
struct hpfs_sb_info *sbi = hpfs_sb(s);
- char *new_opts = kstrdup(data, GFP_KERNEL);
-
- if (!new_opts)
- return -ENOMEM;
sync_filesystem(s);
@@ -493,17 +490,44 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
if (!(*flags & MS_RDONLY)) mark_dirty(s, 1);
- replace_mount_options(s, new_opts);
-
hpfs_unlock(s);
return 0;
out_err:
hpfs_unlock(s);
- kfree(new_opts);
return -EINVAL;
}
+static int hpfs_show_options(struct seq_file *seq, struct dentry *root)
+{
+ struct hpfs_sb_info *sbi = hpfs_sb(root->d_sb);
+
+ seq_printf(seq, ",uid=%u", from_kuid_munged(&init_user_ns, sbi->sb_uid));
+ seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, sbi->sb_gid));
+ seq_printf(seq, ",umask=%03o", (~sbi->sb_mode & 0777));
+ if (sbi->sb_lowercase)
+ seq_printf(seq, ",case=lower");
+ if (!sbi->sb_chk)
+ seq_printf(seq, ",check=none");
+ if (sbi->sb_chk == 2)
+ seq_printf(seq, ",check=strict");
+ if (!sbi->sb_err)
+ seq_printf(seq, ",errors=continue");
+ if (sbi->sb_err == 2)
+ seq_printf(seq, ",errors=panic");
+ if (!sbi->sb_chkdsk)
+ seq_printf(seq, ",chkdsk=no");
+ if (sbi->sb_chkdsk == 2)
+ seq_printf(seq, ",chkdsk=always");
+ if (!sbi->sb_eas)
+ seq_printf(seq, ",eas=no");
+ if (sbi->sb_eas == 1)
+ seq_printf(seq, ",eas=ro");
+ if (sbi->sb_timeshift)
+ seq_printf(seq, ",timeshift=%d", sbi->sb_timeshift);
+ return 0;
+}
+
/* Super operations */
static const struct super_operations hpfs_sops =
@@ -514,7 +538,7 @@ static const struct super_operations hpfs_sops =
.put_super = hpfs_put_super,
.statfs = hpfs_statfs,
.remount_fs = hpfs_remount_fs,
- .show_options = generic_show_options,
+ .show_options = hpfs_show_options,
};
static int hpfs_fill_super(struct super_block *s, void *options, int silent)
@@ -537,8 +561,6 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
int o;
- save_mount_options(s, options);
-
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi) {
return -ENOMEM;
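
With the switch from generic_show_options to a real ->show_options method, /proc/mounts reflects the live superblock state instead of a saved copy of the original mount string. For a volume mounted with uid=1000, gid=100 and otherwise default options, the handler above would emit something like (illustrative output derived from the seq_printf calls; the leading flags come from the VFS):

	/dev/sdb1 /mnt/hpfs hpfs rw,relatime,uid=1000,gid=100,umask=022 0 0
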
diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c
index 3ed9a4b49778..c2332e30f218 100644
--- a/fs/jffs2/security.c
+++ b/fs/jffs2/security.c
@@ -57,10 +57,11 @@ static int jffs2_security_getxattr(const struct xattr_handler *handler,
}
static int jffs2_security_setxattr(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *buffer, size_t size, int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *buffer,
+ size_t size, int flags)
{
- return do_jffs2_setxattr(d_inode(dentry), JFFS2_XPREFIX_SECURITY,
+ return do_jffs2_setxattr(inode, JFFS2_XPREFIX_SECURITY,
name, buffer, size, flags);
}
diff --git a/fs/jffs2/xattr_trusted.c b/fs/jffs2/xattr_trusted.c
index 4ebecff1d922..5d6030826c52 100644
--- a/fs/jffs2/xattr_trusted.c
+++ b/fs/jffs2/xattr_trusted.c
@@ -25,10 +25,11 @@ static int jffs2_trusted_getxattr(const struct xattr_handler *handler,
}
static int jffs2_trusted_setxattr(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *buffer, size_t size, int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *buffer,
+ size_t size, int flags)
{
- return do_jffs2_setxattr(d_inode(dentry), JFFS2_XPREFIX_TRUSTED,
+ return do_jffs2_setxattr(inode, JFFS2_XPREFIX_TRUSTED,
name, buffer, size, flags);
}
diff --git a/fs/jffs2/xattr_user.c b/fs/jffs2/xattr_user.c
index bce249e1b277..9d027b4abcf9 100644
--- a/fs/jffs2/xattr_user.c
+++ b/fs/jffs2/xattr_user.c
@@ -25,10 +25,11 @@ static int jffs2_user_getxattr(const struct xattr_handler *handler,
}
static int jffs2_user_setxattr(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *buffer, size_t size, int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *buffer,
+ size_t size, int flags)
{
- return do_jffs2_setxattr(d_inode(dentry), JFFS2_XPREFIX_USER,
+ return do_jffs2_setxattr(inode, JFFS2_XPREFIX_USER,
name, buffer, size, flags);
}
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index beb182b503b3..0bf3c33aedff 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -943,11 +943,10 @@ static int jfs_xattr_get(const struct xattr_handler *handler,
}
static int jfs_xattr_set(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
- struct inode *inode = d_inode(dentry);
-
name = xattr_full_name(handler, name);
return __jfs_xattr_set(inode, name, value, size, flags);
}
@@ -962,11 +961,10 @@ static int jfs_xattr_get_os2(const struct xattr_handler *handler,
}
static int jfs_xattr_set_os2(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
- struct inode *inode = d_inode(dentry);
-
if (is_known_namespace(name))
return -EOPNOTSUPP;
return __jfs_xattr_set(inode, name, value, size, flags);
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index 1719649d7ad7..63b925d5ba1e 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -160,10 +160,11 @@ static int kernfs_node_setsecdata(struct kernfs_node *kn, void **secdata,
return 0;
}
-int kernfs_iop_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+int kernfs_iop_setxattr(struct dentry *unused, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
- struct kernfs_node *kn = dentry->d_fsdata;
+ struct kernfs_node *kn = inode->i_private;
struct kernfs_iattrs *attrs;
void *secdata;
int error;
@@ -175,11 +176,11 @@ int kernfs_iop_setxattr(struct dentry *dentry, const char *name,
if (!strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)) {
const char *suffix = name + XATTR_SECURITY_PREFIX_LEN;
- error = security_inode_setsecurity(d_inode(dentry), suffix,
+ error = security_inode_setsecurity(inode, suffix,
value, size, flags);
if (error)
return error;
- error = security_inode_getsecctx(d_inode(dentry),
+ error = security_inode_getsecctx(inode,
&secdata, &secdata_len);
if (error)
return error;
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index 45c9192c276e..37159235ac10 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -81,7 +81,8 @@ int kernfs_iop_permission(struct inode *inode, int mask);
int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr);
int kernfs_iop_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat);
-int kernfs_iop_setxattr(struct dentry *dentry, const char *name, const void *value,
+int kernfs_iop_setxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value,
size_t size, int flags);
int kernfs_iop_removexattr(struct dentry *dentry, const char *name);
ssize_t kernfs_iop_getxattr(struct dentry *dentry, struct inode *inode,
diff --git a/fs/libfs.c b/fs/libfs.c
index 8765ff1adc07..3db2721144c2 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -1118,8 +1118,9 @@ static int empty_dir_setattr(struct dentry *dentry, struct iattr *attr)
return -EPERM;
}
-static int empty_dir_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+static int empty_dir_setxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
return -EOPNOTSUPP;
}
diff --git a/fs/namei.c b/fs/namei.c
index 5375571cf6e1..4c4f95ac8aa5 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -35,6 +35,7 @@
#include <linux/fs_struct.h>
#include <linux/posix_acl.h>
#include <linux/hash.h>
+#include <linux/bitops.h>
#include <asm/uaccess.h>
#include "internal.h"
@@ -1797,74 +1798,144 @@ static int walk_component(struct nameidata *nd, int flags)
#include <asm/word-at-a-time.h>
-#ifdef CONFIG_64BIT
+#ifdef HASH_MIX
-static inline unsigned int fold_hash(unsigned long hash)
-{
- return hash_64(hash, 32);
-}
+/* Architecture provides HASH_MIX and fold_hash() in <asm/hash.h> */
+#elif defined(CONFIG_64BIT)
/*
- * This is George Marsaglia's XORSHIFT generator.
- * It implements a maximum-period LFSR in only a few
- * instructions. It also has the property (required
- * by hash_name()) that mix_hash(0) = 0.
+ * Register pressure in the mixing function is an issue, particularly
+ * on 32-bit x86, but almost any function requires one state value and
+ * one temporary. Instead, use a function designed for two state values
+ * and no temporaries.
+ *
+ * This function cannot create a collision in only two iterations, so
+ * we have two iterations to achieve avalanche. In those two iterations,
+ * we have six layers of mixing, which is enough to spread one bit's
+ * influence out to 2^6 = 64 state bits.
+ *
+ * Rotate constants are scored by considering either 64 one-bit input
+ * deltas or 64*63/2 = 2016 two-bit input deltas, and finding the
+ * probability of that delta causing a change to each of the 128 output
+ * bits, using a sample of random initial states.
+ *
+ * The Shannon entropy of the computed probabilities is then summed
+ * to produce a score. Ideally, any input change has a 50% chance of
+ * toggling any given output bit.
+ *
+ * Mixing scores (in bits) for (12,45):
+ * Input delta: 1-bit 2-bit
+ * 1 round: 713.3 42542.6
+ * 2 rounds: 2753.7 140389.8
+ * 3 rounds: 5954.1 233458.2
+ * 4 rounds: 7862.6 256672.2
+ * Perfect: 8192 258048
+ * (64*128) (64*63/2 * 128)
*/
-static inline unsigned long mix_hash(unsigned long hash)
+#define HASH_MIX(x, y, a) \
+ ( x ^= (a), \
+ y ^= x, x = rol64(x,12),\
+ x += y, y = rol64(y,45),\
+ y *= 9 )
+
+/*
+ * Fold two longs into one 32-bit hash value. This must be fast, but
+ * latency isn't quite as critical, as there is a fair bit of additional
+ * work done before the hash value is used.
+ */
+static inline unsigned int fold_hash(unsigned long x, unsigned long y)
{
- hash ^= hash << 13;
- hash ^= hash >> 7;
- hash ^= hash << 17;
- return hash;
+ y ^= x * GOLDEN_RATIO_64;
+ y *= GOLDEN_RATIO_64;
+ return y >> 32;
}
#else /* 32-bit case */
-#define fold_hash(x) (x)
+/*
+ * Mixing scores (in bits) for (7,20):
+ * Input delta: 1-bit 2-bit
+ * 1 round: 330.3 9201.6
+ * 2 rounds: 1246.4 25475.4
+ * 3 rounds: 1907.1 31295.1
+ * 4 rounds: 2042.3 31718.6
+ * Perfect: 2048 31744
+ * (32*64) (32*31/2 * 64)
+ */
+#define HASH_MIX(x, y, a) \
+ ( x ^= (a), \
+ y ^= x, x = rol32(x, 7),\
+ x += y, y = rol32(y,20),\
+ y *= 9 )
-static inline unsigned long mix_hash(unsigned long hash)
+static inline unsigned int fold_hash(unsigned long x, unsigned long y)
{
- hash ^= hash << 13;
- hash ^= hash >> 17;
- hash ^= hash << 5;
- return hash;
+ /* Use arch-optimized multiply if one exists */
+ return __hash_32(y ^ __hash_32(x));
}
#endif
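
The 64-bit mix above is easy to exercise outside the kernel. Below is a stand-alone sketch of the HASH_MIX/fold_hash pair applied word-at-a-time to a string; GOLDEN_RATIO_64 carries the golden-ratio constant this series uses in <linux/hash.h>, and the unaligned load plus final-word masking are simplified to memcpy into a zeroed word, so treat it as illustrative rather than the kernel code:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define GOLDEN_RATIO_64 0x61C8864680B583EBull	/* as in <linux/hash.h> */

	static inline uint64_t rol64(uint64_t w, unsigned s)
	{
		return (w << s) | (w >> (64 - s));
	}

	#define HASH_MIX(x, y, a) \
		( x ^= (a), \
		  y ^= x, x = rol64(x, 12), \
		  x += y, y = rol64(y, 45), \
		  y *= 9 )

	static uint32_t fold_hash(uint64_t x, uint64_t y)
	{
		y ^= x * GOLDEN_RATIO_64;
		y *= GOLDEN_RATIO_64;
		return y >> 32;
	}

	int main(void)
	{
		const char *name = "path-component";
		size_t len = strlen(name);
		uint64_t x = 0, y = 0, a;

		while (len >= sizeof(a)) {	/* one machine word per round */
			memcpy(&a, name, sizeof(a));
			HASH_MIX(x, y, a);
			name += sizeof(a);
			len -= sizeof(a);
		}
		a = 0;				/* final word: 0..7 payload bytes */
		memcpy(&a, name, len);
		x ^= a;
		printf("hash = %08" PRIx32 "\n", fold_hash(x, y));
		return 0;
	}
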
-unsigned int full_name_hash(const unsigned char *name, unsigned int len)
+/*
+ * Return the hash of a string of known length. This is carefully
+ * designed to match hash_name(), which is the more critical function.
+ * In particular, we must end by hashing a final word containing 0..7
+ * payload bytes, to match the way that hash_name() iterates until it
+ * finds the delimiter after the name.
+ */
+unsigned int full_name_hash(const char *name, unsigned int len)
{
- unsigned long a, hash = 0;
+ unsigned long a, x = 0, y = 0;
for (;;) {
+ if (!len)
+ goto done;
a = load_unaligned_zeropad(name);
if (len < sizeof(unsigned long))
break;
- hash = mix_hash(hash + a);
+ HASH_MIX(x, y, a);
name += sizeof(unsigned long);
len -= sizeof(unsigned long);
- if (!len)
- goto done;
}
- hash += a & bytemask_from_count(len);
+ x ^= a & bytemask_from_count(len);
done:
- return fold_hash(hash);
+ return fold_hash(x, y);
}
EXPORT_SYMBOL(full_name_hash);
+/* Return the "hash_len" (hash and length) of a null-terminated string */
+u64 hashlen_string(const char *name)
+{
+ unsigned long a = 0, x = 0, y = 0, adata, mask, len;
+ const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
+
+ len = -sizeof(unsigned long);
+ do {
+ HASH_MIX(x, y, a);
+ len += sizeof(unsigned long);
+ a = load_unaligned_zeropad(name+len);
+ } while (!has_zero(a, &adata, &constants));
+
+ adata = prep_zero_mask(a, adata, &constants);
+ mask = create_zero_mask(adata);
+ x ^= a & zero_bytemask(mask);
+
+ return hashlen_create(fold_hash(x, y), len + find_zero(mask));
+}
+EXPORT_SYMBOL(hashlen_string);
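
The hash_len value packs the component length into the high 32 bits and the hash into the low 32 bits, so a single 64-bit compare can check both at once during dcache lookup. The helpers look roughly like this (mirroring <linux/stringhash.h> as of this series):

	#define hashlen_create(hash, len)  ((u64)(len) << 32 | (u32)(hash))
	#define hashlen_hash(hashlen)      ((u32)(hashlen))
	#define hashlen_len(hashlen)       ((u32)((hashlen) >> 32))
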
+
/*
* Calculate the length and hash of the path component, and
* return the "hash_len" as the result.
*/
static inline u64 hash_name(const char *name)
{
- unsigned long a, b, adata, bdata, mask, hash, len;
+ unsigned long a = 0, b, x = 0, y = 0, adata, bdata, mask, len;
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
- hash = a = 0;
len = -sizeof(unsigned long);
do {
- hash = mix_hash(hash + a);
+ HASH_MIX(x, y, a);
len += sizeof(unsigned long);
a = load_unaligned_zeropad(name+len);
b = a ^ REPEAT_BYTE('/');
@@ -1872,25 +1943,40 @@ static inline u64 hash_name(const char *name)
adata = prep_zero_mask(a, adata, &constants);
bdata = prep_zero_mask(b, bdata, &constants);
-
mask = create_zero_mask(adata | bdata);
+ x ^= a & zero_bytemask(mask);
- hash += a & zero_bytemask(mask);
- len += find_zero(mask);
- return hashlen_create(fold_hash(hash), len);
+ return hashlen_create(fold_hash(x, y), len + find_zero(mask));
}
-#else
+#else /* !CONFIG_DCACHE_WORD_ACCESS: Slow, byte-at-a-time version */
-unsigned int full_name_hash(const unsigned char *name, unsigned int len)
+/* Return the hash of a string of known length */
+unsigned int full_name_hash(const char *name, unsigned int len)
{
unsigned long hash = init_name_hash();
while (len--)
- hash = partial_name_hash(*name++, hash);
+ hash = partial_name_hash((unsigned char)*name++, hash);
return end_name_hash(hash);
}
EXPORT_SYMBOL(full_name_hash);
+/* Return the "hash_len" (hash and length) of a null-terminated string */
+u64 hashlen_string(const char *name)
+{
+ unsigned long hash = init_name_hash();
+ unsigned long len = 0, c;
+
+ c = (unsigned char)*name;
+ while (c) {
+ len++;
+ hash = partial_name_hash(c, hash);
+ c = (unsigned char)name[len];
+ }
+ return hashlen_create(end_name_hash(hash), len);
+}
+EXPORT_SYMBOL(hashlen_string);
+
/*
* We know there's a real path component here of at least
* one character.
@@ -1934,7 +2020,7 @@ static int link_path_walk(const char *name, struct nameidata *nd)
int type;
err = may_lookup(nd);
- if (err)
+ if (err)
return err;
hash_len = hash_name(name);
@@ -4542,7 +4628,6 @@ int readlink_copy(char __user *buffer, int buflen, const char *link)
out:
return len;
}
-EXPORT_SYMBOL(readlink_copy);
/*
* A helper for ->readlink(). This should be used *ONLY* for symlinks that
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 618ced381a14..aaa2e8d3df6f 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -217,7 +217,8 @@ static u32 initiate_file_draining(struct nfs_client *clp,
}
if (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
- &args->cbl_range)) {
+ &args->cbl_range,
+ be32_to_cpu(args->cbl_stateid.seqid))) {
rv = NFS4_OK;
goto unlock;
}
@@ -500,8 +501,10 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
cps->slot = slot;
/* The ca_maxresponsesize_cached is 0 with no DRC */
- if (args->csa_cachethis != 0)
- return htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
+ if (args->csa_cachethis != 0) {
+ status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
+ goto out_unlock;
+ }
/*
* Check for pending referring calls. If a match is found, a
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 976c90608e56..d81f96aacd51 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -146,10 +146,16 @@ static __be32 decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
p = read_buf(xdr, NFS4_STATEID_SIZE);
if (unlikely(p == NULL))
return htonl(NFS4ERR_RESOURCE);
- memcpy(stateid, p, NFS4_STATEID_SIZE);
+ memcpy(stateid->data, p, NFS4_STATEID_SIZE);
return 0;
}
+static __be32 decode_delegation_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
+{
+ stateid->type = NFS4_DELEGATION_STATEID_TYPE;
+ return decode_stateid(xdr, stateid);
+}
+
static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound_hdr_arg *hdr)
{
__be32 *p;
@@ -211,7 +217,7 @@ static __be32 decode_recall_args(struct svc_rqst *rqstp, struct xdr_stream *xdr,
__be32 *p;
__be32 status;
- status = decode_stateid(xdr, &args->stateid);
+ status = decode_delegation_stateid(xdr, &args->stateid);
if (unlikely(status != 0))
goto out;
p = read_buf(xdr, 4);
@@ -227,6 +233,11 @@ out:
}
#if defined(CONFIG_NFS_V4_1)
+static __be32 decode_layout_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
+{
+ stateid->type = NFS4_LAYOUT_STATEID_TYPE;
+ return decode_stateid(xdr, stateid);
+}
static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp,
struct xdr_stream *xdr,
@@ -263,7 +274,7 @@ static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp,
}
p = xdr_decode_hyper(p, &args->cbl_range.offset);
p = xdr_decode_hyper(p, &args->cbl_range.length);
- status = decode_stateid(xdr, &args->cbl_stateid);
+ status = decode_layout_stateid(xdr, &args->cbl_stateid);
if (unlikely(status != 0))
goto out;
} else if (args->cbl_recall_type == RETURN_FSID) {
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 5166adcfc0fb..322c2585bc34 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -875,15 +875,16 @@ int nfs_delegations_present(struct nfs_client *clp)
/**
* nfs4_copy_delegation_stateid - Copy inode's state ID information
- * @dst: stateid data structure to fill in
* @inode: inode to check
* @flags: delegation type requirement
+ * @dst: stateid data structure to fill in
+ * @cred: optional argument to retrieve credential
*
* Returns "true" and fills in "dst->data" if inode had a delegation,
* otherwise "false" is returned.
*/
-bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode,
- fmode_t flags)
+bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags,
+ nfs4_stateid *dst, struct rpc_cred **cred)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_delegation *delegation;
@@ -896,6 +897,8 @@ bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode,
if (ret) {
nfs4_stateid_copy(dst, &delegation->stateid);
nfs_mark_delegation_referenced(delegation);
+ if (cred)
+ *cred = get_rpccred(delegation->cred);
}
rcu_read_unlock();
return ret;
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 333063e032f0..64724d252a79 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -56,7 +56,7 @@ void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync);
int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type);
int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
-bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode, fmode_t flags);
+bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, struct rpc_cred **cred);
void nfs_mark_delegation_referenced(struct nfs_delegation *delegation);
int nfs4_have_delegation(struct inode *inode, fmode_t flags);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 741a92c470bb..979b3c4dee6a 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -87,6 +87,7 @@ struct nfs_direct_req {
int mirror_count;
ssize_t count, /* bytes actually processed */
+ max_count, /* max expected count */
bytes_left, /* bytes left to be sent */
io_start, /* start of IO */
error; /* any reported error */
@@ -123,6 +124,8 @@ nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
int i;
ssize_t count;
+ WARN_ON_ONCE(dreq->count >= dreq->max_count);
+
if (dreq->mirror_count == 1) {
dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes;
dreq->count += hdr->good_bytes;
@@ -275,7 +278,7 @@ static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
struct nfs_direct_req *dreq)
{
- cinfo->lock = &dreq->inode->i_lock;
+ cinfo->inode = dreq->inode;
cinfo->mds = &dreq->mds_cinfo;
cinfo->ds = &dreq->ds_cinfo;
cinfo->dreq = dreq;
@@ -591,7 +594,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
goto out_unlock;
dreq->inode = inode;
- dreq->bytes_left = count;
+ dreq->bytes_left = dreq->max_count = count;
dreq->io_start = iocb->ki_pos;
dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
l_ctx = nfs_get_lock_context(dreq->ctx);
@@ -630,13 +633,13 @@ nfs_direct_write_scan_commit_list(struct inode *inode,
struct list_head *list,
struct nfs_commit_info *cinfo)
{
- spin_lock(cinfo->lock);
+ spin_lock(&cinfo->inode->i_lock);
#ifdef CONFIG_NFS_V4_1
if (cinfo->ds != NULL && cinfo->ds->nwritten != 0)
NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
#endif
nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
- spin_unlock(cinfo->lock);
+ spin_unlock(&cinfo->inode->i_lock);
}
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
@@ -671,13 +674,13 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
if (!nfs_pageio_add_request(&desc, req)) {
nfs_list_remove_request(req);
nfs_list_add_request(req, &failed);
- spin_lock(cinfo.lock);
+ spin_lock(&cinfo.inode->i_lock);
dreq->flags = 0;
if (desc.pg_error < 0)
dreq->error = desc.pg_error;
else
dreq->error = -EIO;
- spin_unlock(cinfo.lock);
+ spin_unlock(&cinfo.inode->i_lock);
}
nfs_release_request(req);
}
@@ -1023,7 +1026,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
goto out_unlock;
dreq->inode = inode;
- dreq->bytes_left = iov_iter_count(iter);
+ dreq->bytes_left = dreq->max_count = iov_iter_count(iter);
dreq->io_start = pos;
dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
l_ctx = nfs_get_lock_context(dreq->ctx);
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 3384dc8e6683..aa59757389dc 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -795,7 +795,7 @@ filelayout_alloc_commit_info(struct pnfs_layout_segment *lseg,
buckets[i].direct_verf.committed = NFS_INVALID_STABLE_HOW;
}
- spin_lock(cinfo->lock);
+ spin_lock(&cinfo->inode->i_lock);
if (cinfo->ds->nbuckets >= size)
goto out;
for (i = 0; i < cinfo->ds->nbuckets; i++) {
@@ -811,7 +811,7 @@ filelayout_alloc_commit_info(struct pnfs_layout_segment *lseg,
swap(cinfo->ds->buckets, buckets);
cinfo->ds->nbuckets = size;
out:
- spin_unlock(cinfo->lock);
+ spin_unlock(&cinfo->inode->i_lock);
kfree(buckets);
return 0;
}
@@ -890,6 +890,7 @@ filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
0,
NFS4_MAX_UINT64,
IOMODE_READ,
+ false,
GFP_KERNEL);
if (IS_ERR(pgio->pg_lseg)) {
pgio->pg_error = PTR_ERR(pgio->pg_lseg);
@@ -915,6 +916,7 @@ filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
0,
NFS4_MAX_UINT64,
IOMODE_RW,
+ false,
GFP_NOFS);
if (IS_ERR(pgio->pg_lseg)) {
pgio->pg_error = PTR_ERR(pgio->pg_lseg);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 0cb1abd535e3..0e8018bc9880 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -26,6 +26,8 @@
#define FF_LAYOUT_POLL_RETRY_MAX (15*HZ)
+static struct group_info *ff_zero_group;
+
static struct pnfs_layout_hdr *
ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
@@ -53,14 +55,15 @@ ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
kfree(FF_LAYOUT_FROM_HDR(lo));
}
-static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
+static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
__be32 *p;
p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
if (unlikely(p == NULL))
return -ENOBUFS;
- memcpy(stateid, p, NFS4_STATEID_SIZE);
+ stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
+ memcpy(stateid->data, p, NFS4_STATEID_SIZE);
dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
p[0], p[1], p[2], p[3]);
return 0;
@@ -211,10 +214,16 @@ static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
+ struct rpc_cred *cred;
+
ff_layout_remove_mirror(mirror);
kfree(mirror->fh_versions);
- if (mirror->cred)
- put_rpccred(mirror->cred);
+ cred = rcu_access_pointer(mirror->ro_cred);
+ if (cred)
+ put_rpccred(cred);
+ cred = rcu_access_pointer(mirror->rw_cred);
+ if (cred)
+ put_rpccred(cred);
nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
kfree(mirror);
}
@@ -290,6 +299,8 @@ ff_lseg_merge(struct pnfs_layout_segment *new,
{
u64 new_end, old_end;
+ if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
+ return false;
if (new->pls_range.iomode != old->pls_range.iomode)
return false;
old_end = pnfs_calc_offset_end(old->pls_range.offset,
@@ -310,8 +321,6 @@ ff_lseg_merge(struct pnfs_layout_segment *new,
new_end);
if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
set_bit(NFS_LSEG_ROC, &new->pls_flags);
- if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
- set_bit(NFS_LSEG_LAYOUTRETURN, &new->pls_flags);
return true;
}
@@ -407,8 +416,9 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
struct nfs4_ff_layout_mirror *mirror;
struct nfs4_deviceid devid;
struct nfs4_deviceid_node *idnode;
- u32 ds_count;
- u32 fh_count;
+ struct auth_cred acred = { .group_info = ff_zero_group };
+ struct rpc_cred __rcu *cred;
+ u32 ds_count, fh_count, id;
int j;
rc = -EIO;
@@ -456,7 +466,7 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
fls->mirror_array[i]->efficiency = be32_to_cpup(p);
/* stateid */
- rc = decode_stateid(&stream, &fls->mirror_array[i]->stateid);
+ rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
if (rc)
goto out_err_free;
@@ -484,24 +494,49 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
fls->mirror_array[i]->fh_versions_cnt = fh_count;
/* user */
- rc = decode_name(&stream, &fls->mirror_array[i]->uid);
+ rc = decode_name(&stream, &id);
if (rc)
goto out_err_free;
+ acred.uid = make_kuid(&init_user_ns, id);
+
/* group */
- rc = decode_name(&stream, &fls->mirror_array[i]->gid);
+ rc = decode_name(&stream, &id);
if (rc)
goto out_err_free;
+ acred.gid = make_kgid(&init_user_ns, id);
+
+ /* find the cred for it */
+ rcu_assign_pointer(cred, rpc_lookup_generic_cred(&acred, 0, gfp_flags));
+ if (IS_ERR(cred)) {
+ rc = PTR_ERR(cred);
+ goto out_err_free;
+ }
+
+ if (lgr->range.iomode == IOMODE_READ)
+ rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
+ else
+ rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
+
mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
if (mirror != fls->mirror_array[i]) {
+ /* swap cred ptrs so free_mirror will clean up old */
+ if (lgr->range.iomode == IOMODE_READ) {
+ cred = xchg(&mirror->ro_cred, cred);
+ rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
+ } else {
+ cred = xchg(&mirror->rw_cred, cred);
+ rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
+ }
ff_layout_free_mirror(fls->mirror_array[i]);
fls->mirror_array[i] = mirror;
}
- dprintk("%s: uid %d gid %d\n", __func__,
- fls->mirror_array[i]->uid,
- fls->mirror_array[i]->gid);
+ dprintk("%s: iomode %s uid %u gid %u\n", __func__,
+ lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
+ from_kuid(&init_user_ns, acred.uid),
+ from_kgid(&init_user_ns, acred.gid));
}
p = xdr_inline_decode(&stream, 4);
@@ -745,7 +780,7 @@ ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
else {
int i;
- spin_lock(cinfo->lock);
+ spin_lock(&cinfo->inode->i_lock);
if (cinfo->ds->nbuckets != 0)
kfree(buckets);
else {
@@ -759,7 +794,7 @@ ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg,
NFS_INVALID_STABLE_HOW;
}
}
- spin_unlock(cinfo->lock);
+ spin_unlock(&cinfo->inode->i_lock);
return 0;
}
}
@@ -786,6 +821,36 @@ ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
}
static void
+ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
+ struct nfs_page *req,
+ bool strict_iomode)
+{
+retry_strict:
+ pnfs_put_lseg(pgio->pg_lseg);
+ pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
+ req->wb_context,
+ 0,
+ NFS4_MAX_UINT64,
+ IOMODE_READ,
+ strict_iomode,
+ GFP_KERNEL);
+ if (IS_ERR(pgio->pg_lseg)) {
+ pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+ }
+
+ /* If we weren't asked for a strict iomode, we got an IOMODE_RW
+ * segment, and the server wants to avoid READs there, then
+ * retry with strict_iomode set.
+ */
+ if (pgio->pg_lseg && !strict_iomode &&
+ ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
+ strict_iomode = true;
+ goto retry_strict;
+ }
+}
+
+static void
ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req)
{
@@ -795,26 +860,23 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
int ds_idx;
/* Use full layout for now */
- if (!pgio->pg_lseg) {
- pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
- req->wb_context,
- 0,
- NFS4_MAX_UINT64,
- IOMODE_READ,
- GFP_KERNEL);
- if (IS_ERR(pgio->pg_lseg)) {
- pgio->pg_error = PTR_ERR(pgio->pg_lseg);
- pgio->pg_lseg = NULL;
- return;
- }
- }
+ if (!pgio->pg_lseg)
+ ff_layout_pg_get_read(pgio, req, false);
+ else if (ff_layout_avoid_read_on_rw(pgio->pg_lseg))
+ ff_layout_pg_get_read(pgio, req, true);
+
/* If no lseg, fall back to read through mds */
if (pgio->pg_lseg == NULL)
goto out_mds;
ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx);
- if (!ds)
- goto out_mds;
+ if (!ds) {
+ if (ff_layout_no_fallback_to_mds(pgio->pg_lseg))
+ goto out_pnfs;
+ else
+ goto out_mds;
+ }
+
mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
pgio->pg_mirror_idx = ds_idx;
@@ -828,6 +890,12 @@ out_mds:
pnfs_put_lseg(pgio->pg_lseg);
pgio->pg_lseg = NULL;
nfs_pageio_reset_read_mds(pgio);
+ return;
+
+out_pnfs:
+ pnfs_set_lo_fail(pgio->pg_lseg);
+ pnfs_put_lseg(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
}
static void
@@ -847,6 +915,7 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
0,
NFS4_MAX_UINT64,
IOMODE_RW,
+ false,
GFP_NOFS);
if (IS_ERR(pgio->pg_lseg)) {
pgio->pg_error = PTR_ERR(pgio->pg_lseg);
@@ -870,8 +939,12 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
for (i = 0; i < pgio->pg_mirror_count; i++) {
ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, i, true);
- if (!ds)
- goto out_mds;
+ if (!ds) {
+ if (ff_layout_no_fallback_to_mds(pgio->pg_lseg))
+ goto out_pnfs;
+ else
+ goto out_mds;
+ }
pgm = &pgio->pg_mirrors[i];
mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
@@ -883,6 +956,12 @@ out_mds:
pnfs_put_lseg(pgio->pg_lseg);
pgio->pg_lseg = NULL;
nfs_pageio_reset_write_mds(pgio);
+ return;
+
+out_pnfs:
+ pnfs_set_lo_fail(pgio->pg_lseg);
+ pnfs_put_lseg(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
}
static unsigned int
@@ -895,6 +974,7 @@ ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
0,
NFS4_MAX_UINT64,
IOMODE_RW,
+ false,
GFP_NOFS);
if (IS_ERR(pgio->pg_lseg)) {
pgio->pg_error = PTR_ERR(pgio->pg_lseg);
@@ -1067,8 +1147,7 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
rpc_wake_up(&tbl->slot_tbl_waitq);
/* fall through */
default:
- if (ff_layout_no_fallback_to_mds(lseg) ||
- ff_layout_has_available_ds(lseg))
+ if (ff_layout_avoid_mds_available_ds(lseg))
return -NFS4ERR_RESET_TO_PNFS;
reset:
dprintk("%s Retry through MDS. Error %d\n", __func__,
@@ -1215,8 +1294,6 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
hdr->pgio_mirror_idx + 1,
&hdr->pgio_mirror_idx))
goto out_eagain;
- set_bit(NFS_LAYOUT_RETURN_REQUESTED,
- &hdr->lseg->pls_layout->plh_flags);
pnfs_read_resend_pnfs(hdr);
return task->tk_status;
case -NFS4ERR_RESET_TO_MDS:
@@ -1260,7 +1337,7 @@ ff_layout_set_layoutcommit(struct nfs_pgio_header *hdr)
}
static bool
-ff_layout_reset_to_mds(struct pnfs_layout_segment *lseg, int idx)
+ff_layout_device_unavailable(struct pnfs_layout_segment *lseg, int idx)
{
/* No mirroring for now */
struct nfs4_deviceid_node *node = FF_LAYOUT_DEVID_NODE(lseg, idx);
@@ -1297,16 +1374,10 @@ static int ff_layout_read_prepare_common(struct rpc_task *task,
rpc_exit(task, -EIO);
return -EIO;
}
- if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
- dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
- if (ff_layout_has_available_ds(hdr->lseg))
- pnfs_read_resend_pnfs(hdr);
- else
- ff_layout_reset_read(hdr);
- rpc_exit(task, 0);
+ if (ff_layout_device_unavailable(hdr->lseg, hdr->pgio_mirror_idx)) {
+ rpc_exit(task, -EHOSTDOWN);
return -EAGAIN;
}
- hdr->pgio_done_cb = ff_layout_read_done_cb;
ff_layout_read_record_layoutstats_start(task, hdr);
return 0;
@@ -1496,14 +1567,8 @@ static int ff_layout_write_prepare_common(struct rpc_task *task,
return -EIO;
}
- if (ff_layout_reset_to_mds(hdr->lseg, hdr->pgio_mirror_idx)) {
- bool retry_pnfs;
-
- retry_pnfs = ff_layout_has_available_ds(hdr->lseg);
- dprintk("%s task %u reset io to %s\n", __func__,
- task->tk_pid, retry_pnfs ? "pNFS" : "MDS");
- ff_layout_reset_write(hdr, retry_pnfs);
- rpc_exit(task, 0);
+ if (ff_layout_device_unavailable(hdr->lseg, hdr->pgio_mirror_idx)) {
+ rpc_exit(task, -EHOSTDOWN);
return -EAGAIN;
}
@@ -1712,7 +1777,7 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
goto out_failed;
ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
- if (IS_ERR(ds_cred))
+ if (!ds_cred)
goto out_failed;
vers = nfs4_ff_layout_ds_version(lseg, idx);
@@ -1720,6 +1785,7 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers);
+ hdr->pgio_done_cb = ff_layout_read_done_cb;
atomic_inc(&ds->ds_clp->cl_count);
hdr->ds_clp = ds->ds_clp;
fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
@@ -1737,11 +1803,11 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
vers == 3 ? &ff_layout_read_call_ops_v3 :
&ff_layout_read_call_ops_v4,
0, RPC_TASK_SOFTCONN);
-
+ put_rpccred(ds_cred);
return PNFS_ATTEMPTED;
out_failed:
- if (ff_layout_has_available_ds(lseg))
+ if (ff_layout_avoid_mds_available_ds(lseg))
return PNFS_TRY_AGAIN;
return PNFS_NOT_ATTEMPTED;
}
@@ -1769,7 +1835,7 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
return PNFS_NOT_ATTEMPTED;
ds_cred = ff_layout_get_ds_cred(lseg, idx, hdr->cred);
- if (IS_ERR(ds_cred))
+ if (!ds_cred)
return PNFS_NOT_ATTEMPTED;
vers = nfs4_ff_layout_ds_version(lseg, idx);
@@ -1798,6 +1864,7 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
vers == 3 ? &ff_layout_write_call_ops_v3 :
&ff_layout_write_call_ops_v4,
sync, RPC_TASK_SOFTCONN);
+ put_rpccred(ds_cred);
return PNFS_ATTEMPTED;
}
@@ -1824,7 +1891,7 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
struct rpc_clnt *ds_clnt;
struct rpc_cred *ds_cred;
u32 idx;
- int vers;
+ int vers, ret;
struct nfs_fh *fh;
idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
@@ -1838,7 +1905,7 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
goto out_err;
ds_cred = ff_layout_get_ds_cred(lseg, idx, data->cred);
- if (IS_ERR(ds_cred))
+ if (!ds_cred)
goto out_err;
vers = nfs4_ff_layout_ds_version(lseg, idx);
@@ -1854,10 +1921,12 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
if (fh)
data->args.fh = fh;
- return nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
+ ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
vers == 3 ? &ff_layout_commit_call_ops_v3 :
&ff_layout_commit_call_ops_v4,
how, RPC_TASK_SOFTCONN);
+ put_rpccred(ds_cred);
+ return ret;
out_err:
pnfs_generic_prepare_to_resend_writes(data);
pnfs_generic_commit_release(data);
@@ -2223,6 +2292,11 @@ static int __init nfs4flexfilelayout_init(void)
{
printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
__func__);
+ if (!ff_zero_group) {
+ ff_zero_group = groups_alloc(0);
+ if (!ff_zero_group)
+ return -ENOMEM;
+ }
return pnfs_register_layoutdriver(&flexfilelayout_type);
}
@@ -2231,6 +2305,10 @@ static void __exit nfs4flexfilelayout_exit(void)
printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
__func__);
pnfs_unregister_layoutdriver(&flexfilelayout_type);
+ if (ff_zero_group) {
+ put_group_info(ff_zero_group);
+ ff_zero_group = NULL;
+ }
}
MODULE_ALIAS("nfs-layouttype4-4");
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
index dd353bb7dc0a..1bcdb15d0c41 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.h
+++ b/fs/nfs/flexfilelayout/flexfilelayout.h
@@ -10,7 +10,8 @@
#define FS_NFS_NFS4FLEXFILELAYOUT_H
#define FF_FLAGS_NO_LAYOUTCOMMIT 1
-#define FF_FLAGS_NO_IO_THRU_MDS 2
+#define FF_FLAGS_NO_IO_THRU_MDS 2
+#define FF_FLAGS_NO_READ_IO 4
#include "../pnfs.h"
@@ -76,9 +77,8 @@ struct nfs4_ff_layout_mirror {
u32 fh_versions_cnt;
struct nfs_fh *fh_versions;
nfs4_stateid stateid;
- u32 uid;
- u32 gid;
- struct rpc_cred *cred;
+ struct rpc_cred __rcu *ro_cred;
+ struct rpc_cred __rcu *rw_cred;
atomic_t ref;
spinlock_t lock;
struct nfs4_ff_layoutstat read_stat;
@@ -154,6 +154,12 @@ ff_layout_no_fallback_to_mds(struct pnfs_layout_segment *lseg)
}
static inline bool
+ff_layout_no_read_on_rw(struct pnfs_layout_segment *lseg)
+{
+ return FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_READ_IO;
+}
+
+static inline bool
ff_layout_test_devid_unavailable(struct nfs4_deviceid_node *node)
{
return nfs4_test_deviceid_unavailable(node);
@@ -192,4 +198,7 @@ nfs4_ff_find_or_create_ds_client(struct pnfs_layout_segment *lseg,
struct rpc_cred *ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg,
u32 ds_idx, struct rpc_cred *mdscred);
bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg);
+bool ff_layout_avoid_mds_available_ds(struct pnfs_layout_segment *lseg);
+bool ff_layout_avoid_read_on_rw(struct pnfs_layout_segment *lseg);
+
#endif /* FS_NFS_NFS4FLEXFILELAYOUT_H */
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index add0e5a70bd6..0aa36be71fce 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -228,7 +228,8 @@ ff_ds_error_match(const struct nfs4_ff_layout_ds_err *e1,
return e1->opnum < e2->opnum ? -1 : 1;
if (e1->status != e2->status)
return e1->status < e2->status ? -1 : 1;
- ret = memcmp(&e1->stateid, &e2->stateid, sizeof(e1->stateid));
+ ret = memcmp(e1->stateid.data, e2->stateid.data,
+ sizeof(e1->stateid.data));
if (ret != 0)
return ret;
ret = memcmp(&e1->deviceid, &e2->deviceid, sizeof(e1->deviceid));
@@ -302,40 +303,26 @@ int ff_layout_track_ds_error(struct nfs4_flexfile_layout *flo,
return 0;
}
-/* currently we only support AUTH_NONE and AUTH_SYS */
-static rpc_authflavor_t
-nfs4_ff_layout_choose_authflavor(struct nfs4_ff_layout_mirror *mirror)
+static struct rpc_cred *
+ff_layout_get_mirror_cred(struct nfs4_ff_layout_mirror *mirror, u32 iomode)
{
- if (mirror->uid == (u32)-1)
- return RPC_AUTH_NULL;
- return RPC_AUTH_UNIX;
-}
+ struct rpc_cred *cred, __rcu **pcred;
-/* fetch cred for NFSv3 DS */
-static int ff_layout_update_mirror_cred(struct nfs4_ff_layout_mirror *mirror,
- struct nfs4_pnfs_ds *ds)
-{
- if (ds->ds_clp && !mirror->cred &&
- mirror->mirror_ds->ds_versions[0].version == 3) {
- struct rpc_auth *auth = ds->ds_clp->cl_rpcclient->cl_auth;
- struct rpc_cred *cred;
- struct auth_cred acred = {
- .uid = make_kuid(&init_user_ns, mirror->uid),
- .gid = make_kgid(&init_user_ns, mirror->gid),
- };
-
- /* AUTH_NULL ignores acred */
- cred = auth->au_ops->lookup_cred(auth, &acred, 0);
- if (IS_ERR(cred)) {
- dprintk("%s: lookup_cred failed with %ld\n",
- __func__, PTR_ERR(cred));
- return PTR_ERR(cred);
- } else {
- if (cmpxchg(&mirror->cred, NULL, cred))
- put_rpccred(cred);
- }
- }
- return 0;
+ if (iomode == IOMODE_READ)
+ pcred = &mirror->ro_cred;
+ else
+ pcred = &mirror->rw_cred;
+
+ rcu_read_lock();
+ do {
+ cred = rcu_dereference(*pcred);
+ if (!cred)
+ break;
+
+ cred = get_rpccred_rcu(cred);
+ } while (!cred);
+ rcu_read_unlock();
+ return cred;
}
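
The loop re-reads the cred pointer on each pass because get_rpccred_rcu() can fail: once the refcount has dropped to zero the cred is being torn down and must not be resurrected. A user-space analogue of that try-get primitive, using C11 atomics (illustrative only, not kernel code):

	#include <stdatomic.h>
	#include <stdbool.h>

	struct cred { atomic_int count; };

	/* Take a reference only while the object is still live;
	 * fail once teardown has dropped the count to zero. */
	static bool cred_tryget(struct cred *c)
	{
		int old = atomic_load(&c->count);

		while (old > 0)
			if (atomic_compare_exchange_weak(&c->count, &old, old + 1))
				return true;
		return false;
	}
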
struct nfs_fh *
@@ -356,7 +343,23 @@ out:
return fh;
}
-/* Upon return, either ds is connected, or ds is NULL */
+/**
+ * nfs4_ff_layout_prepare_ds - prepare a DS connection for an RPC call
+ * @lseg: the layout segment we're operating on
+ * @ds_idx: index of the DS to use
+ * @fail_return: return layout on connect failure?
+ *
+ * Try to prepare a DS connection to accept an RPC call. This involves
+ * selecting a mirror to use and connecting the client to it if it's not
+ * already connected.
+ *
+ * Since a single functioning mirror is enough to satisfy a read, we don't
+ * want to return the layout while at least one mirror is still usable. For
+ * writes though, any down mirror should result in a LAYOUTRETURN.
+ * @fail_return is how we distinguish between the two cases.
+ *
+ * Returns a pointer to a connected DS object on success or NULL on failure.
+ */
struct nfs4_pnfs_ds *
nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
bool fail_return)
@@ -367,7 +370,6 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
struct inode *ino = lseg->pls_layout->plh_inode;
struct nfs_server *s = NFS_SERVER(ino);
unsigned int max_payload;
- rpc_authflavor_t flavor;
if (!ff_layout_mirror_valid(lseg, mirror)) {
pr_err_ratelimited("NFS: %s: No data server for offset index %d\n",
@@ -383,9 +385,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
/* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */
smp_rmb();
if (ds->ds_clp)
- goto out_update_creds;
-
- flavor = nfs4_ff_layout_choose_authflavor(mirror);
+ goto out;
/* FIXME: For now we assume the server sent only one version of NFS
* to use for the DS.
@@ -394,7 +394,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
dataserver_retrans,
mirror->mirror_ds->ds_versions[0].version,
mirror->mirror_ds->ds_versions[0].minor_version,
- flavor);
+ RPC_AUTH_UNIX);
/* connect success, check rsize/wsize limit */
if (ds->ds_clp) {
@@ -410,20 +410,10 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
mirror, lseg->pls_range.offset,
lseg->pls_range.length, NFS4ERR_NXIO,
OP_ILLEGAL, GFP_NOIO);
- if (!fail_return) {
- if (ff_layout_has_available_ds(lseg))
- set_bit(NFS_LAYOUT_RETURN_REQUESTED,
- &lseg->pls_layout->plh_flags);
- else
- pnfs_error_mark_layout_for_return(ino, lseg);
- } else
+ if (fail_return || !ff_layout_has_available_ds(lseg))
pnfs_error_mark_layout_for_return(ino, lseg);
ds = NULL;
- goto out;
}
-out_update_creds:
- if (ff_layout_update_mirror_cred(mirror, ds))
- ds = NULL;
out:
return ds;
}
@@ -433,16 +423,15 @@ ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg, u32 ds_idx,
struct rpc_cred *mdscred)
{
struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
- struct rpc_cred *cred = ERR_PTR(-EINVAL);
-
- if (!nfs4_ff_layout_prepare_ds(lseg, ds_idx, true))
- goto out;
+ struct rpc_cred *cred;
- if (mirror && mirror->cred)
- cred = mirror->cred;
- else
- cred = mdscred;
-out:
+ if (mirror) {
+ cred = ff_layout_get_mirror_cred(mirror, lseg->pls_range.iomode);
+ if (!cred)
+ cred = get_rpccred(mdscred);
+ } else {
+ cred = get_rpccred(mdscred);
+ }
return cred;
}
@@ -562,6 +551,18 @@ bool ff_layout_has_available_ds(struct pnfs_layout_segment *lseg)
return ff_rw_layout_has_available_ds(lseg);
}
+bool ff_layout_avoid_mds_available_ds(struct pnfs_layout_segment *lseg)
+{
+ return ff_layout_no_fallback_to_mds(lseg) ||
+ ff_layout_has_available_ds(lseg);
+}
+
+bool ff_layout_avoid_read_on_rw(struct pnfs_layout_segment *lseg)
+{
+ return lseg->pls_range.iomode == IOMODE_RW &&
+ ff_layout_no_read_on_rw(lseg);
+}
+
module_param(dataserver_retrans, uint, 0644);
MODULE_PARM_DESC(dataserver_retrans, "The number of times the NFSv4.1 client "
"retries a request before it attempts further "
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index f1d1d2c472e9..5154fa65a2f2 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -477,6 +477,7 @@ void nfs_mark_request_commit(struct nfs_page *req,
u32 ds_commit_idx);
int nfs_write_need_commit(struct nfs_pgio_header *);
void nfs_writeback_update_inode(struct nfs_pgio_header *hdr);
+int nfs_commit_file(struct file *file, struct nfs_write_verifier *verf);
int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
int how, struct nfs_commit_info *cinfo);
void nfs_retry_commit(struct list_head *page_list,
diff --git a/fs/nfs/nfs42.h b/fs/nfs/nfs42.h
index b587ccd31083..b6cd15314bab 100644
--- a/fs/nfs/nfs42.h
+++ b/fs/nfs/nfs42.h
@@ -13,6 +13,7 @@
/* nfs4.2proc.c */
int nfs42_proc_allocate(struct file *, loff_t, loff_t);
+ssize_t nfs42_proc_copy(struct file *, loff_t, struct file *, loff_t, size_t);
int nfs42_proc_deallocate(struct file *, loff_t, loff_t);
loff_t nfs42_proc_llseek(struct file *, loff_t, int);
int nfs42_proc_layoutstats_generic(struct nfs_server *,
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index dff83460e5a6..aa03ed09ba06 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -126,6 +126,111 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
return err;
}
+static ssize_t _nfs42_proc_copy(struct file *src, loff_t pos_src,
+ struct nfs_lock_context *src_lock,
+ struct file *dst, loff_t pos_dst,
+ struct nfs_lock_context *dst_lock,
+ size_t count)
+{
+ struct nfs42_copy_args args = {
+ .src_fh = NFS_FH(file_inode(src)),
+ .src_pos = pos_src,
+ .dst_fh = NFS_FH(file_inode(dst)),
+ .dst_pos = pos_dst,
+ .count = count,
+ };
+ struct nfs42_copy_res res;
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
+ .rpc_argp = &args,
+ .rpc_resp = &res,
+ };
+ struct inode *dst_inode = file_inode(dst);
+ struct nfs_server *server = NFS_SERVER(dst_inode);
+ int status;
+
+ status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
+ src_lock, FMODE_READ);
+ if (status)
+ return status;
+
+ status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
+ dst_lock, FMODE_WRITE);
+ if (status)
+ return status;
+
+ status = nfs4_call_sync(server->client, server, &msg,
+ &args.seq_args, &res.seq_res, 0);
+ if (status == -ENOTSUPP)
+ server->caps &= ~NFS_CAP_COPY;
+ if (status)
+ return status;
+
+ if (res.write_res.verifier.committed != NFS_FILE_SYNC) {
+ status = nfs_commit_file(dst, &res.write_res.verifier.verifier);
+ if (status)
+ return status;
+ }
+
+ truncate_pagecache_range(dst_inode, pos_dst,
+ pos_dst + res.write_res.count);
+
+ return res.write_res.count;
+}
+
+ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
+ struct file *dst, loff_t pos_dst,
+ size_t count)
+{
+ struct nfs_server *server = NFS_SERVER(file_inode(dst));
+ struct nfs_lock_context *src_lock;
+ struct nfs_lock_context *dst_lock;
+ struct nfs4_exception src_exception = { };
+ struct nfs4_exception dst_exception = { };
+ ssize_t err, err2;
+
+ if (!nfs_server_capable(file_inode(dst), NFS_CAP_COPY))
+ return -EOPNOTSUPP;
+
+ src_lock = nfs_get_lock_context(nfs_file_open_context(src));
+ if (IS_ERR(src_lock))
+ return PTR_ERR(src_lock);
+
+ src_exception.inode = file_inode(src);
+ src_exception.state = src_lock->open_context->state;
+
+ dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
+ if (IS_ERR(dst_lock)) {
+ err = PTR_ERR(dst_lock);
+ goto out_put_src_lock;
+ }
+
+ dst_exception.inode = file_inode(dst);
+ dst_exception.state = dst_lock->open_context->state;
+
+ do {
+ inode_lock(file_inode(dst));
+ err = _nfs42_proc_copy(src, pos_src, src_lock,
+ dst, pos_dst, dst_lock, count);
+ inode_unlock(file_inode(dst));
+
+ if (err == -ENOTSUPP) {
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ err2 = nfs4_handle_exception(server, err, &src_exception);
+ err = nfs4_handle_exception(server, err, &dst_exception);
+ if (!err)
+ err = err2;
+ } while (src_exception.retry || dst_exception.retry);
+
+ nfs_put_lock_context(dst_lock);
+out_put_src_lock:
+ nfs_put_lock_context(src_lock);
+ return err;
+}
+
static loff_t _nfs42_proc_llseek(struct file *filep,
struct nfs_lock_context *lock, loff_t offset, int whence)
{
@@ -232,7 +337,7 @@ nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
* with the current stateid.
*/
set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
- pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
+ pnfs_mark_matching_lsegs_invalid(lo, &head, NULL, 0);
spin_unlock(&inode->i_lock);
pnfs_free_lseg_list(&head);
} else
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index 0ca482a51e53..6dc6f2aea0d6 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -9,9 +9,22 @@
#define encode_fallocate_maxsz (encode_stateid_maxsz + \
2 /* offset */ + \
2 /* length */)
+#define NFS42_WRITE_RES_SIZE (1 /* wr_callback_id size */ +\
+ XDR_QUADLEN(NFS4_STATEID_SIZE) + \
+ 2 /* wr_count */ + \
+ 1 /* wr_committed */ + \
+ XDR_QUADLEN(NFS4_VERIFIER_SIZE))
#define encode_allocate_maxsz (op_encode_hdr_maxsz + \
encode_fallocate_maxsz)
#define decode_allocate_maxsz (op_decode_hdr_maxsz)
+#define encode_copy_maxsz (op_encode_hdr_maxsz + \
+ XDR_QUADLEN(NFS4_STATEID_SIZE) + \
+ XDR_QUADLEN(NFS4_STATEID_SIZE) + \
+ 2 + 2 + 2 + 1 + 1 + 1)
+#define decode_copy_maxsz (op_decode_hdr_maxsz + \
+ NFS42_WRITE_RES_SIZE + \
+ 1 /* cr_consecutive */ + \
+ 1 /* cr_synchronous */)
#define encode_deallocate_maxsz (op_encode_hdr_maxsz + \
encode_fallocate_maxsz)
#define decode_deallocate_maxsz (op_decode_hdr_maxsz)
@@ -49,6 +62,16 @@
decode_putfh_maxsz + \
decode_allocate_maxsz + \
decode_getattr_maxsz)
+#define NFS4_enc_copy_sz (compound_encode_hdr_maxsz + \
+ encode_putfh_maxsz + \
+ encode_savefh_maxsz + \
+ encode_putfh_maxsz + \
+ encode_copy_maxsz)
+#define NFS4_dec_copy_sz (compound_decode_hdr_maxsz + \
+ decode_putfh_maxsz + \
+ decode_savefh_maxsz + \
+ decode_putfh_maxsz + \
+ decode_copy_maxsz)
#define NFS4_enc_deallocate_sz (compound_encode_hdr_maxsz + \
encode_putfh_maxsz + \
encode_deallocate_maxsz + \
@@ -102,6 +125,23 @@ static void encode_allocate(struct xdr_stream *xdr,
encode_fallocate(xdr, args);
}
+static void encode_copy(struct xdr_stream *xdr,
+ struct nfs42_copy_args *args,
+ struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_COPY, decode_copy_maxsz, hdr);
+ encode_nfs4_stateid(xdr, &args->src_stateid);
+ encode_nfs4_stateid(xdr, &args->dst_stateid);
+
+ encode_uint64(xdr, args->src_pos);
+ encode_uint64(xdr, args->dst_pos);
+ encode_uint64(xdr, args->count);
+
+ encode_uint32(xdr, 1); /* consecutive = true */
+ encode_uint32(xdr, 1); /* synchronous = true */
+ encode_uint32(xdr, 0); /* src server list */
+}
+
static void encode_deallocate(struct xdr_stream *xdr,
struct nfs42_falloc_args *args,
struct compound_hdr *hdr)
@@ -182,6 +222,26 @@ static void nfs4_xdr_enc_allocate(struct rpc_rqst *req,
}
/*
+ * Encode COPY request
+ */
+static void nfs4_xdr_enc_copy(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ struct nfs42_copy_args *args)
+{
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->seq_args, &hdr);
+ encode_putfh(xdr, args->src_fh, &hdr);
+ encode_savefh(xdr, &hdr);
+ encode_putfh(xdr, args->dst_fh, &hdr);
+ encode_copy(xdr, args, &hdr);
+ encode_nops(&hdr);
+}
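
Putting the encode helpers together, the COPY compound sent on the wire has this shape (illustrative):

	SEQUENCE
	PUTFH(src_fh)
	SAVEFH			/* remember the copy source */
	PUTFH(dst_fh)
	COPY(src_stateid, dst_stateid, src_pos, dst_pos, count,
	     consecutive=1, synchronous=1, <empty source server list>)

which is exactly the operation sequence the NFS4_enc_copy_sz estimate above accounts for.
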
+
+/*
* Encode DEALLOCATE request
*/
static void nfs4_xdr_enc_deallocate(struct rpc_rqst *req,
@@ -266,6 +326,62 @@ static int decode_allocate(struct xdr_stream *xdr, struct nfs42_falloc_res *res)
return decode_op_hdr(xdr, OP_ALLOCATE);
}
+static int decode_write_response(struct xdr_stream *xdr,
+ struct nfs42_write_res *res)
+{
+ __be32 *p;
+ int stateids;
+
+ p = xdr_inline_decode(xdr, 4 + 8 + 4);
+ if (unlikely(!p))
+ goto out_overflow;
+
+ stateids = be32_to_cpup(p++);
+ p = xdr_decode_hyper(p, &res->count);
+ res->verifier.committed = be32_to_cpup(p);
+ return decode_verifier(xdr, &res->verifier.verifier);
+
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_copy_requirements(struct xdr_stream *xdr,
+ struct nfs42_copy_res *res)
+{
+ __be32 *p;
+
+ p = xdr_inline_decode(xdr, 4 + 4);
+ if (unlikely(!p))
+ goto out_overflow;
+
+ res->consecutive = be32_to_cpup(p++);
+ res->synchronous = be32_to_cpup(p++);
+ return 0;
+out_overflow:
+ print_overflow_msg(__func__, xdr);
+ return -EIO;
+}
+
+static int decode_copy(struct xdr_stream *xdr, struct nfs42_copy_res *res)
+{
+ int status;
+
+ status = decode_op_hdr(xdr, OP_COPY);
+ if (status == NFS4ERR_OFFLOAD_NO_REQS) {
+ status = decode_copy_requirements(xdr, res);
+ if (status)
+ return status;
+ return NFS4ERR_OFFLOAD_NO_REQS;
+ } else if (status)
+ return status;
+
+ status = decode_write_response(xdr, &res->write_res);
+ if (status)
+ return status;
+
+ return decode_copy_requirements(xdr, res);
+}
+
static int decode_deallocate(struct xdr_stream *xdr, struct nfs42_falloc_res *res)
{
return decode_op_hdr(xdr, OP_DEALLOCATE);
@@ -331,6 +447,36 @@ out:
}
/*
+ * Decode COPY response
+ */
+static int nfs4_xdr_dec_copy(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ struct nfs42_copy_res *res)
+{
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_savefh(xdr);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_copy(xdr, res);
+out:
+ return status;
+}
+
+/*
* Decode DEALLOCATE request
*/
static int nfs4_xdr_dec_deallocate(struct rpc_rqst *rqstp,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 4afdee420d25..768456fa1b17 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -438,8 +438,9 @@ extern void nfs41_handle_server_scope(struct nfs_client *,
struct nfs41_server_scope **);
extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
-extern int nfs4_select_rw_stateid(nfs4_stateid *, struct nfs4_state *,
- fmode_t, const struct nfs_lockowner *);
+extern int nfs4_select_rw_stateid(struct nfs4_state *, fmode_t,
+ const struct nfs_lockowner *, nfs4_stateid *,
+ struct rpc_cred **);
extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask);
extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task);
@@ -496,12 +497,15 @@ extern struct svc_version nfs4_callback_version4;
static inline void nfs4_stateid_copy(nfs4_stateid *dst, const nfs4_stateid *src)
{
- memcpy(dst, src, sizeof(*dst));
+ memcpy(dst->data, src->data, sizeof(dst->data));
+ dst->type = src->type;
}
static inline bool nfs4_stateid_match(const nfs4_stateid *dst, const nfs4_stateid *src)
{
- return memcmp(dst, src, sizeof(*dst)) == 0;
+ if (dst->type != src->type)
+ return false;
+ return memcmp(dst->data, src->data, sizeof(dst->data)) == 0;
}
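
For reference, the typed stateid that these helpers now compare field-by-field has roughly this shape (abridged sketch; the full definition, with the enum of stateid types used throughout this series, lives in include/linux/nfs4.h):

	typedef struct {
		char data[NFS4_STATEID_SIZE];
		enum {
			NFS4_INVALID_STATEID_TYPE = 0,
			NFS4_SPECIAL_STATEID_TYPE,
			NFS4_OPEN_STATEID_TYPE,
			NFS4_LOCK_STATEID_TYPE,
			NFS4_DELEGATION_STATEID_TYPE,
			NFS4_LAYOUT_STATEID_TYPE,
			NFS4_PNFS_DS_STATEID_TYPE,
		} type;
	} nfs4_stateid;
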
static inline bool nfs4_stateid_match_other(const nfs4_stateid *dst, const nfs4_stateid *src)
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index d0390516467c..014b0e41ace5 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -129,6 +129,28 @@ nfs4_file_flush(struct file *file, fl_owner_t id)
}
#ifdef CONFIG_NFS_V4_2
+static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out,
+ size_t count, unsigned int flags)
+{
+ struct inode *in_inode = file_inode(file_in);
+ struct inode *out_inode = file_inode(file_out);
+ int ret;
+
+ if (in_inode == out_inode)
+ return -EINVAL;
+
+ /* flush any pending writes */
+ ret = nfs_sync_inode(in_inode);
+ if (ret)
+ return ret;
+ ret = nfs_sync_inode(out_inode);
+ if (ret)
+ return ret;
+
+ return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
+}
+
static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence)
{
loff_t ret;
@@ -243,6 +265,7 @@ const struct file_operations nfs4_file_operations = {
.check_flags = nfs_check_flags,
.setlease = simple_nosetlease,
#ifdef CONFIG_NFS_V4_2
+ .copy_file_range = nfs4_copy_file_range,
.llseek = nfs4_file_llseek,
.fallocate = nfs42_fallocate,
.clone_file_range = nfs42_clone_file_range,
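
From user space this path is exercised through the copy_file_range(2) system call; a copy between two files on the same NFSv4.2 mount now goes server-side instead of bouncing the data through the client. A minimal test program (hypothetical usage sketch; the glibc wrapper requires glibc 2.27 or newer):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/types.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		loff_t off_in = 0, off_out = 0;
		int in, out;
		ssize_t n;

		if (argc != 3)
			return 1;
		in = open(argv[1], O_RDONLY);
		out = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
		if (in < 0 || out < 0)
			return 1;

		/* on two NFSv4.2 files this reaches nfs4_copy_file_range() */
		n = copy_file_range(in, &off_in, out, &off_out, 1 << 20, 0);
		printf("copied %zd bytes\n", n);
		return n < 0;
	}
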
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 084e8570da18..de97567795a5 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -74,6 +74,17 @@
#define NFS4_POLL_RETRY_MIN (HZ/10)
#define NFS4_POLL_RETRY_MAX (15*HZ)
+/* file attributes which can be mapped to nfs attributes */
+#define NFS4_VALID_ATTRS (ATTR_MODE \
+ | ATTR_UID \
+ | ATTR_GID \
+ | ATTR_SIZE \
+ | ATTR_ATIME \
+ | ATTR_MTIME \
+ | ATTR_CTIME \
+ | ATTR_ATIME_SET \
+ | ATTR_MTIME_SET)
+
struct nfs4_opendata;
static int _nfs4_proc_open(struct nfs4_opendata *data);
static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
@@ -416,6 +427,7 @@ static int nfs4_do_handle_exception(struct nfs_server *server,
case -NFS4ERR_DELAY:
nfs_inc_server_stats(server, NFSIOS_DELAY);
case -NFS4ERR_GRACE:
+ case -NFS4ERR_RECALLCONFLICT:
exception->delay = 1;
return 0;
@@ -2558,15 +2570,20 @@ static int _nfs4_do_open(struct inode *dir,
if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
(opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
nfs4_exclusive_attrset(opendata, sattr, &label);
-
- nfs_fattr_init(opendata->o_res.f_attr);
- status = nfs4_do_setattr(state->inode, cred,
- opendata->o_res.f_attr, sattr,
- state, label, olabel);
- if (status == 0) {
- nfs_setattr_update_inode(state->inode, sattr,
- opendata->o_res.f_attr);
- nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
+ /*
+ * Send any create attributes that were not set by the open
+ * call with an extra SETATTR.
+ */
+ if (sattr->ia_valid & NFS4_VALID_ATTRS) {
+ nfs_fattr_init(opendata->o_res.f_attr);
+ status = nfs4_do_setattr(state->inode, cred,
+ opendata->o_res.f_attr, sattr,
+ state, label, olabel);
+ if (status == 0) {
+ nfs_setattr_update_inode(state->inode, sattr,
+ opendata->o_res.f_attr);
+ nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
+ }
}
}
if (opened && opendata->file_created)
@@ -2676,6 +2693,7 @@ static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
.rpc_resp = &res,
.rpc_cred = cred,
};
+ struct rpc_cred *delegation_cred = NULL;
unsigned long timestamp = jiffies;
fmode_t fmode;
bool truncate;
@@ -2691,7 +2709,7 @@ static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
truncate = (sattr->ia_valid & ATTR_SIZE) ? true : false;
fmode = truncate ? FMODE_WRITE : FMODE_READ;
- if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) {
+ if (nfs4_copy_delegation_stateid(inode, fmode, &arg.stateid, &delegation_cred)) {
/* Use that stateid */
} else if (truncate && state != NULL) {
struct nfs_lockowner lockowner = {
@@ -2700,13 +2718,17 @@ static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
};
if (!nfs4_valid_open_stateid(state))
return -EBADF;
- if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
- &lockowner) == -EIO)
+ if (nfs4_select_rw_stateid(state, FMODE_WRITE, &lockowner,
+ &arg.stateid, &delegation_cred) == -EIO)
return -EBADF;
} else
nfs4_stateid_copy(&arg.stateid, &zero_stateid);
+ if (delegation_cred)
+ msg.rpc_cred = delegation_cred;
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
+
+ put_rpccred(delegation_cred);
if (status == 0 && state != NULL)
renew_lease(server, timestamp);
trace_nfs4_setattr(inode, &arg.stateid, status);
@@ -4285,7 +4307,7 @@ int nfs4_set_rw_stateid(nfs4_stateid *stateid,
if (l_ctx != NULL)
lockowner = &l_ctx->lockowner;
- return nfs4_select_rw_stateid(stateid, ctx->state, fmode, lockowner);
+ return nfs4_select_rw_stateid(ctx->state, fmode, lockowner, stateid, NULL);
}
EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
@@ -4993,12 +5015,11 @@ static int nfs4_do_set_security_label(struct inode *inode,
}
static int
-nfs4_set_security_label(struct dentry *dentry, const void *buf, size_t buflen)
+nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
{
struct nfs4_label ilabel, *olabel = NULL;
struct nfs_fattr fattr;
struct rpc_cred *cred;
- struct inode *inode = d_inode(dentry);
int status;
if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
@@ -6054,6 +6075,7 @@ static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *reques
static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
struct nfs_inode *nfsi = NFS_I(state->inode);
+ struct nfs4_state_owner *sp = state->owner;
unsigned char fl_flags = request->fl_flags;
int status = -ENOLCK;
@@ -6068,6 +6090,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
status = do_vfs_lock(state->inode, request);
if (status < 0)
goto out;
+ mutex_lock(&sp->so_delegreturn_mutex);
down_read(&nfsi->rwsem);
if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
/* Yes: cache locks! */
@@ -6075,9 +6098,11 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
request->fl_flags = fl_flags & ~FL_SLEEP;
status = do_vfs_lock(state->inode, request);
up_read(&nfsi->rwsem);
+ mutex_unlock(&sp->so_delegreturn_mutex);
goto out;
}
up_read(&nfsi->rwsem);
+ mutex_unlock(&sp->so_delegreturn_mutex);
status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
out:
request->fl_flags = fl_flags;
@@ -6255,11 +6280,11 @@ nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
- struct dentry *dentry, const char *key,
- const void *buf, size_t buflen,
- int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *key, const void *buf,
+ size_t buflen, int flags)
{
- return nfs4_proc_set_acl(d_inode(dentry), buf, buflen);
+ return nfs4_proc_set_acl(inode, buf, buflen);
}
static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
@@ -6277,12 +6302,12 @@ static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
- struct dentry *dentry, const char *key,
- const void *buf, size_t buflen,
- int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *key, const void *buf,
+ size_t buflen, int flags)
{
if (security_ismaclabel(key))
- return nfs4_set_security_label(dentry, buf, buflen);
+ return nfs4_set_security_label(inode, buf, buflen);
return -EOPNOTSUPP;
}
@@ -7351,9 +7376,11 @@ int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
* always set csa_cachethis to FALSE because the current implementation
* of the back channel DRC only supports caching the CB_SEQUENCE operation.
*/
-static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
+static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
+ struct rpc_clnt *clnt)
{
unsigned int max_rqst_sz, max_resp_sz;
+ unsigned int max_bc_payload = rpc_max_bc_payload(clnt);
max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
@@ -7371,8 +7398,8 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
/* Back channel attributes */
- args->bc_attrs.max_rqst_sz = PAGE_SIZE;
- args->bc_attrs.max_resp_sz = PAGE_SIZE;
+ args->bc_attrs.max_rqst_sz = max_bc_payload;
+ args->bc_attrs.max_resp_sz = max_bc_payload;
args->bc_attrs.max_resp_sz_cached = 0;
args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
args->bc_attrs.max_reqs = NFS41_BC_MAX_CALLBACKS;
@@ -7476,7 +7503,7 @@ static int _nfs4_proc_create_session(struct nfs_client *clp,
};
int status;
- nfs4_init_channel_attrs(&args);
+ nfs4_init_channel_attrs(&args, clp->cl_rpcclient);
args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
@@ -7820,40 +7847,34 @@ nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
struct nfs4_layoutget *lgp = calldata;
struct nfs_server *server = NFS_SERVER(lgp->args.inode);
struct nfs4_session *session = nfs4_get_session(server);
- int ret;
dprintk("--> %s\n", __func__);
- /* Note the is a race here, where a CB_LAYOUTRECALL can come in
- * right now covering the LAYOUTGET we are about to send.
- * However, that is not so catastrophic, and there seems
- * to be no way to prevent it completely.
- */
- if (nfs41_setup_sequence(session, &lgp->args.seq_args,
- &lgp->res.seq_res, task))
- return;
- ret = pnfs_choose_layoutget_stateid(&lgp->args.stateid,
- NFS_I(lgp->args.inode)->layout,
- &lgp->args.range,
- lgp->args.ctx->state);
- if (ret < 0)
- rpc_exit(task, ret);
+ nfs41_setup_sequence(session, &lgp->args.seq_args,
+ &lgp->res.seq_res, task);
+ dprintk("<-- %s\n", __func__);
}
static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
{
struct nfs4_layoutget *lgp = calldata;
+
+ dprintk("--> %s\n", __func__);
+ nfs41_sequence_done(task, &lgp->res.seq_res);
+ dprintk("<-- %s\n", __func__);
+}
+
+static int
+nfs4_layoutget_handle_exception(struct rpc_task *task,
+ struct nfs4_layoutget *lgp, struct nfs4_exception *exception)
+{
struct inode *inode = lgp->args.inode;
struct nfs_server *server = NFS_SERVER(inode);
struct pnfs_layout_hdr *lo;
- struct nfs4_state *state = NULL;
- unsigned long timeo, now, giveup;
+ int status = task->tk_status;
dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
- if (!nfs41_sequence_done(task, &lgp->res.seq_res))
- goto out;
-
- switch (task->tk_status) {
+ switch (status) {
case 0:
goto out;
@@ -7863,57 +7884,43 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
* retry go inband.
*/
case -NFS4ERR_LAYOUTUNAVAILABLE:
- task->tk_status = -ENODATA;
+ status = -ENODATA;
goto out;
/*
* NFS4ERR_BADLAYOUT means the MDS cannot return a layout of
* length lgp->args.minlength != 0 (see RFC5661 section 18.43.3).
*/
case -NFS4ERR_BADLAYOUT:
- goto out_overflow;
+ status = -EOVERFLOW;
+ goto out;
/*
* NFS4ERR_LAYOUTTRYLATER is a conflict with another client
* (or clients) writing to the same RAID stripe except when
* the minlength argument is 0 (see RFC5661 section 18.43.3).
+ *
+ * Treat it like we would RECALLCONFLICT -- we retry for a little
+ * while, and then eventually give up.
*/
case -NFS4ERR_LAYOUTTRYLATER:
- if (lgp->args.minlength == 0)
- goto out_overflow;
- /*
- * NFS4ERR_RECALLCONFLICT is when conflict with self (must recall
- * existing layout before getting a new one).
- */
- case -NFS4ERR_RECALLCONFLICT:
- timeo = rpc_get_timeout(task->tk_client);
- giveup = lgp->args.timestamp + timeo;
- now = jiffies;
- if (time_after(giveup, now)) {
- unsigned long delay;
-
- /* Delay for:
- * - Not less then NFS4_POLL_RETRY_MIN.
- * - One last time a jiffie before we give up
- * - exponential backoff (time_now minus start_attempt)
- */
- delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN,
- min((giveup - now - 1),
- now - lgp->args.timestamp));
-
- dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n",
- __func__, delay);
- rpc_delay(task, delay);
- /* Do not call nfs4_async_handle_error() */
- goto out_restart;
+ if (lgp->args.minlength == 0) {
+ status = -EOVERFLOW;
+ goto out;
}
- break;
+ /* Fallthrough */
+ case -NFS4ERR_RECALLCONFLICT:
+ nfs4_handle_exception(server, -NFS4ERR_RECALLCONFLICT,
+ exception);
+ status = -ERECALLCONFLICT;
+ goto out;
case -NFS4ERR_EXPIRED:
case -NFS4ERR_BAD_STATEID:
+ exception->timeout = 0;
spin_lock(&inode->i_lock);
if (nfs4_stateid_match(&lgp->args.stateid,
&lgp->args.ctx->state->stateid)) {
spin_unlock(&inode->i_lock);
/* If the open stateid was bad, then recover it. */
- state = lgp->args.ctx->state;
+ exception->state = lgp->args.ctx->state;
break;
}
lo = NFS_I(inode)->layout;
@@ -7926,25 +7933,21 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
* with the current stateid.
*/
set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
- pnfs_mark_matching_lsegs_invalid(lo, &head, NULL);
+ pnfs_mark_matching_lsegs_invalid(lo, &head, NULL, 0);
spin_unlock(&inode->i_lock);
pnfs_free_lseg_list(&head);
} else
spin_unlock(&inode->i_lock);
- goto out_restart;
+ status = -EAGAIN;
+ goto out;
}
- if (nfs4_async_handle_error(task, server, state, &lgp->timeout) == -EAGAIN)
- goto out_restart;
+
+ status = nfs4_handle_exception(server, status, exception);
+ if (exception->retry)
+ status = -EAGAIN;
out:
dprintk("<-- %s\n", __func__);
- return;
-out_restart:
- task->tk_status = 0;
- rpc_restart_call_prepare(task);
- return;
-out_overflow:
- task->tk_status = -EOVERFLOW;
- goto out;
+ return status;
}
static size_t max_response_pages(struct nfs_server *server)
@@ -8013,7 +8016,7 @@ static const struct rpc_call_ops nfs4_layoutget_call_ops = {
};
struct pnfs_layout_segment *
-nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
+nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags)
{
struct inode *inode = lgp->args.inode;
struct nfs_server *server = NFS_SERVER(inode);
@@ -8033,6 +8036,7 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
.flags = RPC_TASK_ASYNC,
};
struct pnfs_layout_segment *lseg = NULL;
+ struct nfs4_exception exception = { .timeout = *timeout };
int status = 0;
dprintk("--> %s\n", __func__);
@@ -8046,7 +8050,6 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
return ERR_PTR(-ENOMEM);
}
lgp->args.layout.pglen = max_pages * PAGE_SIZE;
- lgp->args.timestamp = jiffies;
lgp->res.layoutp = &lgp->args.layout;
lgp->res.seq_res.sr_slot = NULL;
@@ -8056,13 +8059,17 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
if (IS_ERR(task))
return ERR_CAST(task);
status = nfs4_wait_for_completion_rpc_task(task);
- if (status == 0)
- status = task->tk_status;
+ if (status == 0) {
+ status = nfs4_layoutget_handle_exception(task, lgp, &exception);
+ *timeout = exception.timeout;
+ }
+
trace_nfs4_layoutget(lgp->args.ctx,
&lgp->args.range,
&lgp->res.range,
&lgp->res.stateid,
status);
+
/* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
if (status == 0 && lgp->res.layoutp->len)
lseg = pnfs_layout_process(lgp);
@@ -8118,7 +8125,8 @@ static void nfs4_layoutreturn_release(void *calldata)
dprintk("--> %s\n", __func__);
spin_lock(&lo->plh_inode->i_lock);
- pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range);
+ pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range,
+ be32_to_cpu(lrp->args.stateid.seqid));
pnfs_mark_layout_returned_if_empty(lo);
if (lrp->res.lrs_present)
pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
@@ -8653,6 +8661,9 @@ nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
static bool nfs41_match_stateid(const nfs4_stateid *s1,
const nfs4_stateid *s2)
{
+ if (s1->type != s2->type)
+ return false;
+
if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
return false;
@@ -8793,6 +8804,7 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
| NFS_CAP_STATEID_NFSV41
| NFS_CAP_ATOMIC_OPEN_V1
| NFS_CAP_ALLOCATE
+ | NFS_CAP_COPY
| NFS_CAP_DEALLOCATE
| NFS_CAP_SEEK
| NFS_CAP_LAYOUTSTATS
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index d854693a15b0..9679f4749364 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -65,7 +65,10 @@
#define OPENOWNER_POOL_SIZE 8
-const nfs4_stateid zero_stateid;
+const nfs4_stateid zero_stateid = {
+ { .data = { 0 } },
+ .type = NFS4_SPECIAL_STATEID_TYPE,
+};
static DEFINE_MUTEX(nfs_clid_init_mutex);
int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
@@ -985,15 +988,20 @@ static void nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
* Byte-range lock aware utility to initialize the stateid of read/write
* requests.
*/
-int nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state,
- fmode_t fmode, const struct nfs_lockowner *lockowner)
+int nfs4_select_rw_stateid(struct nfs4_state *state,
+ fmode_t fmode, const struct nfs_lockowner *lockowner,
+ nfs4_stateid *dst, struct rpc_cred **cred)
{
- int ret = nfs4_copy_lock_stateid(dst, state, lockowner);
+ int ret;
+
+ if (cred != NULL)
+ *cred = NULL;
+ ret = nfs4_copy_lock_stateid(dst, state, lockowner);
if (ret == -EIO)
/* A lost lock - don't even consider delegations */
goto out;
/* returns true if delegation stateid found and copied */
- if (nfs4_copy_delegation_stateid(dst, state->inode, fmode)) {
+ if (nfs4_copy_delegation_stateid(state->inode, fmode, dst, cred)) {
ret = 0;
goto out;
}
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index 2c8d05dae5b1..9c150b153782 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -1520,6 +1520,8 @@ DEFINE_NFS4_INODE_EVENT(nfs4_layoutreturn_on_close);
{ PNFS_UPDATE_LAYOUT_FOUND_CACHED, "found cached" }, \
{ PNFS_UPDATE_LAYOUT_RETURN, "layoutreturn" }, \
{ PNFS_UPDATE_LAYOUT_BLOCKED, "layouts blocked" }, \
+ { PNFS_UPDATE_LAYOUT_INVALID_OPEN, "invalid open" }, \
+ { PNFS_UPDATE_LAYOUT_RETRY, "retrying" }, \
{ PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET, "sent layoutget" })
TRACE_EVENT(pnfs_update_layout,
@@ -1528,9 +1530,10 @@ TRACE_EVENT(pnfs_update_layout,
u64 count,
enum pnfs_iomode iomode,
struct pnfs_layout_hdr *lo,
+ struct pnfs_layout_segment *lseg,
enum pnfs_update_layout_reason reason
),
- TP_ARGS(inode, pos, count, iomode, lo, reason),
+ TP_ARGS(inode, pos, count, iomode, lo, lseg, reason),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(u64, fileid)
@@ -1540,6 +1543,7 @@ TRACE_EVENT(pnfs_update_layout,
__field(enum pnfs_iomode, iomode)
__field(int, layoutstateid_seq)
__field(u32, layoutstateid_hash)
+ __field(long, lseg)
__field(enum pnfs_update_layout_reason, reason)
),
TP_fast_assign(
@@ -1559,11 +1563,12 @@ TRACE_EVENT(pnfs_update_layout,
__entry->layoutstateid_seq = 0;
__entry->layoutstateid_hash = 0;
}
+ __entry->lseg = (long)lseg;
),
TP_printk(
"fileid=%02x:%02x:%llu fhandle=0x%08x "
"iomode=%s pos=%llu count=%llu "
- "layoutstateid=%d:0x%08x (%s)",
+ "layoutstateid=%d:0x%08x lseg=0x%lx (%s)",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->fileid,
__entry->fhandle,
@@ -1571,6 +1576,7 @@ TRACE_EVENT(pnfs_update_layout,
(unsigned long long)__entry->pos,
(unsigned long long)__entry->count,
__entry->layoutstateid_seq, __entry->layoutstateid_hash,
+ __entry->lseg,
show_pnfs_update_layout_reason(__entry->reason)
)
);
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 88474a4fc669..661e753fe1c9 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -4270,6 +4270,24 @@ static int decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
return decode_opaque_fixed(xdr, stateid, NFS4_STATEID_SIZE);
}
+static int decode_open_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
+{
+ stateid->type = NFS4_OPEN_STATEID_TYPE;
+ return decode_stateid(xdr, stateid);
+}
+
+static int decode_lock_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
+{
+ stateid->type = NFS4_LOCK_STATEID_TYPE;
+ return decode_stateid(xdr, stateid);
+}
+
+static int decode_delegation_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
+{
+ stateid->type = NFS4_DELEGATION_STATEID_TYPE;
+ return decode_stateid(xdr, stateid);
+}
+
static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res)
{
int status;
@@ -4278,7 +4296,7 @@ static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res)
if (status != -EIO)
nfs_increment_open_seqid(status, res->seqid);
if (!status)
- status = decode_stateid(xdr, &res->stateid);
+ status = decode_open_stateid(xdr, &res->stateid);
return status;
}
@@ -4937,7 +4955,7 @@ static int decode_lock(struct xdr_stream *xdr, struct nfs_lock_res *res)
if (status == -EIO)
goto out;
if (status == 0) {
- status = decode_stateid(xdr, &res->stateid);
+ status = decode_lock_stateid(xdr, &res->stateid);
if (unlikely(status))
goto out;
} else if (status == -NFS4ERR_DENIED)
@@ -4966,7 +4984,7 @@ static int decode_locku(struct xdr_stream *xdr, struct nfs_locku_res *res)
if (status != -EIO)
nfs_increment_lock_seqid(status, res->seqid);
if (status == 0)
- status = decode_stateid(xdr, &res->stateid);
+ status = decode_lock_stateid(xdr, &res->stateid);
return status;
}
@@ -5016,7 +5034,7 @@ static int decode_rw_delegation(struct xdr_stream *xdr,
__be32 *p;
int status;
- status = decode_stateid(xdr, &res->delegation);
+ status = decode_delegation_stateid(xdr, &res->delegation);
if (unlikely(status))
return status;
p = xdr_inline_decode(xdr, 4);
@@ -5096,7 +5114,7 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
nfs_increment_open_seqid(status, res->seqid);
if (status)
return status;
- status = decode_stateid(xdr, &res->stateid);
+ status = decode_open_stateid(xdr, &res->stateid);
if (unlikely(status))
return status;
@@ -5136,7 +5154,7 @@ static int decode_open_confirm(struct xdr_stream *xdr, struct nfs_open_confirmre
if (status != -EIO)
nfs_increment_open_seqid(status, res->seqid);
if (!status)
- status = decode_stateid(xdr, &res->stateid);
+ status = decode_open_stateid(xdr, &res->stateid);
return status;
}
@@ -5148,7 +5166,7 @@ static int decode_open_downgrade(struct xdr_stream *xdr, struct nfs_closeres *re
if (status != -EIO)
nfs_increment_open_seqid(status, res->seqid);
if (!status)
- status = decode_stateid(xdr, &res->stateid);
+ status = decode_open_stateid(xdr, &res->stateid);
return status;
}
@@ -5838,6 +5856,12 @@ out_overflow:
}
#if defined(CONFIG_NFS_V4_1)
+static int decode_layout_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
+{
+ stateid->type = NFS4_LAYOUT_STATEID_TYPE;
+ return decode_stateid(xdr, stateid);
+}
+
static int decode_getdeviceinfo(struct xdr_stream *xdr,
struct nfs4_getdeviceinfo_res *res)
{
@@ -5919,7 +5943,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
if (unlikely(!p))
goto out_overflow;
res->return_on_close = be32_to_cpup(p);
- decode_stateid(xdr, &res->stateid);
+ decode_layout_stateid(xdr, &res->stateid);
p = xdr_inline_decode(xdr, 4);
if (unlikely(!p))
goto out_overflow;
@@ -5985,7 +6009,7 @@ static int decode_layoutreturn(struct xdr_stream *xdr,
goto out_overflow;
res->lrs_present = be32_to_cpup(p);
if (res->lrs_present)
- status = decode_stateid(xdr, &res->stateid);
+ status = decode_layout_stateid(xdr, &res->stateid);
return status;
out_overflow:
print_overflow_msg(__func__, xdr);
@@ -7515,6 +7539,7 @@ struct rpc_procinfo nfs4_procedures[] = {
PROC(DEALLOCATE, enc_deallocate, dec_deallocate),
PROC(LAYOUTSTATS, enc_layoutstats, dec_layoutstats),
PROC(CLONE, enc_clone, dec_clone),
+ PROC(COPY, enc_copy, dec_copy),
#endif /* CONFIG_NFS_V4_2 */
};
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 1f6db4231057..174dd4cf5747 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -341,8 +341,10 @@ nfs_create_request(struct nfs_open_context *ctx, struct page *page,
* long write-back delay. This will be adjusted in
* update_nfs_request below if the region is not locked. */
req->wb_page = page;
- req->wb_index = page_file_index(page);
- get_page(page);
+ if (page) {
+ req->wb_index = page_file_index(page);
+ get_page(page);
+ }
req->wb_offset = offset;
req->wb_pgbase = offset;
req->wb_bytes = count;
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 89a5ef4df08a..0c7e0d45a4de 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -270,7 +270,7 @@ pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
};
set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
- return pnfs_mark_matching_lsegs_invalid(lo, lseg_list, &range);
+ return pnfs_mark_matching_lsegs_invalid(lo, lseg_list, &range, 0);
}
static int
@@ -308,7 +308,7 @@ pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
spin_lock(&inode->i_lock);
pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
- pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
+ pnfs_mark_matching_lsegs_invalid(lo, &head, &range, 0);
spin_unlock(&inode->i_lock);
pnfs_free_lseg_list(&head);
dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
@@ -522,13 +522,35 @@ static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
return rv;
}
-/* Returns count of number of matching invalid lsegs remaining in list
- * after call.
+/*
+ * Compare 2 layout stateid sequence ids, to see which is newer,
+ * taking into account wraparound issues.
+ */
+static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
+{
+ return (s32)(s1 - s2) > 0;
+}
+
+/**
+ * pnfs_mark_matching_lsegs_invalid - tear down lsegs or mark them for later
+ * @lo: layout header containing the lsegs
+ * @tmp_list: list head where doomed lsegs should go
+ * @recall_range: optional recall range argument to match (may be NULL)
+ * @seq: only invalidate lsegs obtained prior to this sequence (may be 0)
+ *
+ * Walk the list of lsegs in the layout header, and tear down any that should
+ * be destroyed. If "recall_range" is specified then the segment must match
+ * that range. If "seq" is non-zero, then only match segments that were handed
+ * out at or before that sequence.
+ *
+ * Returns number of matching invalid lsegs remaining in list after scanning
+ * it and purging them.
*/
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
struct list_head *tmp_list,
- const struct pnfs_layout_range *recall_range)
+ const struct pnfs_layout_range *recall_range,
+ u32 seq)
{
struct pnfs_layout_segment *lseg, *next;
int remaining = 0;
@@ -540,10 +562,12 @@ pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
if (!recall_range ||
should_free_lseg(&lseg->pls_range, recall_range)) {
- dprintk("%s: freeing lseg %p iomode %d "
+ if (seq && pnfs_seqid_is_newer(lseg->pls_seq, seq))
+ continue;
+ dprintk("%s: freeing lseg %p iomode %d seq %u"
"offset %llu length %llu\n", __func__,
- lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
- lseg->pls_range.length);
+ lseg, lseg->pls_range.iomode, lseg->pls_seq,
+ lseg->pls_range.offset, lseg->pls_range.length);
if (!mark_lseg_invalid(lseg, tmp_list))
remaining++;
}
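pnfs_seqid_is_newer() above uses serial-number arithmetic: viewing the unsigned difference as signed keeps the comparison correct when the 32-bit sequence counter wraps around. A standalone illustration (plain C, assuming the usual two's-complement conversion semantics):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same comparison as pnfs_seqid_is_newer(): the signed view of the
 * difference is positive iff s1 is ahead of s2, even across a wrap. */
static bool seqid_is_newer(uint32_t s1, uint32_t s2)
{
	return (int32_t)(s1 - s2) > 0;
}

int main(void)
{
	assert(seqid_is_newer(2, 1));
	assert(!seqid_is_newer(1, 2));
	/* across wraparound: 0 was handed out after 0xffffffff */
	assert(seqid_is_newer(0, 0xffffffffu));
	/* a naive "s1 > s2" would call the older id newer here */
	printf("serial-number comparison holds across wraparound\n");
	return 0;
}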
@@ -730,15 +754,6 @@ pnfs_destroy_all_layouts(struct nfs_client *clp)
pnfs_destroy_layouts_byclid(clp, false);
}
-/*
- * Compare 2 layout stateid sequence ids, to see which is newer,
- * taking into account wraparound issues.
- */
-static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
-{
- return (s32)(s1 - s2) > 0;
-}
-
/* update lo->plh_stateid with new if is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
@@ -781,50 +796,22 @@ pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo)
test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
}
-int
-pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
- const struct pnfs_layout_range *range,
- struct nfs4_state *open_state)
-{
- int status = 0;
-
- dprintk("--> %s\n", __func__);
- spin_lock(&lo->plh_inode->i_lock);
- if (pnfs_layoutgets_blocked(lo)) {
- status = -EAGAIN;
- } else if (!nfs4_valid_open_stateid(open_state)) {
- status = -EBADF;
- } else if (list_empty(&lo->plh_segs) ||
- test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
- int seq;
-
- do {
- seq = read_seqbegin(&open_state->seqlock);
- nfs4_stateid_copy(dst, &open_state->stateid);
- } while (read_seqretry(&open_state->seqlock, seq));
- } else
- nfs4_stateid_copy(dst, &lo->plh_stateid);
- spin_unlock(&lo->plh_inode->i_lock);
- dprintk("<-- %s\n", __func__);
- return status;
-}
-
/*
-* Get layout from server.
-* for now, assume that whole file layouts are requested.
-* arg->offset: 0
-* arg->length: all ones
-*/
+ * Get layout from server.
+ * for now, assume that whole file layouts are requested.
+ * arg->offset: 0
+ * arg->length: all ones
+ */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
struct nfs_open_context *ctx,
+ nfs4_stateid *stateid,
const struct pnfs_layout_range *range,
- gfp_t gfp_flags)
+ long *timeout, gfp_t gfp_flags)
{
struct inode *ino = lo->plh_inode;
struct nfs_server *server = NFS_SERVER(ino);
struct nfs4_layoutget *lgp;
- struct pnfs_layout_segment *lseg;
loff_t i_size;
dprintk("--> %s\n", __func__);
@@ -834,40 +821,31 @@ send_layoutget(struct pnfs_layout_hdr *lo,
* store in lseg. If we race with a concurrent seqid morphing
* op, then re-send the LAYOUTGET.
*/
- do {
- lgp = kzalloc(sizeof(*lgp), gfp_flags);
- if (lgp == NULL)
- return NULL;
-
- i_size = i_size_read(ino);
-
- lgp->args.minlength = PAGE_SIZE;
- if (lgp->args.minlength > range->length)
- lgp->args.minlength = range->length;
- if (range->iomode == IOMODE_READ) {
- if (range->offset >= i_size)
- lgp->args.minlength = 0;
- else if (i_size - range->offset < lgp->args.minlength)
- lgp->args.minlength = i_size - range->offset;
- }
- lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
- pnfs_copy_range(&lgp->args.range, range);
- lgp->args.type = server->pnfs_curr_ld->id;
- lgp->args.inode = ino;
- lgp->args.ctx = get_nfs_open_context(ctx);
- lgp->gfp_flags = gfp_flags;
- lgp->cred = lo->plh_lc_cred;
-
- lseg = nfs4_proc_layoutget(lgp, gfp_flags);
- } while (lseg == ERR_PTR(-EAGAIN));
-
- if (IS_ERR(lseg) && !nfs_error_is_fatal(PTR_ERR(lseg)))
- lseg = NULL;
- else
- pnfs_layout_clear_fail_bit(lo,
- pnfs_iomode_to_fail_bit(range->iomode));
+ lgp = kzalloc(sizeof(*lgp), gfp_flags);
+ if (lgp == NULL)
+ return ERR_PTR(-ENOMEM);
- return lseg;
+ i_size = i_size_read(ino);
+
+ lgp->args.minlength = PAGE_SIZE;
+ if (lgp->args.minlength > range->length)
+ lgp->args.minlength = range->length;
+ if (range->iomode == IOMODE_READ) {
+ if (range->offset >= i_size)
+ lgp->args.minlength = 0;
+ else if (i_size - range->offset < lgp->args.minlength)
+ lgp->args.minlength = i_size - range->offset;
+ }
+ lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
+ pnfs_copy_range(&lgp->args.range, range);
+ lgp->args.type = server->pnfs_curr_ld->id;
+ lgp->args.inode = ino;
+ lgp->args.ctx = get_nfs_open_context(ctx);
+ nfs4_stateid_copy(&lgp->args.stateid, stateid);
+ lgp->gfp_flags = gfp_flags;
+ lgp->cred = lo->plh_lc_cred;
+
+ return nfs4_proc_layoutget(lgp, timeout, gfp_flags);
}
static void pnfs_clear_layoutcommit(struct inode *inode,
@@ -899,6 +877,7 @@ pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
return false;
lo->plh_return_iomode = 0;
+ lo->plh_return_seq = 0;
pnfs_get_layout_hdr(lo);
clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
return true;
@@ -969,6 +948,7 @@ static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
bool send;
nfs4_stateid_copy(&stateid, &lo->plh_stateid);
+ stateid.seqid = cpu_to_be32(lo->plh_return_seq);
iomode = lo->plh_return_iomode;
send = pnfs_prepare_layoutreturn(lo);
spin_unlock(&inode->i_lock);
@@ -1012,7 +992,7 @@ _pnfs_return_layout(struct inode *ino)
pnfs_get_layout_hdr(lo);
empty = list_empty(&lo->plh_segs);
pnfs_clear_layoutcommit(ino, &tmp_list);
- pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
+ pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);
if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
struct pnfs_layout_range range = {
@@ -1341,23 +1321,28 @@ out_existing:
/*
* iomode matching rules:
- * iomode   lseg    match
- * ------   ------  -----
- * ANY      READ    true
- * ANY      RW      true
- * RW       READ    false
- * RW       RW      true
- * READ     READ    true
- * READ     RW      true
+ * iomode   lseg    strict   match
+ *                  iomode
+ * ------   ------  ------   -----
+ * ANY      READ    N/A      true
+ * ANY      RW      N/A      true
+ * RW       READ    N/A      false
+ * RW       RW      N/A      true
+ * READ     READ    N/A      true
+ * READ     RW      true     false
+ * READ     RW      false    true
*/
static bool
pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
- const struct pnfs_layout_range *range)
+ const struct pnfs_layout_range *range,
+ bool strict_iomode)
{
struct pnfs_layout_range range1;
if ((range->iomode == IOMODE_RW &&
ls_range->iomode != IOMODE_RW) ||
+ (range->iomode != ls_range->iomode &&
+ strict_iomode == true) ||
!pnfs_lseg_range_intersecting(ls_range, range))
return 0;
@@ -1372,7 +1357,8 @@ pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
*/
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
- struct pnfs_layout_range *range)
+ struct pnfs_layout_range *range,
+ bool strict_iomode)
{
struct pnfs_layout_segment *lseg, *ret = NULL;
@@ -1381,7 +1367,8 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo,
list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
- pnfs_lseg_range_match(&lseg->pls_range, range)) {
+ pnfs_lseg_range_match(&lseg->pls_range, range,
+ strict_iomode)) {
ret = pnfs_get_lseg(lseg);
break;
}
@@ -1498,6 +1485,7 @@ pnfs_update_layout(struct inode *ino,
loff_t pos,
u64 count,
enum pnfs_iomode iomode,
+ bool strict_iomode,
gfp_t gfp_flags)
{
struct pnfs_layout_range arg = {
@@ -1505,27 +1493,30 @@ pnfs_update_layout(struct inode *ino,
.offset = pos,
.length = count,
};
- unsigned pg_offset;
+ unsigned pg_offset, seq;
struct nfs_server *server = NFS_SERVER(ino);
struct nfs_client *clp = server->nfs_client;
- struct pnfs_layout_hdr *lo;
+ struct pnfs_layout_hdr *lo = NULL;
struct pnfs_layout_segment *lseg = NULL;
+ nfs4_stateid stateid;
+ long timeout = 0;
+ unsigned long giveup = jiffies + rpc_get_timeout(server->client);
bool first;
if (!pnfs_enabled_sb(NFS_SERVER(ino))) {
- trace_pnfs_update_layout(ino, pos, count, iomode, NULL,
+ trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_NO_PNFS);
goto out;
}
if (iomode == IOMODE_READ && i_size_read(ino) == 0) {
- trace_pnfs_update_layout(ino, pos, count, iomode, NULL,
+ trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_RD_ZEROLEN);
goto out;
}
if (pnfs_within_mdsthreshold(ctx, ino, iomode)) {
- trace_pnfs_update_layout(ino, pos, count, iomode, NULL,
+ trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_MDSTHRESH);
goto out;
}
@@ -1536,14 +1527,14 @@ lookup_again:
lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
if (lo == NULL) {
spin_unlock(&ino->i_lock);
- trace_pnfs_update_layout(ino, pos, count, iomode, NULL,
+ trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_NOMEM);
goto out;
}
/* Do we even need to bother with this? */
if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
- trace_pnfs_update_layout(ino, pos, count, iomode, lo,
+ trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_BULK_RECALL);
dprintk("%s matches recall, use MDS\n", __func__);
goto out_unlock;
@@ -1551,14 +1542,34 @@ lookup_again:
/* if LAYOUTGET already failed once we don't try again */
if (pnfs_layout_io_test_failed(lo, iomode)) {
- trace_pnfs_update_layout(ino, pos, count, iomode, lo,
+ trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_IO_TEST_FAIL);
goto out_unlock;
}
- first = list_empty(&lo->plh_segs);
- if (first) {
- /* The first layoutget for the file. Need to serialize per
+ lseg = pnfs_find_lseg(lo, &arg, strict_iomode);
+ if (lseg) {
+ trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
+ PNFS_UPDATE_LAYOUT_FOUND_CACHED);
+ goto out_unlock;
+ }
+
+ if (!nfs4_valid_open_stateid(ctx->state)) {
+ trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
+ PNFS_UPDATE_LAYOUT_INVALID_OPEN);
+ goto out_unlock;
+ }
+
+ /*
+ * Choose a stateid for the LAYOUTGET. If we don't have a layout
+ * stateid, or it has been invalidated, then we must use the open
+ * stateid.
+ */
+ if (lo->plh_stateid.seqid == 0 ||
+ test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
+
+ /*
+ * The first layoutget for the file. Need to serialize per
* RFC 5661 Errata 3208.
*/
if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
@@ -1567,18 +1578,17 @@ lookup_again:
wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET,
TASK_UNINTERRUPTIBLE);
pnfs_put_layout_hdr(lo);
+ dprintk("%s retrying\n", __func__);
goto lookup_again;
}
+
+ first = true;
+ do {
+ seq = read_seqbegin(&ctx->state->seqlock);
+ nfs4_stateid_copy(&stateid, &ctx->state->stateid);
+ } while (read_seqretry(&ctx->state->seqlock, seq));
} else {
- /* Check to see if the layout for the given range
- * already exists
- */
- lseg = pnfs_find_lseg(lo, &arg);
- if (lseg) {
- trace_pnfs_update_layout(ino, pos, count, iomode, lo,
- PNFS_UPDATE_LAYOUT_FOUND_CACHED);
- goto out_unlock;
- }
+ nfs4_stateid_copy(&stateid, &lo->plh_stateid);
}
/*
@@ -1593,15 +1603,17 @@ lookup_again:
pnfs_clear_first_layoutget(lo);
pnfs_put_layout_hdr(lo);
dprintk("%s retrying\n", __func__);
+ trace_pnfs_update_layout(ino, pos, count, iomode, lo,
+ lseg, PNFS_UPDATE_LAYOUT_RETRY);
goto lookup_again;
}
- trace_pnfs_update_layout(ino, pos, count, iomode, lo,
+ trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_RETURN);
goto out_put_layout_hdr;
}
if (pnfs_layoutgets_blocked(lo)) {
- trace_pnfs_update_layout(ino, pos, count, iomode, lo,
+ trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_BLOCKED);
goto out_unlock;
}
@@ -1626,10 +1638,36 @@ lookup_again:
if (arg.length != NFS4_MAX_UINT64)
arg.length = PAGE_ALIGN(arg.length);
- lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
- atomic_dec(&lo->plh_outstanding);
- trace_pnfs_update_layout(ino, pos, count, iomode, lo,
+ lseg = send_layoutget(lo, ctx, &stateid, &arg, &timeout, gfp_flags);
+ trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
+ if (IS_ERR(lseg)) {
+ switch(PTR_ERR(lseg)) {
+ case -ERECALLCONFLICT:
+ if (time_after(jiffies, giveup))
+ lseg = NULL;
+ /* Fallthrough */
+ case -EAGAIN:
+ pnfs_put_layout_hdr(lo);
+ if (first)
+ pnfs_clear_first_layoutget(lo);
+ if (lseg) {
+ trace_pnfs_update_layout(ino, pos, count,
+ iomode, lo, lseg, PNFS_UPDATE_LAYOUT_RETRY);
+ goto lookup_again;
+ }
+ /* Fallthrough */
+ default:
+ if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
+ pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
+ lseg = NULL;
+ }
+ }
+ } else {
+ pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
+ }
+
+ atomic_dec(&lo->plh_outstanding);
out_put_layout_hdr:
if (first)
pnfs_clear_first_layoutget(lo);
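The switch above is the new home of the retry policy: nfs4_proc_layoutget() now reports failures as ERR_PTR values and pnfs_update_layout() decides whether to loop back to lookup_again or give up once the deadline passes (-EAGAIN retries unconditionally; -ERECALLCONFLICT only until the RPC timeout computed into "giveup"). A simplified sketch of that caller-owned retry shape, with hypothetical stand-ins for the kernel pieces:

#include <errno.h>
#include <stdio.h>
#include <time.h>

#define ERECALLCONFLICT 530	/* kernel-internal errno, copied here for illustration */

static int attempts;

/* Hypothetical worker: fails twice with -EAGAIN, then succeeds. */
static int try_get_layout(void)
{
	return (++attempts < 3) ? -EAGAIN : 0;
}

/* The caller owns the policy: retry transient errors, but stop
 * retrying recall conflicts once the give-up deadline has passed. */
static int get_layout_with_retry(time_t give_up)
{
	for (;;) {
		int err = try_get_layout();

		if (err == 0)
			return 0;
		if (err == -ERECALLCONFLICT && time(NULL) > give_up)
			return err;	/* conflict outlived the deadline */
		if (err != -EAGAIN && err != -ERECALLCONFLICT)
			return err;	/* fatal: propagate to the caller */
		/* transient failure: loop and try again */
	}
}

int main(void)
{
	int ret = get_layout_with_retry(time(NULL) + 30);

	printf("result %d after %d attempts\n", ret, attempts);
	return 0;
}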
@@ -1678,38 +1716,36 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
struct pnfs_layout_segment *lseg;
struct inode *ino = lo->plh_inode;
LIST_HEAD(free_me);
- int status = -EINVAL;
if (!pnfs_sanity_check_layout_range(&res->range))
- goto out;
+ return ERR_PTR(-EINVAL);
/* Inject layout blob into I/O device driver */
lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
- if (!lseg || IS_ERR(lseg)) {
+ if (IS_ERR_OR_NULL(lseg)) {
if (!lseg)
- status = -ENOMEM;
- else
- status = PTR_ERR(lseg);
- dprintk("%s: Could not allocate layout: error %d\n",
- __func__, status);
- goto out;
+ lseg = ERR_PTR(-ENOMEM);
+
+ dprintk("%s: Could not allocate layout: error %ld\n",
+ __func__, PTR_ERR(lseg));
+ return lseg;
}
init_lseg(lo, lseg);
lseg->pls_range = res->range;
+ lseg->pls_seq = be32_to_cpu(res->stateid.seqid);
spin_lock(&ino->i_lock);
if (pnfs_layoutgets_blocked(lo)) {
dprintk("%s forget reply due to state\n", __func__);
- goto out_forget_reply;
+ goto out_forget;
}
if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
/* existing state ID, make sure the sequence number matches. */
if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
dprintk("%s forget reply due to sequence\n", __func__);
- status = -EAGAIN;
- goto out_forget_reply;
+ goto out_forget;
}
pnfs_set_layout_stateid(lo, &res->stateid, false);
} else {
@@ -1718,7 +1754,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
* inode invalid, and don't bother validating the stateid
* sequence number.
*/
- pnfs_mark_matching_lsegs_invalid(lo, &free_me, NULL);
+ pnfs_mark_matching_lsegs_invalid(lo, &free_me, NULL, 0);
nfs4_stateid_copy(&lo->plh_stateid, &res->stateid);
lo->plh_barrier = be32_to_cpu(res->stateid.seqid);
@@ -1735,18 +1771,17 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
spin_unlock(&ino->i_lock);
pnfs_free_lseg_list(&free_me);
return lseg;
-out:
- return ERR_PTR(status);
-out_forget_reply:
+out_forget:
spin_unlock(&ino->i_lock);
lseg->pls_layout = lo;
NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
- goto out;
+ return ERR_PTR(-EAGAIN);
}
static void
-pnfs_set_plh_return_iomode(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode)
+pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
+ u32 seq)
{
if (lo->plh_return_iomode == iomode)
return;
@@ -1754,6 +1789,8 @@ pnfs_set_plh_return_iomode(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode)
iomode = IOMODE_ANY;
lo->plh_return_iomode = iomode;
set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
+ if (!lo->plh_return_seq || pnfs_seqid_is_newer(seq, lo->plh_return_seq))
+ lo->plh_return_seq = seq;
}
/**
@@ -1769,7 +1806,8 @@ pnfs_set_plh_return_iomode(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode)
int
pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
struct list_head *tmp_list,
- const struct pnfs_layout_range *return_range)
+ const struct pnfs_layout_range *return_range,
+ u32 seq)
{
struct pnfs_layout_segment *lseg, *next;
int remaining = 0;
@@ -1792,8 +1830,11 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
continue;
remaining++;
set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
- pnfs_set_plh_return_iomode(lo, return_range->iomode);
}
+
+ if (remaining)
+ pnfs_set_plh_return_info(lo, return_range->iomode, seq);
+
return remaining;
}
@@ -1810,13 +1851,14 @@ void pnfs_error_mark_layout_for_return(struct inode *inode,
bool return_now = false;
spin_lock(&inode->i_lock);
- pnfs_set_plh_return_iomode(lo, range.iomode);
+ pnfs_set_plh_return_info(lo, range.iomode, lseg->pls_seq);
/*
* mark all matching lsegs so that we are sure to have no live
* segments at hand when sending layoutreturn. See pnfs_put_lseg()
* for how it works.
*/
- if (!pnfs_mark_matching_lsegs_return(lo, &free_me, &range)) {
+ if (!pnfs_mark_matching_lsegs_return(lo, &free_me,
+ &range, lseg->pls_seq)) {
nfs4_stateid stateid;
enum pnfs_iomode iomode = lo->plh_return_iomode;
@@ -1849,6 +1891,7 @@ pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *r
req_offset(req),
rd_size,
IOMODE_READ,
+ false,
GFP_KERNEL);
if (IS_ERR(pgio->pg_lseg)) {
pgio->pg_error = PTR_ERR(pgio->pg_lseg);
@@ -1873,6 +1916,7 @@ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
req_offset(req),
wb_size,
IOMODE_RW,
+ false,
GFP_NOFS);
if (IS_ERR(pgio->pg_lseg)) {
pgio->pg_error = PTR_ERR(pgio->pg_lseg);
@@ -2143,12 +2187,15 @@ pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
}
/* Resend all requests through pnfs. */
-int pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
+void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
{
struct nfs_pageio_descriptor pgio;
- nfs_pageio_init_read(&pgio, hdr->inode, false, hdr->completion_ops);
- return nfs_pageio_resend(&pgio, hdr);
+ if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+ nfs_pageio_init_read(&pgio, hdr->inode, false,
+ hdr->completion_ops);
+ hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);
+ }
}
EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);
@@ -2158,12 +2205,11 @@ pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
struct pnfs_layout_segment *lseg = desc->pg_lseg;
enum pnfs_try_status trypnfs;
- int err = 0;
trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
if (trypnfs == PNFS_TRY_AGAIN)
- err = pnfs_read_resend_pnfs(hdr);
- if (trypnfs == PNFS_NOT_ATTEMPTED || err)
+ pnfs_read_resend_pnfs(hdr);
+ if (trypnfs == PNFS_NOT_ATTEMPTED || hdr->task.tk_status)
pnfs_read_through_mds(desc, hdr);
}
@@ -2405,7 +2451,7 @@ pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
spin_lock(&inode->i_lock);
if (!NFS_I(inode)->layout) {
spin_unlock(&inode->i_lock);
- goto out;
+ goto out_clear_layoutstats;
}
hdr = NFS_I(inode)->layout;
pnfs_get_layout_hdr(hdr);
@@ -2434,6 +2480,7 @@ out_free:
kfree(data);
out_put:
pnfs_put_layout_hdr(hdr);
+out_clear_layoutstats:
smp_mb__before_atomic();
clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
smp_mb__after_atomic();
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 1ac1db5f6dad..b21bd0bee784 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -64,6 +64,7 @@ struct pnfs_layout_segment {
struct list_head pls_lc_list;
struct pnfs_layout_range pls_range;
atomic_t pls_refcount;
+ u32 pls_seq;
unsigned long pls_flags;
struct pnfs_layout_hdr *pls_layout;
struct work_struct pls_work;
@@ -194,6 +195,7 @@ struct pnfs_layout_hdr {
unsigned long plh_flags;
nfs4_stateid plh_stateid;
u32 plh_barrier; /* ignore lower seqids */
+ u32 plh_return_seq;
enum pnfs_iomode plh_return_iomode;
loff_t plh_lwb; /* last write byte for layoutcommit */
struct rpc_cred *plh_lc_cred; /* layoutcommit cred */
@@ -226,7 +228,7 @@ extern void pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *);
extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
struct pnfs_device *dev,
struct rpc_cred *cred);
-extern struct pnfs_layout_segment* nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags);
+extern struct pnfs_layout_segment* nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags);
extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync);
/* pnfs.c */
@@ -258,16 +260,14 @@ void pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo);
void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
const nfs4_stateid *new,
bool update_barrier);
-int pnfs_choose_layoutget_stateid(nfs4_stateid *dst,
- struct pnfs_layout_hdr *lo,
- const struct pnfs_layout_range *range,
- struct nfs4_state *open_state);
int pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
struct list_head *tmp_list,
- const struct pnfs_layout_range *recall_range);
+ const struct pnfs_layout_range *recall_range,
+ u32 seq);
int pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
struct list_head *tmp_list,
- const struct pnfs_layout_range *recall_range);
+ const struct pnfs_layout_range *recall_range,
+ u32 seq);
bool pnfs_roc(struct inode *ino);
void pnfs_roc_release(struct inode *ino);
void pnfs_roc_set_barrier(struct inode *ino, u32 barrier);
@@ -282,12 +282,13 @@ int _pnfs_return_layout(struct inode *);
int pnfs_commit_and_return_layout(struct inode *);
void pnfs_ld_write_done(struct nfs_pgio_header *);
void pnfs_ld_read_done(struct nfs_pgio_header *);
-int pnfs_read_resend_pnfs(struct nfs_pgio_header *);
+void pnfs_read_resend_pnfs(struct nfs_pgio_header *);
struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
struct nfs_open_context *ctx,
loff_t pos,
u64 count,
enum pnfs_iomode iomode,
+ bool strict_iomode,
gfp_t gfp_flags);
void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo);
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index 4aaed890048f..0dfc476da3e1 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -61,7 +61,7 @@ EXPORT_SYMBOL_GPL(pnfs_generic_commit_release);
/* The generic layer is about to remove the req from the commit list.
* If this will make the bucket empty, it will need to put the lseg reference.
- * Note this must be called holding the inode (/cinfo) lock
+ * Note this must be called holding i_lock
*/
void
pnfs_generic_clear_request_commit(struct nfs_page *req,
@@ -98,7 +98,7 @@ pnfs_generic_transfer_commit_list(struct list_head *src, struct list_head *dst,
if (!nfs_lock_request(req))
continue;
kref_get(&req->wb_kref);
- if (cond_resched_lock(cinfo->lock))
+ if (cond_resched_lock(&cinfo->inode->i_lock))
list_safe_reset_next(req, tmp, wb_list);
nfs_request_remove_commit_list(req, cinfo);
clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
@@ -119,7 +119,7 @@ pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
struct list_head *dst = &bucket->committing;
int ret;
- lockdep_assert_held(cinfo->lock);
+ lockdep_assert_held(&cinfo->inode->i_lock);
ret = pnfs_generic_transfer_commit_list(src, dst, cinfo, max);
if (ret) {
cinfo->ds->nwritten -= ret;
@@ -142,7 +142,7 @@ int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo,
{
int i, rv = 0, cnt;
- lockdep_assert_held(cinfo->lock);
+ lockdep_assert_held(&cinfo->inode->i_lock);
for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) {
cnt = pnfs_generic_scan_ds_commit_list(&cinfo->ds->buckets[i],
cinfo, max);
@@ -161,16 +161,16 @@ void pnfs_generic_recover_commit_reqs(struct list_head *dst,
struct pnfs_layout_segment *freeme;
int i;
- lockdep_assert_held(cinfo->lock);
+ lockdep_assert_held(&cinfo->inode->i_lock);
restart:
for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
if (pnfs_generic_transfer_commit_list(&b->written, dst,
cinfo, 0)) {
freeme = b->wlseg;
b->wlseg = NULL;
- spin_unlock(cinfo->lock);
+ spin_unlock(&cinfo->inode->i_lock);
pnfs_put_lseg(freeme);
- spin_lock(cinfo->lock);
+ spin_lock(&cinfo->inode->i_lock);
goto restart;
}
}
@@ -186,7 +186,7 @@ static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
LIST_HEAD(pages);
int i;
- spin_lock(cinfo->lock);
+ spin_lock(&cinfo->inode->i_lock);
for (i = idx; i < fl_cinfo->nbuckets; i++) {
bucket = &fl_cinfo->buckets[i];
if (list_empty(&bucket->committing))
@@ -194,12 +194,12 @@ static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
freeme = bucket->clseg;
bucket->clseg = NULL;
list_splice_init(&bucket->committing, &pages);
- spin_unlock(cinfo->lock);
+ spin_unlock(&cinfo->inode->i_lock);
nfs_retry_commit(&pages, freeme, cinfo, i);
pnfs_put_lseg(freeme);
- spin_lock(cinfo->lock);
+ spin_lock(&cinfo->inode->i_lock);
}
- spin_unlock(cinfo->lock);
+ spin_unlock(&cinfo->inode->i_lock);
}
static unsigned int
@@ -238,14 +238,31 @@ void pnfs_fetch_commit_bucket_list(struct list_head *pages,
struct pnfs_commit_bucket *bucket;
bucket = &cinfo->ds->buckets[data->ds_commit_index];
- spin_lock(cinfo->lock);
+ spin_lock(&cinfo->inode->i_lock);
list_splice_init(&bucket->committing, pages);
data->lseg = bucket->clseg;
bucket->clseg = NULL;
- spin_unlock(cinfo->lock);
+ spin_unlock(&cinfo->inode->i_lock);
}
+/* Helper function for pnfs_generic_commit_pagelist to catch an empty
+ * page list. This can happen when two commits race. */
+static bool
+pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
+ struct nfs_commit_data *data,
+ struct nfs_commit_info *cinfo)
+{
+ if (list_empty(pages)) {
+ if (atomic_dec_and_test(&cinfo->mds->rpcs_out))
+ wake_up_atomic_t(&cinfo->mds->rpcs_out);
+ nfs_commitdata_release(data);
+ return true;
+ }
+
+ return false;
+}
+
/* This follows nfs_commit_list pretty closely */
int
pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
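The cancel-empty-pagelist helper above exists because two commit paths can race to drain the same list: the loser finds the list empty, but it must still drop its rpcs_out reference (waking anyone sleeping in nfs_commit_inode()) and release the preallocated commit data, otherwise that commit stays counted as outstanding forever.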
@@ -280,6 +297,11 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
list_for_each_entry_safe(data, tmp, &list, pages) {
list_del_init(&data->pages);
if (data->ds_commit_index < 0) {
+ /* another commit raced with us */
+ if (pnfs_generic_commit_cancel_empty_pagelist(mds_pages,
+ data, cinfo))
+ continue;
+
nfs_init_commit(data, mds_pages, NULL, cinfo);
nfs_initiate_commit(NFS_CLIENT(inode), data,
NFS_PROTO(data->inode),
@@ -288,6 +310,12 @@ pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
LIST_HEAD(pages);
pnfs_fetch_commit_bucket_list(&pages, data, cinfo);
+
+ /* another commit raced with us */
+ if (pnfs_generic_commit_cancel_empty_pagelist(&pages,
+ data, cinfo))
+ continue;
+
nfs_init_commit(data, &pages, data->lseg, cinfo);
initiate_commit(data, how);
}
@@ -874,12 +902,12 @@ pnfs_layout_mark_request_commit(struct nfs_page *req,
struct list_head *list;
struct pnfs_commit_bucket *buckets;
- spin_lock(cinfo->lock);
+ spin_lock(&cinfo->inode->i_lock);
buckets = cinfo->ds->buckets;
list = &buckets[ds_commit_idx].written;
if (list_empty(list)) {
if (!pnfs_is_valid_lseg(lseg)) {
- spin_unlock(cinfo->lock);
+ spin_unlock(&cinfo->inode->i_lock);
cinfo->completion_ops->resched_write(cinfo, req);
return;
}
@@ -896,7 +924,7 @@ pnfs_layout_mark_request_commit(struct nfs_page *req,
cinfo->ds->nwritten++;
nfs_request_add_commit_list_locked(req, list, cinfo);
- spin_unlock(cinfo->lock);
+ spin_unlock(&cinfo->inode->i_lock);
nfs_mark_page_unstable(req->wb_page, cinfo);
}
EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index f1268280244e..2137e0202f25 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -191,6 +191,7 @@ static const match_table_t nfs_mount_option_tokens = {
enum {
Opt_xprt_udp, Opt_xprt_udp6, Opt_xprt_tcp, Opt_xprt_tcp6, Opt_xprt_rdma,
+ Opt_xprt_rdma6,
Opt_xprt_err
};
@@ -201,6 +202,7 @@ static const match_table_t nfs_xprt_protocol_tokens = {
{ Opt_xprt_tcp, "tcp" },
{ Opt_xprt_tcp6, "tcp6" },
{ Opt_xprt_rdma, "rdma" },
+ { Opt_xprt_rdma6, "rdma6" },
{ Opt_xprt_err, NULL }
};
@@ -1456,6 +1458,8 @@ static int nfs_parse_mount_options(char *raw,
mnt->flags |= NFS_MOUNT_TCP;
mnt->nfs_server.protocol = XPRT_TRANSPORT_TCP;
break;
+ case Opt_xprt_rdma6:
+ protofamily = AF_INET6;
case Opt_xprt_rdma:
/* vector side protocols to TCP */
mnt->flags |= NFS_MOUNT_TCP;
@@ -2408,6 +2412,11 @@ static int nfs_compare_super_address(struct nfs_server *server1,
struct nfs_server *server2)
{
struct sockaddr *sap1, *sap2;
+ struct rpc_xprt *xprt1 = server1->client->cl_xprt;
+ struct rpc_xprt *xprt2 = server2->client->cl_xprt;
+
+ if (!net_eq(xprt1->xprt_net, xprt2->xprt_net))
+ return 0;
sap1 = (struct sockaddr *)&server1->nfs_client->cl_addr;
sap2 = (struct sockaddr *)&server2->nfs_client->cl_addr;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 5f4fd53e5764..e1c74d3db64d 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -245,8 +245,7 @@ static void nfs_mark_uptodate(struct nfs_page *req)
static int wb_priority(struct writeback_control *wbc)
{
int ret = 0;
- if (wbc->for_reclaim)
- return FLUSH_HIGHPRI | FLUSH_COND_STABLE;
+
if (wbc->sync_mode == WB_SYNC_ALL)
ret = FLUSH_COND_STABLE;
return ret;
@@ -737,7 +736,7 @@ static void nfs_inode_remove_request(struct nfs_page *req)
head = req->wb_head;
spin_lock(&inode->i_lock);
- if (likely(!PageSwapCache(head->wb_page))) {
+ if (likely(head->wb_page && !PageSwapCache(head->wb_page))) {
set_page_private(head->wb_page, 0);
ClearPagePrivate(head->wb_page);
smp_mb__after_atomic();
@@ -759,7 +758,8 @@ static void nfs_inode_remove_request(struct nfs_page *req)
static void
nfs_mark_request_dirty(struct nfs_page *req)
{
- __set_page_dirty_nobuffers(req->wb_page);
+ if (req->wb_page)
+ __set_page_dirty_nobuffers(req->wb_page);
}
/*
@@ -804,7 +804,7 @@ nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
* number of outstanding requests requiring a commit as well as
* the MM page stats.
*
- * The caller must hold the cinfo->lock, and the nfs_page lock.
+ * The caller must hold cinfo->inode->i_lock, and the nfs_page lock.
*/
void
nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
@@ -832,10 +832,11 @@ EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);
void
nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
{
- spin_lock(cinfo->lock);
+ spin_lock(&cinfo->inode->i_lock);
nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
- spin_unlock(cinfo->lock);
- nfs_mark_page_unstable(req->wb_page, cinfo);
+ spin_unlock(&cinfo->inode->i_lock);
+ if (req->wb_page)
+ nfs_mark_page_unstable(req->wb_page, cinfo);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
@@ -864,7 +865,7 @@ EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
struct inode *inode)
{
- cinfo->lock = &inode->i_lock;
+ cinfo->inode = inode;
cinfo->mds = &NFS_I(inode)->commit_info;
cinfo->ds = pnfs_get_ds_info(inode);
cinfo->dreq = NULL;
@@ -967,7 +968,7 @@ nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
return cinfo->mds->ncommit;
}
-/* cinfo->lock held by caller */
+/* cinfo->inode->i_lock held by caller */
int
nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
struct nfs_commit_info *cinfo, int max)
@@ -979,7 +980,7 @@ nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
if (!nfs_lock_request(req))
continue;
kref_get(&req->wb_kref);
- if (cond_resched_lock(cinfo->lock))
+ if (cond_resched_lock(&cinfo->inode->i_lock))
list_safe_reset_next(req, tmp, wb_list);
nfs_request_remove_commit_list(req, cinfo);
nfs_list_add_request(req, dst);
@@ -1005,7 +1006,7 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst,
{
int ret = 0;
- spin_lock(cinfo->lock);
+ spin_lock(&cinfo->inode->i_lock);
if (cinfo->mds->ncommit > 0) {
const int max = INT_MAX;
@@ -1013,7 +1014,7 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst,
cinfo, max);
ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
}
- spin_unlock(cinfo->lock);
+ spin_unlock(&cinfo->inode->i_lock);
return ret;
}
@@ -1709,6 +1710,10 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
{
struct nfs_commit_data *data;
+ /* another commit raced with us */
+ if (list_empty(head))
+ return 0;
+
data = nfs_commitdata_alloc();
if (!data)
@@ -1724,6 +1729,36 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how,
return -ENOMEM;
}
+int nfs_commit_file(struct file *file, struct nfs_write_verifier *verf)
+{
+ struct inode *inode = file_inode(file);
+ struct nfs_open_context *open;
+ struct nfs_commit_info cinfo;
+ struct nfs_page *req;
+ int ret;
+
+ open = get_nfs_open_context(nfs_file_open_context(file));
+ req = nfs_create_request(open, NULL, NULL, 0, i_size_read(inode));
+ if (IS_ERR(req)) {
+ ret = PTR_ERR(req);
+ goto out_put;
+ }
+
+ nfs_init_cinfo_from_inode(&cinfo, inode);
+
+ memcpy(&req->wb_verf, verf, sizeof(struct nfs_write_verifier));
+ nfs_request_add_commit_list(req, &cinfo);
+ ret = nfs_commit_inode(inode, FLUSH_SYNC);
+ if (ret > 0)
+ ret = 0;
+
+ nfs_free_request(req);
+out_put:
+ put_nfs_open_context(open);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nfs_commit_file);
+
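For reference, a minimal usage sketch of the nfs_commit_file() helper added above (not part of the patch; the caller name is hypothetical). It assumes the caller saved a write verifier from an earlier WRITE reply:

/* Hypothetical caller: issue a synchronous COMMIT through the MDS for
 * previously written data, using a saved write verifier. */
static int example_commit_saved_writes(struct file *filp,
                                       struct nfs_write_verifier *verf)
{
        /* returns 0 on success or a negative errno */
        return nfs_commit_file(filp, verf);
}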
/*
* COMMIT call returned
*/
@@ -1748,7 +1783,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
while (!list_empty(&data->pages)) {
req = nfs_list_entry(data->pages.next);
nfs_list_remove_request(req);
- nfs_clear_page_commit(req->wb_page);
+ if (req->wb_page)
+ nfs_clear_page_commit(req->wb_page);
dprintk("NFS: commit (%s/%llu %d@%lld)",
req->wb_context->dentry->d_sb->s_id,
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index a8d15beee5cb..6aaf3e351391 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -272,10 +272,21 @@ struct o2hb_region {
struct delayed_work hr_write_timeout_work;
unsigned long hr_last_timeout_start;
+ /* negotiation timer, used to negotiate extending the hb timeout. */
+ struct delayed_work hr_nego_timeout_work;
+ unsigned long hr_nego_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+
/* Used during o2hb_check_slot to hold a copy of the block
* being checked because we temporarily have to zero out the
* crc field. */
struct o2hb_disk_heartbeat_block *hr_tmp_block;
+
+ /* Message key for negotiate timeout message. */
+ unsigned int hr_key;
+ struct list_head hr_handler_list;
+
+ /* last hb status, 0 for success, nonzero for error. */
+ int hr_last_hb_status;
};
struct o2hb_bio_wait_ctxt {
@@ -284,6 +295,17 @@ struct o2hb_bio_wait_ctxt {
int wc_error;
};
+#define O2HB_NEGO_TIMEOUT_MS (O2HB_MAX_WRITE_TIMEOUT_MS/2)
+
+enum {
+ O2HB_NEGO_TIMEOUT_MSG = 1,
+ O2HB_NEGO_APPROVE_MSG = 2,
+};
+
+struct o2hb_nego_msg {
+ u8 node_num;
+};
+
static void o2hb_write_timeout(struct work_struct *work)
{
int failed, quorum;
@@ -319,7 +341,7 @@ static void o2hb_write_timeout(struct work_struct *work)
o2quo_disk_timeout();
}
-static void o2hb_arm_write_timeout(struct o2hb_region *reg)
+static void o2hb_arm_timeout(struct o2hb_region *reg)
{
/* Arm writeout only after thread reaches steady state */
if (atomic_read(&reg->hr_steady_iterations) != 0)
@@ -334,14 +356,132 @@ static void o2hb_arm_write_timeout(struct o2hb_region *reg)
spin_unlock(&o2hb_live_lock);
}
cancel_delayed_work(&reg->hr_write_timeout_work);
- reg->hr_last_timeout_start = jiffies;
schedule_delayed_work(&reg->hr_write_timeout_work,
msecs_to_jiffies(O2HB_MAX_WRITE_TIMEOUT_MS));
+
+ cancel_delayed_work(&reg->hr_nego_timeout_work);
+ /* the negotiation timeout must be shorter than the write timeout. */
+ schedule_delayed_work(&reg->hr_nego_timeout_work,
+ msecs_to_jiffies(O2HB_NEGO_TIMEOUT_MS));
+ memset(reg->hr_nego_node_bitmap, 0, sizeof(reg->hr_nego_node_bitmap));
}
-static void o2hb_disarm_write_timeout(struct o2hb_region *reg)
+static void o2hb_disarm_timeout(struct o2hb_region *reg)
{
cancel_delayed_work_sync(&reg->hr_write_timeout_work);
+ cancel_delayed_work_sync(&reg->hr_nego_timeout_work);
+}
+
+static int o2hb_send_nego_msg(int key, int type, u8 target)
+{
+ struct o2hb_nego_msg msg;
+ int status, ret;
+
+ msg.node_num = o2nm_this_node();
+again:
+ ret = o2net_send_message(type, key, &msg, sizeof(msg),
+ target, &status);
+
+ if (ret == -EAGAIN || ret == -ENOMEM) {
+ msleep(100);
+ goto again;
+ }
+
+ return ret;
+}
+
+static void o2hb_nego_timeout(struct work_struct *work)
+{
+ unsigned long live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ int master_node, i, ret;
+ struct o2hb_region *reg;
+
+ reg = container_of(work, struct o2hb_region, hr_nego_timeout_work.work);
+ /* don't negotiate the timeout if the last hb failed, since it is
+ * very likely the io itself failed; let the write timeout fence
+ * this node instead.
+ */
+ if (reg->hr_last_hb_status)
+ return;
+
+ o2hb_fill_node_map(live_node_bitmap, sizeof(live_node_bitmap));
+ /* the lowest live node acts as master and makes the negotiation decision. */
+ master_node = find_next_bit(live_node_bitmap, O2NM_MAX_NODES, 0);
+
+ if (master_node == o2nm_this_node()) {
+ if (!test_bit(master_node, reg->hr_nego_node_bitmap)) {
+ printk(KERN_NOTICE "o2hb: node %d hb write hung for %ds on region %s (%s).\n",
+ o2nm_this_node(), O2HB_NEGO_TIMEOUT_MS/1000,
+ config_item_name(&reg->hr_item), reg->hr_dev_name);
+ set_bit(master_node, reg->hr_nego_node_bitmap);
+ }
+ if (memcmp(reg->hr_nego_node_bitmap, live_node_bitmap,
+ sizeof(reg->hr_nego_node_bitmap))) {
+ /* re-check the negotiation bitmap every second until the
+ * timeout-approval decision can be made.
+ */
+ schedule_delayed_work(&reg->hr_nego_timeout_work,
+ msecs_to_jiffies(1000));
+
+ return;
+ }
+
+ printk(KERN_NOTICE "o2hb: all nodes hb write hung, maybe region %s (%s) is down.\n",
+ config_item_name(&reg->hr_item), reg->hr_dev_name);
+ /* approve negotiate timeout request. */
+ o2hb_arm_timeout(reg);
+
+ i = -1;
+ while ((i = find_next_bit(live_node_bitmap,
+ O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
+ if (i == master_node)
+ continue;
+
+ mlog(ML_HEARTBEAT, "send NEGO_APPROVE msg to node %d\n", i);
+ ret = o2hb_send_nego_msg(reg->hr_key,
+ O2HB_NEGO_APPROVE_MSG, i);
+ if (ret)
+ mlog(ML_ERROR, "send NEGO_APPROVE msg to node %d fail %d\n",
+ i, ret);
+ }
+ } else {
+ /* negotiate timeout with master node. */
+ printk(KERN_NOTICE "o2hb: node %d hb write hung for %ds on region %s (%s), negotiate timeout with node %d.\n",
+ o2nm_this_node(), O2HB_NEGO_TIMEOUT_MS/1000, config_item_name(&reg->hr_item),
+ reg->hr_dev_name, master_node);
+ ret = o2hb_send_nego_msg(reg->hr_key, O2HB_NEGO_TIMEOUT_MSG,
+ master_node);
+ if (ret)
+ mlog(ML_ERROR, "send NEGO_TIMEOUT msg to node %d fail %d\n",
+ master_node, ret);
+ }
+}
+
+static int o2hb_nego_timeout_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data)
+{
+ struct o2hb_region *reg = data;
+ struct o2hb_nego_msg *nego_msg;
+
+ nego_msg = (struct o2hb_nego_msg *)msg->buf;
+ printk(KERN_NOTICE "o2hb: receive negotiate timeout message from node %d on region %s (%s).\n",
+ nego_msg->node_num, config_item_name(&reg->hr_item), reg->hr_dev_name);
+ if (nego_msg->node_num < O2NM_MAX_NODES)
+ set_bit(nego_msg->node_num, reg->hr_nego_node_bitmap);
+ else
+ mlog(ML_ERROR, "got nego timeout message from bad node.\n");
+
+ return 0;
+}
+
+static int o2hb_nego_approve_handler(struct o2net_msg *msg, u32 len, void *data,
+ void **ret_data)
+{
+ struct o2hb_region *reg = data;
+
+ printk(KERN_NOTICE "o2hb: negotiate timeout approved by master node on region %s (%s).\n",
+ config_item_name(&reg->hr_item), reg->hr_dev_name);
+ o2hb_arm_timeout(reg);
+ return 0;
}
static inline void o2hb_bio_wait_init(struct o2hb_bio_wait_ctxt *wc)
@@ -1032,7 +1172,8 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
/* Skip disarming the timeout if own slot has stale/bad data */
if (own_slot_ok) {
o2hb_set_quorum_device(reg);
- o2hb_arm_write_timeout(reg);
+ o2hb_arm_timeout(reg);
+ reg->hr_last_timeout_start = jiffies;
}
bail:
@@ -1096,6 +1237,7 @@ static int o2hb_thread(void *data)
before_hb = ktime_get_real();
ret = o2hb_do_disk_heartbeat(reg);
+ reg->hr_last_hb_status = ret;
after_hb = ktime_get_real();
@@ -1114,7 +1256,7 @@ static int o2hb_thread(void *data)
}
}
- o2hb_disarm_write_timeout(reg);
+ o2hb_disarm_timeout(reg);
/* unclean stop is only used in very bad situations */
for(i = 0; !reg->hr_unclean_stop && i < reg->hr_blocks; i++)
@@ -1451,6 +1593,7 @@ static void o2hb_region_release(struct config_item *item)
list_del(&reg->hr_all_item);
spin_unlock(&o2hb_live_lock);
+ o2net_unregister_handler_list(&reg->hr_handler_list);
kfree(reg);
}
@@ -1762,6 +1905,7 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
}
INIT_DELAYED_WORK(&reg->hr_write_timeout_work, o2hb_write_timeout);
+ INIT_DELAYED_WORK(&reg->hr_nego_timeout_work, o2hb_nego_timeout);
/*
* A node is considered live after it has beat LIVE_THRESHOLD
@@ -1995,13 +2139,37 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
config_item_init_type_name(&reg->hr_item, name, &o2hb_region_type);
+ /* generate the message key the same way the dlm does; for local
+ * heartbeat the name is also the same, so use a different initial
+ * crc value to avoid a message key conflict.
+ */
+ reg->hr_key = crc32_le(reg->hr_region_num + O2NM_MAX_REGIONS,
+ name, strlen(name));
+ INIT_LIST_HEAD(&reg->hr_handler_list);
+ ret = o2net_register_handler(O2HB_NEGO_TIMEOUT_MSG, reg->hr_key,
+ sizeof(struct o2hb_nego_msg),
+ o2hb_nego_timeout_handler,
+ reg, NULL, &reg->hr_handler_list);
+ if (ret)
+ goto free;
+
+ ret = o2net_register_handler(O2HB_NEGO_APPROVE_MSG, reg->hr_key,
+ sizeof(struct o2hb_nego_msg),
+ o2hb_nego_approve_handler,
+ reg, NULL, &reg->hr_handler_list);
+ if (ret)
+ goto unregister_handler;
+
ret = o2hb_debug_region_init(reg, o2hb_debug_dir);
if (ret) {
config_item_put(&reg->hr_item);
- goto free;
+ goto unregister_handler;
}
return &reg->hr_item;
+
+unregister_handler:
+ o2net_unregister_handler_list(&reg->hr_handler_list);
free:
kfree(reg);
return ERR_PTR(ret);
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index b95e7df5b76a..94b18369b1cc 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -44,6 +44,9 @@
* version here in tcp_internal.h should not need to be bumped for
* filesystem locking changes.
*
+ * New in version 12
+ * - Negotiate hb timeout when storage is down.
+ *
* New in version 11
* - Negotiation of filesystem locking in the dlm join.
*
@@ -75,7 +78,7 @@
* - full 64 bit i_size in the metadata lock lvbs
* - introduction of "rw" lock and pushing meta/data locking down
*/
-#define O2NET_PROTOCOL_VERSION 11ULL
+#define O2NET_PROTOCOL_VERSION 12ULL
struct o2net_handshake {
__be64 protocol_version;
__be64 connector_id;
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 0748777f2e2a..c56a7679df93 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -176,12 +176,7 @@ struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags,
}
if (is_bad_inode(inode)) {
iput(inode);
- if ((flags & OCFS2_FI_FLAG_FILECHECK_CHK) ||
- (flags & OCFS2_FI_FLAG_FILECHECK_FIX))
- /* Return OCFS2_FILECHECK_ERR_XXX related errno */
- inode = ERR_PTR(rc);
- else
- inode = ERR_PTR(-ESTALE);
+ inode = ERR_PTR(rc);
goto bail;
}
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index ad16995c9e7a..d2053853951e 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -7254,10 +7254,11 @@ static int ocfs2_xattr_security_get(const struct xattr_handler *handler,
}
static int ocfs2_xattr_security_set(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
- return ocfs2_xattr_set(d_inode(dentry), OCFS2_XATTR_INDEX_SECURITY,
+ return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_SECURITY,
name, value, size, flags);
}
@@ -7325,10 +7326,11 @@ static int ocfs2_xattr_trusted_get(const struct xattr_handler *handler,
}
static int ocfs2_xattr_trusted_set(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
- return ocfs2_xattr_set(d_inode(dentry), OCFS2_XATTR_INDEX_TRUSTED,
+ return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_TRUSTED,
name, value, size, flags);
}
@@ -7354,15 +7356,16 @@ static int ocfs2_xattr_user_get(const struct xattr_handler *handler,
}
static int ocfs2_xattr_user_set(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
- struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR)
return -EOPNOTSUPP;
- return ocfs2_xattr_set(d_inode(dentry), OCFS2_XATTR_INDEX_USER,
+ return ocfs2_xattr_set(inode, OCFS2_XATTR_INDEX_USER,
name, value, size, flags);
}
diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c
index 99c19545752c..5893ddde0e4b 100644
--- a/fs/orangefs/xattr.c
+++ b/fs/orangefs/xattr.c
@@ -448,13 +448,14 @@ out_unlock:
}
static int orangefs_xattr_set_default(const struct xattr_handler *handler,
- struct dentry *dentry,
+ struct dentry *unused,
+ struct inode *inode,
const char *name,
const void *buffer,
size_t size,
int flags)
{
- return orangefs_inode_setxattr(dentry->d_inode,
+ return orangefs_inode_setxattr(inode,
ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
name,
buffer,
@@ -478,13 +479,14 @@ static int orangefs_xattr_get_default(const struct xattr_handler *handler,
}
static int orangefs_xattr_set_trusted(const struct xattr_handler *handler,
- struct dentry *dentry,
+ struct dentry *unused,
+ struct inode *inode,
const char *name,
const void *buffer,
size_t size,
int flags)
{
- return orangefs_inode_setxattr(dentry->d_inode,
+ return orangefs_inode_setxattr(inode,
ORANGEFS_XATTR_NAME_TRUSTED_PREFIX,
name,
buffer,
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index cc514da6f3e7..80aa6f1eb336 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -336,7 +336,6 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
struct dentry *upperdir;
struct dentry *upperdentry;
const struct cred *old_cred;
- struct cred *override_cred;
char *link = NULL;
if (WARN_ON(!workdir))
@@ -357,28 +356,7 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
return PTR_ERR(link);
}
- err = -ENOMEM;
- override_cred = prepare_creds();
- if (!override_cred)
- goto out_free_link;
-
- override_cred->fsuid = stat->uid;
- override_cred->fsgid = stat->gid;
- /*
- * CAP_SYS_ADMIN for copying up extended attributes
- * CAP_DAC_OVERRIDE for create
- * CAP_FOWNER for chmod, timestamp update
- * CAP_FSETID for chmod
- * CAP_CHOWN for chown
- * CAP_MKNOD for mknod
- */
- cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
- cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
- cap_raise(override_cred->cap_effective, CAP_FOWNER);
- cap_raise(override_cred->cap_effective, CAP_FSETID);
- cap_raise(override_cred->cap_effective, CAP_CHOWN);
- cap_raise(override_cred->cap_effective, CAP_MKNOD);
- old_cred = override_creds(override_cred);
+ old_cred = ovl_override_creds(dentry->d_sb);
err = -EIO;
if (lock_rename(workdir, upperdir) != NULL) {
@@ -401,9 +379,7 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
out_unlock:
unlock_rename(workdir, upperdir);
revert_creds(old_cred);
- put_cred(override_cred);
-out_free_link:
if (link)
free_page((unsigned long) link);
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index b3fc0a35bf62..22f0253a3567 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -405,28 +405,13 @@ static int ovl_create_or_link(struct dentry *dentry, int mode, dev_t rdev,
err = ovl_create_upper(dentry, inode, &stat, link, hardlink);
} else {
const struct cred *old_cred;
- struct cred *override_cred;
- err = -ENOMEM;
- override_cred = prepare_creds();
- if (!override_cred)
- goto out_iput;
-
- /*
- * CAP_SYS_ADMIN for setting opaque xattr
- * CAP_DAC_OVERRIDE for create in workdir, rename
- * CAP_FOWNER for removing whiteout from sticky dir
- */
- cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
- cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
- cap_raise(override_cred->cap_effective, CAP_FOWNER);
- old_cred = override_creds(override_cred);
+ old_cred = ovl_override_creds(dentry->d_sb);
err = ovl_create_over_whiteout(dentry, inode, &stat, link,
hardlink);
revert_creds(old_cred);
- put_cred(override_cred);
}
if (!err)
@@ -662,32 +647,11 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
if (OVL_TYPE_PURE_UPPER(type)) {
err = ovl_remove_upper(dentry, is_dir);
} else {
- const struct cred *old_cred;
- struct cred *override_cred;
-
- err = -ENOMEM;
- override_cred = prepare_creds();
- if (!override_cred)
- goto out_drop_write;
-
- /*
- * CAP_SYS_ADMIN for setting xattr on whiteout, opaque dir
- * CAP_DAC_OVERRIDE for create in workdir, rename
- * CAP_FOWNER for removing whiteout from sticky dir
- * CAP_FSETID for chmod of opaque dir
- * CAP_CHOWN for chown of opaque dir
- */
- cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
- cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
- cap_raise(override_cred->cap_effective, CAP_FOWNER);
- cap_raise(override_cred->cap_effective, CAP_FSETID);
- cap_raise(override_cred->cap_effective, CAP_CHOWN);
- old_cred = override_creds(override_cred);
+ const struct cred *old_cred = ovl_override_creds(dentry->d_sb);
err = ovl_remove_and_whiteout(dentry, is_dir);
revert_creds(old_cred);
- put_cred(override_cred);
}
out_drop_write:
ovl_drop_write(dentry);
@@ -725,7 +689,6 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
bool new_is_dir = false;
struct dentry *opaquedir = NULL;
const struct cred *old_cred = NULL;
- struct cred *override_cred = NULL;
err = -EINVAL;
if (flags & ~(RENAME_EXCHANGE | RENAME_NOREPLACE))
@@ -794,26 +757,8 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
old_opaque = !OVL_TYPE_PURE_UPPER(old_type);
new_opaque = !OVL_TYPE_PURE_UPPER(new_type);
- if (old_opaque || new_opaque) {
- err = -ENOMEM;
- override_cred = prepare_creds();
- if (!override_cred)
- goto out_drop_write;
-
- /*
- * CAP_SYS_ADMIN for setting xattr on whiteout, opaque dir
- * CAP_DAC_OVERRIDE for create in workdir
- * CAP_FOWNER for removing whiteout from sticky dir
- * CAP_FSETID for chmod of opaque dir
- * CAP_CHOWN for chown of opaque dir
- */
- cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
- cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
- cap_raise(override_cred->cap_effective, CAP_FOWNER);
- cap_raise(override_cred->cap_effective, CAP_FSETID);
- cap_raise(override_cred->cap_effective, CAP_CHOWN);
- old_cred = override_creds(override_cred);
- }
+ if (old_opaque || new_opaque)
+ old_cred = ovl_override_creds(old->d_sb);
if (overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) {
opaquedir = ovl_check_empty_and_clear(new);
@@ -943,10 +888,8 @@ out_dput_old:
out_unlock:
unlock_rename(new_upperdir, old_upperdir);
out_revert_creds:
- if (old_opaque || new_opaque) {
+ if (old_opaque || new_opaque)
revert_creds(old_cred);
- put_cred(override_cred);
- }
out_drop_write:
ovl_drop_write(old);
out:
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index c7b31a03dc9c..0ed7c4012437 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -210,8 +210,9 @@ static bool ovl_is_private_xattr(const char *name)
return strncmp(name, OVL_XATTR_PRE_NAME, OVL_XATTR_PRE_LEN) == 0;
}
-int ovl_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+int ovl_setxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
int err;
struct dentry *upperdentry;
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 99ec4b035237..4bd9b5ba8f42 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -153,6 +153,7 @@ void ovl_drop_write(struct dentry *dentry);
bool ovl_dentry_is_opaque(struct dentry *dentry);
void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque);
bool ovl_is_whiteout(struct dentry *dentry);
+const struct cred *ovl_override_creds(struct super_block *sb);
void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry);
struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags);
@@ -171,8 +172,9 @@ int ovl_check_d_type_supported(struct path *realpath);
/* inode.c */
int ovl_setattr(struct dentry *dentry, struct iattr *attr);
int ovl_permission(struct inode *inode, int mask);
-int ovl_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags);
+int ovl_setxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags);
ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode,
const char *name, void *value, size_t size);
ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index da186ee4f846..cf37fc76fc9f 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -36,6 +36,7 @@ struct ovl_dir_cache {
struct ovl_readdir_data {
struct dir_context ctx;
+ struct dentry *dentry;
bool is_lowest;
struct rb_root root;
struct list_head *list;
@@ -206,21 +207,10 @@ static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
struct ovl_cache_entry *p;
struct dentry *dentry;
const struct cred *old_cred;
- struct cred *override_cred;
-
- override_cred = prepare_creds();
- if (!override_cred)
- return -ENOMEM;
- /*
- * CAP_DAC_OVERRIDE for lookup
- */
- cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
- old_cred = override_creds(override_cred);
+ old_cred = ovl_override_creds(rdd->dentry->d_sb);
- inode_lock(dir->d_inode);
- err = 0;
- // XXX: err = mutex_lock_killable(&dir->d_inode->i_mutex);
+ err = down_write_killable(&dir->d_inode->i_rwsem);
if (!err) {
while (rdd->first_maybe_whiteout) {
p = rdd->first_maybe_whiteout;
@@ -234,7 +224,6 @@ static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
inode_unlock(dir->d_inode);
}
revert_creds(old_cred);
- put_cred(override_cred);
return err;
}
@@ -290,6 +279,7 @@ static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list)
struct path realpath;
struct ovl_readdir_data rdd = {
.ctx.actor = ovl_fill_merge,
+ .dentry = dentry,
.list = list,
.root = RB_ROOT,
.is_lowest = false,
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index ed53ae0fe868..ce02f46029da 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -42,6 +42,8 @@ struct ovl_fs {
long lower_namelen;
/* pathnames of lower and upper dirs, for show_options */
struct ovl_config config;
+ /* creds of the process that forced instantiation of this super block */
+ const struct cred *creator_cred;
};
struct ovl_dir_cache;
@@ -265,6 +267,13 @@ bool ovl_is_whiteout(struct dentry *dentry)
return inode && IS_WHITEOUT(inode);
}
+const struct cred *ovl_override_creds(struct super_block *sb)
+{
+ struct ovl_fs *ofs = sb->s_fs_info;
+
+ return override_creds(ofs->creator_cred);
+}
+
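Every call site converted earlier in this diff follows the same bracketing pattern; a minimal sketch, assuming a dentry that belongs to an overlay superblock:

/* Switch to the mounter's credentials, do the privileged upper-layer
 * work, then restore the caller's credentials. */
const struct cred *old_cred = ovl_override_creds(dentry->d_sb);
/* ... create/rename/whiteout work on the upper layer ... */
revert_creds(old_cred);

This replaces the per-operation prepare_creds()/cap_raise()/put_cred() sequences removed above with a single reference to the creds captured at mount time.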
static bool ovl_is_opaquedir(struct dentry *dentry)
{
int res;
@@ -603,6 +612,7 @@ static void ovl_put_super(struct super_block *sb)
kfree(ufs->config.lowerdir);
kfree(ufs->config.upperdir);
kfree(ufs->config.workdir);
+ put_cred(ufs->creator_cred);
kfree(ufs);
}
@@ -1064,16 +1074,19 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
/*
* Upper should support d_type, else whiteouts are visible.
* Given workdir and upper are on same fs, we can do
- * iterate_dir() on workdir.
+ * iterate_dir() on workdir. This check requires that workdir
+ * creation in the previous step succeeded.
*/
- err = ovl_check_d_type_supported(&workpath);
- if (err < 0)
- goto out_put_workdir;
+ if (ufs->workdir) {
+ err = ovl_check_d_type_supported(&workpath);
+ if (err < 0)
+ goto out_put_workdir;
- if (!err) {
- pr_err("overlayfs: upper fs needs to support d_type.\n");
- err = -EINVAL;
- goto out_put_workdir;
+ if (!err) {
+ pr_err("overlayfs: upper fs needs to support d_type.\n");
+ err = -EINVAL;
+ goto out_put_workdir;
+ }
}
}
@@ -1108,10 +1121,14 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
else
sb->s_d_op = &ovl_dentry_operations;
+ ufs->creator_cred = prepare_creds();
+ if (!ufs->creator_cred)
+ goto out_put_lower_mnt;
+
err = -ENOMEM;
oe = ovl_alloc_entry(numlower);
if (!oe)
- goto out_put_lower_mnt;
+ goto out_put_cred;
root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, oe));
if (!root_dentry)
@@ -1144,6 +1161,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
out_free_oe:
kfree(oe);
+out_put_cred:
+ put_cred(ufs->creator_cred);
out_put_lower_mnt:
for (i = 0; i < ufs->numlower; i++)
mntput(ufs->lower_mnt[i]);
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 2c60f17e7d92..8a4a266beff3 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -822,10 +822,10 @@ posix_acl_xattr_get(const struct xattr_handler *handler,
static int
posix_acl_xattr_set(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
- struct inode *inode = d_backing_inode(dentry);
struct posix_acl *acl = NULL;
int ret;
diff --git a/fs/readdir.c b/fs/readdir.c
index 68ef06efe6bc..9d0212c374d6 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -35,13 +35,13 @@ int iterate_dir(struct file *file, struct dir_context *ctx)
if (res)
goto out;
- if (shared)
+ if (shared) {
inode_lock_shared(inode);
- else
- inode_lock(inode);
- // res = mutex_lock_killable(&inode->i_mutex);
- // if (res)
- // goto out;
+ } else {
+ res = down_write_killable(&inode->i_rwsem);
+ if (res)
+ goto out;
+ }
res = -ENOENT;
if (!IS_DEADDIR(inode)) {
diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c
index 86aeb9dd805a..e4cbb7719906 100644
--- a/fs/reiserfs/xattr_security.c
+++ b/fs/reiserfs/xattr_security.c
@@ -20,13 +20,14 @@ security_get(const struct xattr_handler *handler, struct dentry *unused,
}
static int
-security_set(const struct xattr_handler *handler, struct dentry *dentry,
- const char *name, const void *buffer, size_t size, int flags)
+security_set(const struct xattr_handler *handler, struct dentry *unused,
+ struct inode *inode, const char *name, const void *buffer,
+ size_t size, int flags)
{
- if (IS_PRIVATE(d_inode(dentry)))
+ if (IS_PRIVATE(inode))
return -EPERM;
- return reiserfs_xattr_set(d_inode(dentry),
+ return reiserfs_xattr_set(inode,
xattr_full_name(handler, name),
buffer, size, flags);
}
diff --git a/fs/reiserfs/xattr_trusted.c b/fs/reiserfs/xattr_trusted.c
index 31837f031f59..f15a5f9e84ce 100644
--- a/fs/reiserfs/xattr_trusted.c
+++ b/fs/reiserfs/xattr_trusted.c
@@ -19,13 +19,14 @@ trusted_get(const struct xattr_handler *handler, struct dentry *unused,
}
static int
-trusted_set(const struct xattr_handler *handler, struct dentry *dentry,
- const char *name, const void *buffer, size_t size, int flags)
+trusted_set(const struct xattr_handler *handler, struct dentry *unused,
+ struct inode *inode, const char *name, const void *buffer,
+ size_t size, int flags)
{
- if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(d_inode(dentry)))
+ if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(inode))
return -EPERM;
- return reiserfs_xattr_set(d_inode(dentry),
+ return reiserfs_xattr_set(inode,
xattr_full_name(handler, name),
buffer, size, flags);
}
diff --git a/fs/reiserfs/xattr_user.c b/fs/reiserfs/xattr_user.c
index f7c39731684b..dc59df43b2db 100644
--- a/fs/reiserfs/xattr_user.c
+++ b/fs/reiserfs/xattr_user.c
@@ -17,12 +17,13 @@ user_get(const struct xattr_handler *handler, struct dentry *unused,
}
static int
-user_set(const struct xattr_handler *handler, struct dentry *dentry,
- const char *name, const void *buffer, size_t size, int flags)
+user_set(const struct xattr_handler *handler, struct dentry *unused,
+ struct inode *inode, const char *name, const void *buffer,
+ size_t size, int flags)
{
- if (!reiserfs_xattrs_user(dentry->d_sb))
+ if (!reiserfs_xattrs_user(inode->i_sb))
return -EOPNOTSUPP;
- return reiserfs_xattr_set(d_inode(dentry),
+ return reiserfs_xattr_set(inode,
xattr_full_name(handler, name),
buffer, size, flags);
}
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 595ca0debe11..69e287e20732 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -260,7 +260,7 @@ void ubifs_dump_inode(struct ubifs_info *c, const struct inode *inode)
pr_err("\txattr_names %u\n", ui->xattr_names);
pr_err("\tdirty %u\n", ui->dirty);
pr_err("\txattr %u\n", ui->xattr);
- pr_err("\tbulk_read %u\n", ui->xattr);
+ pr_err("\tbulk_read %u\n", ui->bulk_read);
pr_err("\tsynced_i_size %llu\n",
(unsigned long long)ui->synced_i_size);
pr_err("\tui_size %llu\n",
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 6c277eb6aef9..b5fc27969e9d 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -579,11 +579,10 @@ static int ubifs_xattr_get(const struct xattr_handler *handler,
}
static int ubifs_xattr_set(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+ struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
- struct inode *inode = d_inode(dentry);
-
dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd",
name, inode->i_ino, dentry, size);
diff --git a/fs/xattr.c b/fs/xattr.c
index b11945e15fde..4beafc43daa5 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -100,7 +100,7 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name,
if (issec)
inode->i_flags &= ~S_NOSEC;
if (inode->i_op->setxattr) {
- error = inode->i_op->setxattr(dentry, name, value, size, flags);
+ error = inode->i_op->setxattr(dentry, inode, name, value, size, flags);
if (!error) {
fsnotify_xattr(dentry);
security_inode_post_setxattr(dentry, name, value,
@@ -655,6 +655,7 @@ strcmp_prefix(const char *a, const char *a_prefix)
* operations to the correct xattr_handler.
*/
#define for_each_xattr_handler(handlers, handler) \
+ if (handlers) \
for ((handler) = *(handlers)++; \
(handler) != NULL; \
(handler) = *(handlers)++)
@@ -668,7 +669,7 @@ xattr_resolve_name(const struct xattr_handler **handlers, const char **name)
const struct xattr_handler *handler;
if (!*name)
- return NULL;
+ return ERR_PTR(-EINVAL);
for_each_xattr_handler(handlers, handler) {
const char *n;
@@ -744,7 +745,8 @@ generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
* Find the handler for the prefix and dispatch its set() operation.
*/
int
-generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags)
+generic_setxattr(struct dentry *dentry, struct inode *inode, const char *name,
+ const void *value, size_t size, int flags)
{
const struct xattr_handler *handler;
@@ -753,7 +755,7 @@ generic_setxattr(struct dentry *dentry, const char *name, const void *value, siz
handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name);
if (IS_ERR(handler))
return PTR_ERR(handler);
- return handler->set(handler, dentry, name, value, size, flags);
+ return handler->set(handler, dentry, inode, name, value, size, flags);
}
/*
@@ -768,7 +770,8 @@ generic_removexattr(struct dentry *dentry, const char *name)
handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name);
if (IS_ERR(handler))
return PTR_ERR(handler);
- return handler->set(handler, dentry, name, NULL, 0, XATTR_REPLACE);
+ return handler->set(handler, dentry, d_inode(dentry), name, NULL,
+ 0, XATTR_REPLACE);
}
EXPORT_SYMBOL(generic_getxattr);
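Taken together, the hunks above move every xattr set() handler to an inode-explicit prototype. A sketch of a handler written against the new signature (the examplefs names are hypothetical, not from this patch):

/* Hypothetical handler for the new ->set() prototype: the inode is
 * passed explicitly, so d_inode(dentry) is no longer required. */
static int examplefs_xattr_set(const struct xattr_handler *handler,
                               struct dentry *unused, struct inode *inode,
                               const char *name, const void *value,
                               size_t size, int flags)
{
        if (IS_PRIVATE(inode))
                return -EPERM;
        return examplefs_setxattr(inode, name, value, size, flags);
}

generic_setxattr() and generic_removexattr() then dispatch to such a handler with both the dentry and its inode, as the hunks above show.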
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index 686ba6fb20dd..339c696bbc01 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -93,19 +93,23 @@ kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
}
void *
-kmem_realloc(const void *ptr, size_t newsize, size_t oldsize,
- xfs_km_flags_t flags)
+kmem_realloc(const void *old, size_t newsize, xfs_km_flags_t flags)
{
- void *new;
+ int retries = 0;
+ gfp_t lflags = kmem_flags_convert(flags);
+ void *ptr;
- new = kmem_alloc(newsize, flags);
- if (ptr) {
- if (new)
- memcpy(new, ptr,
- ((oldsize < newsize) ? oldsize : newsize));
- kmem_free(ptr);
- }
- return new;
+ do {
+ ptr = krealloc(old, newsize, lflags);
+ if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
+ return ptr;
+ if (!(++retries % 100))
+ xfs_err(NULL,
+ "%s(%u) possible memory allocation deadlock size %zu in %s (mode:0x%x)",
+ current->comm, current->pid,
+ newsize, __func__, lflags);
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ } while (1);
}
void *
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index d1c66e465ca5..689f746224e7 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -62,7 +62,7 @@ kmem_flags_convert(xfs_km_flags_t flags)
extern void *kmem_alloc(size_t, xfs_km_flags_t);
extern void *kmem_zalloc_large(size_t size, xfs_km_flags_t);
-extern void *kmem_realloc(const void *, size_t, size_t, xfs_km_flags_t);
+extern void *kmem_realloc(const void *, size_t, xfs_km_flags_t);
static inline void kmem_free(const void *ptr)
{
kvfree(ptr);
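A behavioral sketch of the new kmem_realloc() contract implied by the implementation above (illustrative only): without KM_MAYFAIL or KM_NOSLEEP the call loops internally until it succeeds, so only may-fail callers need a NULL check.

/* May-fail callers must still check the result ... */
new = kmem_realloc(old, newsize, KM_MAYFAIL);
if (!new)
        return -ENOMEM;

/* ... while default callers may rely on the internal retry loop. */
new = kmem_realloc(old, newsize, KM_SLEEP | KM_NOFS);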
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index fa3b948ef9c2..4e126f41a0aa 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -242,37 +242,21 @@ xfs_attr_set(
return error;
}
- /*
- * Start our first transaction of the day.
- *
- * All future transactions during this code must be "chained" off
- * this one via the trans_dup() call. All transactions will contain
- * the inode, and the inode will always be marked with trans_ihold().
- * Since the inode will be locked in all transactions, we must log
- * the inode in every transaction to let it float upward through
- * the log.
- */
- args.trans = xfs_trans_alloc(mp, XFS_TRANS_ATTR_SET);
+ tres.tr_logres = M_RES(mp)->tr_attrsetm.tr_logres +
+ M_RES(mp)->tr_attrsetrt.tr_logres * args.total;
+ tres.tr_logcount = XFS_ATTRSET_LOG_COUNT;
+ tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
/*
* Root fork attributes can use reserved data blocks for this
* operation if necessary
*/
-
- if (rsvd)
- args.trans->t_flags |= XFS_TRANS_RESERVE;
-
- tres.tr_logres = M_RES(mp)->tr_attrsetm.tr_logres +
- M_RES(mp)->tr_attrsetrt.tr_logres * args.total;
- tres.tr_logcount = XFS_ATTRSET_LOG_COUNT;
- tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
- error = xfs_trans_reserve(args.trans, &tres, args.total, 0);
- if (error) {
- xfs_trans_cancel(args.trans);
+ error = xfs_trans_alloc(mp, &tres, args.total, 0,
+ rsvd ? XFS_TRANS_RESERVE : 0, &args.trans);
+ if (error)
return error;
- }
- xfs_ilock(dp, XFS_ILOCK_EXCL);
+ xfs_ilock(dp, XFS_ILOCK_EXCL);
error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0,
rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
XFS_QMOPT_RES_REGBLKS);
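The same conversion repeats across the remaining XFS hunks; its before/after shape, as a sketch (XFS_TRANS_FOO, tr_foo, and blks are placeholders):

/* Before: allocate, reserve, and cancel-on-failure in three steps. */
tp = xfs_trans_alloc(mp, XFS_TRANS_FOO);
error = xfs_trans_reserve(tp, &M_RES(mp)->tr_foo, blks, 0);
if (error) {
        xfs_trans_cancel(tp);
        return error;
}

/* After: one call that allocates and reserves; no cancel needed. */
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_foo, blks, 0, 0, &tp);
if (error)
        return error;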
@@ -429,31 +413,15 @@ xfs_attr_remove(
return error;
/*
- * Start our first transaction of the day.
- *
- * All future transactions during this code must be "chained" off
- * this one via the trans_dup() call. All transactions will contain
- * the inode, and the inode will always be marked with trans_ihold().
- * Since the inode will be locked in all transactions, we must log
- * the inode in every transaction to let it float upward through
- * the log.
- */
- args.trans = xfs_trans_alloc(mp, XFS_TRANS_ATTR_RM);
-
- /*
* Root fork attributes can use reserved data blocks for this
* operation if necessary
*/
-
- if (flags & ATTR_ROOT)
- args.trans->t_flags |= XFS_TRANS_RESERVE;
-
- error = xfs_trans_reserve(args.trans, &M_RES(mp)->tr_attrrm,
- XFS_ATTRRM_SPACE_RES(mp), 0);
- if (error) {
- xfs_trans_cancel(args.trans);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_attrrm,
+ XFS_ATTRRM_SPACE_RES(mp), 0,
+ (flags & ATTR_ROOT) ? XFS_TRANS_RESERVE : 0,
+ &args.trans);
+ if (error)
return error;
- }
xfs_ilock(dp, XFS_ILOCK_EXCL);
/*
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index ce41d7fe753c..932381caef1b 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1121,15 +1121,14 @@ xfs_bmap_add_attrfork(
mp = ip->i_mount;
ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
- tp = xfs_trans_alloc(mp, XFS_TRANS_ADDAFORK);
+
blks = XFS_ADDAFORK_SPACE_RES(mp);
- if (rsvd)
- tp->t_flags |= XFS_TRANS_RESERVE;
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_addafork, blks, 0);
- if (error) {
- xfs_trans_cancel(tp);
+
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
+ rsvd ? XFS_TRANS_RESERVE : 0, &tp);
+ if (error)
return error;
- }
+
xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
@@ -6026,13 +6025,10 @@ xfs_bmap_split_extent(
xfs_fsblock_t firstfsb;
int error;
- tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
- XFS_DIOSTRAT_SPACE_RES(mp, 0), 0);
- if (error) {
- xfs_trans_cancel(tp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
+ XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
+ if (error)
return error;
- }
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index 974d62e677f4..e5bb9cc3b243 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -257,15 +257,12 @@ xfs_dir2_block_to_sf(
*
* Convert the inode to local format and copy the data in.
*/
- dp->i_df.if_flags &= ~XFS_IFEXTENTS;
- dp->i_df.if_flags |= XFS_IFINLINE;
- dp->i_d.di_format = XFS_DINODE_FMT_LOCAL;
ASSERT(dp->i_df.if_bytes == 0);
- xfs_idata_realloc(dp, size, XFS_DATA_FORK);
+ xfs_init_local_fork(dp, XFS_DATA_FORK, dst, size);
+ dp->i_d.di_format = XFS_DINODE_FMT_LOCAL;
+ dp->i_d.di_size = size;
logflags |= XFS_ILOG_DDATA;
- memcpy(dp->i_df.if_u1.if_data, dst, size);
- dp->i_d.di_size = size;
xfs_dir2_sf_check(args);
out:
xfs_trans_log_inode(args->trans, dp, logflags);
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 11faf7df14c8..bbcc8c7a44b3 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -231,6 +231,48 @@ xfs_iformat_fork(
return error;
}
+void
+xfs_init_local_fork(
+ struct xfs_inode *ip,
+ int whichfork,
+ const void *data,
+ int size)
+{
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ int mem_size = size, real_size = 0;
+ bool zero_terminate;
+
+ /*
+ * If we are using the local fork to store a symlink body we need to
+ * zero-terminate it so that we can pass it back to the VFS directly.
+ * Overallocate the in-memory fork by one for that and add a zero
+ * to terminate it below.
+ */
+ zero_terminate = S_ISLNK(VFS_I(ip)->i_mode);
+ if (zero_terminate)
+ mem_size++;
+
+ if (size == 0)
+ ifp->if_u1.if_data = NULL;
+ else if (mem_size <= sizeof(ifp->if_u2.if_inline_data))
+ ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
+ else {
+ real_size = roundup(mem_size, 4);
+ ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
+ }
+
+ if (size) {
+ memcpy(ifp->if_u1.if_data, data, size);
+ if (zero_terminate)
+ ifp->if_u1.if_data[size] = '\0';
+ }
+
+ ifp->if_bytes = size;
+ ifp->if_real_bytes = real_size;
+ ifp->if_flags &= ~(XFS_IFEXTENTS | XFS_IFBROOT);
+ ifp->if_flags |= XFS_IFINLINE;
+}
+
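A short usage sketch of the helper added above, mirroring the xfs_dir2_block_to_sf() conversion earlier in this diff (arguments are illustrative):

/* Initialize the data fork in local format from an in-memory buffer;
 * for symlink inodes the helper NUL-terminates the copy. */
xfs_init_local_fork(dp, XFS_DATA_FORK, dst, size);
dp->i_d.di_format = XFS_DINODE_FMT_LOCAL;
dp->i_d.di_size = size;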
/*
* The file is in-lined in the on-disk inode.
* If it fits into if_inline_data, then copy
@@ -248,8 +290,6 @@ xfs_iformat_local(
int whichfork,
int size)
{
- xfs_ifork_t *ifp;
- int real_size;
/*
* If the size is unreasonable, then something
@@ -265,22 +305,8 @@ xfs_iformat_local(
ip->i_mount, dip);
return -EFSCORRUPTED;
}
- ifp = XFS_IFORK_PTR(ip, whichfork);
- real_size = 0;
- if (size == 0)
- ifp->if_u1.if_data = NULL;
- else if (size <= sizeof(ifp->if_u2.if_inline_data))
- ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
- else {
- real_size = roundup(size, 4);
- ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
- }
- ifp->if_bytes = size;
- ifp->if_real_bytes = real_size;
- if (size)
- memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
- ifp->if_flags &= ~XFS_IFEXTENTS;
- ifp->if_flags |= XFS_IFINLINE;
+
+ xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size);
return 0;
}
@@ -516,7 +542,6 @@ xfs_iroot_realloc(
new_max = cur_max + rec_diff;
new_size = XFS_BMAP_BROOT_SPACE_CALC(mp, new_max);
ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
- XFS_BMAP_BROOT_SPACE_CALC(mp, cur_max),
KM_SLEEP | KM_NOFS);
op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
ifp->if_broot_bytes);
@@ -660,7 +685,6 @@ xfs_idata_realloc(
ifp->if_u1.if_data =
kmem_realloc(ifp->if_u1.if_data,
real_size,
- ifp->if_real_bytes,
KM_SLEEP | KM_NOFS);
}
} else {
@@ -1376,8 +1400,7 @@ xfs_iext_realloc_direct(
if (rnew_size != ifp->if_real_bytes) {
ifp->if_u1.if_extents =
kmem_realloc(ifp->if_u1.if_extents,
- rnew_size,
- ifp->if_real_bytes, KM_NOFS);
+ rnew_size, KM_NOFS);
}
if (rnew_size > ifp->if_real_bytes) {
memset(&ifp->if_u1.if_extents[ifp->if_bytes /
@@ -1461,9 +1484,8 @@ xfs_iext_realloc_indirect(
if (new_size == 0) {
xfs_iext_destroy(ifp);
} else {
- ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
- kmem_realloc(ifp->if_u1.if_ext_irec,
- new_size, size, KM_NOFS);
+ ifp->if_u1.if_ext_irec =
+ kmem_realloc(ifp->if_u1.if_ext_irec, new_size, KM_NOFS);
}
}
@@ -1497,6 +1519,24 @@ xfs_iext_indirect_to_direct(
}
/*
+ * Remove all records from the indirection array.
+ */
+STATIC void
+xfs_iext_irec_remove_all(
+ struct xfs_ifork *ifp)
+{
+ int nlists;
+ int i;
+
+ ASSERT(ifp->if_flags & XFS_IFEXTIREC);
+ nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
+ for (i = 0; i < nlists; i++)
+ kmem_free(ifp->if_u1.if_ext_irec[i].er_extbuf);
+ kmem_free(ifp->if_u1.if_ext_irec);
+ ifp->if_flags &= ~XFS_IFEXTIREC;
+}
+
+/*
* Free incore file extents.
*/
void
@@ -1504,14 +1544,7 @@ xfs_iext_destroy(
xfs_ifork_t *ifp) /* inode fork pointer */
{
if (ifp->if_flags & XFS_IFEXTIREC) {
- int erp_idx;
- int nlists;
-
- nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
- for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
- xfs_iext_irec_remove(ifp, erp_idx);
- }
- ifp->if_flags &= ~XFS_IFEXTIREC;
+ xfs_iext_irec_remove_all(ifp);
} else if (ifp->if_real_bytes) {
kmem_free(ifp->if_u1.if_extents);
} else if (ifp->if_bytes) {
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index 7d3b1ed6dcbe..f95e072ae646 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -134,6 +134,7 @@ void xfs_iroot_realloc(struct xfs_inode *, int, int);
int xfs_iread_extents(struct xfs_trans *, struct xfs_inode *, int);
int xfs_iextents_copy(struct xfs_inode *, struct xfs_bmbt_rec *,
int);
+void xfs_init_local_fork(struct xfs_inode *, int, const void *, int);
struct xfs_bmbt_rec_host *
xfs_iext_get_ext(struct xfs_ifork *, xfs_extnum_t);
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index d54a8018b079..e8f49c029ff0 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -212,6 +212,11 @@ typedef struct xfs_trans_header {
#define XFS_TRANS_HEADER_MAGIC 0x5452414e /* TRAN */
/*
+ * The only type valid for th_type in CIL-enabled file system logs:
+ */
+#define XFS_TRANS_CHECKPOINT 40
+
+/*
* Log item types.
*/
#define XFS_LI_EFI 0x1236
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 8a53eaa349f4..12ca86778e02 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -838,12 +838,10 @@ xfs_sync_sb(
struct xfs_trans *tp;
int error;
- tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_CHANGE, KM_SLEEP);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_sb, 0, 0);
- if (error) {
- xfs_trans_cancel(tp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_sb, 0, 0,
+ XFS_TRANS_NO_WRITECOUNT, &tp);
+ if (error)
return error;
- }
xfs_log_sb(tp);
if (wait)
diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
index 81ac870834da..16002b5ec4eb 100644
--- a/fs/xfs/libxfs/xfs_shared.h
+++ b/fs/xfs/libxfs/xfs_shared.h
@@ -56,103 +56,6 @@ extern const struct xfs_buf_ops xfs_symlink_buf_ops;
extern const struct xfs_buf_ops xfs_rtbuf_ops;
/*
- * Transaction types. Used to distinguish types of buffers. These never reach
- * the log.
- */
-#define XFS_TRANS_SETATTR_NOT_SIZE 1
-#define XFS_TRANS_SETATTR_SIZE 2
-#define XFS_TRANS_INACTIVE 3
-#define XFS_TRANS_CREATE 4
-#define XFS_TRANS_CREATE_TRUNC 5
-#define XFS_TRANS_TRUNCATE_FILE 6
-#define XFS_TRANS_REMOVE 7
-#define XFS_TRANS_LINK 8
-#define XFS_TRANS_RENAME 9
-#define XFS_TRANS_MKDIR 10
-#define XFS_TRANS_RMDIR 11
-#define XFS_TRANS_SYMLINK 12
-#define XFS_TRANS_SET_DMATTRS 13
-#define XFS_TRANS_GROWFS 14
-#define XFS_TRANS_STRAT_WRITE 15
-#define XFS_TRANS_DIOSTRAT 16
-/* 17 was XFS_TRANS_WRITE_SYNC */
-#define XFS_TRANS_WRITEID 18
-#define XFS_TRANS_ADDAFORK 19
-#define XFS_TRANS_ATTRINVAL 20
-#define XFS_TRANS_ATRUNCATE 21
-#define XFS_TRANS_ATTR_SET 22
-#define XFS_TRANS_ATTR_RM 23
-#define XFS_TRANS_ATTR_FLAG 24
-#define XFS_TRANS_CLEAR_AGI_BUCKET 25
-#define XFS_TRANS_SB_CHANGE 26
-/*
- * Dummy entries since we use the transaction type to index into the
- * trans_type[] in xlog_recover_print_trans_head()
- */
-#define XFS_TRANS_DUMMY1 27
-#define XFS_TRANS_DUMMY2 28
-#define XFS_TRANS_QM_QUOTAOFF 29
-#define XFS_TRANS_QM_DQALLOC 30
-#define XFS_TRANS_QM_SETQLIM 31
-#define XFS_TRANS_QM_DQCLUSTER 32
-#define XFS_TRANS_QM_QINOCREATE 33
-#define XFS_TRANS_QM_QUOTAOFF_END 34
-#define XFS_TRANS_FSYNC_TS 35
-#define XFS_TRANS_GROWFSRT_ALLOC 36
-#define XFS_TRANS_GROWFSRT_ZERO 37
-#define XFS_TRANS_GROWFSRT_FREE 38
-#define XFS_TRANS_SWAPEXT 39
-#define XFS_TRANS_CHECKPOINT 40
-#define XFS_TRANS_ICREATE 41
-#define XFS_TRANS_CREATE_TMPFILE 42
-#define XFS_TRANS_TYPE_MAX 43
-/* new transaction types need to be reflected in xfs_logprint(8) */
-
-#define XFS_TRANS_TYPES \
- { XFS_TRANS_SETATTR_NOT_SIZE, "SETATTR_NOT_SIZE" }, \
- { XFS_TRANS_SETATTR_SIZE, "SETATTR_SIZE" }, \
- { XFS_TRANS_INACTIVE, "INACTIVE" }, \
- { XFS_TRANS_CREATE, "CREATE" }, \
- { XFS_TRANS_CREATE_TRUNC, "CREATE_TRUNC" }, \
- { XFS_TRANS_TRUNCATE_FILE, "TRUNCATE_FILE" }, \
- { XFS_TRANS_REMOVE, "REMOVE" }, \
- { XFS_TRANS_LINK, "LINK" }, \
- { XFS_TRANS_RENAME, "RENAME" }, \
- { XFS_TRANS_MKDIR, "MKDIR" }, \
- { XFS_TRANS_RMDIR, "RMDIR" }, \
- { XFS_TRANS_SYMLINK, "SYMLINK" }, \
- { XFS_TRANS_SET_DMATTRS, "SET_DMATTRS" }, \
- { XFS_TRANS_GROWFS, "GROWFS" }, \
- { XFS_TRANS_STRAT_WRITE, "STRAT_WRITE" }, \
- { XFS_TRANS_DIOSTRAT, "DIOSTRAT" }, \
- { XFS_TRANS_WRITEID, "WRITEID" }, \
- { XFS_TRANS_ADDAFORK, "ADDAFORK" }, \
- { XFS_TRANS_ATTRINVAL, "ATTRINVAL" }, \
- { XFS_TRANS_ATRUNCATE, "ATRUNCATE" }, \
- { XFS_TRANS_ATTR_SET, "ATTR_SET" }, \
- { XFS_TRANS_ATTR_RM, "ATTR_RM" }, \
- { XFS_TRANS_ATTR_FLAG, "ATTR_FLAG" }, \
- { XFS_TRANS_CLEAR_AGI_BUCKET, "CLEAR_AGI_BUCKET" }, \
- { XFS_TRANS_SB_CHANGE, "SBCHANGE" }, \
- { XFS_TRANS_DUMMY1, "DUMMY1" }, \
- { XFS_TRANS_DUMMY2, "DUMMY2" }, \
- { XFS_TRANS_QM_QUOTAOFF, "QM_QUOTAOFF" }, \
- { XFS_TRANS_QM_DQALLOC, "QM_DQALLOC" }, \
- { XFS_TRANS_QM_SETQLIM, "QM_SETQLIM" }, \
- { XFS_TRANS_QM_DQCLUSTER, "QM_DQCLUSTER" }, \
- { XFS_TRANS_QM_QINOCREATE, "QM_QINOCREATE" }, \
- { XFS_TRANS_QM_QUOTAOFF_END, "QM_QOFF_END" }, \
- { XFS_TRANS_FSYNC_TS, "FSYNC_TS" }, \
- { XFS_TRANS_GROWFSRT_ALLOC, "GROWFSRT_ALLOC" }, \
- { XFS_TRANS_GROWFSRT_ZERO, "GROWFSRT_ZERO" }, \
- { XFS_TRANS_GROWFSRT_FREE, "GROWFSRT_FREE" }, \
- { XFS_TRANS_SWAPEXT, "SWAPEXT" }, \
- { XFS_TRANS_CHECKPOINT, "CHECKPOINT" }, \
- { XFS_TRANS_ICREATE, "ICREATE" }, \
- { XFS_TRANS_CREATE_TMPFILE, "CREATE_TMPFILE" }, \
- { XLOG_UNMOUNT_REC_TYPE, "UNMOUNT" }
-
-/*
* This structure is used to track log items associated with
* a transaction. It points to the log item and keeps some
* flags to track the state of the log item. It also tracks
@@ -181,8 +84,9 @@ int xfs_log_calc_minimum_size(struct xfs_mount *);
#define XFS_TRANS_SYNC 0x08 /* make commit synchronous */
#define XFS_TRANS_DQ_DIRTY 0x10 /* at least one dquot in trx dirty */
#define XFS_TRANS_RESERVE 0x20 /* OK to use reserved data blocks */
-#define XFS_TRANS_FREEZE_PROT 0x40 /* Transaction has elevated writer
- count in superblock */
+#define XFS_TRANS_NO_WRITECOUNT 0x40 /* do not elevate SB writecount */
+#define XFS_TRANS_NOFS 0x80 /* pass KM_NOFS to kmem_alloc */
+
/*
* Field values for xfs_trans_mod_sb.
*/
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index c535887c60a8..4c463b99fe57 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -84,23 +84,71 @@ xfs_find_bdev_for_inode(
}
/*
- * We're now finished for good with this ioend structure.
- * Update the page state via the associated buffer_heads,
- * release holds on the inode and bio, and finally free
- * up memory. Do not use the ioend after this.
+ * We're now finished for good with this page. Update the page state via the
+ * associated buffer_heads, paying attention to the start and end offsets that
+ * we need to process on the page.
+ */
+static void
+xfs_finish_page_writeback(
+ struct inode *inode,
+ struct bio_vec *bvec,
+ int error)
+{
+ unsigned int end = bvec->bv_offset + bvec->bv_len - 1;
+ struct buffer_head *head, *bh;
+ unsigned int off = 0;
+
+ ASSERT(bvec->bv_offset < PAGE_SIZE);
+ ASSERT((bvec->bv_offset & ((1 << inode->i_blkbits) - 1)) == 0);
+ ASSERT(end < PAGE_SIZE);
+ ASSERT((bvec->bv_len & ((1 << inode->i_blkbits) - 1)) == 0);
+
+ bh = head = page_buffers(bvec->bv_page);
+
+ do {
+ if (off < bvec->bv_offset)
+ goto next_bh;
+ if (off > end)
+ break;
+ bh->b_end_io(bh, !error);
+next_bh:
+ off += bh->b_size;
+ } while ((bh = bh->b_this_page) != head);
+}
+
+/*
+ * We're now finished for good with this ioend structure. Update the page
+ * state, release holds on bios, and finally free up memory. Do not use the
+ * ioend after this.
*/
STATIC void
xfs_destroy_ioend(
- xfs_ioend_t *ioend)
+ struct xfs_ioend *ioend,
+ int error)
{
- struct buffer_head *bh, *next;
+ struct inode *inode = ioend->io_inode;
+ struct bio *last = ioend->io_bio;
+ struct bio *bio, *next;
- for (bh = ioend->io_buffer_head; bh; bh = next) {
- next = bh->b_private;
- bh->b_end_io(bh, !ioend->io_error);
- }
+ for (bio = &ioend->io_inline_bio; bio; bio = next) {
+ struct bio_vec *bvec;
+ int i;
+
+ /*
+ * For the last bio, bi_private points to the ioend, so we
+ * need to explicitly end the iteration here.
+ */
+ if (bio == last)
+ next = NULL;
+ else
+ next = bio->bi_private;
- mempool_free(ioend, xfs_ioend_pool);
+ /* walk each page on bio, ending page IO on them */
+ bio_for_each_segment_all(bvec, bio, i)
+ xfs_finish_page_writeback(inode, bvec, error);
+
+ bio_put(bio);
+ }
}
/*
@@ -120,13 +168,9 @@ xfs_setfilesize_trans_alloc(
struct xfs_trans *tp;
int error;
- tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
-
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
- if (error) {
- xfs_trans_cancel(tp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
+ if (error)
return error;
- }
ioend->io_append_trans = tp;
@@ -174,7 +218,8 @@ xfs_setfilesize(
STATIC int
xfs_setfilesize_ioend(
- struct xfs_ioend *ioend)
+ struct xfs_ioend *ioend,
+ int error)
{
struct xfs_inode *ip = XFS_I(ioend->io_inode);
struct xfs_trans *tp = ioend->io_append_trans;
@@ -188,53 +233,32 @@ xfs_setfilesize_ioend(
__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);
/* we abort the update if there was an IO error */
- if (ioend->io_error) {
+ if (error) {
xfs_trans_cancel(tp);
- return ioend->io_error;
+ return error;
}
return xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}
/*
- * Schedule IO completion handling on the final put of an ioend.
- *
- * If there is no work to do we might as well call it a day and free the
- * ioend right now.
- */
-STATIC void
-xfs_finish_ioend(
- struct xfs_ioend *ioend)
-{
- if (atomic_dec_and_test(&ioend->io_remaining)) {
- struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
-
- if (ioend->io_type == XFS_IO_UNWRITTEN)
- queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
- else if (ioend->io_append_trans)
- queue_work(mp->m_data_workqueue, &ioend->io_work);
- else
- xfs_destroy_ioend(ioend);
- }
-}
-
-/*
* IO write completion.
*/
STATIC void
xfs_end_io(
struct work_struct *work)
{
- xfs_ioend_t *ioend = container_of(work, xfs_ioend_t, io_work);
- struct xfs_inode *ip = XFS_I(ioend->io_inode);
- int error = 0;
+ struct xfs_ioend *ioend =
+ container_of(work, struct xfs_ioend, io_work);
+ struct xfs_inode *ip = XFS_I(ioend->io_inode);
+ int error = ioend->io_bio->bi_error;
/*
* Set an error if the mount has shut down and proceed with end I/O
* processing so it can perform whatever cleanups are necessary.
*/
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
- ioend->io_error = -EIO;
+ error = -EIO;
/*
* For unwritten extents we need to issue transactions to convert a
@@ -244,55 +268,33 @@ xfs_end_io(
* on error.
*/
if (ioend->io_type == XFS_IO_UNWRITTEN) {
- if (ioend->io_error)
+ if (error)
goto done;
error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
ioend->io_size);
} else if (ioend->io_append_trans) {
- error = xfs_setfilesize_ioend(ioend);
+ error = xfs_setfilesize_ioend(ioend, error);
} else {
ASSERT(!xfs_ioend_is_append(ioend));
}
done:
- if (error)
- ioend->io_error = error;
- xfs_destroy_ioend(ioend);
+ xfs_destroy_ioend(ioend, error);
}
-/*
- * Allocate and initialise an IO completion structure.
- * We need to track unwritten extent write completion here initially.
- * We'll need to extend this for updating the ondisk inode size later
- * (vs. incore size).
- */
-STATIC xfs_ioend_t *
-xfs_alloc_ioend(
- struct inode *inode,
- unsigned int type)
+STATIC void
+xfs_end_bio(
+ struct bio *bio)
{
- xfs_ioend_t *ioend;
-
- ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
-
- /*
- * Set the count to 1 initially, which will prevent an I/O
- * completion callback from happening before we have started
- * all the I/O from calling the completion routine too early.
- */
- atomic_set(&ioend->io_remaining, 1);
- ioend->io_error = 0;
- INIT_LIST_HEAD(&ioend->io_list);
- ioend->io_type = type;
- ioend->io_inode = inode;
- ioend->io_buffer_head = NULL;
- ioend->io_buffer_tail = NULL;
- ioend->io_offset = 0;
- ioend->io_size = 0;
- ioend->io_append_trans = NULL;
+ struct xfs_ioend *ioend = bio->bi_private;
+ struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
- INIT_WORK(&ioend->io_work, xfs_end_io);
- return ioend;
+ if (ioend->io_type == XFS_IO_UNWRITTEN)
+ queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
+ else if (ioend->io_append_trans)
+ queue_work(mp->m_data_workqueue, &ioend->io_work);
+ else
+ xfs_destroy_ioend(ioend, bio->bi_error);
}
STATIC int
@@ -364,50 +366,6 @@ xfs_imap_valid(
offset < imap->br_startoff + imap->br_blockcount;
}
-/*
- * BIO completion handler for buffered IO.
- */
-STATIC void
-xfs_end_bio(
- struct bio *bio)
-{
- xfs_ioend_t *ioend = bio->bi_private;
-
- if (!ioend->io_error)
- ioend->io_error = bio->bi_error;
-
- /* Toss bio and pass work off to an xfsdatad thread */
- bio->bi_private = NULL;
- bio->bi_end_io = NULL;
- bio_put(bio);
-
- xfs_finish_ioend(ioend);
-}
-
-STATIC void
-xfs_submit_ioend_bio(
- struct writeback_control *wbc,
- xfs_ioend_t *ioend,
- struct bio *bio)
-{
- atomic_inc(&ioend->io_remaining);
- bio->bi_private = ioend;
- bio->bi_end_io = xfs_end_bio;
- submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
-}
-
-STATIC struct bio *
-xfs_alloc_ioend_bio(
- struct buffer_head *bh)
-{
- struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
-
- ASSERT(bio->bi_private == NULL);
- bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
- bio->bi_bdev = bh->b_bdev;
- return bio;
-}
-
STATIC void
xfs_start_buffer_writeback(
struct buffer_head *bh)
@@ -452,28 +410,35 @@ static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
}
/*
- * Submit all of the bios for an ioend. We are only passed a single ioend at a
- * time; the caller is responsible for chaining prior to submission.
+ * Submit the bio for an ioend. We are passed an ioend with a bio attached to
+ * it, and we submit that bio. The ioend may be used for multiple bio
+ * submissions, so we only want to allocate an append transaction for the ioend
+ * once. In the case of multiple bio submission, each bio will take an IO
+ * reference to the ioend to ensure that the ioend completion is only done once
+ * all bios have been submitted and the ioend is really done.
*
* If @fail is non-zero, it means that we have a situation where some part of
* the submission process has failed after we have marked pages for writeback
- * and unlocked them. In this situation, we need to fail the ioend chain rather
- * than submit it to IO. This typically only happens on a filesystem shutdown.
+ * and unlocked them. In this situation, we need to fail the bio and ioend
+ * rather than submit it to IO. This typically only happens on a filesystem
+ * shutdown.
*/
STATIC int
xfs_submit_ioend(
struct writeback_control *wbc,
- xfs_ioend_t *ioend,
+ struct xfs_ioend *ioend,
int status)
{
- struct buffer_head *bh;
- struct bio *bio;
- sector_t lastblock = 0;
-
/* Reserve log space if we might write beyond the on-disk inode size. */
if (!status &&
- ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
+ ioend->io_type != XFS_IO_UNWRITTEN &&
+ xfs_ioend_is_append(ioend) &&
+ !ioend->io_append_trans)
status = xfs_setfilesize_trans_alloc(ioend);
+
+ ioend->io_bio->bi_private = ioend;
+ ioend->io_bio->bi_end_io = xfs_end_bio;
+
/*
* If we are failing the IO now, just mark the ioend with an
* error and finish it. This will run IO completion immediately
@@ -481,33 +446,73 @@ xfs_submit_ioend(
* time.
*/
if (status) {
- ioend->io_error = status;
- xfs_finish_ioend(ioend);
+ ioend->io_bio->bi_error = status;
+ bio_endio(ioend->io_bio);
return status;
}
- bio = NULL;
- for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
+ submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE,
+ ioend->io_bio);
+ return 0;
+}
- if (!bio) {
-retry:
- bio = xfs_alloc_ioend_bio(bh);
- } else if (bh->b_blocknr != lastblock + 1) {
- xfs_submit_ioend_bio(wbc, ioend, bio);
- goto retry;
- }
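The caller side of this contract is not in this hunk; a hedged sketch of how
xfs_writepage_map() drives it, submitting each ioend collected on a local list
and feeding any earlier failure in as @status so a failed ioend is torn down
rather than sent to disk:

    list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
            int error2;

            list_del_init(&ioend->io_list);
            error2 = xfs_submit_ioend(wbc, ioend, error);
            if (error2 && !error)
                    error = error2;
    }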
+static void
+xfs_init_bio_from_bh(
+ struct bio *bio,
+ struct buffer_head *bh)
+{
+ bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+ bio->bi_bdev = bh->b_bdev;
+}
- if (xfs_bio_add_buffer(bio, bh) != bh->b_size) {
- xfs_submit_ioend_bio(wbc, ioend, bio);
- goto retry;
- }
+static struct xfs_ioend *
+xfs_alloc_ioend(
+ struct inode *inode,
+ unsigned int type,
+ xfs_off_t offset,
+ struct buffer_head *bh)
+{
+ struct xfs_ioend *ioend;
+ struct bio *bio;
- lastblock = bh->b_blocknr;
- }
- if (bio)
- xfs_submit_ioend_bio(wbc, ioend, bio);
- xfs_finish_ioend(ioend);
- return 0;
+ bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, xfs_ioend_bioset);
+ xfs_init_bio_from_bh(bio, bh);
+
+ ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
+ INIT_LIST_HEAD(&ioend->io_list);
+ ioend->io_type = type;
+ ioend->io_inode = inode;
+ ioend->io_size = 0;
+ ioend->io_offset = offset;
+ INIT_WORK(&ioend->io_work, xfs_end_io);
+ ioend->io_append_trans = NULL;
+ ioend->io_bio = bio;
+ return ioend;
+}
+
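The container_of() above relies on the ioend being allocated from
xfs_ioend_bioset with the bio embedded at io_inline_bio. A hedged sketch of
the bioset setup this assumes (the real call lives in xfs_super.c, outside
this excerpt; the pool size shown is illustrative):

    /* front-pad each bio allocation so it sits inside an xfs_ioend */
    xfs_ioend_bioset = bioset_create(4 * MAX_BUF_PER_PAGE,
                    offsetof(struct xfs_ioend, io_inline_bio));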
+/*
+ * Allocate a new bio, and chain the old bio to the new one.
+ *
+ * Note that we have to perform the chaining in this unintuitive order
+ * so that the bi_private linkage is set up in the right direction for the
+ * traversal in xfs_destroy_ioend().
+ */
+static void
+xfs_chain_bio(
+ struct xfs_ioend *ioend,
+ struct writeback_control *wbc,
+ struct buffer_head *bh)
+{
+ struct bio *new;
+
+ new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
+ xfs_init_bio_from_bh(new, bh);
+
+ bio_chain(ioend->io_bio, new);
+ bio_get(ioend->io_bio); /* for xfs_destroy_ioend */
+ submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE,
+ ioend->io_bio);
+ ioend->io_bio = new;
}
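The direction matters because bio_chain(ioend->io_bio, new) points the
just-submitted bio's bi_private at the new bio. A condensed, hedged sketch of
the completion-side walk in xfs_destroy_ioend() (outside this hunk) that
consumes that linkage:

    struct bio *last = ioend->io_bio;   /* last bio built, never chained */
    struct bio *bio, *next;

    for (bio = &ioend->io_inline_bio; bio; bio = next) {
            /* grab the link before the bio is freed */
            next = (bio == last) ? NULL : bio->bi_private;
            /* ... end page writeback for the buffers on this bio ... */
            bio_put(bio);
    }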
/*
@@ -523,27 +528,24 @@ xfs_add_to_ioend(
struct buffer_head *bh,
xfs_off_t offset,
struct xfs_writepage_ctx *wpc,
+ struct writeback_control *wbc,
struct list_head *iolist)
{
if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
bh->b_blocknr != wpc->last_block + 1 ||
offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
- struct xfs_ioend *new;
-
if (wpc->ioend)
list_add(&wpc->ioend->io_list, iolist);
-
- new = xfs_alloc_ioend(inode, wpc->io_type);
- new->io_offset = offset;
- new->io_buffer_head = bh;
- new->io_buffer_tail = bh;
- wpc->ioend = new;
- } else {
- wpc->ioend->io_buffer_tail->b_private = bh;
- wpc->ioend->io_buffer_tail = bh;
+ wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
}
- bh->b_private = NULL;
+ /*
+ * If the buffer doesn't fit into the bio we need to allocate a new
+ * one. This shouldn't happen more than once for a given buffer.
+ */
+ while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
+ xfs_chain_bio(wpc->ioend, wbc, bh);
+
wpc->ioend->io_size += bh->b_size;
wpc->last_block = bh->b_blocknr;
xfs_start_buffer_writeback(bh);
@@ -803,7 +805,7 @@ xfs_writepage_map(
lock_buffer(bh);
if (wpc->io_type != XFS_IO_OVERWRITE)
xfs_map_at_offset(inode, bh, &wpc->imap, offset);
- xfs_add_to_ioend(inode, bh, offset, wpc, &submit_list);
+ xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list);
count++;
}
@@ -1391,13 +1393,10 @@ xfs_end_io_direct_write(
trace_xfs_end_io_direct_write_append(ip, offset, size);
- tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
- if (error) {
- xfs_trans_cancel(tp);
- return error;
- }
- error = xfs_setfilesize(ip, tp, offset, size);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0,
+ &tp);
+ if (!error)
+ error = xfs_setfilesize(ip, tp, offset, size);
}
return error;
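This hunk is the template for a conversion repeated throughout the rest of
this patch: the old allocate-then-reserve sequence becomes a single
xfs_trans_alloc() call that only hands back a transaction on success. A
before/after sketch using the signature as it appears in these hunks:

    /* old: two steps, with an explicit cancel on reservation failure */
    tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
    error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
    if (error) {
            xfs_trans_cancel(tp);
            return error;
    }

    /* new: one step; nothing to cancel if it fails */
    error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
    if (error)
            return error;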
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index b4421177b68d..814aab790713 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -18,7 +18,7 @@
#ifndef __XFS_AOPS_H__
#define __XFS_AOPS_H__
-extern mempool_t *xfs_ioend_pool;
+extern struct bio_set *xfs_ioend_bioset;
/*
* Types of I/O for bmap clustering and I/O completion tracking.
@@ -37,22 +37,19 @@ enum {
{ XFS_IO_OVERWRITE, "overwrite" }
/*
- * xfs_ioend struct manages large extent writes for XFS.
- * It can manage several multi-page bio's at once.
+ * Structure for buffered I/O completions.
*/
-typedef struct xfs_ioend {
+struct xfs_ioend {
struct list_head io_list; /* next ioend in chain */
unsigned int io_type; /* delalloc / unwritten */
- int io_error; /* I/O error code */
- atomic_t io_remaining; /* hold count */
struct inode *io_inode; /* file being written to */
- struct buffer_head *io_buffer_head;/* buffer linked list head */
- struct buffer_head *io_buffer_tail;/* buffer linked list tail */
size_t io_size; /* size of the extent */
xfs_off_t io_offset; /* offset in the file */
struct work_struct io_work; /* xfsdatad work queue */
struct xfs_trans *io_append_trans;/* xact. for size update */
-} xfs_ioend_t;
+ struct bio *io_bio; /* bio being built */
+ struct bio io_inline_bio; /* MUST BE LAST! */
+};
extern const struct address_space_operations xfs_address_space_operations;
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
index dd4824589470..e3da5d448bcf 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/xfs_attr.h
@@ -112,8 +112,9 @@ typedef struct attrlist_cursor_kern {
*========================================================================*/
+/* Return 0 on success, or -errno; other state communicated via *context */
typedef int (*put_listent_func_t)(struct xfs_attr_list_context *, int,
- unsigned char *, int, int, unsigned char *);
+ unsigned char *, int, int);
typedef struct xfs_attr_list_context {
struct xfs_inode *dp; /* inode */
@@ -126,7 +127,6 @@ typedef struct xfs_attr_list_context {
int firstu; /* first used byte in buffer */
int flags; /* from VOP call */
int resynch; /* T/F: resynch with cursor */
- int put_value; /* T/F: need value for listent */
put_listent_func_t put_listent; /* list output fmt function */
int index; /* index into output buffer */
} xfs_attr_list_context_t;
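A minimal hedged sketch of a callback under the new contract (hypothetical
example function; it loosely reuses context->index as a counter, which is an
assumption, and stops early via seen_enough rather than a non-zero return):

    static int
    count_listent(
            struct xfs_attr_list_context    *context,
            int                             flags,
            unsigned char                   *name,
            int                             namelen,
            int                             valuelen)
    {
            if (++context->index >= 100)
                    context->seen_enough = 1;   /* graceful stop */
            return 0;       /* reserve -errno for real failures */
    }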
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index 2bb959ada45b..55d214981ed2 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -405,21 +405,11 @@ xfs_attr_inactive(
goto out_destroy_fork;
xfs_iunlock(dp, lock_mode);
- /*
- * Start our first transaction of the day.
- *
- * All future transactions during this code must be "chained" off
- * this one via the trans_dup() call. All transactions will contain
- * the inode, and the inode will always be marked with trans_ihold().
- * Since the inode will be locked in all transactions, we must log
- * the inode in every transaction to let it float upward through
- * the log.
- */
lock_mode = 0;
- trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL);
- error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0);
+
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_attrinval, 0, 0, 0, &trans);
if (error)
- goto out_cancel;
+ goto out_destroy_fork;
lock_mode = XFS_ILOCK_EXCL;
xfs_ilock(dp, lock_mode);
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 4fa14820e2e2..d25f26b22ac9 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -106,18 +106,15 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
sfe->flags,
sfe->nameval,
(int)sfe->namelen,
- (int)sfe->valuelen,
- &sfe->nameval[sfe->namelen]);
-
+ (int)sfe->valuelen);
+ if (error)
+ return error;
/*
* Either search callback finished early or
* didn't fit it all in the buffer after all.
*/
if (context->seen_enough)
break;
-
- if (error)
- return error;
sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
}
trace_xfs_attr_list_sf_all(context);
@@ -200,8 +197,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
sbp->flags,
sbp->name,
sbp->namelen,
- sbp->valuelen,
- &sbp->name[sbp->namelen]);
+ sbp->valuelen);
if (error) {
kmem_free(sbuf);
return error;
@@ -416,6 +412,9 @@ xfs_attr3_leaf_list_int(
*/
retval = 0;
for (; i < ichdr.count; entry++, i++) {
+ char *name;
+ int namelen, valuelen;
+
if (be32_to_cpu(entry->hashval) != cursor->hashval) {
cursor->hashval = be32_to_cpu(entry->hashval);
cursor->offset = 0;
@@ -425,56 +424,25 @@ xfs_attr3_leaf_list_int(
continue; /* skip incomplete entries */
if (entry->flags & XFS_ATTR_LOCAL) {
- xfs_attr_leaf_name_local_t *name_loc =
- xfs_attr3_leaf_name_local(leaf, i);
-
- retval = context->put_listent(context,
- entry->flags,
- name_loc->nameval,
- (int)name_loc->namelen,
- be16_to_cpu(name_loc->valuelen),
- &name_loc->nameval[name_loc->namelen]);
- if (retval)
- return retval;
+ xfs_attr_leaf_name_local_t *name_loc;
+
+ name_loc = xfs_attr3_leaf_name_local(leaf, i);
+ name = name_loc->nameval;
+ namelen = name_loc->namelen;
+ valuelen = be16_to_cpu(name_loc->valuelen);
} else {
- xfs_attr_leaf_name_remote_t *name_rmt =
- xfs_attr3_leaf_name_remote(leaf, i);
-
- int valuelen = be32_to_cpu(name_rmt->valuelen);
-
- if (context->put_value) {
- xfs_da_args_t args;
-
- memset((char *)&args, 0, sizeof(args));
- args.geo = context->dp->i_mount->m_attr_geo;
- args.dp = context->dp;
- args.whichfork = XFS_ATTR_FORK;
- args.valuelen = valuelen;
- args.rmtvaluelen = valuelen;
- args.value = kmem_alloc(valuelen, KM_SLEEP | KM_NOFS);
- args.rmtblkno = be32_to_cpu(name_rmt->valueblk);
- args.rmtblkcnt = xfs_attr3_rmt_blocks(
- args.dp->i_mount, valuelen);
- retval = xfs_attr_rmtval_get(&args);
- if (!retval)
- retval = context->put_listent(context,
- entry->flags,
- name_rmt->name,
- (int)name_rmt->namelen,
- valuelen,
- args.value);
- kmem_free(args.value);
- } else {
- retval = context->put_listent(context,
- entry->flags,
- name_rmt->name,
- (int)name_rmt->namelen,
- valuelen,
- NULL);
- }
- if (retval)
- return retval;
+ xfs_attr_leaf_name_remote_t *name_rmt;
+
+ name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
+ name = name_rmt->name;
+ namelen = name_rmt->namelen;
+ valuelen = be32_to_cpu(name_rmt->valuelen);
}
+
+ retval = context->put_listent(context, entry->flags,
+ name, namelen, valuelen);
+ if (retval)
+ break;
if (context->seen_enough)
break;
cursor->offset++;
@@ -551,8 +519,7 @@ xfs_attr_put_listent(
int flags,
unsigned char *name,
int namelen,
- int valuelen,
- unsigned char *value)
+ int valuelen)
{
struct attrlist *alist = (struct attrlist *)context->alist;
attrlist_ent_t *aep;
@@ -581,7 +548,7 @@ xfs_attr_put_listent(
trace_xfs_attr_list_full(context);
alist->al_more = 1;
context->seen_enough = 1;
- return 1;
+ return 0;
}
aep = (attrlist_ent_t *)&context->alist[context->firstu];
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 3b6309865c65..586bb64e674b 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -72,18 +72,11 @@ xfs_zero_extent(
struct xfs_mount *mp = ip->i_mount;
xfs_daddr_t sector = xfs_fsb_to_db(ip, start_fsb);
sector_t block = XFS_BB_TO_FSBT(mp, sector);
- ssize_t size = XFS_FSB_TO_B(mp, count_fsb);
-
- if (IS_DAX(VFS_I(ip)))
- return dax_clear_sectors(xfs_find_bdev_for_inode(VFS_I(ip)),
- sector, size);
-
- /*
- * let the block layer decide on the fastest method of
- * implementing the zeroing.
- */
- return sb_issue_zeroout(mp->m_super, block, count_fsb, GFP_NOFS);
+ return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
+ block << (mp->m_super->s_blocksize_bits - 9),
+ count_fsb << (mp->m_super->s_blocksize_bits - 9),
+ GFP_NOFS, true);
}
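The shifts convert filesystem blocks to the 512-byte sector units that
blkdev_issue_zeroout() expects. Worked example, assuming 4096-byte blocks:

    /* s_blocksize_bits == 12, so block << (12 - 9) == block * 8:
     * each 4 KiB filesystem block spans eight 512-byte sectors */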
/*
@@ -900,19 +893,15 @@ xfs_free_eofblocks(
* Free them up now by truncating the file to
* its current size.
*/
- tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
-
if (need_iolock) {
- if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
- xfs_trans_cancel(tp);
+ if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
return -EAGAIN;
- }
}
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
+ &tp);
if (error) {
ASSERT(XFS_FORCED_SHUTDOWN(mp));
- xfs_trans_cancel(tp);
if (need_iolock)
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
return error;
@@ -1037,9 +1026,9 @@ xfs_alloc_file_space(
/*
* Allocate and setup the transaction.
*/
- tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
- resblks, resrtextents);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
+ resrtextents, 0, &tp);
+
/*
* Check for running out of space
*/
@@ -1048,7 +1037,6 @@ xfs_alloc_file_space(
* Free the transaction structure.
*/
ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
- xfs_trans_cancel(tp);
break;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
@@ -1311,18 +1299,10 @@ xfs_free_file_space(
* transaction to dip into the reserve blocks to ensure
* the freeing of the space succeeds at ENOSPC.
*/
- tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write, resblks, 0);
-
- /*
- * check for running out of space
- */
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
+ &tp);
if (error) {
- /*
- * Free the transaction structure.
- */
ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
- xfs_trans_cancel(tp);
break;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
@@ -1482,19 +1462,16 @@ xfs_shift_file_space(
}
while (!error && !done) {
- tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
/*
	 * We would need to reserve a permanent block for the
	 * transaction. This comes into the picture when, after
	 * shifting an extent into a hole, we find that adjacent
	 * extents can be merged, which may lead to freeing of a
	 * block during the record update.
*/
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
- XFS_DIOSTRAT_SPACE_RES(mp, 0), 0);
- if (error) {
- xfs_trans_cancel(tp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
+ XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
+ if (error)
break;
- }
xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
@@ -1747,12 +1724,9 @@ xfs_swap_extents(
if (error)
goto out_unlock;
- tp = xfs_trans_alloc(mp, XFS_TRANS_SWAPEXT);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
- if (error) {
- xfs_trans_cancel(tp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
+ if (error)
goto out_unlock;
- }
/*
	 * Lock and join the inodes to the transaction so that transaction commit
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 9a2191b91137..e71cfbd5acb3 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1100,22 +1100,18 @@ xfs_bwrite(
return error;
}
-STATIC void
+static void
xfs_buf_bio_end_io(
struct bio *bio)
{
- xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
+ struct xfs_buf *bp = (struct xfs_buf *)bio->bi_private;
/*
* don't overwrite existing errors - otherwise we can lose errors on
* buffers that require multiple bios to complete.
*/
- if (bio->bi_error) {
- spin_lock(&bp->b_lock);
- if (!bp->b_io_error)
- bp->b_io_error = bio->bi_error;
- spin_unlock(&bp->b_lock);
- }
+ if (bio->bi_error)
+ cmpxchg(&bp->b_io_error, 0, bio->bi_error);
if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
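The cmpxchg() implements a lock-free first-error-wins update: only the
0 -> errno transition can succeed, so a later bio cannot clobber the error
recorded by an earlier one, which is what makes the spinlock unnecessary. As a
hedged standalone sketch of the idiom:

    /* record err only if no error has been recorded yet */
    static inline void record_first_error(int *slot, int err)
    {
            if (err)
                    cmpxchg(slot, 0, err);
    }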
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 4eb89bd4ee73..8bfb974f0772 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -183,6 +183,26 @@ typedef struct xfs_buf {
unsigned int b_page_count; /* size of page array */
unsigned int b_offset; /* page offset in first page */
int b_error; /* error code on I/O */
+
+ /*
+	 * Async write failure retry count. Initialised to zero on the first
+	 * failure; when it exceeds the configured maximum without an
+	 * intervening success, the write is considered to have failed
+	 * permanently and the iodone handler will take appropriate action.
+ *
+ * For retry timeouts, we record the jiffie of the first failure. This
+ * means that we can change the retry timeout for buffers already under
+ * I/O and thus avoid getting stuck in a retry loop with a long timeout.
+ *
+ * last_error is used to ensure that we are getting repeated errors, not
+ * different errors. e.g. a block device might change ENOSPC to EIO when
+ * a failure timeout occurs, so we want to re-initialise the error
+ * retry behaviour appropriately when that happens.
+ */
+ int b_retries;
+ unsigned long b_first_retry_time; /* in jiffies */
+ int b_last_error;
+
const struct xfs_buf_ops *b_ops;
#ifdef XFS_BUF_LOCK_TRACKING
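How the three fields combine is implemented later in this patch in
xfs_buf_iodone_callback_error(); a condensation of that decision:

    /* permanent failure once either retry budget is exhausted */
    if ((cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
         ++bp->b_retries > cfg->max_retries) ||
        (cfg->retry_timeout &&
         time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time)))
            goto permanent_error;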
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 99e91a0e554e..34257992934c 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -1042,35 +1042,22 @@ xfs_buf_do_callbacks(
}
}
-/*
- * This is the iodone() function for buffers which have had callbacks
- * attached to them by xfs_buf_attach_iodone(). It should remove each
- * log item from the buffer's list and call the callback of each in turn.
- * When done, the buffer's fsprivate field is set to NULL and the buffer
- * is unlocked with a call to iodone().
- */
-void
-xfs_buf_iodone_callbacks(
+static bool
+xfs_buf_iodone_callback_error(
struct xfs_buf *bp)
{
struct xfs_log_item *lip = bp->b_fspriv;
struct xfs_mount *mp = lip->li_mountp;
static ulong lasttime;
static xfs_buftarg_t *lasttarg;
-
- if (likely(!bp->b_error))
- goto do_callbacks;
+ struct xfs_error_cfg *cfg;
/*
* If we've already decided to shutdown the filesystem because of
* I/O errors, there's no point in giving this a retry.
*/
- if (XFS_FORCED_SHUTDOWN(mp)) {
- xfs_buf_stale(bp);
- bp->b_flags |= XBF_DONE;
- trace_xfs_buf_item_iodone(bp, _RET_IP_);
- goto do_callbacks;
- }
+ if (XFS_FORCED_SHUTDOWN(mp))
+ goto out_stale;
if (bp->b_target != lasttarg ||
time_after(jiffies, (lasttime + 5*HZ))) {
@@ -1079,45 +1066,93 @@ xfs_buf_iodone_callbacks(
}
lasttarg = bp->b_target;
+ /* synchronous writes will have callers process the error */
+ if (!(bp->b_flags & XBF_ASYNC))
+ goto out_stale;
+
+ trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
+ ASSERT(bp->b_iodone != NULL);
+
/*
* If the write was asynchronous then no one will be looking for the
- * error. Clear the error state and write the buffer out again.
- *
- * XXX: This helps against transient write errors, but we need to find
- * a way to shut the filesystem down if the writes keep failing.
- *
- * In practice we'll shut the filesystem down soon as non-transient
- * errors tend to affect the whole device and a failing log write
- * will make us give up. But we really ought to do better here.
+ * error. If this is the first failure of this type, clear the error
+ * state and write the buffer out again. This means we always retry an
+ * async write failure at least once, but we also need to set the buffer
+ * up to behave correctly now for repeated failures.
*/
- if (bp->b_flags & XBF_ASYNC) {
- ASSERT(bp->b_iodone != NULL);
+ if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL)) ||
+ bp->b_last_error != bp->b_error) {
+ bp->b_flags |= (XBF_WRITE | XBF_ASYNC |
+ XBF_DONE | XBF_WRITE_FAIL);
+ bp->b_last_error = bp->b_error;
+ bp->b_retries = 0;
+ bp->b_first_retry_time = jiffies;
+
+ xfs_buf_ioerror(bp, 0);
+ xfs_buf_submit(bp);
+ return true;
+ }
- trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
+ /*
+ * Repeated failure on an async write. Take action according to the
+ * error configuration we have been set up to use.
+ */
+ cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);
- xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */
+ if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
+ ++bp->b_retries > cfg->max_retries)
+ goto permanent_error;
+ if (cfg->retry_timeout &&
+ time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
+ goto permanent_error;
- if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL))) {
- bp->b_flags |= XBF_WRITE | XBF_ASYNC |
- XBF_DONE | XBF_WRITE_FAIL;
- xfs_buf_submit(bp);
- } else {
- xfs_buf_relse(bp);
- }
+ /* At unmount we may treat errors differently */
+ if ((mp->m_flags & XFS_MOUNT_UNMOUNTING) && mp->m_fail_unmount)
+ goto permanent_error;
- return;
- }
+ /* still a transient error, higher layers will retry */
+ xfs_buf_ioerror(bp, 0);
+ xfs_buf_relse(bp);
+ return true;
/*
- * If the write of the buffer was synchronous, we want to make
- * sure to return the error to the caller of xfs_bwrite().
+ * Permanent error - we need to trigger a shutdown if we haven't already
+ * to indicate that inconsistency will result from this action.
*/
+permanent_error:
+ xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
+out_stale:
xfs_buf_stale(bp);
bp->b_flags |= XBF_DONE;
-
trace_xfs_buf_error_relse(bp, _RET_IP_);
+ return false;
+}
+
+/*
+ * This is the iodone() function for buffers which have had callbacks attached
+ * to them by xfs_buf_attach_iodone(). We need to iterate the items on the
+ * callback list, mark the buffer as having no more callbacks and then push the
+ * buffer through IO completion processing.
+ */
+void
+xfs_buf_iodone_callbacks(
+ struct xfs_buf *bp)
+{
+ /*
+ * If there is an error, process it. Some errors require us
+ * to run callbacks after failure processing is done so we
+ * detect that and take appropriate action.
+ */
+ if (bp->b_error && xfs_buf_iodone_callback_error(bp))
+ return;
+
+ /*
+ * Successful IO or permanent error. Either way, we can clear the
+ * retry state here in preparation for the next error that may occur.
+ */
+ bp->b_last_error = 0;
+ bp->b_retries = 0;
-do_callbacks:
xfs_buf_do_callbacks(bp);
bp->b_fspriv = NULL;
bp->b_iodone = NULL;
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 316b2a1bdba5..e0646659ce16 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -614,11 +614,10 @@ xfs_qm_dqread(
trace_xfs_dqread(dqp);
if (flags & XFS_QMOPT_DQALLOC) {
- tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_dqalloc,
- XFS_QM_DQALLOC_SPACE_RES(mp), 0);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
+ XFS_QM_DQALLOC_SPACE_RES(mp), 0, 0, &tp);
if (error)
- goto error1;
+ goto error0;
}
/*
@@ -692,7 +691,7 @@ error0:
* end of the chunk, skip ahead to first id in next allocated chunk
* using the SEEK_DATA interface.
*/
-int
+static int
xfs_dq_get_next_id(
xfs_mount_t *mp,
uint type,
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 85ce3032f815..47fc63295422 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -145,12 +145,10 @@ xfs_update_prealloc_flags(
struct xfs_trans *tp;
int error;
- tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
- error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
- if (error) {
- xfs_trans_cancel(tp);
+ error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
+ 0, 0, 0, &tp);
+ if (error)
return error;
- }
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
@@ -1553,7 +1551,7 @@ xfs_filemap_page_mkwrite(
xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
if (IS_DAX(inode)) {
- ret = __dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault, NULL);
+ ret = __dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault);
} else {
ret = block_page_mkwrite(vma, vmf, xfs_get_blocks);
ret = block_page_mkwrite_return(ret);
@@ -1587,7 +1585,7 @@ xfs_filemap_fault(
* changes to xfs_get_blocks_direct() to map unwritten extent
* ioend for conversion on read-only mappings.
*/
- ret = __dax_fault(vma, vmf, xfs_get_blocks_dax_fault, NULL);
+ ret = __dax_fault(vma, vmf, xfs_get_blocks_dax_fault);
} else
ret = filemap_fault(vma, vmf);
xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
@@ -1624,8 +1622,7 @@ xfs_filemap_pmd_fault(
}
xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
- ret = __dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault,
- NULL);
+ ret = __dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault);
xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
if (flags & FAULT_FLAG_WRITE)
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index ee3aaa0a5317..b4d75825ae37 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -198,14 +198,10 @@ xfs_growfs_data_private(
return error;
}
- tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
- tp->t_flags |= XFS_TRANS_RESERVE;
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growdata,
- XFS_GROWFS_SPACE_RES(mp), 0);
- if (error) {
- xfs_trans_cancel(tp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
+ XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
+ if (error)
return error;
- }
/*
* Write new AG headers to disk. Non-transactional, but written
@@ -243,8 +239,8 @@ xfs_growfs_data_private(
agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
- agf->agf_flfirst = 0;
- agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
+ agf->agf_flfirst = cpu_to_be32(1);
+ agf->agf_fllast = 0;
agf->agf_flcount = 0;
tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp);
agf->agf_freeblks = cpu_to_be32(tmpsize);
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index bf2d60749278..99ee6eee5e0b 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -37,9 +37,6 @@
#include <linux/kthread.h>
#include <linux/freezer.h>
-STATIC void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp,
- struct xfs_perag *pag, struct xfs_inode *ip);
-
/*
* Allocate and initialise an xfs_inode.
*/
@@ -94,13 +91,6 @@ xfs_inode_free_callback(
struct inode *inode = container_of(head, struct inode, i_rcu);
struct xfs_inode *ip = XFS_I(inode);
- kmem_zone_free(xfs_inode_zone, ip);
-}
-
-void
-xfs_inode_free(
- struct xfs_inode *ip)
-{
switch (VFS_I(ip)->i_mode & S_IFMT) {
case S_IFREG:
case S_IFDIR:
@@ -118,6 +108,25 @@ xfs_inode_free(
ip->i_itemp = NULL;
}
+ kmem_zone_free(xfs_inode_zone, ip);
+}
+
+static void
+__xfs_inode_free(
+ struct xfs_inode *ip)
+{
+ /* asserts to verify all state is correct here */
+ ASSERT(atomic_read(&ip->i_pincount) == 0);
+ ASSERT(!xfs_isiflocked(ip));
+ XFS_STATS_DEC(ip->i_mount, vn_active);
+
+ call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
+}
+
+void
+xfs_inode_free(
+ struct xfs_inode *ip)
+{
/*
* Because we use RCU freeing we need to ensure the inode always
* appears to be reclaimed with an invalid inode number when in the
@@ -129,12 +138,123 @@ xfs_inode_free(
ip->i_ino = 0;
spin_unlock(&ip->i_flags_lock);
- /* asserts to verify all state is correct here */
- ASSERT(atomic_read(&ip->i_pincount) == 0);
- ASSERT(!xfs_isiflocked(ip));
- XFS_STATS_DEC(ip->i_mount, vn_active);
+ __xfs_inode_free(ip);
+}
- call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
+/*
+ * Queue a new inode reclaim pass if there are reclaimable inodes and there
+ * isn't a reclaim pass already in progress. By default it runs every 5s based
+ * on the xfs periodic sync default of 30s. Perhaps this should have its own
+ * tunable, but that can be done if this method proves to be ineffective or too
+ * aggressive.
+ */
+static void
+xfs_reclaim_work_queue(
+ struct xfs_mount *mp)
+{
+ rcu_read_lock();
+ if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
+ queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
+ msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
+ }
+ rcu_read_unlock();
+}
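The 5 s cadence quoted above follows from the queueing arithmetic, assuming
the default xfs_syncd_centisecs of 3000 (i.e. 30 s):

    xfs_syncd_centisecs / 6 * 10
        = 3000 / 6 * 10 = 5000 ms   /* one centisecond is 10 ms */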
+
+/*
+ * This is a fast pass over the inode cache to try to get reclaim moving on as
+ * many inodes as possible in a short period of time. It kicks itself every few
+ * seconds, as well as being kicked by the inode cache shrinker when memory
+ * goes low. It scans as quickly as possible avoiding locked inodes or those
+ * already being flushed, and once done schedules a future pass.
+ */
+void
+xfs_reclaim_worker(
+ struct work_struct *work)
+{
+ struct xfs_mount *mp = container_of(to_delayed_work(work),
+ struct xfs_mount, m_reclaim_work);
+
+ xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
+ xfs_reclaim_work_queue(mp);
+}
+
+static void
+xfs_perag_set_reclaim_tag(
+ struct xfs_perag *pag)
+{
+ struct xfs_mount *mp = pag->pag_mount;
+
+ ASSERT(spin_is_locked(&pag->pag_ici_lock));
+ if (pag->pag_ici_reclaimable++)
+ return;
+
+ /* propagate the reclaim tag up into the perag radix tree */
+ spin_lock(&mp->m_perag_lock);
+ radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
+ XFS_ICI_RECLAIM_TAG);
+ spin_unlock(&mp->m_perag_lock);
+
+ /* schedule periodic background inode reclaim */
+ xfs_reclaim_work_queue(mp);
+
+ trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
+}
+
+static void
+xfs_perag_clear_reclaim_tag(
+ struct xfs_perag *pag)
+{
+ struct xfs_mount *mp = pag->pag_mount;
+
+ ASSERT(spin_is_locked(&pag->pag_ici_lock));
+ if (--pag->pag_ici_reclaimable)
+ return;
+
+ /* clear the reclaim tag from the perag radix tree */
+ spin_lock(&mp->m_perag_lock);
+ radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
+ XFS_ICI_RECLAIM_TAG);
+ spin_unlock(&mp->m_perag_lock);
+ trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
+}
+
+
+/*
+ * We set the inode flag atomically with the radix tree tag.
+ * Once we get tag lookups on the radix tree, this inode flag
+ * can go away.
+ */
+void
+xfs_inode_set_reclaim_tag(
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_perag *pag;
+
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
+ spin_lock(&pag->pag_ici_lock);
+ spin_lock(&ip->i_flags_lock);
+
+ radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
+ XFS_ICI_RECLAIM_TAG);
+ xfs_perag_set_reclaim_tag(pag);
+ __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
+
+ spin_unlock(&ip->i_flags_lock);
+ spin_unlock(&pag->pag_ici_lock);
+ xfs_perag_put(pag);
+}
+
+STATIC void
+xfs_inode_clear_reclaim_tag(
+ struct xfs_perag *pag,
+ xfs_ino_t ino)
+{
+ radix_tree_tag_clear(&pag->pag_ici_root,
+ XFS_INO_TO_AGINO(pag->pag_mount, ino),
+ XFS_ICI_RECLAIM_TAG);
+ xfs_perag_clear_reclaim_tag(pag);
}
/*
@@ -264,7 +384,7 @@ xfs_iget_cache_hit(
*/
ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
ip->i_flags |= XFS_INEW;
- __xfs_inode_clear_reclaim_tag(mp, pag, ip);
+ xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
inode->i_state = I_NEW;
ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
@@ -723,121 +843,6 @@ xfs_inode_ag_iterator_tag(
}
/*
- * Queue a new inode reclaim pass if there are reclaimable inodes and there
- * isn't a reclaim pass already in progress. By default it runs every 5s based
- * on the xfs periodic sync default of 30s. Perhaps this should have it's own
- * tunable, but that can be done if this method proves to be ineffective or too
- * aggressive.
- */
-static void
-xfs_reclaim_work_queue(
- struct xfs_mount *mp)
-{
-
- rcu_read_lock();
- if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
- queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
- msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
- }
- rcu_read_unlock();
-}
-
-/*
- * This is a fast pass over the inode cache to try to get reclaim moving on as
- * many inodes as possible in a short period of time. It kicks itself every few
- * seconds, as well as being kicked by the inode cache shrinker when memory
- * goes low. It scans as quickly as possible avoiding locked inodes or those
- * already being flushed, and once done schedules a future pass.
- */
-void
-xfs_reclaim_worker(
- struct work_struct *work)
-{
- struct xfs_mount *mp = container_of(to_delayed_work(work),
- struct xfs_mount, m_reclaim_work);
-
- xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
- xfs_reclaim_work_queue(mp);
-}
-
-static void
-__xfs_inode_set_reclaim_tag(
- struct xfs_perag *pag,
- struct xfs_inode *ip)
-{
- radix_tree_tag_set(&pag->pag_ici_root,
- XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
- XFS_ICI_RECLAIM_TAG);
-
- if (!pag->pag_ici_reclaimable) {
- /* propagate the reclaim tag up into the perag radix tree */
- spin_lock(&ip->i_mount->m_perag_lock);
- radix_tree_tag_set(&ip->i_mount->m_perag_tree,
- XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
- XFS_ICI_RECLAIM_TAG);
- spin_unlock(&ip->i_mount->m_perag_lock);
-
- /* schedule periodic background inode reclaim */
- xfs_reclaim_work_queue(ip->i_mount);
-
- trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
- -1, _RET_IP_);
- }
- pag->pag_ici_reclaimable++;
-}
-
-/*
- * We set the inode flag atomically with the radix tree tag.
- * Once we get tag lookups on the radix tree, this inode flag
- * can go away.
- */
-void
-xfs_inode_set_reclaim_tag(
- xfs_inode_t *ip)
-{
- struct xfs_mount *mp = ip->i_mount;
- struct xfs_perag *pag;
-
- pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
- spin_lock(&pag->pag_ici_lock);
- spin_lock(&ip->i_flags_lock);
- __xfs_inode_set_reclaim_tag(pag, ip);
- __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
- spin_unlock(&ip->i_flags_lock);
- spin_unlock(&pag->pag_ici_lock);
- xfs_perag_put(pag);
-}
-
-STATIC void
-__xfs_inode_clear_reclaim(
- xfs_perag_t *pag,
- xfs_inode_t *ip)
-{
- pag->pag_ici_reclaimable--;
- if (!pag->pag_ici_reclaimable) {
- /* clear the reclaim tag from the perag radix tree */
- spin_lock(&ip->i_mount->m_perag_lock);
- radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
- XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
- XFS_ICI_RECLAIM_TAG);
- spin_unlock(&ip->i_mount->m_perag_lock);
- trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
- -1, _RET_IP_);
- }
-}
-
-STATIC void
-__xfs_inode_clear_reclaim_tag(
- xfs_mount_t *mp,
- xfs_perag_t *pag,
- xfs_inode_t *ip)
-{
- radix_tree_tag_clear(&pag->pag_ici_root,
- XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
- __xfs_inode_clear_reclaim(pag, ip);
-}
-
-/*
* Grab the inode for reclaim exclusively.
* Return 0 if we grabbed it, non-zero otherwise.
*/
@@ -929,6 +934,7 @@ xfs_reclaim_inode(
int sync_mode)
{
struct xfs_buf *bp = NULL;
+ xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */
int error;
restart:
@@ -993,6 +999,22 @@ restart:
xfs_iflock(ip);
reclaim:
+ /*
+ * Because we use RCU freeing we need to ensure the inode always appears
+ * to be reclaimed with an invalid inode number when in the free state.
+ * We do this as early as possible under the ILOCK and flush lock so
+ * that xfs_iflush_cluster() can be guaranteed to detect races with us
+ * here. By doing this, we guarantee that once xfs_iflush_cluster has
+ * locked both the XFS_ILOCK and the flush lock that it will see either
+ * a valid, flushable inode that will serialise correctly against the
+ * locks below, or it will see a clean (and invalid) inode that it can
+ * skip.
+ */
+ spin_lock(&ip->i_flags_lock);
+ ip->i_flags = XFS_IRECLAIM;
+ ip->i_ino = 0;
+ spin_unlock(&ip->i_flags_lock);
+
xfs_ifunlock(ip);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -1006,9 +1028,9 @@ reclaim:
*/
spin_lock(&pag->pag_ici_lock);
if (!radix_tree_delete(&pag->pag_ici_root,
- XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
+ XFS_INO_TO_AGINO(ip->i_mount, ino)))
ASSERT(0);
- __xfs_inode_clear_reclaim(pag, ip);
+ xfs_perag_clear_reclaim_tag(pag);
spin_unlock(&pag->pag_ici_lock);
/*
@@ -1023,7 +1045,7 @@ reclaim:
xfs_qm_dqdetach(ip);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
- xfs_inode_free(ip);
+ __xfs_inode_free(ip);
return error;
out_ifunlock:
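The early invalidation here pairs with the re-check added to
xfs_iflush_cluster() further down. A hedged compression of the two-sided
protocol (not a literal excerpt):

    /* reclaim side, under ILOCK_EXCL + flush lock */
    spin_lock(&ip->i_flags_lock);
    ip->i_flags = XFS_IRECLAIM;
    ip->i_ino = 0;
    spin_unlock(&ip->i_flags_lock);

    /* flush side, after winning ILOCK_SHARED + flush lock */
    if (!cip->i_ino) {
            /* raced with reclaim: unlock and skip this inode */
    }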
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 96f606deee31..ee6799e0476f 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1030,7 +1030,7 @@ xfs_dir_ialloc(
tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
}
- code = xfs_trans_roll(&tp, 0);
+ code = xfs_trans_roll(&tp, NULL);
if (committed != NULL)
*committed = 1;
@@ -1161,11 +1161,9 @@ xfs_create(
rdev = 0;
resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
tres = &M_RES(mp)->tr_mkdir;
- tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
} else {
resblks = XFS_CREATE_SPACE_RES(mp, name->len);
tres = &M_RES(mp)->tr_create;
- tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
}
/*
@@ -1174,20 +1172,19 @@ xfs_create(
* the case we'll drop the one we have and get a more
* appropriate transaction later.
*/
- error = xfs_trans_reserve(tp, tres, resblks, 0);
+ error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
if (error == -ENOSPC) {
/* flush outstanding delalloc blocks and retry */
xfs_flush_inodes(mp);
- error = xfs_trans_reserve(tp, tres, resblks, 0);
+ error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
}
if (error == -ENOSPC) {
/* No space at all so try a "no-allocation" reservation */
resblks = 0;
- error = xfs_trans_reserve(tp, tres, 0, 0);
+ error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp);
}
if (error)
- goto out_trans_cancel;
-
+ goto out_release_inode;
xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL |
XFS_IOLOCK_PARENT | XFS_ILOCK_PARENT);
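The ENOSPC fallback above recurs throughout this patch (create, mkdir, link,
remove, rename): try the full reservation first, then retry with a zero-block
"no-allocation" reservation so the operation can still proceed if it happens
not to need new blocks. Condensed form:

    error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
    if (error == -ENOSPC) {
            resblks = 0;    /* no-allocation attempt */
            error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp);
    }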
@@ -1337,17 +1334,16 @@ xfs_create_tmpfile(
return error;
resblks = XFS_IALLOC_SPACE_RES(mp);
- tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE_TMPFILE);
-
tres = &M_RES(mp)->tr_create_tmpfile;
- error = xfs_trans_reserve(tp, tres, resblks, 0);
+
+ error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
if (error == -ENOSPC) {
/* No space at all so try a "no-allocation" reservation */
resblks = 0;
- error = xfs_trans_reserve(tp, tres, 0, 0);
+ error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp);
}
if (error)
- goto out_trans_cancel;
+ goto out_release_inode;
error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
pdqp, resblks, 1, 0);
@@ -1432,15 +1428,14 @@ xfs_link(
if (error)
goto std_return;
- tp = xfs_trans_alloc(mp, XFS_TRANS_LINK);
resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, resblks, 0);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
if (error == -ENOSPC) {
resblks = 0;
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_link, 0, 0);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
}
if (error)
- goto error_return;
+ goto std_return;
xfs_ilock(tdp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
@@ -1710,11 +1705,9 @@ xfs_inactive_truncate(
struct xfs_trans *tp;
int error;
- tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
if (error) {
ASSERT(XFS_FORCED_SHUTDOWN(mp));
- xfs_trans_cancel(tp);
return error;
}
@@ -1764,8 +1757,6 @@ xfs_inactive_ifree(
struct xfs_trans *tp;
int error;
- tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
-
/*
* The ifree transaction might need to allocate blocks for record
* insertion to the finobt. We don't want to fail here at ENOSPC, so
@@ -1781,9 +1772,8 @@ xfs_inactive_ifree(
* now remains allocated and sits on the unlinked list until the fs is
* repaired.
*/
- tp->t_flags |= XFS_TRANS_RESERVE;
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ifree,
- XFS_IFREE_SPACE_RES(mp), 0);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
+ XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
if (error) {
if (error == -ENOSPC) {
xfs_warn_ratelimited(mp,
@@ -1792,7 +1782,6 @@ xfs_inactive_ifree(
} else {
ASSERT(XFS_FORCED_SHUTDOWN(mp));
}
- xfs_trans_cancel(tp);
return error;
}
@@ -2525,11 +2514,6 @@ xfs_remove(
if (error)
goto std_return;
- if (is_dir)
- tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR);
- else
- tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE);
-
/*
* We try to get the real space reservation first,
* allowing for directory btree deletion(s) implying
@@ -2540,14 +2524,15 @@ xfs_remove(
* block from the directory.
*/
resblks = XFS_REMOVE_SPACE_RES(mp);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, resblks, 0);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
if (error == -ENOSPC) {
resblks = 0;
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_remove, 0, 0);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
+ &tp);
}
if (error) {
ASSERT(error != -ENOSPC);
- goto out_trans_cancel;
+ goto std_return;
}
xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
@@ -2855,6 +2840,7 @@ xfs_rename_alloc_whiteout(
* and flag it as linkable.
*/
drop_nlink(VFS_I(tmpfile));
+ xfs_setup_iops(tmpfile);
xfs_finish_inode_setup(tmpfile);
VFS_I(tmpfile)->i_state |= I_LINKABLE;
@@ -2910,15 +2896,15 @@ xfs_rename(
xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
inodes, &num_inodes);
- tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME);
spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, spaceres, 0);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
if (error == -ENOSPC) {
spaceres = 0;
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_rename, 0, 0);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
+ &tp);
}
if (error)
- goto out_trans_cancel;
+ goto out_release_wip;
/*
* Attach the dquots to the inodes
@@ -3155,6 +3141,7 @@ out_bmap_cancel:
xfs_bmap_cancel(&free_list);
out_trans_cancel:
xfs_trans_cancel(tp);
+out_release_wip:
if (wip)
IRELE(wip);
return error;
@@ -3162,16 +3149,16 @@ out_trans_cancel:
STATIC int
xfs_iflush_cluster(
- xfs_inode_t *ip,
- xfs_buf_t *bp)
+ struct xfs_inode *ip,
+ struct xfs_buf *bp)
{
- xfs_mount_t *mp = ip->i_mount;
+ struct xfs_mount *mp = ip->i_mount;
struct xfs_perag *pag;
unsigned long first_index, mask;
unsigned long inodes_per_cluster;
- int ilist_size;
- xfs_inode_t **ilist;
- xfs_inode_t *iq;
+ int cilist_size;
+ struct xfs_inode **cilist;
+ struct xfs_inode *cip;
int nr_found;
int clcount = 0;
int bufwasdelwri;
@@ -3180,23 +3167,23 @@ xfs_iflush_cluster(
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
inodes_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
- ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
- ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
- if (!ilist)
+ cilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
+ cilist = kmem_alloc(cilist_size, KM_MAYFAIL|KM_NOFS);
+ if (!cilist)
goto out_put;
mask = ~(((mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog)) - 1);
first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
rcu_read_lock();
/* really need a gang lookup range call here */
- nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
+ nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)cilist,
first_index, inodes_per_cluster);
if (nr_found == 0)
goto out_free;
for (i = 0; i < nr_found; i++) {
- iq = ilist[i];
- if (iq == ip)
+ cip = cilist[i];
+ if (cip == ip)
continue;
/*
@@ -3205,20 +3192,30 @@ xfs_iflush_cluster(
* We need to check under the i_flags_lock for a valid inode
* here. Skip it if it is not valid or the wrong inode.
*/
- spin_lock(&ip->i_flags_lock);
- if (!ip->i_ino ||
- (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
- spin_unlock(&ip->i_flags_lock);
+ spin_lock(&cip->i_flags_lock);
+ if (!cip->i_ino ||
+ __xfs_iflags_test(cip, XFS_ISTALE)) {
+ spin_unlock(&cip->i_flags_lock);
continue;
}
- spin_unlock(&ip->i_flags_lock);
+
+ /*
+ * Once we fall off the end of the cluster, no point checking
+ * any more inodes in the list because they will also all be
+ * outside the cluster.
+ */
+ if ((XFS_INO_TO_AGINO(mp, cip->i_ino) & mask) != first_index) {
+ spin_unlock(&cip->i_flags_lock);
+ break;
+ }
+ spin_unlock(&cip->i_flags_lock);
/*
* Do an un-protected check to see if the inode is dirty and
* is a candidate for flushing. These checks will be repeated
* later after the appropriate locks are acquired.
*/
- if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
+ if (xfs_inode_clean(cip) && xfs_ipincount(cip) == 0)
continue;
/*
@@ -3226,15 +3223,28 @@ xfs_iflush_cluster(
* then this inode cannot be flushed and is skipped.
*/
- if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
+ if (!xfs_ilock_nowait(cip, XFS_ILOCK_SHARED))
+ continue;
+ if (!xfs_iflock_nowait(cip)) {
+ xfs_iunlock(cip, XFS_ILOCK_SHARED);
continue;
- if (!xfs_iflock_nowait(iq)) {
- xfs_iunlock(iq, XFS_ILOCK_SHARED);
+ }
+ if (xfs_ipincount(cip)) {
+ xfs_ifunlock(cip);
+ xfs_iunlock(cip, XFS_ILOCK_SHARED);
continue;
}
- if (xfs_ipincount(iq)) {
- xfs_ifunlock(iq);
- xfs_iunlock(iq, XFS_ILOCK_SHARED);
+
+ /*
+ * Check the inode number again, just to be certain we are not
+ * racing with freeing in xfs_reclaim_inode(). See the comments
+ * in that function for more information as to why the initial
+ * check is not sufficient.
+ */
+ if (!cip->i_ino) {
+ xfs_ifunlock(cip);
+ xfs_iunlock(cip, XFS_ILOCK_SHARED);
continue;
}
@@ -3242,18 +3252,18 @@ xfs_iflush_cluster(
* arriving here means that this inode can be flushed. First
* re-check that it's dirty before flushing.
*/
- if (!xfs_inode_clean(iq)) {
+ if (!xfs_inode_clean(cip)) {
int error;
- error = xfs_iflush_int(iq, bp);
+ error = xfs_iflush_int(cip, bp);
if (error) {
- xfs_iunlock(iq, XFS_ILOCK_SHARED);
+ xfs_iunlock(cip, XFS_ILOCK_SHARED);
goto cluster_corrupt_out;
}
clcount++;
} else {
- xfs_ifunlock(iq);
+ xfs_ifunlock(cip);
}
- xfs_iunlock(iq, XFS_ILOCK_SHARED);
+ xfs_iunlock(cip, XFS_ILOCK_SHARED);
}
if (clcount) {
@@ -3263,7 +3273,7 @@ xfs_iflush_cluster(
out_free:
rcu_read_unlock();
- kmem_free(ilist);
+ kmem_free(cilist);
out_put:
xfs_perag_put(pag);
return 0;
@@ -3306,8 +3316,8 @@ cluster_corrupt_out:
/*
* Unlocks the flush lock
*/
- xfs_iflush_abort(iq, false);
- kmem_free(ilist);
+ xfs_iflush_abort(cip, false);
+ kmem_free(cilist);
xfs_perag_put(pag);
return -EFSCORRUPTED;
}
@@ -3327,7 +3337,7 @@ xfs_iflush(
struct xfs_buf **bpp)
{
struct xfs_mount *mp = ip->i_mount;
- struct xfs_buf *bp;
+ struct xfs_buf *bp = NULL;
struct xfs_dinode *dip;
int error;
@@ -3369,14 +3379,22 @@ xfs_iflush(
}
/*
- * Get the buffer containing the on-disk inode.
+ * Get the buffer containing the on-disk inode. We are doing a try-lock
+ * operation here, so we may get an EAGAIN error. In that case, we
+ * simply want to return with the inode still dirty.
+ *
+ * If we get any other error, we effectively have a corruption situation
+ * and we cannot flush the inode, so we treat it the same as failing
+ * xfs_iflush_int().
*/
error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
0);
- if (error || !bp) {
+ if (error == -EAGAIN) {
xfs_ifunlock(ip);
return error;
}
+ if (error)
+ goto corrupt_out;
/*
* First flush out the inode that xfs_iflush was called with.
@@ -3404,7 +3422,8 @@ xfs_iflush(
return 0;
corrupt_out:
- xfs_buf_relse(bp);
+ if (bp)
+ xfs_buf_relse(bp);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
cluster_corrupt_out:
error = -EFSCORRUPTED;
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 43e1d51b15eb..e52d7c7aeb5b 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -440,6 +440,9 @@ loff_t __xfs_seek_hole_data(struct inode *inode, loff_t start,
/* from xfs_iops.c */
+extern void xfs_setup_inode(struct xfs_inode *ip);
+extern void xfs_setup_iops(struct xfs_inode *ip);
+
/*
* When setting up a newly allocated inode, we need to call
* xfs_finish_inode_setup() once the inode is fully instantiated at
@@ -447,7 +450,6 @@ loff_t __xfs_seek_hole_data(struct inode *inode, loff_t start,
* before we've completed instantiation. Otherwise we can do it
* the moment the inode lookup is complete.
*/
-extern void xfs_setup_inode(struct xfs_inode *ip);
static inline void xfs_finish_inode_setup(struct xfs_inode *ip)
{
xfs_iflags_clear(ip, XFS_INEW);
@@ -458,6 +460,7 @@ static inline void xfs_finish_inode_setup(struct xfs_inode *ip)
static inline void xfs_setup_existing_inode(struct xfs_inode *ip)
{
xfs_setup_inode(ip);
+ xfs_setup_iops(ip);
xfs_finish_inode_setup(ip);
}
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index c48b5b18d771..a1b07612224c 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -210,7 +210,7 @@ xfs_inode_item_format_data_fork(
*/
data_bytes = roundup(ip->i_df.if_bytes, 4);
ASSERT(ip->i_df.if_real_bytes == 0 ||
- ip->i_df.if_real_bytes == data_bytes);
+ ip->i_df.if_real_bytes >= data_bytes);
ASSERT(ip->i_df.if_u1.if_data != NULL);
ASSERT(ip->i_d.di_size > 0);
xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_ILOCAL,
@@ -305,7 +305,7 @@ xfs_inode_item_format_attr_fork(
*/
data_bytes = roundup(ip->i_afp->if_bytes, 4);
ASSERT(ip->i_afp->if_real_bytes == 0 ||
- ip->i_afp->if_real_bytes == data_bytes);
+ ip->i_afp->if_real_bytes >= data_bytes);
ASSERT(ip->i_afp->if_u1.if_data != NULL);
xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_LOCAL,
ip->i_afp->if_u1.if_data,
@@ -479,6 +479,8 @@ STATIC uint
xfs_inode_item_push(
struct xfs_log_item *lip,
struct list_head *buffer_list)
+ __releases(&lip->li_ailp->xa_lock)
+ __acquires(&lip->li_ailp->xa_lock)
{
struct xfs_inode_log_item *iip = INODE_ITEM(lip);
struct xfs_inode *ip = iip->ili_inode;
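__releases()/__acquires() are sparse context annotations documenting that the
function exits having dropped the AIL lock and re-taken it before returning. A
hedged minimal example of the pattern:

    static void drop_and_retake(spinlock_t *lock)
            __releases(lock) __acquires(lock)
    {
            spin_unlock(lock);
            /* ... work that must run without the lock held ... */
            spin_lock(lock);
    }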
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index bcb6c19ce3ea..dbca7375deef 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -277,7 +277,6 @@ xfs_readlink_by_handle(
{
struct dentry *dentry;
__u32 olen;
- void *link;
int error;
if (!capable(CAP_SYS_ADMIN))
@@ -288,7 +287,7 @@ xfs_readlink_by_handle(
return PTR_ERR(dentry);
/* Restrict this handle operation to symlinks only. */
- if (!d_is_symlink(dentry)) {
+ if (!d_inode(dentry)->i_op->readlink) {
error = -EINVAL;
goto out_dput;
}
@@ -298,21 +297,8 @@ xfs_readlink_by_handle(
goto out_dput;
}
- link = kmalloc(MAXPATHLEN+1, GFP_KERNEL);
- if (!link) {
- error = -ENOMEM;
- goto out_dput;
- }
-
- error = xfs_readlink(XFS_I(d_inode(dentry)), link);
- if (error)
- goto out_kfree;
- error = readlink_copy(hreq->ohandle, olen, link);
- if (error)
- goto out_kfree;
+ error = d_inode(dentry)->i_op->readlink(dentry, hreq->ohandle, olen);
- out_kfree:
- kfree(link);
out_dput:
dput(dentry);
return error;
@@ -334,12 +320,10 @@ xfs_set_dmattrs(
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
- tp = xfs_trans_alloc(mp, XFS_TRANS_SET_DMATTRS);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
- if (error) {
- xfs_trans_cancel(tp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
+ if (error)
return error;
- }
+
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
@@ -1141,10 +1125,9 @@ xfs_ioctl_setattr_get_trans(
if (XFS_FORCED_SHUTDOWN(mp))
goto out_unlock;
- tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
if (error)
- goto out_cancel;
+ return ERR_PTR(error);
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | join_flags);
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index d81bdc080370..58391355a44d 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -132,6 +132,7 @@ xfs_iomap_write_direct(
int error;
int lockmode;
int bmapi_flags = XFS_BMAPI_PREALLOC;
+ uint tflags = 0;
rt = XFS_IS_REALTIME_INODE(ip);
extsz = xfs_get_extsz_hint(ip);
@@ -192,11 +193,6 @@ xfs_iomap_write_direct(
return error;
/*
- * Allocate and setup the transaction
- */
- tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
-
- /*
* For DAX, we do not allocate unwritten extents, but instead we zero
* the block before we commit the transaction. Ideally we'd like to do
* this outside the transaction context, but if we commit and then crash
@@ -209,23 +205,17 @@ xfs_iomap_write_direct(
* the reserve block pool for bmbt block allocation if there is no space
* left but we need to do unwritten extent conversion.
*/
-
if (IS_DAX(VFS_I(ip))) {
bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
if (ISUNWRITTEN(imap)) {
- tp->t_flags |= XFS_TRANS_RESERVE;
+ tflags |= XFS_TRANS_RESERVE;
resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
}
}
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
- resblks, resrtextents);
- /*
- * Check for running out of space, note: need lock to return
- */
- if (error) {
- xfs_trans_cancel(tp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
+ tflags, &tp);
+ if (error)
return error;
- }
lockmode = XFS_ILOCK_EXCL;
xfs_ilock(ip, lockmode);
@@ -726,15 +716,13 @@ xfs_iomap_write_allocate(
nimaps = 0;
while (nimaps == 0) {
- tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
- tp->t_flags |= XFS_TRANS_RESERVE;
nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
- nres, 0);
- if (error) {
- xfs_trans_cancel(tp);
+
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, nres,
+ 0, XFS_TRANS_RESERVE, &tp);
+ if (error)
return error;
- }
+
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
@@ -878,25 +866,18 @@ xfs_iomap_write_unwritten(
do {
/*
- * set up a transaction to convert the range of extents
+ * Set up a transaction to convert the range of extents
* from unwritten to real. Do allocations in a loop until
* we have covered the range passed in.
*
- * Note that we open code the transaction allocation here
- * to pass KM_NOFS--we can't risk to recursing back into
- * the filesystem here as we might be asked to write out
- * the same inode that we complete here and might deadlock
- * on the iolock.
+		 * Note that we can't risk recursing back into the filesystem
+ * here as we might be asked to write out the same inode that we
+ * complete here and might deadlock on the iolock.
*/
- sb_start_intwrite(mp->m_super);
- tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS);
- tp->t_flags |= XFS_TRANS_RESERVE | XFS_TRANS_FREEZE_PROT;
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_write,
- resblks, 0);
- if (error) {
- xfs_trans_cancel(tp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
+ XFS_TRANS_RESERVE | XFS_TRANS_NOFS, &tp);
+ if (error)
return error;
- }
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index fb7dc61f4a29..c5d4eba6972e 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -181,6 +181,8 @@ xfs_generic_create(
}
#endif
+ xfs_setup_iops(ip);
+
if (tmpfile)
d_tmpfile(dentry, inode);
else
@@ -368,6 +370,8 @@ xfs_vn_symlink(
if (unlikely(error))
goto out_cleanup_inode;
+ xfs_setup_iops(cip);
+
d_instantiate(dentry, inode);
xfs_finish_inode_setup(cip);
return 0;
@@ -442,6 +446,16 @@ xfs_vn_get_link(
return ERR_PTR(error);
}
+STATIC const char *
+xfs_vn_get_link_inline(
+ struct dentry *dentry,
+ struct inode *inode,
+ struct delayed_call *done)
+{
+ ASSERT(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE);
+ return XFS_I(inode)->i_df.if_u1.if_data;
+}
+
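Returning the fork data directly works because the string shares the inode's
lifetime, so no cleanup has to be scheduled through @done. Contrast with the
allocating path in xfs_vn_get_link() (hedged from surrounding code not shown
in this hunk), which must register a release:

    set_delayed_call(done, kfree_link, link);   /* allocating variant only */
    return link;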
STATIC int
xfs_vn_getattr(
struct vfsmount *mnt,
@@ -599,12 +613,12 @@ xfs_setattr_nonsize(
return error;
}
- tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
if (error)
- goto out_trans_cancel;
+ goto out_dqrele;
xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, 0);
/*
* Change file ownership. Must be the owner or privileged.
@@ -633,12 +647,10 @@ xfs_setattr_nonsize(
NULL, capable(CAP_FOWNER) ?
XFS_QMOPT_FORCE_RES : 0);
if (error) /* out of quota */
- goto out_unlock;
+ goto out_cancel;
}
}
- xfs_trans_ijoin(tp, ip, 0);
-
/*
* Change file ownership. Must be the owner or privileged.
*/
@@ -722,10 +734,9 @@ xfs_setattr_nonsize(
return 0;
-out_unlock:
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
-out_trans_cancel:
+out_cancel:
xfs_trans_cancel(tp);
+out_dqrele:
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
return error;
@@ -834,7 +845,7 @@ xfs_setattr_size(
* We have to do all the page cache truncate work outside the
* transaction context as the "lock" order is page lock->log space
* reservation as defined by extent allocation in the writeback path.
- * Hence a truncate can fail with ENOMEM from xfs_trans_reserve(), but
+ * Hence a truncate can fail with ENOMEM from xfs_trans_alloc(), but
* having already truncated the in-memory version of the file (i.e. made
* user visible changes). There's not much we can do about this, except
* to hope that the caller sees ENOMEM and retries the truncate
@@ -849,10 +860,9 @@ xfs_setattr_size(
return error;
truncate_setsize(inode, newsize);
- tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
if (error)
- goto out_trans_cancel;
+ return error;
lock_flags |= XFS_ILOCK_EXCL;
xfs_ilock(ip, XFS_ILOCK_EXCL);
@@ -971,12 +981,9 @@ xfs_vn_update_time(
trace_xfs_update_time(ip);
- tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
- if (error) {
- xfs_trans_cancel(tp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
+ if (error)
return error;
- }
xfs_ilock(ip, XFS_ILOCK_EXCL);
if (flags & S_CTIME)
@@ -1167,6 +1174,18 @@ static const struct inode_operations xfs_symlink_inode_operations = {
.update_time = xfs_vn_update_time,
};
+static const struct inode_operations xfs_inline_symlink_inode_operations = {
+ .readlink = generic_readlink,
+ .get_link = xfs_vn_get_link_inline,
+ .getattr = xfs_vn_getattr,
+ .setattr = xfs_vn_setattr,
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .removexattr = generic_removexattr,
+ .listxattr = xfs_vn_listxattr,
+ .update_time = xfs_vn_update_time,
+};
+
STATIC void
xfs_diflags_to_iflags(
struct inode *inode,
@@ -1193,7 +1212,7 @@ xfs_diflags_to_iflags(
}
/*
- * Initialize the Linux inode and set up the operation vectors.
+ * Initialize the Linux inode.
*
* When reading existing inodes from disk this is called directly from xfs_iget,
* when creating a new inode it is called from xfs_ialloc after setting up the
@@ -1232,32 +1251,12 @@ xfs_setup_inode(
i_size_write(inode, ip->i_d.di_size);
xfs_diflags_to_iflags(inode, ip);
- ip->d_ops = ip->i_mount->m_nondir_inode_ops;
- lockdep_set_class(&ip->i_lock.mr_lock, &xfs_nondir_ilock_class);
- switch (inode->i_mode & S_IFMT) {
- case S_IFREG:
- inode->i_op = &xfs_inode_operations;
- inode->i_fop = &xfs_file_operations;
- inode->i_mapping->a_ops = &xfs_address_space_operations;
- break;
- case S_IFDIR:
+ if (S_ISDIR(inode->i_mode)) {
lockdep_set_class(&ip->i_lock.mr_lock, &xfs_dir_ilock_class);
- if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb))
- inode->i_op = &xfs_dir_ci_inode_operations;
- else
- inode->i_op = &xfs_dir_inode_operations;
- inode->i_fop = &xfs_dir_file_operations;
ip->d_ops = ip->i_mount->m_dir_inode_ops;
- break;
- case S_IFLNK:
- inode->i_op = &xfs_symlink_inode_operations;
- if (!(ip->i_df.if_flags & XFS_IFINLINE))
- inode->i_mapping->a_ops = &xfs_address_space_operations;
- break;
- default:
- inode->i_op = &xfs_inode_operations;
- init_special_inode(inode, inode->i_mode, inode->i_rdev);
- break;
+ } else {
+ ip->d_ops = ip->i_mount->m_nondir_inode_ops;
+ lockdep_set_class(&ip->i_lock.mr_lock, &xfs_nondir_ilock_class);
}
/*
@@ -1277,3 +1276,35 @@ xfs_setup_inode(
cache_no_acl(inode);
}
}
+
+void
+xfs_setup_iops(
+ struct xfs_inode *ip)
+{
+ struct inode *inode = &ip->i_vnode;
+
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFREG:
+ inode->i_op = &xfs_inode_operations;
+ inode->i_fop = &xfs_file_operations;
+ inode->i_mapping->a_ops = &xfs_address_space_operations;
+ break;
+ case S_IFDIR:
+ if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb))
+ inode->i_op = &xfs_dir_ci_inode_operations;
+ else
+ inode->i_op = &xfs_dir_inode_operations;
+ inode->i_fop = &xfs_dir_file_operations;
+ break;
+ case S_IFLNK:
+ if (ip->i_df.if_flags & XFS_IFINLINE)
+ inode->i_op = &xfs_inline_symlink_inode_operations;
+ else
+ inode->i_op = &xfs_symlink_inode_operations;
+ break;
+ default:
+ inode->i_op = &xfs_inode_operations;
+ init_special_inode(inode, inode->i_mode, inode->i_rdev);
+ break;
+ }
+}
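The ordering above matters: xfs_setup_iops() must install the i_op/i_fop vectors before d_tmpfile()/d_instantiate() publish the inode to the dcache. A minimal user-space sketch of that publish-after-init pattern (all names below are illustrative, not from the patch):

#include <stdio.h>

/*
 * Illustrative mock: install an object's operation vector before the
 * object is made visible, the way xfs_setup_iops() now runs before
 * d_tmpfile()/d_instantiate() in the hunks above.
 */
struct object;
struct ops { const char *(*get_link)(const struct object *o); };
struct object { const struct ops *op; const char *inline_data; };

static const char *inline_link(const struct object *o)
{
	return o->inline_data;		/* like xfs_vn_get_link_inline() */
}
static const struct ops inline_ops = { .get_link = inline_link };

static const struct object *published;	/* what other lookups would see */

int main(void)
{
	static struct object obj = { .inline_data = "target" };

	obj.op = &inline_ops;		/* set up the ops first... */
	published = &obj;		/* ...then make the object visible */
	printf("%s\n", published->op->get_link(published));
	return 0;
}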
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index b49ccf5c1d75..bde02f1fba73 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -435,8 +435,7 @@ xfs_log_reserve(
int cnt,
struct xlog_ticket **ticp,
__uint8_t client,
- bool permanent,
- uint t_type)
+ bool permanent)
{
struct xlog *log = mp->m_log;
struct xlog_ticket *tic;
@@ -456,7 +455,6 @@ xfs_log_reserve(
if (!tic)
return -ENOMEM;
- tic->t_trans_type = t_type;
*ticp = tic;
xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
@@ -823,8 +821,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
} while (iclog != first_iclog);
#endif
if (! (XLOG_FORCED_SHUTDOWN(log))) {
- error = xfs_log_reserve(mp, 600, 1, &tic,
- XFS_LOG, 0, XLOG_UNMOUNT_REC_TYPE);
+ error = xfs_log_reserve(mp, 600, 1, &tic, XFS_LOG, 0);
if (!error) {
/* the data section must be 32 bit size aligned */
struct {
@@ -2032,58 +2029,8 @@ xlog_print_tic_res(
REG_TYPE_STR(ICREATE, "inode create")
};
#undef REG_TYPE_STR
-#define TRANS_TYPE_STR(type) [XFS_TRANS_##type] = #type
- static char *trans_type_str[XFS_TRANS_TYPE_MAX] = {
- TRANS_TYPE_STR(SETATTR_NOT_SIZE),
- TRANS_TYPE_STR(SETATTR_SIZE),
- TRANS_TYPE_STR(INACTIVE),
- TRANS_TYPE_STR(CREATE),
- TRANS_TYPE_STR(CREATE_TRUNC),
- TRANS_TYPE_STR(TRUNCATE_FILE),
- TRANS_TYPE_STR(REMOVE),
- TRANS_TYPE_STR(LINK),
- TRANS_TYPE_STR(RENAME),
- TRANS_TYPE_STR(MKDIR),
- TRANS_TYPE_STR(RMDIR),
- TRANS_TYPE_STR(SYMLINK),
- TRANS_TYPE_STR(SET_DMATTRS),
- TRANS_TYPE_STR(GROWFS),
- TRANS_TYPE_STR(STRAT_WRITE),
- TRANS_TYPE_STR(DIOSTRAT),
- TRANS_TYPE_STR(WRITEID),
- TRANS_TYPE_STR(ADDAFORK),
- TRANS_TYPE_STR(ATTRINVAL),
- TRANS_TYPE_STR(ATRUNCATE),
- TRANS_TYPE_STR(ATTR_SET),
- TRANS_TYPE_STR(ATTR_RM),
- TRANS_TYPE_STR(ATTR_FLAG),
- TRANS_TYPE_STR(CLEAR_AGI_BUCKET),
- TRANS_TYPE_STR(SB_CHANGE),
- TRANS_TYPE_STR(DUMMY1),
- TRANS_TYPE_STR(DUMMY2),
- TRANS_TYPE_STR(QM_QUOTAOFF),
- TRANS_TYPE_STR(QM_DQALLOC),
- TRANS_TYPE_STR(QM_SETQLIM),
- TRANS_TYPE_STR(QM_DQCLUSTER),
- TRANS_TYPE_STR(QM_QINOCREATE),
- TRANS_TYPE_STR(QM_QUOTAOFF_END),
- TRANS_TYPE_STR(FSYNC_TS),
- TRANS_TYPE_STR(GROWFSRT_ALLOC),
- TRANS_TYPE_STR(GROWFSRT_ZERO),
- TRANS_TYPE_STR(GROWFSRT_FREE),
- TRANS_TYPE_STR(SWAPEXT),
- TRANS_TYPE_STR(CHECKPOINT),
- TRANS_TYPE_STR(ICREATE),
- TRANS_TYPE_STR(CREATE_TMPFILE)
- };
-#undef TRANS_TYPE_STR
xfs_warn(mp, "xlog_write: reservation summary:");
- xfs_warn(mp, " trans type = %s (%u)",
- ((ticket->t_trans_type <= 0 ||
- ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ?
- "bad-trans-type" : trans_type_str[ticket->t_trans_type]),
- ticket->t_trans_type);
xfs_warn(mp, " unit res = %d bytes",
ticket->t_unit_res);
xfs_warn(mp, " current res = %d bytes",
@@ -3378,7 +3325,7 @@ xfs_log_force(
{
int error;
- trace_xfs_log_force(mp, 0);
+ trace_xfs_log_force(mp, 0, _RET_IP_);
error = _xfs_log_force(mp, flags, NULL);
if (error)
xfs_warn(mp, "%s: error %d returned.", __func__, error);
@@ -3527,7 +3474,7 @@ xfs_log_force_lsn(
{
int error;
- trace_xfs_log_force(mp, lsn);
+ trace_xfs_log_force(mp, lsn, _RET_IP_);
error = _xfs_log_force_lsn(mp, lsn, flags, NULL);
if (error)
xfs_warn(mp, "%s: error %d returned.", __func__, error);
@@ -3709,7 +3656,6 @@ xlog_ticket_alloc(
tic->t_tid = prandom_u32();
tic->t_clientid = client;
tic->t_flags = XLOG_TIC_INITED;
- tic->t_trans_type = 0;
if (permanent)
tic->t_flags |= XLOG_TIC_PERM_RESERV;
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index aa533a7d50f2..80ba0c047090 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -161,8 +161,7 @@ int xfs_log_reserve(struct xfs_mount *mp,
int count,
struct xlog_ticket **ticket,
__uint8_t clientid,
- bool permanent,
- uint t_type);
+ bool permanent);
int xfs_log_regrant(struct xfs_mount *mp, struct xlog_ticket *tic);
int xfs_log_unmount_write(struct xfs_mount *mp);
void xfs_log_unmount(struct xfs_mount *mp);
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 4e7649351f5a..5e54e7955ea6 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -51,7 +51,6 @@ xlog_cil_ticket_alloc(
tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0,
KM_SLEEP|KM_NOFS);
- tic->t_trans_type = XFS_TRANS_CHECKPOINT;
/*
* set the current reservation to zero so we know to steal the basic
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index ed8896310c00..765f084759b5 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -175,7 +175,6 @@ typedef struct xlog_ticket {
char t_cnt; /* current count : 1 */
char t_clientid; /* who does this belong to; : 1 */
char t_flags; /* properties of reservation : 1 */
- uint t_trans_type; /* transaction type : 4 */
/* reservation array fields */
uint t_res_num; /* num in array : 4 */
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 396565f43247..835997843846 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -3843,7 +3843,7 @@ xlog_recover_add_to_cont_trans(
old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
old_len = item->ri_buf[item->ri_cnt-1].i_len;
- ptr = kmem_realloc(old_ptr, len+old_len, old_len, KM_SLEEP);
+ ptr = kmem_realloc(old_ptr, len + old_len, KM_SLEEP);
memcpy(&ptr[old_len], dp, len);
item->ri_buf[item->ri_cnt-1].i_len += len;
item->ri_buf[item->ri_cnt-1].i_addr = ptr;
@@ -4205,10 +4205,9 @@ xlog_recover_process_efi(
}
}
- tp = xfs_trans_alloc(mp, 0);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
if (error)
- goto abort_error;
+ return error;
efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
for (i = 0; i < efip->efi_format.efi_nextents; i++) {
@@ -4355,10 +4354,9 @@ xlog_recover_clear_agi_bucket(
int offset;
int error;
- tp = xfs_trans_alloc(mp, XFS_TRANS_CLEAR_AGI_BUCKET);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_clearagi, 0, 0);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
if (error)
- goto out_abort;
+ goto out_error;
error = xfs_read_agi(mp, tp, agno, &agibp);
if (error)
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index cfd4210dd015..e39b02351b4a 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -89,7 +89,6 @@ xfs_uuid_mount(
if (hole < 0) {
xfs_uuid_table = kmem_realloc(xfs_uuid_table,
(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
- xfs_uuid_table_size * sizeof(*xfs_uuid_table),
KM_SLEEP);
hole = xfs_uuid_table_size++;
}
@@ -681,6 +680,9 @@ xfs_mountfs(
xfs_set_maxicount(mp);
+ /* enable fail_at_unmount by default */
+ mp->m_fail_unmount = 1;
+
error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname);
if (error)
goto out;
@@ -690,10 +692,15 @@ xfs_mountfs(
if (error)
goto out_remove_sysfs;
- error = xfs_uuid_mount(mp);
+ error = xfs_error_sysfs_init(mp);
if (error)
goto out_del_stats;
+
+ error = xfs_uuid_mount(mp);
+ if (error)
+ goto out_remove_error_sysfs;
+
/*
* Set the minimum read and write sizes
*/
@@ -957,6 +964,7 @@ xfs_mountfs(
cancel_delayed_work_sync(&mp->m_reclaim_work);
xfs_reclaim_inodes(mp, SYNC_WAIT);
out_log_dealloc:
+ mp->m_flags |= XFS_MOUNT_UNMOUNTING;
xfs_log_mount_cancel(mp);
out_fail_wait:
if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
@@ -968,6 +976,8 @@ xfs_mountfs(
xfs_da_unmount(mp);
out_remove_uuid:
xfs_uuid_unmount(mp);
+ out_remove_error_sysfs:
+ xfs_error_sysfs_del(mp);
out_del_stats:
xfs_sysfs_del(&mp->m_stats.xs_kobj);
out_remove_sysfs:
@@ -1006,6 +1016,14 @@ xfs_unmountfs(
xfs_log_force(mp, XFS_LOG_SYNC);
/*
+ * We now need to tell the world we are unmounting, so that we can
+ * detect that the filesystem is going away and error out anything
+ * that we have been retrying in the background. This prevents
+ * never-ending retries in AIL pushing from hanging the unmount.
+ */
+ mp->m_flags |= XFS_MOUNT_UNMOUNTING;
+
+ /*
* Flush all pending changes from the AIL.
*/
xfs_ail_push_all_sync(mp->m_ail);
@@ -1056,6 +1074,7 @@ xfs_unmountfs(
#endif
xfs_free_perag(mp);
+ xfs_error_sysfs_del(mp);
xfs_sysfs_del(&mp->m_stats.xs_kobj);
xfs_sysfs_del(&mp->m_kobj);
}
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index eafe257b357a..c1b798c72126 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -37,6 +37,32 @@ enum {
XFS_LOWSP_MAX,
};
+/*
+ * Error Configuration
+ *
+ * Error classes define the subsystem the configuration belongs to.
+ * Error numbers define the errors that are configurable.
+ */
+enum {
+ XFS_ERR_METADATA,
+ XFS_ERR_CLASS_MAX,
+};
+enum {
+ XFS_ERR_DEFAULT,
+ XFS_ERR_EIO,
+ XFS_ERR_ENOSPC,
+ XFS_ERR_ENODEV,
+ XFS_ERR_ERRNO_MAX,
+};
+
+#define XFS_ERR_RETRY_FOREVER -1
+
+struct xfs_error_cfg {
+ struct xfs_kobj kobj;
+ int max_retries;
+ unsigned long retry_timeout; /* in jiffies, 0 = no timeout */
+};
+
typedef struct xfs_mount {
struct super_block *m_super;
xfs_tid_t m_tid; /* next unused tid for fs */
@@ -127,6 +153,9 @@ typedef struct xfs_mount {
int64_t m_low_space[XFS_LOWSP_MAX];
/* low free space thresholds */
struct xfs_kobj m_kobj;
+ struct xfs_kobj m_error_kobj;
+ struct xfs_kobj m_error_meta_kobj;
+ struct xfs_error_cfg m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX];
struct xstats m_stats; /* per-fs stats */
struct workqueue_struct *m_buf_workqueue;
@@ -148,6 +177,7 @@ typedef struct xfs_mount {
*/
__uint32_t m_generation;
+ bool m_fail_unmount;
#ifdef DEBUG
/*
* DEBUG mode instrumentation to test and/or trigger delayed allocation
@@ -166,6 +196,7 @@ typedef struct xfs_mount {
#define XFS_MOUNT_WSYNC (1ULL << 0) /* for nfs - all metadata ops
must be synchronous except
for space allocations */
+#define XFS_MOUNT_UNMOUNTING (1ULL << 1) /* filesystem is unmounting */
#define XFS_MOUNT_WAS_CLEAN (1ULL << 3)
#define XFS_MOUNT_FS_SHUTDOWN (1ULL << 4) /* atomic stop of all filesystem
operations, typically for
@@ -364,4 +395,7 @@ extern void xfs_set_low_space_thresholds(struct xfs_mount *);
int xfs_zero_extent(struct xfs_inode *ip, xfs_fsblock_t start_fsb,
xfs_off_t count_fsb);
+struct xfs_error_cfg * xfs_error_get_cfg(struct xfs_mount *mp,
+ int error_class, int error);
+
#endif /* __XFS_MOUNT_H__ */
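The new m_error_cfg member is a two-dimensional table indexed first by error class, then by configurable errno. A self-contained mock of that indexing, assuming the defaults that xfs_error_meta_init sets up later in this patch:

#include <stdio.h>

/*
 * Mock of how the 2-D m_error_cfg table is indexed; the real accessor,
 * xfs_error_get_cfg(), is added later in this patch.
 */
enum { XFS_ERR_METADATA, XFS_ERR_CLASS_MAX };
enum { XFS_ERR_DEFAULT, XFS_ERR_EIO, XFS_ERR_ENOSPC, XFS_ERR_ENODEV,
       XFS_ERR_ERRNO_MAX };
#define XFS_ERR_RETRY_FOREVER	-1

struct xfs_error_cfg { int max_retries; unsigned long retry_timeout; };

static struct xfs_error_cfg cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX] = {
	[XFS_ERR_METADATA] = {
		[XFS_ERR_DEFAULT] = { XFS_ERR_RETRY_FOREVER, 0 },
		[XFS_ERR_EIO]	  = { XFS_ERR_RETRY_FOREVER, 0 },
		[XFS_ERR_ENOSPC]  = { XFS_ERR_RETRY_FOREVER, 0 },
		[XFS_ERR_ENODEV]  = { 0, 0 },	/* fail fast: device is gone */
	},
};

int main(void)
{
	printf("metadata ENODEV max_retries = %d\n",
	       cfg[XFS_ERR_METADATA][XFS_ERR_ENODEV].max_retries);
	return 0;
}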
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index 51ddaf2c2b8c..d5b756669fb5 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -308,12 +308,9 @@ xfs_fs_commit_blocks(
goto out_drop_iolock;
}
- tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
- if (error) {
- xfs_trans_cancel(tp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
+ if (error)
goto out_drop_iolock;
- }
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index be125e1758c1..a60d9e2739d1 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -783,13 +783,10 @@ xfs_qm_qino_alloc(
}
}
- tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_create,
- XFS_QM_QINOCREATE_SPACE_RES(mp), 0);
- if (error) {
- xfs_trans_cancel(tp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
+ XFS_QM_QINOCREATE_SPACE_RES(mp), 0, 0, &tp);
+ if (error)
return error;
- }
if (need_alloc) {
error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index f4d0e0a8f517..475a3882a81f 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -236,10 +236,8 @@ xfs_qm_scall_trunc_qfile(
xfs_ilock(ip, XFS_IOLOCK_EXCL);
- tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
if (error) {
- xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
goto out_put;
}
@@ -436,12 +434,9 @@ xfs_qm_scall_setqlim(
defq = xfs_get_defquota(dqp, q);
xfs_dqunlock(dqp);
- tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_setqlim, 0, 0);
- if (error) {
- xfs_trans_cancel(tp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_setqlim, 0, 0, 0, &tp);
+ if (error)
goto out_rele;
- }
xfs_dqlock(dqp);
xfs_trans_dqjoin(tp, dqp);
@@ -569,13 +564,9 @@ xfs_qm_log_quotaoff_end(
int error;
xfs_qoff_logitem_t *qoffi;
- tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END);
-
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_equotaoff, 0, 0);
- if (error) {
- xfs_trans_cancel(tp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_equotaoff, 0, 0, 0, &tp);
+ if (error)
return error;
- }
qoffi = xfs_trans_get_qoff_item(tp, startqoff,
flags & XFS_ALL_QUOTA_ACCT);
@@ -603,12 +594,9 @@ xfs_qm_log_quotaoff(
*qoffstartp = NULL;
- tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_quotaoff, 0, 0);
- if (error) {
- xfs_trans_cancel(tp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_quotaoff, 0, 0, 0, &tp);
+ if (error)
goto out;
- }
qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
xfs_trans_log_quotaoff_item(tp, qoffi);
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index abf44435d04a..3938b37d1043 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -780,15 +780,14 @@ xfs_growfs_rt_alloc(
* Allocate space to the file, as necessary.
*/
while (oblocks < nblocks) {
- tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ALLOC);
resblks = XFS_GROWFSRT_SPACE_RES(mp, nblocks - oblocks);
/*
* Reserve space & log for one extent added to the file.
*/
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growrtalloc,
- resblks, 0);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growrtalloc, resblks,
+ 0, 0, &tp);
if (error)
- goto out_trans_cancel;
+ return error;
/*
* Lock the inode.
*/
@@ -823,14 +822,13 @@ xfs_growfs_rt_alloc(
for (bno = map.br_startoff, fsbno = map.br_startblock;
bno < map.br_startoff + map.br_blockcount;
bno++, fsbno++) {
- tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_ZERO);
/*
* Reserve log for one block zeroing.
*/
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growrtzero,
- 0, 0);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growrtzero,
+ 0, 0, 0, &tp);
if (error)
- goto out_trans_cancel;
+ return error;
/*
* Lock the bitmap inode.
*/
@@ -994,11 +992,10 @@ xfs_growfs_rt(
/*
* Start a transaction, get the log reservation.
*/
- tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFSRT_FREE);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_growrtfree,
- 0, 0);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growrtfree, 0, 0, 0,
+ &tp);
if (error)
- goto error_cancel;
+ break;
/*
* Lock out other callers by grabbing the bitmap inode lock.
*/
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 187e14b696c2..11ea5d51db56 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -58,8 +58,7 @@
#include <linux/parser.h>
static const struct super_operations xfs_super_operations;
-static kmem_zone_t *xfs_ioend_zone;
-mempool_t *xfs_ioend_pool;
+struct bio_set *xfs_ioend_bioset;
static struct kset *xfs_kset; /* top-level xfs sysfs dir */
#ifdef DEBUG
@@ -350,6 +349,7 @@ xfs_parseargs(
case Opt_pqnoenforce:
mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
mp->m_qflags &= ~XFS_PQUOTA_ENFD;
+ break;
case Opt_gquota:
case Opt_grpquota:
mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
@@ -928,7 +928,7 @@ xfs_fs_alloc_inode(
/*
* Now that the generic code is guaranteed not to be accessing
- * the linux inode, we can reclaim the inode.
+ * the linux inode, we can inactivate and reclaim the inode.
*/
STATIC void
xfs_fs_destroy_inode(
@@ -938,9 +938,14 @@ xfs_fs_destroy_inode(
trace_xfs_destroy_inode(ip);
- XFS_STATS_INC(ip->i_mount, vn_reclaim);
+ ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
+ XFS_STATS_INC(ip->i_mount, vn_rele);
+ XFS_STATS_INC(ip->i_mount, vn_remove);
+
+ xfs_inactive(ip);
ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
+ XFS_STATS_INC(ip->i_mount, vn_reclaim);
/*
* We should never get here with one of the reclaim flags already set.
@@ -987,24 +992,6 @@ xfs_fs_inode_init_once(
"xfsino", ip->i_ino);
}
-STATIC void
-xfs_fs_evict_inode(
- struct inode *inode)
-{
- xfs_inode_t *ip = XFS_I(inode);
-
- ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
-
- trace_xfs_evict_inode(ip);
-
- truncate_inode_pages_final(&inode->i_data);
- clear_inode(inode);
- XFS_STATS_INC(ip->i_mount, vn_rele);
- XFS_STATS_INC(ip->i_mount, vn_remove);
-
- xfs_inactive(ip);
-}
-
/*
* We do an unlocked check for XFS_IDONTCACHE here because we are already
* serialised against cache hits here via the inode->i_lock and igrab() in
@@ -1276,6 +1263,16 @@ xfs_fs_remount(
return -EINVAL;
}
+ if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
+ xfs_sb_has_ro_compat_feature(sbp,
+ XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
+ xfs_warn(mp,
+"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
+ (sbp->sb_features_ro_compat &
+ XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
+ return -EINVAL;
+ }
+
mp->m_flags &= ~XFS_MOUNT_RDONLY;
/*
@@ -1558,14 +1555,12 @@ xfs_fs_fill_super(
if (mp->m_flags & XFS_MOUNT_DAX) {
xfs_warn(mp,
- "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
- if (sb->s_blocksize != PAGE_SIZE) {
- xfs_alert(mp,
- "Filesystem block size invalid for DAX Turning DAX off.");
- mp->m_flags &= ~XFS_MOUNT_DAX;
- } else if (!sb->s_bdev->bd_disk->fops->direct_access) {
+ "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
+
+ error = bdev_dax_supported(sb, sb->s_blocksize);
+ if (error) {
xfs_alert(mp,
- "Block device does not support DAX Turning DAX off.");
+ "DAX unsupported by block device. Turning off DAX.");
mp->m_flags &= ~XFS_MOUNT_DAX;
}
}
@@ -1663,7 +1658,6 @@ xfs_fs_free_cached_objects(
static const struct super_operations xfs_super_operations = {
.alloc_inode = xfs_fs_alloc_inode,
.destroy_inode = xfs_fs_destroy_inode,
- .evict_inode = xfs_fs_evict_inode,
.drop_inode = xfs_fs_drop_inode,
.put_super = xfs_fs_put_super,
.sync_fs = xfs_fs_sync_fs,
@@ -1688,20 +1682,15 @@ MODULE_ALIAS_FS("xfs");
STATIC int __init
xfs_init_zones(void)
{
-
- xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
- if (!xfs_ioend_zone)
+ xfs_ioend_bioset = bioset_create(4 * MAX_BUF_PER_PAGE,
+ offsetof(struct xfs_ioend, io_inline_bio));
+ if (!xfs_ioend_bioset)
goto out;
- xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
- xfs_ioend_zone);
- if (!xfs_ioend_pool)
- goto out_destroy_ioend_zone;
-
xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
"xfs_log_ticket");
if (!xfs_log_ticket_zone)
- goto out_destroy_ioend_pool;
+ goto out_free_ioend_bioset;
xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t),
"xfs_bmap_free_item");
@@ -1797,10 +1786,8 @@ xfs_init_zones(void)
kmem_zone_destroy(xfs_bmap_free_item_zone);
out_destroy_log_ticket_zone:
kmem_zone_destroy(xfs_log_ticket_zone);
- out_destroy_ioend_pool:
- mempool_destroy(xfs_ioend_pool);
- out_destroy_ioend_zone:
- kmem_zone_destroy(xfs_ioend_zone);
+ out_free_ioend_bioset:
+ bioset_free(xfs_ioend_bioset);
out:
return -ENOMEM;
}
@@ -1826,9 +1813,7 @@ xfs_destroy_zones(void)
kmem_zone_destroy(xfs_btree_cur_zone);
kmem_zone_destroy(xfs_bmap_free_item_zone);
kmem_zone_destroy(xfs_log_ticket_zone);
- mempool_destroy(xfs_ioend_pool);
- kmem_zone_destroy(xfs_ioend_zone);
-
+ bioset_free(xfs_ioend_bioset);
}
STATIC int __init
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index b44284c1adda..08a46c6181fd 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -131,6 +131,8 @@ xfs_readlink(
trace_xfs_readlink(ip);
+ ASSERT(!(ip->i_df.if_flags & XFS_IFINLINE));
+
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
@@ -150,12 +152,7 @@ xfs_readlink(
}
- if (ip->i_df.if_flags & XFS_IFINLINE) {
- memcpy(link, ip->i_df.if_u1.if_data, pathlen);
- link[pathlen] = '\0';
- } else {
- error = xfs_readlink_bmap(ip, link);
- }
+ error = xfs_readlink_bmap(ip, link);
out:
xfs_iunlock(ip, XFS_ILOCK_SHARED);
@@ -221,7 +218,6 @@ xfs_symlink(
if (error)
return error;
- tp = xfs_trans_alloc(mp, XFS_TRANS_SYMLINK);
/*
* The symlink will fit into the inode data fork?
* There can't be any attributes so we get the whole variable part.
@@ -231,13 +227,15 @@ xfs_symlink(
else
fs_blocks = xfs_symlink_blocks(mp, pathlen);
resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_symlink, resblks, 0);
+
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, resblks, 0, 0, &tp);
if (error == -ENOSPC && fs_blocks == 0) {
resblks = 0;
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_symlink, 0, 0);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, 0, 0, 0,
+ &tp);
}
if (error)
- goto out_trans_cancel;
+ goto out_release_inode;
xfs_ilock(dp, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL |
XFS_IOLOCK_PARENT | XFS_ILOCK_PARENT);
@@ -302,19 +300,11 @@ xfs_symlink(
* If the symlink will fit into the inode, write it inline.
*/
if (pathlen <= XFS_IFORK_DSIZE(ip)) {
- xfs_idata_realloc(ip, pathlen, XFS_DATA_FORK);
- memcpy(ip->i_df.if_u1.if_data, target_path, pathlen);
- ip->i_d.di_size = pathlen;
-
- /*
- * The inode was initially created in extent format.
- */
- ip->i_df.if_flags &= ~(XFS_IFEXTENTS | XFS_IFBROOT);
- ip->i_df.if_flags |= XFS_IFINLINE;
+ xfs_init_local_fork(ip, XFS_DATA_FORK, target_path, pathlen);
+ ip->i_d.di_size = pathlen;
ip->i_d.di_format = XFS_DINODE_FMT_LOCAL;
xfs_trans_log_inode(tp, ip, XFS_ILOG_DDATA | XFS_ILOG_CORE);
-
} else {
int offset;
@@ -455,12 +445,9 @@ xfs_inactive_symlink_rmt(
*/
ASSERT(ip->i_d.di_nextents > 0 && ip->i_d.di_nextents <= 2);
- tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
- error = xfs_trans_reserve(tp, &M_RES(mp)->tr_itruncate, 0, 0);
- if (error) {
- xfs_trans_cancel(tp);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+ if (error)
return error;
- }
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
diff --git a/fs/xfs/xfs_sysfs.c b/fs/xfs/xfs_sysfs.c
index 6ced4f143494..4c2c55086208 100644
--- a/fs/xfs/xfs_sysfs.c
+++ b/fs/xfs/xfs_sysfs.c
@@ -17,10 +17,11 @@
*/
#include "xfs.h"
-#include "xfs_sysfs.h"
+#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
+#include "xfs_sysfs.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_stats.h"
@@ -362,3 +363,291 @@ struct kobj_type xfs_log_ktype = {
.sysfs_ops = &xfs_sysfs_ops,
.default_attrs = xfs_log_attrs,
};
+
+/*
+ * Metadata IO error configuration
+ *
+ * The sysfs structure here is:
+ * ...xfs/<dev>/error/<class>/<errno>/<error_attrs>
+ *
+ * where <class> allows us to discriminate between data IO and metadata IO,
+ * and any other future type of IO (e.g. special inode or directory error
+ * handling) we care to support.
+ */
+static inline struct xfs_error_cfg *
+to_error_cfg(struct kobject *kobject)
+{
+ struct xfs_kobj *kobj = to_kobj(kobject);
+ return container_of(kobj, struct xfs_error_cfg, kobj);
+}
+
+static inline struct xfs_mount *
+err_to_mp(struct kobject *kobject)
+{
+ struct xfs_kobj *kobj = to_kobj(kobject);
+ return container_of(kobj, struct xfs_mount, m_error_kobj);
+}
+
+static ssize_t
+max_retries_show(
+ struct kobject *kobject,
+ char *buf)
+{
+ struct xfs_error_cfg *cfg = to_error_cfg(kobject);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", cfg->max_retries);
+}
+
+static ssize_t
+max_retries_store(
+ struct kobject *kobject,
+ const char *buf,
+ size_t count)
+{
+ struct xfs_error_cfg *cfg = to_error_cfg(kobject);
+ int ret;
+ int val;
+
+ ret = kstrtoint(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val < -1)
+ return -EINVAL;
+
+ cfg->max_retries = val;
+ return count;
+}
+XFS_SYSFS_ATTR_RW(max_retries);
+
+static ssize_t
+retry_timeout_seconds_show(
+ struct kobject *kobject,
+ char *buf)
+{
+ struct xfs_error_cfg *cfg = to_error_cfg(kobject);
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n",
+ jiffies_to_msecs(cfg->retry_timeout) / MSEC_PER_SEC);
+}
+
+static ssize_t
+retry_timeout_seconds_store(
+ struct kobject *kobject,
+ const char *buf,
+ size_t count)
+{
+ struct xfs_error_cfg *cfg = to_error_cfg(kobject);
+ int ret;
+ int val;
+
+ ret = kstrtoint(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ /* 1 day timeout maximum */
+ if (val < 0 || val > 86400)
+ return -EINVAL;
+
+ cfg->retry_timeout = msecs_to_jiffies(val * MSEC_PER_SEC);
+ return count;
+}
+XFS_SYSFS_ATTR_RW(retry_timeout_seconds);
+
+static ssize_t
+fail_at_unmount_show(
+ struct kobject *kobject,
+ char *buf)
+{
+ struct xfs_mount *mp = err_to_mp(kobject);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", mp->m_fail_unmount);
+}
+
+static ssize_t
+fail_at_unmount_store(
+ struct kobject *kobject,
+ const char *buf,
+ size_t count)
+{
+ struct xfs_mount *mp = err_to_mp(kobject);
+ int ret;
+ int val;
+
+ ret = kstrtoint(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val < 0 || val > 1)
+ return -EINVAL;
+
+ mp->m_fail_unmount = val;
+ return count;
+}
+XFS_SYSFS_ATTR_RW(fail_at_unmount);
+
+static struct attribute *xfs_error_attrs[] = {
+ ATTR_LIST(max_retries),
+ ATTR_LIST(retry_timeout_seconds),
+ NULL,
+};
+
+struct kobj_type xfs_error_cfg_ktype = {
+ .release = xfs_sysfs_release,
+ .sysfs_ops = &xfs_sysfs_ops,
+ .default_attrs = xfs_error_attrs,
+};
+
+struct kobj_type xfs_error_ktype = {
+ .release = xfs_sysfs_release,
+ .sysfs_ops = &xfs_sysfs_ops,
+};
+
+/*
+ * Error initialization tables. These need to be in the same order as
+ * the enums used to index the array. All class init tables need to
+ * define a "default" behaviour as the first entry; all other entries can be
+ * empty.
+ */
+struct xfs_error_init {
+ char *name;
+ int max_retries;
+ int retry_timeout; /* in seconds */
+};
+
+static const struct xfs_error_init xfs_error_meta_init[XFS_ERR_ERRNO_MAX] = {
+ { .name = "default",
+ .max_retries = XFS_ERR_RETRY_FOREVER,
+ .retry_timeout = 0,
+ },
+ { .name = "EIO",
+ .max_retries = XFS_ERR_RETRY_FOREVER,
+ .retry_timeout = 0,
+ },
+ { .name = "ENOSPC",
+ .max_retries = XFS_ERR_RETRY_FOREVER,
+ .retry_timeout = 0,
+ },
+ { .name = "ENODEV",
+ .max_retries = 0,
+ },
+};
+
+static int
+xfs_error_sysfs_init_class(
+ struct xfs_mount *mp,
+ int class,
+ const char *parent_name,
+ struct xfs_kobj *parent_kobj,
+ const struct xfs_error_init init[])
+{
+ struct xfs_error_cfg *cfg;
+ int error;
+ int i;
+
+ ASSERT(class < XFS_ERR_CLASS_MAX);
+
+ error = xfs_sysfs_init(parent_kobj, &xfs_error_ktype,
+ &mp->m_error_kobj, parent_name);
+ if (error)
+ return error;
+
+ for (i = 0; i < XFS_ERR_ERRNO_MAX; i++) {
+ cfg = &mp->m_error_cfg[class][i];
+ error = xfs_sysfs_init(&cfg->kobj, &xfs_error_cfg_ktype,
+ parent_kobj, init[i].name);
+ if (error)
+ goto out_error;
+
+ cfg->max_retries = init[i].max_retries;
+ cfg->retry_timeout = msecs_to_jiffies(
+ init[i].retry_timeout * MSEC_PER_SEC);
+ }
+ return 0;
+
+out_error:
+ /* unwind the entries that succeeded */
+ for (i--; i >= 0; i--) {
+ cfg = &mp->m_error_cfg[class][i];
+ xfs_sysfs_del(&cfg->kobj);
+ }
+ xfs_sysfs_del(parent_kobj);
+ return error;
+}
+
+int
+xfs_error_sysfs_init(
+ struct xfs_mount *mp)
+{
+ int error;
+
+ /* .../xfs/<dev>/error/ */
+ error = xfs_sysfs_init(&mp->m_error_kobj, &xfs_error_ktype,
+ &mp->m_kobj, "error");
+ if (error)
+ return error;
+
+ error = sysfs_create_file(&mp->m_error_kobj.kobject,
+ ATTR_LIST(fail_at_unmount));
+ if (error)
+ goto out_error;
+
+ /* .../xfs/<dev>/error/metadata/ */
+ error = xfs_error_sysfs_init_class(mp, XFS_ERR_METADATA,
+ "metadata", &mp->m_error_meta_kobj,
+ xfs_error_meta_init);
+ if (error)
+ goto out_error;
+
+ return 0;
+
+out_error:
+ xfs_sysfs_del(&mp->m_error_kobj);
+ return error;
+}
+
+void
+xfs_error_sysfs_del(
+ struct xfs_mount *mp)
+{
+ struct xfs_error_cfg *cfg;
+ int i, j;
+
+ for (i = 0; i < XFS_ERR_CLASS_MAX; i++) {
+ for (j = 0; j < XFS_ERR_ERRNO_MAX; j++) {
+ cfg = &mp->m_error_cfg[i][j];
+
+ xfs_sysfs_del(&cfg->kobj);
+ }
+ }
+ xfs_sysfs_del(&mp->m_error_meta_kobj);
+ xfs_sysfs_del(&mp->m_error_kobj);
+}
+
+struct xfs_error_cfg *
+xfs_error_get_cfg(
+ struct xfs_mount *mp,
+ int error_class,
+ int error)
+{
+ struct xfs_error_cfg *cfg;
+
+ switch (error) {
+ case EIO:
+ cfg = &mp->m_error_cfg[error_class][XFS_ERR_EIO];
+ break;
+ case ENOSPC:
+ cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENOSPC];
+ break;
+ case ENODEV:
+ cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENODEV];
+ break;
+ default:
+ cfg = &mp->m_error_cfg[error_class][XFS_ERR_DEFAULT];
+ break;
+ }
+
+ return cfg;
+}
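Once mounted, these knobs land under /sys/fs/xfs/<dev>/error/. A hedged usage sketch that tunes the metadata EIO policy from user space; "sdb1" is a placeholder device name, and the directory layout and attribute names follow the sysfs tree created above:

#include <stdio.h>

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s", val);
	return fclose(f);
}

int main(void)
{
	/* retry failed metadata writeback at most 10 times on EIO */
	write_attr("/sys/fs/xfs/sdb1/error/metadata/EIO/max_retries", "10");
	/* ...and give up after 30 seconds regardless of retry count */
	write_attr("/sys/fs/xfs/sdb1/error/metadata/EIO/retry_timeout_seconds",
		   "30");
	/* error out pending retries instead of hanging a later unmount */
	write_attr("/sys/fs/xfs/sdb1/error/fail_at_unmount", "1");
	return 0;
}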
diff --git a/fs/xfs/xfs_sysfs.h b/fs/xfs/xfs_sysfs.h
index be692e59938d..d04637181ef2 100644
--- a/fs/xfs/xfs_sysfs.h
+++ b/fs/xfs/xfs_sysfs.h
@@ -58,4 +58,7 @@ xfs_sysfs_del(
wait_for_completion(&kobj->complete);
}
+int xfs_error_sysfs_init(struct xfs_mount *mp);
+void xfs_error_sysfs_del(struct xfs_mount *mp);
+
#endif /* __XFS_SYSFS_H__ */
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index c8d58426008e..ea94ee0fe5ea 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -364,7 +364,6 @@ DEFINE_BUF_EVENT(xfs_buf_delwri_split);
DEFINE_BUF_EVENT(xfs_buf_get_uncached);
DEFINE_BUF_EVENT(xfs_bdstrat_shut);
DEFINE_BUF_EVENT(xfs_buf_item_relse);
-DEFINE_BUF_EVENT(xfs_buf_item_iodone);
DEFINE_BUF_EVENT(xfs_buf_item_iodone_async);
DEFINE_BUF_EVENT(xfs_buf_error_relse);
DEFINE_BUF_EVENT(xfs_buf_wait_buftarg);
@@ -944,7 +943,6 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class,
TP_ARGS(log, tic),
TP_STRUCT__entry(
__field(dev_t, dev)
- __field(unsigned, trans_type)
__field(char, ocnt)
__field(char, cnt)
__field(int, curr_res)
@@ -962,7 +960,6 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class,
),
TP_fast_assign(
__entry->dev = log->l_mp->m_super->s_dev;
- __entry->trans_type = tic->t_trans_type;
__entry->ocnt = tic->t_ocnt;
__entry->cnt = tic->t_cnt;
__entry->curr_res = tic->t_curr_res;
@@ -980,14 +977,13 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class,
__entry->curr_block = log->l_curr_block;
__entry->tail_lsn = atomic64_read(&log->l_tail_lsn);
),
- TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u "
+ TP_printk("dev %d:%d t_ocnt %u t_cnt %u t_curr_res %u "
"t_unit_res %u t_flags %s reserveq %s "
"writeq %s grant_reserve_cycle %d "
"grant_reserve_bytes %d grant_write_cycle %d "
"grant_write_bytes %d curr_cycle %d curr_block %d "
"tail_cycle %d tail_block %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __print_symbolic(__entry->trans_type, XFS_TRANS_TYPES),
__entry->ocnt,
__entry->cnt,
__entry->curr_res,
@@ -1053,19 +1049,21 @@ DECLARE_EVENT_CLASS(xfs_log_item_class,
)
TRACE_EVENT(xfs_log_force,
- TP_PROTO(struct xfs_mount *mp, xfs_lsn_t lsn),
- TP_ARGS(mp, lsn),
+ TP_PROTO(struct xfs_mount *mp, xfs_lsn_t lsn, unsigned long caller_ip),
+ TP_ARGS(mp, lsn, caller_ip),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_lsn_t, lsn)
+ __field(unsigned long, caller_ip)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->lsn = lsn;
+ __entry->caller_ip = caller_ip;
),
- TP_printk("dev %d:%d lsn 0x%llx",
+ TP_printk("dev %d:%d lsn 0x%llx caller %ps",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->lsn)
+ __entry->lsn, (void *)__entry->caller_ip)
)
#define DEFINE_LOG_ITEM_EVENT(name) \
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 20c53666cb4b..5f3d33d16e67 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -47,47 +47,6 @@ xfs_trans_init(
}
/*
- * This routine is called to allocate a transaction structure.
- * The type parameter indicates the type of the transaction. These
- * are enumerated in xfs_trans.h.
- *
- * Dynamically allocate the transaction structure from the transaction
- * zone, initialize it, and return it to the caller.
- */
-xfs_trans_t *
-xfs_trans_alloc(
- xfs_mount_t *mp,
- uint type)
-{
- xfs_trans_t *tp;
-
- sb_start_intwrite(mp->m_super);
- tp = _xfs_trans_alloc(mp, type, KM_SLEEP);
- tp->t_flags |= XFS_TRANS_FREEZE_PROT;
- return tp;
-}
-
-xfs_trans_t *
-_xfs_trans_alloc(
- xfs_mount_t *mp,
- uint type,
- xfs_km_flags_t memflags)
-{
- xfs_trans_t *tp;
-
- WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
- atomic_inc(&mp->m_active_trans);
-
- tp = kmem_zone_zalloc(xfs_trans_zone, memflags);
- tp->t_magic = XFS_TRANS_HEADER_MAGIC;
- tp->t_type = type;
- tp->t_mountp = mp;
- INIT_LIST_HEAD(&tp->t_items);
- INIT_LIST_HEAD(&tp->t_busy);
- return tp;
-}
-
-/*
* Free the transaction structure. If there is more clean up
* to do when the structure is freed, add it here.
*/
@@ -99,7 +58,7 @@ xfs_trans_free(
xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
atomic_dec(&tp->t_mountp->m_active_trans);
- if (tp->t_flags & XFS_TRANS_FREEZE_PROT)
+ if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
sb_end_intwrite(tp->t_mountp->m_super);
xfs_trans_free_dqinfo(tp);
kmem_zone_free(xfs_trans_zone, tp);
@@ -125,7 +84,6 @@ xfs_trans_dup(
* Initialize the new transaction structure.
*/
ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
- ntp->t_type = tp->t_type;
ntp->t_mountp = tp->t_mountp;
INIT_LIST_HEAD(&ntp->t_items);
INIT_LIST_HEAD(&ntp->t_busy);
@@ -135,9 +93,9 @@ xfs_trans_dup(
ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
(tp->t_flags & XFS_TRANS_RESERVE) |
- (tp->t_flags & XFS_TRANS_FREEZE_PROT);
+ (tp->t_flags & XFS_TRANS_NO_WRITECOUNT);
/* We gave our writer reference to the new transaction */
- tp->t_flags &= ~XFS_TRANS_FREEZE_PROT;
+ tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
tp->t_blk_res = tp->t_blk_res_used;
@@ -165,7 +123,7 @@ xfs_trans_dup(
* This does not do quota reservations. That typically is done by the
* caller afterwards.
*/
-int
+static int
xfs_trans_reserve(
struct xfs_trans *tp,
struct xfs_trans_res *resp,
@@ -219,7 +177,7 @@ xfs_trans_reserve(
resp->tr_logres,
resp->tr_logcount,
&tp->t_ticket, XFS_TRANSACTION,
- permanent, tp->t_type);
+ permanent);
}
if (error)
@@ -268,6 +226,42 @@ undo_blocks:
return error;
}
+int
+xfs_trans_alloc(
+ struct xfs_mount *mp,
+ struct xfs_trans_res *resp,
+ uint blocks,
+ uint rtextents,
+ uint flags,
+ struct xfs_trans **tpp)
+{
+ struct xfs_trans *tp;
+ int error;
+
+ if (!(flags & XFS_TRANS_NO_WRITECOUNT))
+ sb_start_intwrite(mp->m_super);
+
+ WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
+ atomic_inc(&mp->m_active_trans);
+
+ tp = kmem_zone_zalloc(xfs_trans_zone,
+ (flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
+ tp->t_magic = XFS_TRANS_HEADER_MAGIC;
+ tp->t_flags = flags;
+ tp->t_mountp = mp;
+ INIT_LIST_HEAD(&tp->t_items);
+ INIT_LIST_HEAD(&tp->t_busy);
+
+ error = xfs_trans_reserve(tp, resp, blocks, rtextents);
+ if (error) {
+ xfs_trans_cancel(tp);
+ return error;
+ }
+
+ *tpp = tp;
+ return 0;
+}
+
/*
* Record the indicated change to the given field for application
* to the file system's superblock when the transaction commits.
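The new xfs_trans_alloc() folds allocation and reservation into one call and cancels the transaction itself on reservation failure, so callers no longer need an out_trans_cancel unwind path. The resulting conversion pattern, mirroring the xfs_setattr_nonsize() hunk earlier in this patch:

/* before: two calls, and the caller had to cancel the empty transaction */
tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
if (error)
	goto out_trans_cancel;

/* after: one call; nothing is left allocated when it fails */
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
if (error)
	goto out_dqrele;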
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index e7c49cf43fbc..9a462e892e4f 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -90,7 +90,6 @@ void xfs_log_item_init(struct xfs_mount *mp, struct xfs_log_item *item,
*/
typedef struct xfs_trans {
unsigned int t_magic; /* magic number */
- unsigned int t_type; /* transaction type */
unsigned int t_log_res; /* amt of log space resvd */
unsigned int t_log_count; /* count for perm log res */
unsigned int t_blk_res; /* # of blocks resvd */
@@ -148,10 +147,9 @@ typedef struct xfs_trans {
/*
* XFS transaction mechanism exported interfaces.
*/
-xfs_trans_t *xfs_trans_alloc(struct xfs_mount *, uint);
-xfs_trans_t *_xfs_trans_alloc(struct xfs_mount *, uint, xfs_km_flags_t);
-int xfs_trans_reserve(struct xfs_trans *, struct xfs_trans_res *,
- uint, uint);
+int xfs_trans_alloc(struct xfs_mount *mp, struct xfs_trans_res *resp,
+ uint blocks, uint rtextents, uint flags,
+ struct xfs_trans **tpp);
void xfs_trans_mod_sb(xfs_trans_t *, uint, int64_t);
struct xfs_buf *xfs_trans_get_buf_map(struct xfs_trans *tp,
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index d111f691f313..ea62245fee26 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -74,11 +74,12 @@ xfs_forget_acl(
}
static int
-xfs_xattr_set(const struct xattr_handler *handler, struct dentry *dentry,
- const char *name, const void *value, size_t size, int flags)
+xfs_xattr_set(const struct xattr_handler *handler, struct dentry *unused,
+ struct inode *inode, const char *name, const void *value,
+ size_t size, int flags)
{
int xflags = handler->flags;
- struct xfs_inode *ip = XFS_I(d_inode(dentry));
+ struct xfs_inode *ip = XFS_I(inode);
int error;
/* Convert Linux syscall to XFS internal ATTR flags */
@@ -92,7 +93,7 @@ xfs_xattr_set(const struct xattr_handler *handler, struct dentry *dentry,
error = xfs_attr_set(ip, (unsigned char *)name,
(void *)value, size, xflags);
if (!error)
- xfs_forget_acl(d_inode(dentry), name, xflags);
+ xfs_forget_acl(inode, name, xflags);
return error;
}
@@ -146,7 +147,7 @@ __xfs_xattr_put_listent(
arraytop = context->count + prefix_len + namelen + 1;
if (arraytop > context->firstu) {
context->count = -1; /* insufficient space */
- return 1;
+ return 0;
}
offset = (char *)context->alist + context->count;
strncpy(offset, prefix, prefix_len);
@@ -166,8 +167,7 @@ xfs_xattr_put_listent(
int flags,
unsigned char *name,
int namelen,
- int valuelen,
- unsigned char *value)
+ int valuelen)
{
char *prefix;
int prefix_len;
@@ -221,11 +221,15 @@ xfs_xattr_put_listent(
}
ssize_t
-xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size)
+xfs_vn_listxattr(
+ struct dentry *dentry,
+ char *data,
+ size_t size)
{
struct xfs_attr_list_context context;
struct attrlist_cursor_kern cursor = { 0 };
- struct inode *inode = d_inode(dentry);
+ struct inode *inode = d_inode(dentry);
+ int error;
/*
* First read the regular on-disk attributes.
@@ -239,7 +243,9 @@ xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size)
context.firstu = context.bufsize;
context.put_listent = xfs_xattr_put_listent;
- xfs_attr_list_int(&context);
+ error = xfs_attr_list_int(&context);
+ if (error)
+ return error;
if (context.count < 0)
return -ERANGE;
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index 5d8ffa3e6f8c..c1cde3577551 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -7,10 +7,10 @@
static __always_inline int preempt_count(void)
{
- return current_thread_info()->preempt_count;
+ return READ_ONCE(current_thread_info()->preempt_count);
}
-static __always_inline int *preempt_count_ptr(void)
+static __always_inline volatile int *preempt_count_ptr(void)
{
return &current_thread_info()->preempt_count;
}
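READ_ONCE() and the volatile-qualified return type both force the compiler to emit a fresh load of the preempt count on every access instead of caching it in a register. A user-space GNU C sketch of the same idea (a mock, not the kernel's READ_ONCE() definition):

#include <stdio.h>

/* every access through this macro compiles to a fresh load */
#define MY_READ_ONCE(x)	(*(const volatile __typeof__(x) *)&(x))

static int preempt_count_mock;

int main(void)
{
	preempt_count_mock = 1;
	/* re-loaded on every use, even at -O2 */
	printf("%d\n", MY_READ_ONCE(preempt_count_mock));
	return 0;
}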
diff --git a/include/drm/drm_dp_dual_mode_helper.h b/include/drm/drm_dp_dual_mode_helper.h
new file mode 100644
index 000000000000..e8a9dfd0e055
--- /dev/null
+++ b/include/drm/drm_dp_dual_mode_helper.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DRM_DP_DUAL_MODE_HELPER_H
+#define DRM_DP_DUAL_MODE_HELPER_H
+
+#include <linux/types.h>
+
+/*
+ * Optional for type 1 DVI adaptors
+ * Mandatory for type 1 HDMI and type 2 adaptors
+ */
+#define DP_DUAL_MODE_HDMI_ID 0x00 /* 00-0f */
+#define DP_DUAL_MODE_HDMI_ID_LEN 16
+/*
+ * Optional for type 1 adaptors
+ * Mandatory for type 2 adaptors
+ */
+#define DP_DUAL_MODE_ADAPTOR_ID 0x10
+#define DP_DUAL_MODE_REV_MASK 0x07
+#define DP_DUAL_MODE_REV_TYPE2 0x00
+#define DP_DUAL_MODE_TYPE_MASK 0xf0
+#define DP_DUAL_MODE_TYPE_TYPE2 0xa0
+#define DP_DUAL_MODE_IEEE_OUI 0x11 /* 11-13 */
+#define DP_DUAL_IEEE_OUI_LEN 3
+#define DP_DUAL_DEVICE_ID 0x14 /* 14-19 */
+#define DP_DUAL_DEVICE_ID_LEN 6
+#define DP_DUAL_MODE_HARDWARE_REV 0x1a
+#define DP_DUAL_MODE_FIRMWARE_MAJOR_REV 0x1b
+#define DP_DUAL_MODE_FIRMWARE_MINOR_REV 0x1c
+#define DP_DUAL_MODE_MAX_TMDS_CLOCK 0x1d
+#define DP_DUAL_MODE_I2C_SPEED_CAP 0x1e
+#define DP_DUAL_MODE_TMDS_OEN 0x20
+#define DP_DUAL_MODE_TMDS_DISABLE 0x01
+#define DP_DUAL_MODE_HDMI_PIN_CTRL 0x21
+#define DP_DUAL_MODE_CEC_ENABLE 0x01
+#define DP_DUAL_MODE_I2C_SPEED_CTRL 0x22
+
+struct i2c_adapter;
+
+ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
+ u8 offset, void *buffer, size_t size);
+ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
+ u8 offset, const void *buffer, size_t size);
+
+/**
+ * enum drm_dp_dual_mode_type - Type of the DP dual mode adaptor
+ * @DRM_DP_DUAL_MODE_NONE: No DP dual mode adaptor
+ * @DRM_DP_DUAL_MODE_UNKNOWN: Could be either none or type 1 DVI adaptor
+ * @DRM_DP_DUAL_MODE_TYPE1_DVI: Type 1 DVI adaptor
+ * @DRM_DP_DUAL_MODE_TYPE1_HDMI: Type 1 HDMI adaptor
+ * @DRM_DP_DUAL_MODE_TYPE2_DVI: Type 2 DVI adaptor
+ * @DRM_DP_DUAL_MODE_TYPE2_HDMI: Type 2 HDMI adaptor
+ */
+enum drm_dp_dual_mode_type {
+ DRM_DP_DUAL_MODE_NONE,
+ DRM_DP_DUAL_MODE_UNKNOWN,
+ DRM_DP_DUAL_MODE_TYPE1_DVI,
+ DRM_DP_DUAL_MODE_TYPE1_HDMI,
+ DRM_DP_DUAL_MODE_TYPE2_DVI,
+ DRM_DP_DUAL_MODE_TYPE2_HDMI,
+};
+
+enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter);
+int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type,
+ struct i2c_adapter *adapter);
+int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type,
+ struct i2c_adapter *adapter, bool *enabled);
+int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
+ struct i2c_adapter *adapter, bool enable);
+const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type);
+
+#endif
diff --git a/include/dt-bindings/thermal/tegra124-soctherm.h b/include/dt-bindings/thermal/tegra124-soctherm.h
index 85aaf66690f9..729ab9fc325e 100644
--- a/include/dt-bindings/thermal/tegra124-soctherm.h
+++ b/include/dt-bindings/thermal/tegra124-soctherm.h
@@ -9,5 +9,6 @@
#define TEGRA124_SOCTHERM_SENSOR_MEM 1
#define TEGRA124_SOCTHERM_SENSOR_GPU 2
#define TEGRA124_SOCTHERM_SENSOR_PLLX 3
+#define TEGRA124_SOCTHERM_SENSOR_NUM 4
#endif
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index b651aed9dc6b..dda39d8fa189 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -24,9 +24,6 @@
#include <linux/workqueue.h>
struct arch_timer_kvm {
- /* Is the timer enabled */
- bool enabled;
-
/* Virtual offset */
cycle_t cntvoff;
};
@@ -53,15 +50,15 @@ struct arch_timer_cpu {
/* Timer IRQ */
struct kvm_irq_level irq;
- /* VGIC mapping */
- struct irq_phys_map *map;
-
/* Active IRQ state caching */
bool active_cleared_last;
+
+ /* Is the timer enabled */
+ bool enabled;
};
int kvm_timer_hyp_init(void);
-void kvm_timer_enable(struct kvm *kvm);
+int kvm_timer_enable(struct kvm_vcpu *vcpu);
void kvm_timer_init(struct kvm *kvm);
int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
const struct kvm_irq_level *irq);
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index be6037aa703d..da0a524802cb 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -19,6 +19,10 @@
#ifndef __ASM_ARM_KVM_VGIC_H
#define __ASM_ARM_KVM_VGIC_H
+#ifdef CONFIG_KVM_NEW_VGIC
+#include <kvm/vgic/vgic.h>
+#else
+
#include <linux/kernel.h>
#include <linux/kvm.h>
#include <linux/irqreturn.h>
@@ -158,7 +162,6 @@ struct vgic_io_device {
struct irq_phys_map {
u32 virt_irq;
u32 phys_irq;
- u32 irq;
};
struct irq_phys_map_entry {
@@ -305,9 +308,6 @@ struct vgic_cpu {
unsigned long *active_shared;
unsigned long *pend_act_shared;
- /* Number of list registers on this CPU */
- int nr_lr;
-
/* CPU vif control registers for world switch */
union {
struct vgic_v2_cpu_if vgic_v2;
@@ -342,17 +342,18 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
bool level);
int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
- struct irq_phys_map *map, bool level);
+ unsigned int virt_irq, bool level);
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
-struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
- int virt_irq, int irq);
-int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
-bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, int virt_irq, int phys_irq);
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq);
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq);
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k) (!!((k)->arch.vgic.nr_cpus))
#define vgic_ready(k) ((k)->arch.vgic.ready)
+#define vgic_valid_spi(k, i) (((i) >= VGIC_NR_PRIVATE_IRQS) && \
+ ((i) < (k)->arch.vgic.nr_irqs))
int vgic_v2_probe(const struct gic_kvm_info *gic_kvm_info,
const struct vgic_ops **ops,
@@ -370,4 +371,5 @@ static inline int vgic_v3_probe(const struct gic_kvm_info *gic_kvm_info,
}
#endif
+#endif /* old VGIC include */
#endif
diff --git a/include/kvm/vgic/vgic.h b/include/kvm/vgic/vgic.h
new file mode 100644
index 000000000000..3fbd175265ae
--- /dev/null
+++ b/include/kvm/vgic/vgic.h
@@ -0,0 +1,246 @@
+/*
+ * Copyright (C) 2015, 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_ARM_KVM_VGIC_VGIC_H
+#define __ASM_ARM_KVM_VGIC_VGIC_H
+
+#include <linux/kernel.h>
+#include <linux/kvm.h>
+#include <linux/irqreturn.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <kvm/iodev.h>
+
+#define VGIC_V3_MAX_CPUS 255
+#define VGIC_V2_MAX_CPUS 8
+#define VGIC_NR_IRQS_LEGACY 256
+#define VGIC_NR_SGIS 16
+#define VGIC_NR_PPIS 16
+#define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS)
+#define VGIC_MAX_PRIVATE (VGIC_NR_PRIVATE_IRQS - 1)
+#define VGIC_MAX_SPI 1019
+#define VGIC_MAX_RESERVED 1023
+#define VGIC_MIN_LPI 8192
+
+enum vgic_type {
+ VGIC_V2, /* Good ol' GICv2 */
+ VGIC_V3, /* New fancy GICv3 */
+};
+
+/* same for all guests, as it depends only on the _host's_ GIC model */
+struct vgic_global {
+ /* type of the host GIC */
+ enum vgic_type type;
+
+ /* Physical address of vgic virtual cpu interface */
+ phys_addr_t vcpu_base;
+
+ /* virtual control interface mapping */
+ void __iomem *vctrl_base;
+
+ /* Number of implemented list registers */
+ int nr_lr;
+
+ /* Maintenance IRQ number */
+ unsigned int maint_irq;
+
+ /* maximum number of VCPUs allowed (GICv2 limits us to 8) */
+ int max_gic_vcpus;
+
+ /* Only needed for the legacy KVM_CREATE_IRQCHIP */
+ bool can_emulate_gicv2;
+};
+
+extern struct vgic_global kvm_vgic_global_state;
+
+#define VGIC_V2_MAX_LRS (1 << 6)
+#define VGIC_V3_MAX_LRS 16
+#define VGIC_V3_LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr)
+
+enum vgic_irq_config {
+ VGIC_CONFIG_EDGE = 0,
+ VGIC_CONFIG_LEVEL
+};
+
+struct vgic_irq {
+ spinlock_t irq_lock; /* Protects the content of the struct */
+ struct list_head ap_list;
+
+ struct kvm_vcpu *vcpu; /* SGIs and PPIs: the owning VCPU.
+ * SPIs and LPIs: the VCPU whose ap_list
+ * this is queued on.
+ */
+
+ struct kvm_vcpu *target_vcpu; /* The VCPU that this interrupt should
+ * be sent to, as a result of the
+ * targets reg (v2) or the
+ * affinity reg (v3).
+ */
+
+ u32 intid; /* Guest visible INTID */
+ bool pending;
+ bool line_level; /* Level only */
+ bool soft_pending; /* Level only */
+ bool active; /* not used for LPIs */
+ bool enabled;
+ bool hw; /* Tied to HW IRQ */
+ u32 hwintid; /* HW INTID number */
+ union {
+ u8 targets; /* GICv2 target VCPUs mask */
+ u32 mpidr; /* GICv3 target VCPU */
+ };
+ u8 source; /* GICv2 SGIs only */
+ u8 priority;
+ enum vgic_irq_config config; /* Level or edge */
+};
+
+struct vgic_register_region;
+
+struct vgic_io_device {
+ gpa_t base_addr;
+ struct kvm_vcpu *redist_vcpu;
+ const struct vgic_register_region *regions;
+ int nr_regions;
+ struct kvm_io_device dev;
+};
+
+struct vgic_dist {
+ bool in_kernel;
+ bool ready;
+ bool initialized;
+
+ /* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */
+ u32 vgic_model;
+
+ int nr_spis;
+
+ /* TODO: Consider moving to global state */
+ /* Virtual control interface mapping */
+ void __iomem *vctrl_base;
+
+ /* base addresses in guest physical address space: */
+ gpa_t vgic_dist_base; /* distributor */
+ union {
+ /* either a GICv2 CPU interface */
+ gpa_t vgic_cpu_base;
+ /* or a number of GICv3 redistributor regions */
+ gpa_t vgic_redist_base;
+ };
+
+ /* distributor enabled */
+ bool enabled;
+
+ struct vgic_irq *spis;
+
+ struct vgic_io_device dist_iodev;
+ struct vgic_io_device *redist_iodevs;
+};
+
+struct vgic_v2_cpu_if {
+ u32 vgic_hcr;
+ u32 vgic_vmcr;
+ u32 vgic_misr; /* Saved only */
+ u64 vgic_eisr; /* Saved only */
+ u64 vgic_elrsr; /* Saved only */
+ u32 vgic_apr;
+ u32 vgic_lr[VGIC_V2_MAX_LRS];
+};
+
+struct vgic_v3_cpu_if {
+#ifdef CONFIG_KVM_ARM_VGIC_V3
+ u32 vgic_hcr;
+ u32 vgic_vmcr;
+ u32 vgic_sre; /* Restored only, change ignored */
+ u32 vgic_misr; /* Saved only */
+ u32 vgic_eisr; /* Saved only */
+ u32 vgic_elrsr; /* Saved only */
+ u32 vgic_ap0r[4];
+ u32 vgic_ap1r[4];
+ u64 vgic_lr[VGIC_V3_MAX_LRS];
+#endif
+};
+
+struct vgic_cpu {
+ /* CPU vif control registers for world switch */
+ union {
+ struct vgic_v2_cpu_if vgic_v2;
+ struct vgic_v3_cpu_if vgic_v3;
+ };
+
+ unsigned int used_lrs;
+ struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
+
+ spinlock_t ap_list_lock; /* Protects the ap_list */
+
+ /*
+ * List of IRQs that this VCPU should consider because they are either
+ * Active or Pending (hence the name: AP list), or because they recently
+ * were one of the two and need to be migrated off this list to another
+ * VCPU.
+ */
+ struct list_head ap_list_head;
+
+ u64 live_lrs;
+};
+
+int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
+void kvm_vgic_early_init(struct kvm *kvm);
+int kvm_vgic_create(struct kvm *kvm, u32 type);
+void kvm_vgic_destroy(struct kvm *kvm);
+void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu);
+void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
+int kvm_vgic_map_resources(struct kvm *kvm);
+int kvm_vgic_hyp_init(void);
+
+int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
+ bool level);
+int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, unsigned int intid,
+ bool level);
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq);
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq);
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq);
+
+int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
+
+#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
+#define vgic_initialized(k) ((k)->arch.vgic.initialized)
+#define vgic_ready(k) ((k)->arch.vgic.ready)
+#define vgic_valid_spi(k, i) (((i) >= VGIC_NR_PRIVATE_IRQS) && \
+ ((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS))
+
+bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
+void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
+void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
+
+#ifdef CONFIG_KVM_ARM_VGIC_V3
+void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
+#else
+static inline void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
+{
+}
+#endif
+
+/**
+ * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
+ *
+ * The host's GIC naturally limits the maximum number of VCPUs a guest
+ * can use.
+ */
+static inline int kvm_vgic_get_max_vcpus(void)
+{
+ return kvm_vgic_global_state.max_gic_vcpus;
+}
+
+#endif /* __ASM_ARM_KVM_VGIC_VGIC_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1fd8fdff2f81..3d9cf326574f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -768,6 +768,17 @@ static inline void rq_flush_dcache_pages(struct request *rq)
}
#endif
+#ifdef CONFIG_PRINTK
+#define vfs_msg(sb, level, fmt, ...) \
+ __vfs_msg(sb, level, fmt, ##__VA_ARGS__)
+#else
+#define vfs_msg(sb, level, fmt, ...) \
+do { \
+ no_printk(fmt, ##__VA_ARGS__); \
+ __vfs_msg(sb, "", " "); \
+} while (0)
+#endif
+
extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
@@ -1660,7 +1671,7 @@ struct block_device_operations {
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
long (*direct_access)(struct block_device *, sector_t, void __pmem **,
- pfn_t *);
+ pfn_t *, long);
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
/* ->media_changed() is DEPRECATED, use ->check_events() instead */
@@ -1680,6 +1691,8 @@ extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
struct writeback_control *);
extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *);
+extern int bdev_dax_supported(struct super_block *, int);
+extern bool bdev_dax_capable(struct block_device *);
#else /* CONFIG_BLOCK */
struct block_device;
diff --git a/include/linux/ceph/ceph_frag.h b/include/linux/ceph/ceph_frag.h
index b827e066e55a..146507df8650 100644
--- a/include/linux/ceph/ceph_frag.h
+++ b/include/linux/ceph/ceph_frag.h
@@ -51,11 +51,11 @@ static inline __u32 ceph_frag_make_child(__u32 f, int by, int i)
return ceph_frag_make(newbits,
ceph_frag_value(f) | (i << (24 - newbits)));
}
-static inline int ceph_frag_is_leftmost(__u32 f)
+static inline bool ceph_frag_is_leftmost(__u32 f)
{
return ceph_frag_value(f) == 0;
}
-static inline int ceph_frag_is_rightmost(__u32 f)
+static inline bool ceph_frag_is_rightmost(__u32 f)
{
return ceph_frag_value(f) == ceph_frag_mask(f);
}
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 37f28bf55ce4..dfce616002ad 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -153,8 +153,9 @@ struct ceph_dir_layout {
/* watch-notify operations */
enum {
- WATCH_NOTIFY = 1, /* notifying watcher */
- WATCH_NOTIFY_COMPLETE = 2, /* notifier notified when done */
+ CEPH_WATCH_EVENT_NOTIFY = 1, /* notifying watcher */
+ CEPH_WATCH_EVENT_NOTIFY_COMPLETE = 2, /* notifier notified when done */
+ CEPH_WATCH_EVENT_DISCONNECT = 3, /* we were disconnected */
};
@@ -207,6 +208,8 @@ struct ceph_mon_subscribe_ack {
struct ceph_fsid fsid;
} __attribute__ ((packed));
+#define CEPH_FS_CLUSTER_ID_NONE -1
+
/*
* mdsmap flags
*/
@@ -344,6 +347,18 @@ extern const char *ceph_mds_op_name(int op);
#define CEPH_XATTR_REPLACE (1 << 1)
#define CEPH_XATTR_REMOVE (1 << 31)
+/*
+ * readdir request flags.
+ */
+#define CEPH_READDIR_REPLY_BITFLAGS (1<<0)
+
+/*
+ * readdir reply flags.
+ */
+#define CEPH_READDIR_FRAG_END (1<<0)
+#define CEPH_READDIR_FRAG_COMPLETE (1<<8)
+#define CEPH_READDIR_HASH_ORDER (1<<9)
+
union ceph_mds_request_args {
struct {
__le32 mask; /* CEPH_CAP_* */
@@ -361,6 +376,7 @@ union ceph_mds_request_args {
__le32 frag; /* which dir fragment */
__le32 max_entries; /* how many dentries to grab */
__le32 max_bytes;
+ __le16 flags;
} __attribute__ ((packed)) readdir;
struct {
__le32 mode;
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
index a6ef9cc267ec..19e9932f3e77 100644
--- a/include/linux/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -47,7 +47,7 @@ static inline void ceph_decode_copy(void **p, void *pv, size_t n)
/*
* bounds check input.
*/
-static inline int ceph_has_room(void **p, void *end, size_t n)
+static inline bool ceph_has_room(void **p, void *end, size_t n)
{
return end >= *p && n <= end - *p;
}
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index db92a8d4926e..690985daad1c 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -180,6 +180,63 @@ static inline int calc_pages_for(u64 off, u64 len)
(off >> PAGE_SHIFT);
}
+/*
+ * These are not meant to be generic - an integer key is assumed.
+ */
+#define DEFINE_RB_INSDEL_FUNCS(name, type, keyfld, nodefld) \
+static void insert_##name(struct rb_root *root, type *t) \
+{ \
+ struct rb_node **n = &root->rb_node; \
+ struct rb_node *parent = NULL; \
+ \
+ BUG_ON(!RB_EMPTY_NODE(&t->nodefld)); \
+ \
+ while (*n) { \
+ type *cur = rb_entry(*n, type, nodefld); \
+ \
+ parent = *n; \
+ if (t->keyfld < cur->keyfld) \
+ n = &(*n)->rb_left; \
+ else if (t->keyfld > cur->keyfld) \
+ n = &(*n)->rb_right; \
+ else \
+ BUG(); \
+ } \
+ \
+ rb_link_node(&t->nodefld, parent, n); \
+ rb_insert_color(&t->nodefld, root); \
+} \
+static void erase_##name(struct rb_root *root, type *t) \
+{ \
+ BUG_ON(RB_EMPTY_NODE(&t->nodefld)); \
+ rb_erase(&t->nodefld, root); \
+ RB_CLEAR_NODE(&t->nodefld); \
+}
+
+#define DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld) \
+static type *lookup_##name(struct rb_root *root, \
+ typeof(((type *)0)->keyfld) key) \
+{ \
+ struct rb_node *n = root->rb_node; \
+ \
+ while (n) { \
+ type *cur = rb_entry(n, type, nodefld); \
+ \
+ if (key < cur->keyfld) \
+ n = n->rb_left; \
+ else if (key > cur->keyfld) \
+ n = n->rb_right; \
+ else \
+ return cur; \
+ } \
+ \
+ return NULL; \
+}
+
+#define DEFINE_RB_FUNCS(name, type, keyfld, nodefld) \
+DEFINE_RB_INSDEL_FUNCS(name, type, keyfld, nodefld) \
+DEFINE_RB_LOOKUP_FUNC(name, type, keyfld, nodefld)
+
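A hedged usage sketch of the macro trio (struct and field names are hypothetical): the key must be an integer field, duplicate keys are a BUG, the node must start out RB_CLEAR_NODE'd, and the generated lookup takes the key by value via typeof.

struct example_item {
	u64 tid;		/* integer key, unique per tree */
	struct rb_node node;	/* embedded linkage */
};

/* generates insert_item(), erase_item() and lookup_item(root, key) */
DEFINE_RB_FUNCS(item, struct example_item, tid, node)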
extern struct kmem_cache *ceph_inode_cachep;
extern struct kmem_cache *ceph_cap_cachep;
extern struct kmem_cache *ceph_cap_flush_cachep;
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index e230e7ed60d3..e2a92df08b47 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -39,20 +39,31 @@ struct ceph_mon_request {
ceph_monc_request_func_t do_request;
};
+typedef void (*ceph_monc_callback_t)(struct ceph_mon_generic_request *);
+
/*
* ceph_mon_generic_request is being used for the statfs and
* mon_get_version requests which are being done a bit differently
* because we need to get data back to the caller
*/
struct ceph_mon_generic_request {
+ struct ceph_mon_client *monc;
struct kref kref;
u64 tid;
struct rb_node node;
int result;
- void *buf;
+
struct completion completion;
+ ceph_monc_callback_t complete_cb;
+ u64 private_data; /* r_tid/linger_id */
+
struct ceph_msg *request; /* original request */
struct ceph_msg *reply; /* and reply */
+
+ union {
+ struct ceph_statfs *st;
+ u64 newest;
+ } u;
};
struct ceph_mon_client {
@@ -77,7 +88,6 @@ struct ceph_mon_client {
/* pending generic requests */
struct rb_root generic_request_tree;
- int num_generic_requests;
u64 last_tid;
/* subs, indexed with CEPH_SUB_* */
@@ -86,6 +96,7 @@ struct ceph_mon_client {
bool want;
u32 have; /* epoch */
} subs[3];
+ int fs_cluster_id; /* "mdsmap.<id>" sub */
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_file;
@@ -116,16 +127,18 @@ extern const char *ceph_sub_str[];
bool ceph_monc_want_map(struct ceph_mon_client *monc, int sub, u32 epoch,
bool continuous);
void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch);
+void ceph_monc_renew_subs(struct ceph_mon_client *monc);
-extern void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc);
extern int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
unsigned long timeout);
extern int ceph_monc_do_statfs(struct ceph_mon_client *monc,
struct ceph_statfs *buf);
-extern int ceph_monc_do_get_version(struct ceph_mon_client *monc,
- const char *what, u64 *newest);
+int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
+ u64 *newest);
+int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what,
+ ceph_monc_callback_t cb, u64 private_data);
extern int ceph_monc_open_session(struct ceph_mon_client *monc);
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index cbf460927c42..19b14862d3e0 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -20,10 +20,11 @@ struct ceph_osd_client;
/*
* completion callback for async writepages
*/
-typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *,
- struct ceph_msg *);
+typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *);
typedef void (*ceph_osdc_unsafe_callback_t)(struct ceph_osd_request *, bool);
+#define CEPH_HOMELESS_OSD -1
+
/* a given osd we're communicating with */
struct ceph_osd {
atomic_t o_ref;
@@ -32,16 +33,15 @@ struct ceph_osd {
int o_incarnation;
struct rb_node o_node;
struct ceph_connection o_con;
- struct list_head o_requests;
- struct list_head o_linger_requests;
+ struct rb_root o_requests;
+ struct rb_root o_linger_requests;
struct list_head o_osd_lru;
struct ceph_auth_handshake o_auth;
unsigned long lru_ttl;
- int o_marked_for_keepalive;
struct list_head o_keepalive_item;
+ struct mutex lock;
};
-
#define CEPH_OSD_SLAB_OPS 2
#define CEPH_OSD_MAX_OPS 16
@@ -104,76 +104,95 @@ struct ceph_osd_req_op {
struct ceph_osd_data response_data;
__u8 class_len;
__u8 method_len;
- __u8 argc;
+ u32 indata_len;
} cls;
struct {
u64 cookie;
- u64 ver;
- u32 prot_ver;
- u32 timeout;
- __u8 flag;
+ __u8 op; /* CEPH_OSD_WATCH_OP_ */
+ u32 gen;
} watch;
struct {
+ struct ceph_osd_data request_data;
+ } notify_ack;
+ struct {
+ u64 cookie;
+ struct ceph_osd_data request_data;
+ struct ceph_osd_data response_data;
+ } notify;
+ struct {
u64 expected_object_size;
u64 expected_write_size;
} alloc_hint;
};
};
+struct ceph_osd_request_target {
+ struct ceph_object_id base_oid;
+ struct ceph_object_locator base_oloc;
+ struct ceph_object_id target_oid;
+ struct ceph_object_locator target_oloc;
+
+ struct ceph_pg pgid;
+ u32 pg_num;
+ u32 pg_num_mask;
+ struct ceph_osds acting;
+ struct ceph_osds up;
+ int size;
+ int min_size;
+ bool sort_bitwise;
+
+ unsigned int flags; /* CEPH_OSD_FLAG_* */
+ bool paused;
+
+ int osd;
+};
+
/* an in-flight request */
struct ceph_osd_request {
u64 r_tid; /* unique for this client */
struct rb_node r_node;
- struct list_head r_req_lru_item;
- struct list_head r_osd_item;
- struct list_head r_linger_item;
- struct list_head r_linger_osd_item;
+ struct rb_node r_mc_node; /* map check */
struct ceph_osd *r_osd;
- struct ceph_pg r_pgid;
- int r_pg_osds[CEPH_PG_MAX_SIZE];
- int r_num_pg_osds;
+
+ struct ceph_osd_request_target r_t;
+#define r_base_oid r_t.base_oid
+#define r_base_oloc r_t.base_oloc
+#define r_flags r_t.flags
struct ceph_msg *r_request, *r_reply;
- int r_flags; /* any additional flags for the osd */
u32 r_sent; /* >0 if r_request is sending/sent */
/* request osd ops array */
unsigned int r_num_ops;
- /* these are updated on each send */
- __le32 *r_request_osdmap_epoch;
- __le32 *r_request_flags;
- __le64 *r_request_pool;
- void *r_request_pgid;
- __le32 *r_request_attempts;
- bool r_paused;
- struct ceph_eversion *r_request_reassert_version;
-
int r_result;
- int r_got_reply;
- int r_linger;
+ bool r_got_reply;
struct ceph_osd_client *r_osdc;
struct kref r_kref;
bool r_mempool;
- struct completion r_completion, r_safe_completion;
+ struct completion r_completion;
+ struct completion r_safe_completion; /* fsync waiter */
ceph_osdc_callback_t r_callback;
ceph_osdc_unsafe_callback_t r_unsafe_callback;
- struct ceph_eversion r_reassert_version;
struct list_head r_unsafe_item;
struct inode *r_inode; /* for use by callbacks */
void *r_priv; /* ditto */
- struct ceph_object_locator r_base_oloc;
- struct ceph_object_id r_base_oid;
- struct ceph_object_locator r_target_oloc;
- struct ceph_object_id r_target_oid;
-
- u64 r_snapid;
- unsigned long r_stamp; /* send OR check time */
+ /* set by submitter */
+ u64 r_snapid; /* for reads, CEPH_NOSNAP o/w */
+ struct ceph_snap_context *r_snapc; /* for writes */
+ struct timespec r_mtime; /* ditto */
+ u64 r_data_offset; /* ditto */
+ bool r_linger; /* don't resend on failure */
- struct ceph_snap_context *r_snapc; /* snap context for writes */
+ /* internal */
+ unsigned long r_stamp; /* jiffies, send or check time */
+ int r_attempts;
+ struct ceph_eversion r_replay_version; /* aka reassert_version */
+ u32 r_last_force_resend;
+ u32 r_map_dne_bound;
struct ceph_osd_req_op r_ops[];
};
@@ -182,44 +201,70 @@ struct ceph_request_redirect {
struct ceph_object_locator oloc;
};
-struct ceph_osd_event {
- u64 cookie;
- int one_shot;
+typedef void (*rados_watchcb2_t)(void *arg, u64 notify_id, u64 cookie,
+ u64 notifier_id, void *data, size_t data_len);
+typedef void (*rados_watcherrcb_t)(void *arg, u64 cookie, int err);
+
+struct ceph_osd_linger_request {
struct ceph_osd_client *osdc;
- void (*cb)(u64, u64, u8, void *);
- void *data;
- struct rb_node node;
- struct list_head osd_node;
+ u64 linger_id;
+ bool committed;
+ bool is_watch; /* watch or notify */
+
+ struct ceph_osd *osd;
+ struct ceph_osd_request *reg_req;
+ struct ceph_osd_request *ping_req;
+ unsigned long ping_sent;
+ unsigned long watch_valid_thru;
+ struct list_head pending_lworks;
+
+ struct ceph_osd_request_target t;
+ u32 last_force_resend;
+ u32 map_dne_bound;
+
+ struct timespec mtime;
+
struct kref kref;
-};
+ struct mutex lock;
+ struct rb_node node; /* osd */
+ struct rb_node osdc_node; /* osdc */
+ struct rb_node mc_node; /* map check */
+ struct list_head scan_item;
+
+ struct completion reg_commit_wait;
+ struct completion notify_finish_wait;
+ int reg_commit_error;
+ int notify_finish_error;
+ int last_error;
+
+ u32 register_gen;
+ u64 notify_id;
+
+ rados_watchcb2_t wcb;
+ rados_watcherrcb_t errcb;
+ void *data;
-struct ceph_osd_event_work {
- struct work_struct work;
- struct ceph_osd_event *event;
- u64 ver;
- u64 notify_id;
- u8 opcode;
+ struct page ***preply_pages;
+ size_t *preply_len;
};
struct ceph_osd_client {
struct ceph_client *client;
struct ceph_osdmap *osdmap; /* current map */
- struct rw_semaphore map_sem;
- struct completion map_waiters;
- u64 last_requested_map;
+ struct rw_semaphore lock;
- struct mutex request_mutex;
struct rb_root osds; /* osds */
struct list_head osd_lru; /* idle osds */
- u64 timeout_tid; /* tid of timeout triggering rq */
- u64 last_tid; /* tid of last request */
- struct rb_root requests; /* pending requests */
- struct list_head req_lru; /* in-flight lru */
- struct list_head req_unsent; /* unsent/need-resend queue */
- struct list_head req_notarget; /* map to no osd */
- struct list_head req_linger; /* lingering requests */
- int num_requests;
+ spinlock_t osd_lru_lock;
+ struct ceph_osd homeless_osd;
+ atomic64_t last_tid; /* tid of last request */
+ u64 last_linger_id;
+ struct rb_root linger_requests; /* lingering requests */
+ struct rb_root map_checks;
+ struct rb_root linger_map_checks;
+ atomic_t num_requests;
+ atomic_t num_homeless;
struct delayed_work timeout_work;
struct delayed_work osds_timeout_work;
#ifdef CONFIG_DEBUG_FS
@@ -231,10 +276,6 @@ struct ceph_osd_client {
struct ceph_msgpool msgpool_op;
struct ceph_msgpool msgpool_op_reply;
- spinlock_t event_lock;
- struct rb_root event_tree;
- u64 event_count;
-
struct workqueue_struct *notify_wq;
};
@@ -271,9 +312,6 @@ extern void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
extern struct ceph_osd_data *osd_req_op_extent_osd_data(
struct ceph_osd_request *osd_req,
unsigned int which);
-extern struct ceph_osd_data *osd_req_op_cls_response_data(
- struct ceph_osd_request *osd_req,
- unsigned int which);
extern void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *,
unsigned int which,
@@ -309,9 +347,6 @@ extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req,
extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
u16 opcode, const char *name, const void *value,
size_t size, u8 cmp_op, u8 cmp_mode);
-extern void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
- unsigned int which, u16 opcode,
- u64 cookie, u64 version, int flag);
extern void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
unsigned int which,
u64 expected_object_size,
@@ -322,11 +357,7 @@ extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *
unsigned int num_ops,
bool use_mempool,
gfp_t gfp_flags);
-
-extern void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off,
- struct ceph_snap_context *snapc,
- u64 snap_id,
- struct timespec *mtime);
+int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp);
extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
struct ceph_file_layout *layout,
@@ -338,9 +369,6 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
u32 truncate_seq, u64 truncate_size,
bool use_mempool);
-extern void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req);
-
extern void ceph_osdc_get_request(struct ceph_osd_request *req);
extern void ceph_osdc_put_request(struct ceph_osd_request *req);
@@ -353,6 +381,7 @@ extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
extern void ceph_osdc_sync(struct ceph_osd_client *osdc);
extern void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc);
+void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc);
extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
struct ceph_vino vino,
@@ -371,11 +400,33 @@ extern int ceph_osdc_writepages(struct ceph_osd_client *osdc,
struct timespec *mtime,
struct page **pages, int nr_pages);
-/* watch/notify events */
-extern int ceph_osdc_create_event(struct ceph_osd_client *osdc,
- void (*event_cb)(u64, u64, u8, void *),
- void *data, struct ceph_osd_event **pevent);
-extern void ceph_osdc_cancel_event(struct ceph_osd_event *event);
-extern void ceph_osdc_put_event(struct ceph_osd_event *event);
+/* watch/notify */
+struct ceph_osd_linger_request *
+ceph_osdc_watch(struct ceph_osd_client *osdc,
+ struct ceph_object_id *oid,
+ struct ceph_object_locator *oloc,
+ rados_watchcb2_t wcb,
+ rados_watcherrcb_t errcb,
+ void *data);
+int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
+ struct ceph_osd_linger_request *lreq);
+
+int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
+ struct ceph_object_id *oid,
+ struct ceph_object_locator *oloc,
+ u64 notify_id,
+ u64 cookie,
+ void *payload,
+ size_t payload_len);
+int ceph_osdc_notify(struct ceph_osd_client *osdc,
+ struct ceph_object_id *oid,
+ struct ceph_object_locator *oloc,
+ void *payload,
+ size_t payload_len,
+ u32 timeout,
+ struct page ***preply_pages,
+ size_t *preply_len);
+int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
+ struct ceph_osd_linger_request *lreq);
#endif
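A hedged sketch of a caller of the new watch/notify entry points (the context struct and all names are hypothetical; the callback signatures follow the rados_watchcb2_t/rados_watcherrcb_t typedefs above):

struct my_watch_ctx {
	struct ceph_osd_client *osdc;
	struct ceph_object_id oid;
	struct ceph_object_locator oloc;
};

static void my_watch_cb(void *arg, u64 notify_id, u64 cookie,
			u64 notifier_id, void *data, size_t data_len)
{
	struct my_watch_ctx *ctx = arg;

	/* ack promptly; otherwise the notifier waits out its timeout */
	ceph_osdc_notify_ack(ctx->osdc, &ctx->oid, &ctx->oloc,
			     notify_id, cookie, NULL, 0);
}

static void my_watch_err(void *arg, u64 cookie, int err)
{
	/* watch lost; a caller would typically re-register here */
}

/* registration; the returned linger request is later passed to unwatch */
lreq = ceph_osdc_watch(ctx->osdc, &ctx->oid, &ctx->oloc,
		       my_watch_cb, my_watch_err, ctx);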
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index e55c08bc3a96..ddc426b22d81 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -24,21 +24,29 @@ struct ceph_pg {
uint32_t seed;
};
-#define CEPH_POOL_FLAG_HASHPSPOOL 1
+int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs);
+
+#define CEPH_POOL_FLAG_HASHPSPOOL (1ULL << 0) /* hash pg seed and pool id
+ together */
+#define CEPH_POOL_FLAG_FULL (1ULL << 1) /* pool is full */
struct ceph_pg_pool_info {
struct rb_node node;
s64 id;
- u8 type;
+ u8 type; /* CEPH_POOL_TYPE_* */
u8 size;
+ u8 min_size;
u8 crush_ruleset;
u8 object_hash;
+ u32 last_force_request_resend;
u32 pg_num, pgp_num;
int pg_num_mask, pgp_num_mask;
s64 read_tier;
s64 write_tier; /* wins for read+write ops */
- u64 flags;
+ u64 flags; /* CEPH_POOL_FLAG_* */
char *name;
+
+ bool was_full; /* for handle_one_map() */
};
static inline bool ceph_can_shift_osds(struct ceph_pg_pool_info *pool)
@@ -57,6 +65,22 @@ struct ceph_object_locator {
s64 pool;
};
+static inline void ceph_oloc_init(struct ceph_object_locator *oloc)
+{
+ oloc->pool = -1;
+}
+
+static inline bool ceph_oloc_empty(const struct ceph_object_locator *oloc)
+{
+ return oloc->pool == -1;
+}
+
+static inline void ceph_oloc_copy(struct ceph_object_locator *dest,
+ const struct ceph_object_locator *src)
+{
+ dest->pool = src->pool;
+}
+
/*
* Maximum supported by kernel client object name length
*
@@ -64,11 +88,47 @@ struct ceph_object_locator {
*/
#define CEPH_MAX_OID_NAME_LEN 100
+/*
+ * 51-char inline_name is long enough for all cephfs and all but one
+ * rbd requests: <imgname> in "<imgname>.rbd"/"rbd_id.<imgname>" can be
+ * arbitrarily long (~PAGE_SIZE). It's done once during rbd map; all
+ * other rbd requests fit into inline_name.
+ *
+ * Makes ceph_object_id 64 bytes on 64-bit.
+ */
+#define CEPH_OID_INLINE_LEN 52
+
+/*
+ * Both inline and external buffers have space for a NUL-terminator,
+ * which is carried around. It's not required though - RADOS object
+ * names don't have to be NUL-terminated and may contain NULs.
+ */
struct ceph_object_id {
- char name[CEPH_MAX_OID_NAME_LEN];
+ char *name;
+ char inline_name[CEPH_OID_INLINE_LEN];
int name_len;
};
+static inline void ceph_oid_init(struct ceph_object_id *oid)
+{
+ oid->name = oid->inline_name;
+ oid->name_len = 0;
+}
+
+static inline bool ceph_oid_empty(const struct ceph_object_id *oid)
+{
+ return oid->name == oid->inline_name && !oid->name_len;
+}
+
+void ceph_oid_copy(struct ceph_object_id *dest,
+ const struct ceph_object_id *src);
+__printf(2, 3)
+void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...);
+__printf(3, 4)
+int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
+ const char *fmt, ...);
+void ceph_oid_destroy(struct ceph_object_id *oid);
+
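A hedged sketch of the object-id lifecycle these helpers imply (image_name is a hypothetical input; the GFP flag is illustrative):

	struct ceph_object_id oid;
	int ret;

	ceph_oid_init(&oid);		/* name points at inline_name, len 0 */
	ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "rbd_id.%s", image_name);
	if (ret)
		return ret;		/* heap allocation failed for a long name */

	/* ... use oid ... */

	ceph_oid_destroy(&oid);		/* frees name only if it left inline_name */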
struct ceph_pg_mapping {
struct rb_node node;
struct ceph_pg pgid;
@@ -87,7 +147,6 @@ struct ceph_pg_mapping {
struct ceph_osdmap {
struct ceph_fsid fsid;
u32 epoch;
- u32 mkfs_epoch;
struct ceph_timespec created, modified;
u32 flags; /* CEPH_OSDMAP_* */
@@ -113,43 +172,19 @@ struct ceph_osdmap {
int crush_scratch_ary[CEPH_PG_MAX_SIZE * 3];
};
-static inline void ceph_oid_set_name(struct ceph_object_id *oid,
- const char *name)
-{
- int len;
-
- len = strlen(name);
- if (len > sizeof(oid->name)) {
- WARN(1, "ceph_oid_set_name '%s' len %d vs %zu, truncating\n",
- name, len, sizeof(oid->name));
- len = sizeof(oid->name);
- }
-
- memcpy(oid->name, name, len);
- oid->name_len = len;
-}
-
-static inline void ceph_oid_copy(struct ceph_object_id *dest,
- struct ceph_object_id *src)
-{
- BUG_ON(src->name_len > sizeof(dest->name));
- memcpy(dest->name, src->name, src->name_len);
- dest->name_len = src->name_len;
-}
-
-static inline int ceph_osd_exists(struct ceph_osdmap *map, int osd)
+static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd)
{
return osd >= 0 && osd < map->max_osd &&
(map->osd_state[osd] & CEPH_OSD_EXISTS);
}
-static inline int ceph_osd_is_up(struct ceph_osdmap *map, int osd)
+static inline bool ceph_osd_is_up(struct ceph_osdmap *map, int osd)
{
return ceph_osd_exists(map, osd) &&
(map->osd_state[osd] & CEPH_OSD_UP);
}
-static inline int ceph_osd_is_down(struct ceph_osdmap *map, int osd)
+static inline bool ceph_osd_is_down(struct ceph_osdmap *map, int osd)
{
return !ceph_osd_is_up(map, osd);
}
@@ -192,28 +227,59 @@ static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid)
return 0;
}
+struct ceph_osdmap *ceph_osdmap_alloc(void);
extern struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end);
-extern struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
- struct ceph_osdmap *map,
- struct ceph_messenger *msgr);
+struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
+ struct ceph_osdmap *map);
extern void ceph_osdmap_destroy(struct ceph_osdmap *map);
+struct ceph_osds {
+ int osds[CEPH_PG_MAX_SIZE];
+ int size;
+ int primary; /* id, NOT index */
+};
+
+static inline void ceph_osds_init(struct ceph_osds *set)
+{
+ set->size = 0;
+ set->primary = -1;
+}
+
+void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src);
+
+bool ceph_is_new_interval(const struct ceph_osds *old_acting,
+ const struct ceph_osds *new_acting,
+ const struct ceph_osds *old_up,
+ const struct ceph_osds *new_up,
+ int old_size,
+ int new_size,
+ int old_min_size,
+ int new_min_size,
+ u32 old_pg_num,
+ u32 new_pg_num,
+ bool old_sort_bitwise,
+ bool new_sort_bitwise,
+ const struct ceph_pg *pgid);
+bool ceph_osds_changed(const struct ceph_osds *old_acting,
+ const struct ceph_osds *new_acting,
+ bool any_change);
+
/* calculate mapping of a file extent to an object */
extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
u64 off, u64 len,
u64 *bno, u64 *oxoff, u64 *oxlen);
-/* calculate mapping of object to a placement group */
-extern int ceph_oloc_oid_to_pg(struct ceph_osdmap *osdmap,
- struct ceph_object_locator *oloc,
- struct ceph_object_id *oid,
- struct ceph_pg *pg_out);
-
-extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap,
- struct ceph_pg pgid,
- int *osds, int *primary);
-extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap,
- struct ceph_pg pgid);
+int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
+ struct ceph_object_id *oid,
+ struct ceph_object_locator *oloc,
+ struct ceph_pg *raw_pgid);
+
+void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
+ const struct ceph_pg *raw_pgid,
+ struct ceph_osds *up,
+ struct ceph_osds *acting);
+int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
+ const struct ceph_pg *raw_pgid);
extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map,
u64 id);
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index 2f822dca1046..5c0da61cb763 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -114,8 +114,8 @@ struct ceph_object_layout {
* compound epoch+version, used by storage layer to serialize mutations
*/
struct ceph_eversion {
- __le32 epoch;
__le64 version;
+ __le32 epoch;
} __attribute__ ((packed));
/*
@@ -153,6 +153,11 @@ extern const char *ceph_osd_state_name(int s);
#define CEPH_OSDMAP_NOIN (1<<8) /* block osd auto mark-in */
#define CEPH_OSDMAP_NOBACKFILL (1<<9) /* block osd backfill */
#define CEPH_OSDMAP_NORECOVER (1<<10) /* block osd recovery and backfill */
+#define CEPH_OSDMAP_NOSCRUB (1<<11) /* block periodic scrub */
+#define CEPH_OSDMAP_NODEEP_SCRUB (1<<12) /* block periodic deep-scrub */
+#define CEPH_OSDMAP_NOTIERAGENT (1<<13) /* disable tiering agent */
+#define CEPH_OSDMAP_NOREBALANCE (1<<14) /* block osd backfill unless pg is degraded */
+#define CEPH_OSDMAP_SORTBITWISE (1<<15) /* use bitwise hobject_t sort */
/*
* The error code to return when an OSD can't handle a write
@@ -389,6 +394,13 @@ enum {
CEPH_OSD_FLAG_SKIPRWLOCKS = 0x10000, /* skip rw locks */
CEPH_OSD_FLAG_IGNORE_OVERLAY = 0x20000, /* ignore pool overlay */
CEPH_OSD_FLAG_FLUSH = 0x40000, /* this is part of flush */
+ CEPH_OSD_FLAG_MAP_SNAP_CLONE = 0x80000, /* map snap direct to clone id */
+ CEPH_OSD_FLAG_ENFORCE_SNAPC = 0x100000, /* use snapc provided even if
+ pool uses pool snaps */
+ CEPH_OSD_FLAG_REDIRECTED = 0x200000, /* op has been redirected */
+ CEPH_OSD_FLAG_KNOWN_REDIR = 0x400000, /* redirect bit is authoritative */
+ CEPH_OSD_FLAG_FULL_TRY = 0x800000, /* try op despite full flag */
+ CEPH_OSD_FLAG_FULL_FORCE = 0x1000000, /* force op despite full flag */
};
enum {
@@ -415,7 +427,17 @@ enum {
CEPH_OSD_CMPXATTR_MODE_U64 = 2
};
-#define RADOS_NOTIFY_VER 1
+enum {
+ CEPH_OSD_WATCH_OP_UNWATCH = 0,
+ CEPH_OSD_WATCH_OP_LEGACY_WATCH = 1,
+ /* note: use only ODD ids to prevent pre-giant code from
+ interpreting the op as UNWATCH */
+ CEPH_OSD_WATCH_OP_WATCH = 3,
+ CEPH_OSD_WATCH_OP_RECONNECT = 5,
+ CEPH_OSD_WATCH_OP_PING = 7,
+};
+
+const char *ceph_osd_watch_op_name(int o);
/*
* an individual object operation. each may be accompanied by some data
@@ -450,10 +472,14 @@ struct ceph_osd_op {
} __attribute__ ((packed)) snap;
struct {
__le64 cookie;
- __le64 ver;
- __u8 flag; /* 0 = unwatch, 1 = watch */
+ __le64 ver; /* no longer used */
+ __u8 op; /* CEPH_OSD_WATCH_OP_* */
+ __le32 gen; /* registration generation */
} __attribute__ ((packed)) watch;
struct {
+ __le64 cookie;
+ } __attribute__ ((packed)) notify;
+ struct {
__le64 offset, length;
__le64 src_offset;
} __attribute__ ((packed)) clonerange;
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 982a6c4a62f3..43d5f0b799c7 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -3,45 +3,62 @@
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/radix-tree.h>
#include <asm/pgtable.h>
+/* We use the lowest available exceptional entry bit for locking */
+#define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
+
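Illustration only (not from this patch) of how such a lock bit is carried in an exceptional radix-tree entry:

	void *locked = (void *)((unsigned long)entry | RADIX_DAX_ENTRY_LOCK);
	bool is_locked = (unsigned long)entry & RADIX_DAX_ENTRY_LOCK;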
ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
get_block_t, dio_iodone_t, int flags);
-int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size);
int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
int dax_truncate_page(struct inode *, loff_t from, get_block_t);
-int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
- dax_iodone_t);
-int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
- dax_iodone_t);
+int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
+int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
+int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
+void dax_wake_mapping_entry_waiter(struct address_space *mapping,
+ pgoff_t index, bool wake_all);
#ifdef CONFIG_FS_DAX
struct page *read_dax_sector(struct block_device *bdev, sector_t n);
+void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index);
+int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
+ unsigned int offset, unsigned int length);
#else
static inline struct page *read_dax_sector(struct block_device *bdev,
sector_t n)
{
return ERR_PTR(-ENXIO);
}
+/* Shouldn't ever be called when dax is disabled. */
+static inline void dax_unlock_mapping_entry(struct address_space *mapping,
+ pgoff_t index)
+{
+ BUG();
+}
+static inline int __dax_zero_page_range(struct block_device *bdev,
+ sector_t sector, unsigned int offset, unsigned int length)
+{
+ return -ENXIO;
+}
#endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
- unsigned int flags, get_block_t, dax_iodone_t);
+ unsigned int flags, get_block_t);
int __dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
- unsigned int flags, get_block_t, dax_iodone_t);
+ unsigned int flags, get_block_t);
#else
static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd, unsigned int flags, get_block_t gb,
- dax_iodone_t di)
+ pmd_t *pmd, unsigned int flags, get_block_t gb)
{
return VM_FAULT_FALLBACK;
}
#define __dax_pmd_fault dax_pmd_fault
#endif
int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
-#define dax_mkwrite(vma, vmf, gb, iod) dax_fault(vma, vmf, gb, iod)
-#define __dax_mkwrite(vma, vmf, gb, iod) __dax_fault(vma, vmf, gb, iod)
+#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb)
+#define __dax_mkwrite(vma, vmf, gb) __dax_fault(vma, vmf, gb)
static inline bool vma_is_dax(struct vm_area_struct *vma)
{
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index f8506e8dd4d4..484c8792da82 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -10,6 +10,7 @@
#include <linux/cache.h>
#include <linux/rcupdate.h>
#include <linux/lockref.h>
+#include <linux/stringhash.h>
struct path;
struct vfsmount;
@@ -52,9 +53,6 @@ struct qstr {
};
#define QSTR_INIT(n,l) { { { .len = l } }, .name = n }
-#define hashlen_hash(hashlen) ((u32) (hashlen))
-#define hashlen_len(hashlen) ((u32)((hashlen) >> 32))
-#define hashlen_create(hash,len) (((u64)(len)<<32)|(u32)(hash))
struct dentry_stat_t {
long nr_dentry;
@@ -65,29 +63,6 @@ struct dentry_stat_t {
};
extern struct dentry_stat_t dentry_stat;
-/* Name hashing routines. Initial hash value */
-/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
-#define init_name_hash() 0
-
-/* partial hash update function. Assume roughly 4 bits per character */
-static inline unsigned long
-partial_name_hash(unsigned long c, unsigned long prevhash)
-{
- return (prevhash + (c << 4) + (c >> 4)) * 11;
-}
-
-/*
- * Finally: cut down the number of bits to a int value (and try to avoid
- * losing bits)
- */
-static inline unsigned long end_name_hash(unsigned long hash)
-{
- return (unsigned int) hash;
-}
-
-/* Compute the hash for a name string. */
-extern unsigned int full_name_hash(const unsigned char *, unsigned int);
-
/*
* Try to keep struct dentry aligned on 64 byte cachelines (this will
* give reasonable cacheline footprint with larger lines without the
diff --git a/include/linux/err.h b/include/linux/err.h
index 56762ab41713..1e3558845e4c 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -18,7 +18,7 @@
#ifndef __ASSEMBLY__
-#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
+#define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)
static inline void * __must_check ERR_PTR(long error)
{
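The extra (void *) cast is apparently there so that feeding the macro an integer narrower than a pointer now draws a compiler warning, flagging misuse; the intended callers pass pointer-sized values that carry either an address or a -errno. A hedged sketch of that pattern:

static long addr_or_error(unsigned long addr)
{
	if (IS_ERR_VALUE(addr))		/* top 4095 values encode -errno */
		return (long)addr;
	return 0;
}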
diff --git a/include/linux/errno.h b/include/linux/errno.h
index 89627b9187f9..7ce9fb1b7d28 100644
--- a/include/linux/errno.h
+++ b/include/linux/errno.h
@@ -28,5 +28,6 @@
#define EBADTYPE 527 /* Type not supported by server */
#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */
#define EIOCBQUEUED 529 /* iocb queued, will get completion event */
+#define ERECALLCONFLICT 530 /* conflict with recalled state */
#endif
diff --git a/include/linux/export.h b/include/linux/export.h
index 96e45ea463e7..2f9ccbe6a639 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -38,7 +38,7 @@ extern struct module __this_module;
#ifdef CONFIG_MODULES
-#ifndef __GENKSYMS__
+#if defined(__KERNEL__) && !defined(__GENKSYMS__)
#ifdef CONFIG_MODVERSIONS
/* Mark the CRC weak since genksyms apparently decides not to
* generate a checksums for some symbols */
@@ -53,7 +53,7 @@ extern struct module __this_module;
#endif
/* For every exported symbol, place a struct in the __ksymtab section */
-#define __EXPORT_SYMBOL(sym, sec) \
+#define ___EXPORT_SYMBOL(sym, sec) \
extern typeof(sym) sym; \
__CRC_SYMBOL(sym, sec) \
static const char __kstrtab_##sym[] \
@@ -65,6 +65,35 @@ extern struct module __this_module;
__attribute__((section("___ksymtab" sec "+" #sym), unused)) \
= { (unsigned long)&sym, __kstrtab_##sym }
+#if defined(__KSYM_DEPS__)
+
+/*
+ * For fine-grained build dependencies, we want to tell the build system
+ * about each possible exported symbol even if it is not actually exported.
+ * We use a string pattern that is unlikely to be valid code, which the
+ * build system filters out of the preprocessor output (see ksym_dep_filter
+ * in scripts/Kbuild.include).
+ */
+#define __EXPORT_SYMBOL(sym, sec) === __KSYM_##sym ===
+
+#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
+
+#include <linux/kconfig.h>
+#include <generated/autoksyms.h>
+
+#define __EXPORT_SYMBOL(sym, sec) \
+ __cond_export_sym(sym, sec, config_enabled(__KSYM_##sym))
+#define __cond_export_sym(sym, sec, conf) \
+ ___cond_export_sym(sym, sec, conf)
+#define ___cond_export_sym(sym, sec, enabled) \
+ __cond_export_sym_##enabled(sym, sec)
+#define __cond_export_sym_1(sym, sec) ___EXPORT_SYMBOL(sym, sec)
+#define __cond_export_sym_0(sym, sec) /* nothing */
+
+#else
+#define __EXPORT_SYMBOL ___EXPORT_SYMBOL
+#endif
+
#define EXPORT_SYMBOL(sym) \
__EXPORT_SYMBOL(sym, "")
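A sketch of the expansion when CONFIG_TRIM_UNUSED_KSYMS is set and <generated/autoksyms.h> defines __KSYM_foo as 1 (the symbol name foo is hypothetical):

/*
 * EXPORT_SYMBOL(foo)
 *   -> __EXPORT_SYMBOL(foo, "")
 *   -> __cond_export_sym(foo, "", config_enabled(__KSYM_foo))
 *   -> ___cond_export_sym(foo, "", 1)
 *   -> __cond_export_sym_1(foo, "")
 *   -> ___EXPORT_SYMBOL(foo, "")        (real ksymtab entry emitted)
 *
 * With __KSYM_foo undefined, config_enabled() evaluates to 0 and the
 * chain ends in __cond_export_sym_0(), which emits nothing.
 */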
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 5f61431d8673..dd288148a6b1 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -74,7 +74,6 @@ typedef int (get_block_t)(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
ssize_t bytes, void *private);
-typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate);
#define MAY_EXEC 0x00000001
#define MAY_WRITE 0x00000002
@@ -1730,7 +1729,8 @@ struct inode_operations {
struct inode *, struct dentry *, unsigned int);
int (*setattr) (struct dentry *, struct iattr *);
int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
- int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
+ int (*setxattr) (struct dentry *, struct inode *,
+ const char *, const void *, size_t, int);
ssize_t (*getxattr) (struct dentry *, struct inode *,
const char *, void *, size_t);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
diff --git a/include/linux/hash.h b/include/linux/hash.h
index 79c52fa81cac..ad6fa21d977b 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -3,92 +3,94 @@
/* Fast hashing routine for ints, longs and pointers.
(C) 2002 Nadia Yvette Chambers, IBM */
-/*
- * Knuth recommends primes in approximately golden ratio to the maximum
- * integer representable by a machine word for multiplicative hashing.
- * Chuck Lever verified the effectiveness of this technique:
- * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
- *
- * These primes are chosen to be bit-sparse, that is operations on
- * them can use shifts and additions instead of multiplications for
- * machines where multiplications are slow.
- */
-
#include <asm/types.h>
#include <linux/compiler.h>
-/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
-#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
-/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
-#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL
-
+/*
+ * The "GOLDEN_RATIO_PRIME" is used in ifs/btrfs/brtfs_inode.h and
+ * fs/inode.c. It's not actually prime any more (the previous primes
+ * were actively bad for hashing), but the name remains.
+ */
#if BITS_PER_LONG == 32
-#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32
#define hash_long(val, bits) hash_32(val, bits)
#elif BITS_PER_LONG == 64
#define hash_long(val, bits) hash_64(val, bits)
-#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64
#else
#error Wordsize not 32 or 64
#endif
/*
- * The above primes are actively bad for hashing, since they are
- * too sparse. The 32-bit one is mostly ok, the 64-bit one causes
- * real problems. Besides, the "prime" part is pointless for the
- * multiplicative hash.
+ * This hash multiplies the input by a large odd number and takes the
+ * high bits. Since multiplication propagates changes to the most
+ * significant end only, it is essential that the high bits of the
+ * product be used for the hash value.
+ *
+ * Chuck Lever verified the effectiveness of this technique:
+ * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
*
* Although a random odd number will do, it turns out that the golden
* ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
- * properties.
+ * properties. (See Knuth vol 3, section 6.4, exercise 9.)
*
- * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2.
- * (See Knuth vol 3, section 6.4, exercise 9.)
+ * These are the negative, (1 - phi) = phi**2 = (3 - sqrt(5))/2,
+ * which is very slightly easier to multiply by and makes no
+ * difference to the hash distribution.
*/
#define GOLDEN_RATIO_32 0x61C88647
#define GOLDEN_RATIO_64 0x61C8864680B583EBull
-static __always_inline u64 hash_64(u64 val, unsigned int bits)
-{
- u64 hash = val;
+#ifdef CONFIG_HAVE_ARCH_HASH
+/* This header may use the GOLDEN_RATIO_xx constants */
+#include <asm/hash.h>
+#endif
-#if BITS_PER_LONG == 64
- hash = hash * GOLDEN_RATIO_64;
-#else
- /* Sigh, gcc can't optimise this alone like it does for 32 bits. */
- u64 n = hash;
- n <<= 18;
- hash -= n;
- n <<= 33;
- hash -= n;
- n <<= 3;
- hash += n;
- n <<= 3;
- hash -= n;
- n <<= 4;
- hash += n;
- n <<= 2;
- hash += n;
+/*
+ * The _generic versions exist only so lib/test_hash.c can compare
+ * the arch-optimized versions with the generic.
+ *
+ * Note that if you change these, any <asm/hash.h> that aren't updated
+ * to match need to have their HAVE_ARCH_* define values updated so the
+ * self-test will not false-positive.
+ */
+#ifndef HAVE_ARCH__HASH_32
+#define __hash_32 __hash_32_generic
#endif
+static inline u32 __hash_32_generic(u32 val)
+{
+ return val * GOLDEN_RATIO_32;
+}
+#ifndef HAVE_ARCH_HASH_32
+#define hash_32 hash_32_generic
+#endif
+static inline u32 hash_32_generic(u32 val, unsigned int bits)
+{
/* High bits are more random, so use them. */
- return hash >> (64 - bits);
+ return __hash_32(val) >> (32 - bits);
}
-static inline u32 hash_32(u32 val, unsigned int bits)
+#ifndef HAVE_ARCH_HASH_64
+#define hash_64 hash_64_generic
+#endif
+static __always_inline u32 hash_64_generic(u64 val, unsigned int bits)
{
- /* On some cpus multiply is faster, on others gcc will do shifts */
- u32 hash = val * GOLDEN_RATIO_PRIME_32;
-
- /* High bits are more random, so use them. */
- return hash >> (32 - bits);
+#if BITS_PER_LONG == 64
+ /* 64x64-bit multiply is efficient on all 64-bit processors */
+ return val * GOLDEN_RATIO_64 >> (64 - bits);
+#else
+ /* Hash 64 bits using only 32x32-bit multiply. */
+ return hash_32((u32)val ^ __hash_32(val >> 32), bits);
+#endif
}
-static inline unsigned long hash_ptr(const void *ptr, unsigned int bits)
+static inline u32 hash_ptr(const void *ptr, unsigned int bits)
{
return hash_long((unsigned long)ptr, bits);
}
+/* This really should be called fold32_ptr; it does no hashing to speak of. */
static inline u32 hash32_ptr(const void *ptr)
{
unsigned long val = (unsigned long)ptr;
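A short usage sketch of the rewritten primitives (the bucket width is illustrative): because the hash keeps the high bits of the multiply, callers just say how many bits they want back; on 32-bit, hash_64_generic() first folds the halves together with __hash_32().

static inline unsigned int bucket_of(u64 key)
{
	return hash_64(key, 8);	/* top 8 bits of key * GOLDEN_RATIO_64 */
}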
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 92f7177db2ce..f27bb2c62fca 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -19,8 +19,21 @@
/* iova structure */
struct iova {
struct rb_node node;
- unsigned long pfn_hi; /* IOMMU dish out addr hi */
- unsigned long pfn_lo; /* IOMMU dish out addr lo */
+ unsigned long pfn_hi; /* Highest allocated pfn */
+ unsigned long pfn_lo; /* Lowest allocated pfn */
+};
+
+struct iova_magazine;
+struct iova_cpu_rcache;
+
+#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */
+#define MAX_GLOBAL_MAGS 32 /* magazines per bin */
+
+struct iova_rcache {
+ spinlock_t lock;
+ unsigned long depot_size;
+ struct iova_magazine *depot[MAX_GLOBAL_MAGS];
+ struct iova_cpu_rcache __percpu *cpu_rcaches;
};
/* holds all the iova translations for a domain */
@@ -31,6 +44,7 @@ struct iova_domain {
unsigned long granule; /* pfn granularity for this domain */
unsigned long start_pfn; /* Lower limit for this domain */
unsigned long dma_32bit_pfn;
+ struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
};
static inline unsigned long iova_size(struct iova *iova)
@@ -78,6 +92,10 @@ void __free_iova(struct iova_domain *iovad, struct iova *iova);
struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
unsigned long limit_pfn,
bool size_aligned);
+void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
+ unsigned long size);
+unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
+ unsigned long limit_pfn);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi);
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
@@ -87,5 +105,6 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
+void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
#endif
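A hedged usage sketch of the new fast-path pair (names are illustrative; following the rcache design, a returned pfn of 0 is assumed to mean exhaustion):

static unsigned long grab_and_release(struct iova_domain *iovad,
				      unsigned long npages,
				      unsigned long limit_pfn)
{
	unsigned long pfn = alloc_iova_fast(iovad, npages, limit_pfn);

	if (!pfn)			/* assumption: 0 signals failure */
		return 0;
	/* ... map and use [pfn, pfn + npages) ... */
	free_iova_fast(iovad, pfn, npages);	/* may refill a per-CPU magazine */
	return pfn;
}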
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 9e6fdd33bdb2..bfbd707de390 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -273,6 +273,12 @@
#define ICH_LR_ACTIVE_BIT (1ULL << 63)
#define ICH_LR_PHYS_ID_SHIFT 32
#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT)
+#define ICH_LR_PRIORITY_SHIFT 48
+
+/* These are for GICv2 emulation only */
+#define GICH_LR_VIRTUALID (0x3ffUL << 0)
+#define GICH_LR_PHYSID_CPUID_SHIFT (10)
+#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
#define ICH_MISR_EOI (1 << 0)
#define ICH_MISR_U (1 << 1)
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 9c940263ca23..fd051855539b 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -33,6 +33,7 @@
#define GIC_DIST_CTRL 0x000
#define GIC_DIST_CTR 0x004
+#define GIC_DIST_IIDR 0x008
#define GIC_DIST_IGROUP 0x080
#define GIC_DIST_ENABLE_SET 0x100
#define GIC_DIST_ENABLE_CLEAR 0x180
@@ -76,6 +77,7 @@
#define GICH_LR_VIRTUALID (0x3ff << 0)
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
#define GICH_LR_PHYSID_CPUID (0x3ff << GICH_LR_PHYSID_CPUID_SHIFT)
+#define GICH_LR_PRIORITY_SHIFT 23
#define GICH_LR_STATE (3 << 28)
#define GICH_LR_PENDING_BIT (1 << 28)
#define GICH_LR_ACTIVE_BIT (1 << 29)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index b1fa8f11c95b..1c9c973a7dd9 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -412,6 +412,8 @@ struct kvm {
#endif
long tlbs_dirty;
struct list_head devices;
+ struct dentry *debugfs_dentry;
+ struct kvm_stat_data **debugfs_stat_data;
};
#define kvm_err(fmt, ...) \
@@ -991,6 +993,11 @@ enum kvm_stat_kind {
KVM_STAT_VCPU,
};
+struct kvm_stat_data {
+ int offset;
+ struct kvm *kvm;
+};
+
struct kvm_stats_debugfs_item {
const char *name;
int offset;
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 20d8a5d4d133..5145620ba48a 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -182,7 +182,7 @@ static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
#ifdef CONFIG_HAVE_BOOTMEM_INFO_NODE
-extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
+extern void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
#else
static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
{
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index a677c2bd485c..64184d27e3cd 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -50,9 +50,11 @@ enum {
EC_MSG_TX_TRAILER_BYTES,
EC_MSG_RX_PROTO_BYTES = 3,
- /* Max length of messages */
- EC_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE +
+ /* Max length of messages for proto 2 */
+ EC_PROTO2_MSG_BYTES = EC_PROTO2_MAX_PARAM_SIZE +
EC_MSG_TX_PROTO_BYTES,
+
+ EC_MAX_MSG_BYTES = 64 * 1024,
};
/*
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index 8f9fc3d26e6d..8e95cd87cd74 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -134,6 +134,7 @@
#define TWL6040_HFDACENA (1 << 0)
#define TWL6040_HFPGAENA (1 << 1)
#define TWL6040_HFDRVENA (1 << 4)
+#define TWL6040_HFSWENA (1 << 6)
/* VIBCTLL/R (0x18/0x1A) fields */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2835d598d258..5df5feb49575 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -303,6 +303,12 @@ struct vm_fault {
* is set (which is also implied by
* VM_FAULT_ERROR).
*/
+ void *entry; /* ->fault handler can alternatively
+ * return a locked DAX entry. In that
+ * case the handler should return
+ * VM_FAULT_DAX_LOCKED and fill in
+ * entry here.
+ */
/* for ->map_pages() only */
pgoff_t max_pgoff; /* map pages for offset from pgoff till
* max_pgoff inclusive */
@@ -1076,6 +1082,7 @@ static inline void clear_page_pfmemalloc(struct page *page)
#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
#define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */
#define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */
+#define VM_FAULT_DAX_LOCKED 0x1000 /* ->fault has locked DAX entry */
#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
@@ -2011,7 +2018,7 @@ static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif
/* These take the mm semaphore themselves */
-extern unsigned long __must_check vm_brk(unsigned long, unsigned long);
+extern int __must_check vm_brk(unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
unsigned long, unsigned long,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d553855503e6..ca3e517980a0 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -514,7 +514,9 @@ struct mm_struct {
#ifdef CONFIG_HUGETLB_PAGE
atomic_long_t hugetlb_usage;
#endif
+#ifdef CONFIG_MMU
struct work_struct async_put_work;
+#endif
};
static inline void mm_init_cpumask(struct mm_struct *mm)
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 85800b48241f..45cde8cd39f2 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -329,6 +329,7 @@ struct mmc_host {
unsigned int can_retune:1; /* re-tuning can be used */
unsigned int doing_retune:1; /* re-tuning in progress */
unsigned int retune_now:1; /* do re-tuning at next req */
+ unsigned int retune_paused:1; /* re-tuning is temporarily disabled */
int rescan_disable; /* disable card detection */
int rescan_entered; /* used with nonremovable devices */
@@ -526,4 +527,7 @@ static inline void mmc_retune_recheck(struct mmc_host *host)
host->retune_now = 1;
}
+void mmc_retune_pause(struct mmc_host *host);
+void mmc_retune_unpause(struct mmc_host *host);
+
#endif /* LINUX_MMC_HOST_H */
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 011433478a14..bfed6b367350 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -50,12 +50,27 @@ struct nfs4_label {
typedef struct { char data[NFS4_VERIFIER_SIZE]; } nfs4_verifier;
-struct nfs_stateid4 {
- __be32 seqid;
- char other[NFS4_STATEID_OTHER_SIZE];
-} __attribute__ ((packed));
+struct nfs4_stateid_struct {
+ union {
+ char data[NFS4_STATEID_SIZE];
+ struct {
+ __be32 seqid;
+ char other[NFS4_STATEID_OTHER_SIZE];
+ } __attribute__ ((packed));
+ };
+
+ enum {
+ NFS4_INVALID_STATEID_TYPE = 0,
+ NFS4_SPECIAL_STATEID_TYPE,
+ NFS4_OPEN_STATEID_TYPE,
+ NFS4_LOCK_STATEID_TYPE,
+ NFS4_DELEGATION_STATEID_TYPE,
+ NFS4_LAYOUT_STATEID_TYPE,
+ NFS4_PNFS_DS_STATEID_TYPE,
+ } type;
+};
-typedef struct nfs_stateid4 nfs4_stateid;
+typedef struct nfs4_stateid_struct nfs4_stateid;
enum nfs_opnum4 {
OP_ACCESS = 3,
@@ -504,6 +519,7 @@ enum {
NFSPROC4_CLNT_DEALLOCATE,
NFSPROC4_CLNT_LAYOUTSTATS,
NFSPROC4_CLNT_CLONE,
+ NFSPROC4_CLNT_COPY,
};
/* nfs41 types */
@@ -621,7 +637,9 @@ enum pnfs_update_layout_reason {
PNFS_UPDATE_LAYOUT_IO_TEST_FAIL,
PNFS_UPDATE_LAYOUT_FOUND_CACHED,
PNFS_UPDATE_LAYOUT_RETURN,
+ PNFS_UPDATE_LAYOUT_RETRY,
PNFS_UPDATE_LAYOUT_BLOCKED,
+ PNFS_UPDATE_LAYOUT_INVALID_OPEN,
PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET,
};
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 7fcc13c8cf1f..14a762d2734d 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -246,5 +246,6 @@ struct nfs_server {
#define NFS_CAP_DEALLOCATE (1U << 21)
#define NFS_CAP_LAYOUTSTATS (1U << 22)
#define NFS_CAP_CLONE (1U << 23)
+#define NFS_CAP_COPY (1U << 24)
#endif
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index ee8491dadbf3..c304a11b5b1a 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -233,7 +233,6 @@ struct nfs4_layoutget_args {
struct inode *inode;
struct nfs_open_context *ctx;
nfs4_stateid stateid;
- unsigned long timestamp;
struct nfs4_layoutdriver_data layout;
};
@@ -251,7 +250,6 @@ struct nfs4_layoutget {
struct nfs4_layoutget_res res;
struct rpc_cred *cred;
gfp_t gfp_flags;
- long timeout;
};
struct nfs4_getdeviceinfo_args {
@@ -1343,6 +1341,32 @@ struct nfs42_falloc_res {
const struct nfs_server *falloc_server;
};
+struct nfs42_copy_args {
+ struct nfs4_sequence_args seq_args;
+
+ struct nfs_fh *src_fh;
+ nfs4_stateid src_stateid;
+ u64 src_pos;
+
+ struct nfs_fh *dst_fh;
+ nfs4_stateid dst_stateid;
+ u64 dst_pos;
+
+ u64 count;
+};
+
+struct nfs42_write_res {
+ u64 count;
+ struct nfs_writeverf verifier;
+};
+
+struct nfs42_copy_res {
+ struct nfs4_sequence_res seq_res;
+ struct nfs42_write_res write_res;
+ bool consecutive;
+ bool synchronous;
+};
+
struct nfs42_seek_args {
struct nfs4_sequence_args seq_args;
@@ -1431,7 +1455,7 @@ struct nfs_commit_completion_ops {
};
struct nfs_commit_info {
- spinlock_t *lock; /* inode->i_lock */
+ struct inode *inode; /* Needed for inode->i_lock */
struct nfs_mds_commit_info *mds;
struct pnfs_ds_commit_info *ds;
struct nfs_direct_req *dreq; /* O_DIRECT request */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 44f33834ad78..1a827cecd62f 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -61,6 +61,14 @@ struct perf_callchain_entry {
__u64 ip[0]; /* /proc/sys/kernel/perf_event_max_stack */
};
+struct perf_callchain_entry_ctx {
+ struct perf_callchain_entry *entry;
+ u32 max_stack;
+ u32 nr;
+ short contexts;
+ bool contexts_maxed;
+};
+
struct perf_raw_record {
u32 size;
void *data;
@@ -1061,20 +1069,36 @@ extern void perf_event_fork(struct task_struct *tsk);
/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
-extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs);
-extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs);
+extern void perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs);
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
- bool crosstask, bool add_mark);
+ u32 max_stack, bool crosstask, bool add_mark);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);
extern int sysctl_perf_event_max_stack;
+extern int sysctl_perf_event_max_contexts_per_stack;
+
+static inline int perf_callchain_store_context(struct perf_callchain_entry_ctx *ctx, u64 ip)
+{
+ if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) {
+ struct perf_callchain_entry *entry = ctx->entry;
+ entry->ip[entry->nr++] = ip;
+ ++ctx->contexts;
+ return 0;
+ } else {
+ ctx->contexts_maxed = true;
+ return -1; /* no more room, stop walking the stack */
+ }
+}
-static inline int perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+static inline int perf_callchain_store(struct perf_callchain_entry_ctx *ctx, u64 ip)
{
- if (entry->nr < sysctl_perf_event_max_stack) {
+ if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) {
+ struct perf_callchain_entry *entry = ctx->entry;
entry->ip[entry->nr++] = ip;
+ ++ctx->nr;
return 0;
} else {
return -1; /* no more room, stop walking the stack */
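Under the new scheme an architecture walker stores plain addresses with perf_callchain_store(), while context markers (PERF_CONTEXT_KERNEL and friends) go through perf_callchain_store_context() and are budgeted separately by sysctl_perf_event_max_contexts_per_stack. A hedged arch-side sketch:

static void begin_kernel_walk(struct perf_callchain_entry_ctx *ctx,
			      struct pt_regs *regs)
{
	perf_callchain_store_context(ctx, PERF_CONTEXT_KERNEL);
	perf_callchain_store(ctx, instruction_pointer(regs));
	/* ... keep unwinding while perf_callchain_store() returns 0 ... */
}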
diff --git a/include/linux/platform_data/at24.h b/include/linux/platform_data/at24.h
index dc9a13e5acda..be830b141d83 100644
--- a/include/linux/platform_data/at24.h
+++ b/include/linux/platform_data/at24.h
@@ -26,7 +26,7 @@
*
* An example in pseudo code for a setup() callback:
*
- * void get_mac_addr(struct mvmem_device *nvmem, void *context)
+ * void get_mac_addr(struct nvmem_device *nvmem, void *context)
* {
* u8 *mac_addr = ethernet_pdata->mac_addr;
* off_t offset = context;
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index d1c12d160ace..d37fbb34d06f 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -156,6 +156,7 @@ extern void downgrade_write(struct rw_semaphore *sem);
*/
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
+extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
# define down_write_nest_lock(sem, nest_lock) \
@@ -176,6 +177,7 @@ extern void up_read_non_owner(struct rw_semaphore *sem);
# define down_read_nested(sem, subclass) down_read(sem)
# define down_write_nest_lock(sem, nest_lock) down_write(sem)
# define down_write_nested(sem, subclass) down_write(sem)
+# define down_write_killable_nested(sem, subclass) down_write_killable(sem)
# define down_read_non_owner(sem) down_read(sem)
# define up_read_non_owner(sem) up_read(sem)
#endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 21c26e78aec5..6e42ada26345 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1539,6 +1539,7 @@ struct task_struct {
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
unsigned sched_migrated:1;
+ unsigned sched_remote_wakeup:1;
unsigned :0; /* force alignment to the next boundary */
/* unserialized, strictly 'current' */
@@ -2744,10 +2745,12 @@ static inline bool mmget_not_zero(struct mm_struct *mm)
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
-/* same as above but performs the slow path from the async kontext. Can
+#ifdef CONFIG_MMU
+/* same as above but performs the slow path from the async context. Can
* be called from the atomic context as well
*/
extern void mmput_async(struct mm_struct *);
+#endif
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index e0582106ef4f..7973a821ac58 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -277,7 +277,7 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
static inline int raw_read_seqcount_latch(seqcount_t *s)
{
- return lockless_dereference(s->sequence);
+ return lockless_dereference(s)->sequence;
}
/**
@@ -331,7 +331,7 @@ static inline int raw_read_seqcount_latch(seqcount_t *s)
* unsigned seq, idx;
*
* do {
- * seq = lockless_dereference(latch->seq);
+ * seq = lockless_dereference(latch)->seq;
*
* idx = seq & 0x01;
* entry = data_query(latch->data[idx], ...);
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 665cd0cd18b8..d1faa019c02a 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -111,22 +111,6 @@ static inline void sysfs_slab_remove(struct kmem_cache *s)
}
#endif
-
-/**
- * virt_to_obj - returns address of the beginning of object.
- * @s: object's kmem_cache
- * @slab_page: address of slab page
- * @x: address within object memory range
- *
- * Returns address of the beginning of object
- */
-static inline void *virt_to_obj(struct kmem_cache *s,
- const void *slab_page,
- const void *x)
-{
- return (void *)x - ((x - slab_page) % s->size);
-}
-
void object_err(struct kmem_cache *s, struct page *page,
u8 *object, char *reason);
diff --git a/include/linux/stringhash.h b/include/linux/stringhash.h
new file mode 100644
index 000000000000..451771d9b9c0
--- /dev/null
+++ b/include/linux/stringhash.h
@@ -0,0 +1,76 @@
+#ifndef __LINUX_STRINGHASH_H
+#define __LINUX_STRINGHASH_H
+
+#include <linux/compiler.h> /* For __pure */
+#include <linux/types.h> /* For u32, u64 */
+
+/*
+ * Routines for hashing strings of bytes to a 32-bit hash value.
+ *
+ * These hash functions are NOT GUARANTEED STABLE between kernel
+ * versions, architectures, or even repeated boots of the same kernel.
+ * (E.g. they may depend on boot-time hardware detection or be
+ * deliberately randomized.)
+ *
+ * They are also not intended to be secure against collisions caused by
+ * malicious inputs; much slower hash functions are required for that.
+ *
+ * They are optimized for pathname components, meaning short strings.
+ * Even if a majority of files have longer names, the dynamic profile of
+ * pathname components skews short due to short directory names.
+ * (E.g. /usr/lib/libsesquipedalianism.so.3.141.)
+ */
+
+/*
+ * Version 1: one byte at a time. Example of use:
+ *
+ * unsigned long hash = init_name_hash();
+ * while (*p)
+ * hash = partial_name_hash(tolower(*p++), hash);
+ * hash = end_name_hash(hash);
+ *
+ * Although this is designed for bytes, fs/hfsplus/unicode.c
+ * abuses it to hash 16-bit values.
+ */
+
+/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
+#define init_name_hash() 0
+
+/* partial hash update function. Assume roughly 4 bits per character */
+static inline unsigned long
+partial_name_hash(unsigned long c, unsigned long prevhash)
+{
+ return (prevhash + (c << 4) + (c >> 4)) * 11;
+}
+
+/*
+ * Finally: cut down the number of bits to an int value (and try to avoid
+ * losing bits)
+ */
+static inline unsigned long end_name_hash(unsigned long hash)
+{
+ return (unsigned int)hash;
+}
+
+/*
+ * Version 2: One word (32 or 64 bits) at a time.
+ * If CONFIG_DCACHE_WORD_ACCESS is defined (meaning <asm/word-at-a-time.h>
+ * exists, which describes major Linux platforms like x86 and ARM), then
+ * this computes a different hash function much faster.
+ *
+ * If not set, this falls back to a wrapper around the preceding.
+ */
+extern unsigned int __pure full_name_hash(const char *, unsigned int);
+
+/*
+ * A hash_len is a u64 with the hash of a string in the low
+ * half and the length in the high half.
+ */
+#define hashlen_hash(hashlen) ((u32)(hashlen))
+#define hashlen_len(hashlen) ((u32)((hashlen) >> 32))
+#define hashlen_create(hash, len) ((u64)(len)<<32 | (u32)(hash))
+
+/* Return the "hash_len" (hash and length) of a null-terminated string */
+extern u64 __pure hashlen_string(const char *name);
+
+#endif /* __LINUX_STRINGHASH_H */
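
To make the contract between the two interfaces concrete, a sketch of what callers may rely on (the same invariants that test_hash.c, added later in this series, checks exhaustively):

static void stringhash_demo(void)
{
	const char *name = "example";
	u64 hl = hashlen_string(name);	/* hash and length packed in a u64 */
	u32 h = full_name_hash(name, strlen(name));

	WARN_ON(hashlen_hash(hl) != h);		/* both APIs must agree */
	WARN_ON(hashlen_len(hl) != strlen(name));
}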
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 6a241a277249..899791573a40 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -127,7 +127,7 @@ struct rpc_authops {
void (*destroy)(struct rpc_auth *);
struct rpc_cred * (*lookup_cred)(struct rpc_auth *, struct auth_cred *, int);
- struct rpc_cred * (*crcreate)(struct rpc_auth*, struct auth_cred *, int);
+ struct rpc_cred * (*crcreate)(struct rpc_auth*, struct auth_cred *, int, gfp_t);
int (*list_pseudoflavors)(rpc_authflavor_t *, int);
rpc_authflavor_t (*info2flavor)(struct rpcsec_gss_info *);
int (*flavor2info)(rpc_authflavor_t,
@@ -167,6 +167,7 @@ void rpc_destroy_authunix(void);
struct rpc_cred * rpc_lookup_cred(void);
struct rpc_cred * rpc_lookup_cred_nonblock(void);
+struct rpc_cred * rpc_lookup_generic_cred(struct auth_cred *, int, gfp_t);
struct rpc_cred * rpc_lookup_machine_cred(const char *service_name);
int rpcauth_register(const struct rpc_authops *);
int rpcauth_unregister(const struct rpc_authops *);
@@ -178,7 +179,7 @@ rpc_authflavor_t rpcauth_get_pseudoflavor(rpc_authflavor_t,
int rpcauth_get_gssinfo(rpc_authflavor_t,
struct rpcsec_gss_info *);
int rpcauth_list_flavors(rpc_authflavor_t *, int);
-struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int);
+struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int, gfp_t);
void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *);
struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int);
struct rpc_cred * rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *, int);
@@ -201,9 +202,28 @@ char * rpcauth_stringify_acceptor(struct rpc_cred *);
static inline
struct rpc_cred * get_rpccred(struct rpc_cred *cred)
{
- atomic_inc(&cred->cr_count);
+ if (cred != NULL)
+ atomic_inc(&cred->cr_count);
return cred;
}
+/**
+ * get_rpccred_rcu - get a reference to a cred using rcu-protected pointer
+ * @cred: cred of which to take a reference
+ *
+ * In some cases, we may have a pointer to a credential to which we
+ * want to take a reference, but don't already have one. Because these
+ * objects are freed using RCU, we can access the cr_count while its
+ * on its way to destruction and only take a reference if it's not already
+ * zero.
+ */
+static inline struct rpc_cred *
+get_rpccred_rcu(struct rpc_cred *cred)
+{
+ if (atomic_inc_not_zero(&cred->cr_count))
+ return cred;
+ return NULL;
+}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_AUTH_H */
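
A sketch of the RCU pattern the new helper enables (struct foo is a hypothetical holder of an RCU-managed cred pointer):

struct foo {
	struct rpc_cred __rcu *cred;	/* hypothetical */
};

static struct rpc_cred *foo_get_cred(struct foo *f)
{
	struct rpc_cred *cred = NULL, *p;

	rcu_read_lock();
	p = rcu_dereference(f->cred);
	if (p)
		cred = get_rpccred_rcu(p); /* NULL if cr_count already hit zero */
	rcu_read_unlock();
	return cred;
}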
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 9a7ddbaf116e..19c659d1c0f8 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -176,6 +176,7 @@ void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
int rpc_protocol(struct rpc_clnt *);
struct net * rpc_net_ns(struct rpc_clnt *);
size_t rpc_max_payload(struct rpc_clnt *);
+size_t rpc_max_bc_payload(struct rpc_clnt *);
unsigned long rpc_get_timeout(struct rpc_clnt *clnt);
void rpc_force_rebind(struct rpc_clnt *);
size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h
index 807371357160..59cbf16eaeb5 100644
--- a/include/linux/sunrpc/msg_prot.h
+++ b/include/linux/sunrpc/msg_prot.h
@@ -158,9 +158,9 @@ typedef __be32 rpc_fraghdr;
/*
* Note that RFC 1833 does not put any size restrictions on the
- * netid string, but all currently defined netid's fit in 4 bytes.
+ * netid string, but all currently defined netids fit in 5 bytes.
*/
-#define RPCBIND_MAXNETIDLEN (4u)
+#define RPCBIND_MAXNETIDLEN (5u)
/*
* Universal addresses are introduced in RFC 1833 and further spelled
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index c00f53a4ccdd..91d5a5d6f52b 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -16,6 +16,7 @@
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/hash.h>
+#include <linux/stringhash.h>
#include <linux/cred.h>
struct svc_cred {
@@ -165,41 +166,18 @@ extern int svcauth_unix_set_client(struct svc_rqst *rqstp);
extern int unix_gid_cache_create(struct net *net);
extern void unix_gid_cache_destroy(struct net *net);
-static inline unsigned long hash_str(char *name, int bits)
+/*
+ * The <stringhash.h> functions are good enough that we don't need to
+ * use hash_32() on them; just extracting the high bits is enough.
+ */
+static inline unsigned long hash_str(char const *name, int bits)
{
- unsigned long hash = 0;
- unsigned long l = 0;
- int len = 0;
- unsigned char c;
- do {
- if (unlikely(!(c = *name++))) {
- c = (char)len; len = -1;
- }
- l = (l << 8) | c;
- len++;
- if ((len & (BITS_PER_LONG/8-1))==0)
- hash = hash_long(hash^l, BITS_PER_LONG);
- } while (len);
- return hash >> (BITS_PER_LONG - bits);
+ return hashlen_hash(hashlen_string(name)) >> (32 - bits);
}
-static inline unsigned long hash_mem(char *buf, int length, int bits)
+static inline unsigned long hash_mem(char const *buf, int length, int bits)
{
- unsigned long hash = 0;
- unsigned long l = 0;
- int len = 0;
- unsigned char c;
- do {
- if (len == length) {
- c = (char)len; len = -1;
- } else
- c = *buf++;
- l = (l << 8) | c;
- len++;
- if ((len & (BITS_PER_LONG/8-1))==0)
- hash = hash_long(hash^l, BITS_PER_LONG);
- } while (len);
- return hash >> (BITS_PER_LONG - bits);
+ return full_name_hash(buf, length) >> (32 - bits);
}
#endif /* __KERNEL__ */
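
For instance, bucket selection against a fixed-size table works unchanged with the new implementation, since hash_str() still returns a value below 2^bits (a sketch with a hypothetical table):

#define AUTH_HASH_BITS	7	/* 128 buckets */

static struct hlist_head auth_table[1 << AUTH_HASH_BITS];

static struct hlist_head *auth_bucket(const char *name)
{
	return &auth_table[hash_str(name, AUTH_HASH_BITS)];
}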
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index fb0d212e0d3a..5aa3834619a8 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -142,6 +142,7 @@ struct rpc_xprt_ops {
int (*bc_setup)(struct rpc_xprt *xprt,
unsigned int min_reqs);
int (*bc_up)(struct svc_serv *serv, struct net *net);
+ size_t (*bc_maxpayload)(struct rpc_xprt *xprt);
void (*bc_free_rqst)(struct rpc_rqst *rqst);
void (*bc_destroy)(struct rpc_xprt *xprt,
unsigned int max_reqs);
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index 767190b01363..39267dc3486a 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -52,7 +52,9 @@
#define RPCRDMA_DEF_SLOT_TABLE (128U)
#define RPCRDMA_MAX_SLOT_TABLE (256U)
-#define RPCRDMA_DEF_INLINE (1024) /* default inline max */
+#define RPCRDMA_MIN_INLINE (1024) /* min inline thresh */
+#define RPCRDMA_DEF_INLINE (1024) /* default inline thresh */
+#define RPCRDMA_MAX_INLINE (3068) /* max inline thresh */
/* Memory registration strategies, by number.
* This is part of a kernel / user space API. Do not remove. */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 1b8a5a7876ce..e45abe7db9a6 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -340,6 +340,7 @@ struct thermal_zone_of_device_ops {
int (*get_temp)(void *, int *);
int (*get_trend)(void *, long *);
int (*set_emul_temp)(void *, int);
+ int (*set_trip_temp)(void *, int, int);
};
/**
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 1cc4c578deb9..94079bab9243 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -33,8 +33,8 @@ struct xattr_handler {
struct inode *inode, const char *name, void *buffer,
size_t size);
int (*set)(const struct xattr_handler *, struct dentry *dentry,
- const char *name, const void *buffer, size_t size,
- int flags);
+ struct inode *inode, const char *name, const void *buffer,
+ size_t size, int flags);
};
const char *xattr_full_name(const struct xattr_handler *, const char *);
@@ -54,7 +54,8 @@ int vfs_removexattr(struct dentry *, const char *);
ssize_t generic_getxattr(struct dentry *dentry, struct inode *inode, const char *name, void *buffer, size_t size);
ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
-int generic_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags);
+int generic_setxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value, size_t size, int flags);
int generic_removexattr(struct dentry *dentry, const char *name);
ssize_t vfs_getxattr_alloc(struct dentry *dentry, const char *name,
char **xattr_value, size_t size, gfp_t flags);
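
A sketch of a handler under the new signature (the foo_* names are hypothetical); the point of the change is that the target inode now arrives explicitly instead of being derived from the dentry:

static int foo_xattr_set(const struct xattr_handler *handler,
			 struct dentry *dentry, struct inode *inode,
			 const char *name, const void *value,
			 size_t size, int flags)
{
	/* Use the inode passed in; for stacked filesystems it can differ
	 * from d_inode(dentry).  foo_do_setxattr() is hypothetical. */
	return foo_do_setxattr(inode, name, value, size, flags);
}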
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 37dd534cbeab..c8a773ffe23b 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -239,12 +239,15 @@ struct ib_vendor_mad {
#define IB_MGMT_CLASSPORTINFO_ATTR_ID cpu_to_be16(0x0001)
+#define IB_CLASS_PORT_INFO_RESP_TIME_MASK 0x1F
+#define IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE 5
+
struct ib_class_port_info {
u8 base_version;
u8 class_version;
__be16 capability_mask;
- u8 reserved[3];
- u8 resp_time_value;
+ /* 27 bits for cap_mask2, 5 bits for resp_time */
+ __be32 cap_mask2_resp_time;
u8 redirect_gid[16];
__be32 redirect_tcslfl;
__be16 redirect_lid;
@@ -259,6 +262,59 @@ struct ib_class_port_info {
__be32 trap_qkey;
};
+/**
+ * ib_get_cpi_resp_time - Returns the resp_time value from
+ * cap_mask2_resp_time in ib_class_port_info.
+ * @cpi: A struct ib_class_port_info mad.
+ */
+static inline u8 ib_get_cpi_resp_time(struct ib_class_port_info *cpi)
+{
+ return (u8)(be32_to_cpu(cpi->cap_mask2_resp_time) &
+ IB_CLASS_PORT_INFO_RESP_TIME_MASK);
+}
+
+/**
+ * ib_set_cpi_resp_time - Sets the response time in an
+ * ib_class_port_info mad.
+ * @cpi: A struct ib_class_port_info.
+ * @rtime: The response time to set.
+ */
+static inline void ib_set_cpi_resp_time(struct ib_class_port_info *cpi,
+ u8 rtime)
+{
+ cpi->cap_mask2_resp_time =
+ (cpi->cap_mask2_resp_time &
+ cpu_to_be32(~IB_CLASS_PORT_INFO_RESP_TIME_MASK)) |
+ cpu_to_be32(rtime & IB_CLASS_PORT_INFO_RESP_TIME_MASK);
+}
+
+/**
+ * ib_get_cpi_capmask2 - Returns the capmask2 value from
+ * cap_mask2_resp_time in ib_class_port_info.
+ * @cpi: A struct ib_class_port_info mad.
+ */
+static inline u32 ib_get_cpi_capmask2(struct ib_class_port_info *cpi)
+{
+ return (be32_to_cpu(cpi->cap_mask2_resp_time) >>
+ IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE);
+}
+
+/**
+ * ib_set_cpi_capmask2 - Sets the capmask2 in an
+ * ib_class_port_info mad.
+ * @cpi: A struct ib_class_port_info.
+ * @capmask2: The capmask2 to set.
+ */
+static inline void ib_set_cpi_capmask2(struct ib_class_port_info *cpi,
+ u32 capmask2)
+{
+ cpi->cap_mask2_resp_time =
+ (cpi->cap_mask2_resp_time &
+ cpu_to_be32(IB_CLASS_PORT_INFO_RESP_TIME_MASK)) |
+ cpu_to_be32(capmask2 <<
+ IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE);
+}
+
struct ib_mad_notice_attr {
u8 generic_type;
u8 prod_type_msb;
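
The packed field keeps resp_time in the low 5 bits and cap_mask2 in the upper 27; a round-trip sketch showing the accessors do not clobber each other:

static void cpi_roundtrip_check(void)
{
	struct ib_class_port_info cpi = {};

	ib_set_cpi_resp_time(&cpi, 0x1f);	/* low 5 bits */
	ib_set_cpi_capmask2(&cpi, 0x2aaaaaa);	/* upper 27 bits */

	WARN_ON(ib_get_cpi_resp_time(&cpi) != 0x1f);
	WARN_ON(ib_get_cpi_capmask2(&cpi) != 0x2aaaaaa);
}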
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index 0f3daae44bf9..b13419ce99ff 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -103,6 +103,9 @@ enum {
IB_OPCODE_ATOMIC_ACKNOWLEDGE = 0x12,
IB_OPCODE_COMPARE_SWAP = 0x13,
IB_OPCODE_FETCH_ADD = 0x14,
+ /* opcode 0x15 is reserved */
+ IB_OPCODE_SEND_LAST_WITH_INVALIDATE = 0x16,
+ IB_OPCODE_SEND_ONLY_WITH_INVALIDATE = 0x17,
/* real constants follow -- see comment about above IB_OPCODE()
macro for more details */
@@ -129,6 +132,8 @@ enum {
IB_OPCODE(RC, ATOMIC_ACKNOWLEDGE),
IB_OPCODE(RC, COMPARE_SWAP),
IB_OPCODE(RC, FETCH_ADD),
+ IB_OPCODE(RC, SEND_LAST_WITH_INVALIDATE),
+ IB_OPCODE(RC, SEND_ONLY_WITH_INVALIDATE),
/* UC */
IB_OPCODE(UC, SEND_FIRST),
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index cdc1c81aa275..384041669489 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -94,6 +94,8 @@ enum ib_sa_selector {
IB_SA_BEST = 3
};
+#define IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT BIT(12)
+
/*
* Structures for SA records are named "struct ib_sa_xxx_rec." No
* attempt is made to pack structures to match the physical layout of
@@ -439,4 +441,14 @@ int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
void *context,
struct ib_sa_query **sa_query);
+/* Support get SA ClassPortInfo */
+int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
+ struct ib_device *device, u8 port_num,
+ int timeout_ms, gfp_t gfp_mask,
+ void (*callback)(int status,
+ struct ib_class_port_info *resp,
+ void *context),
+ void *context,
+ struct ib_sa_query **sa_query);
+
#endif /* IB_SA_H */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index fc0320c004a3..432bed510369 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -403,56 +403,55 @@ enum ib_port_speed {
IB_SPEED_EDR = 32
};
-struct ib_protocol_stats {
- /* TBD... */
-};
-
-struct iw_protocol_stats {
- u64 ipInReceives;
- u64 ipInHdrErrors;
- u64 ipInTooBigErrors;
- u64 ipInNoRoutes;
- u64 ipInAddrErrors;
- u64 ipInUnknownProtos;
- u64 ipInTruncatedPkts;
- u64 ipInDiscards;
- u64 ipInDelivers;
- u64 ipOutForwDatagrams;
- u64 ipOutRequests;
- u64 ipOutDiscards;
- u64 ipOutNoRoutes;
- u64 ipReasmTimeout;
- u64 ipReasmReqds;
- u64 ipReasmOKs;
- u64 ipReasmFails;
- u64 ipFragOKs;
- u64 ipFragFails;
- u64 ipFragCreates;
- u64 ipInMcastPkts;
- u64 ipOutMcastPkts;
- u64 ipInBcastPkts;
- u64 ipOutBcastPkts;
-
- u64 tcpRtoAlgorithm;
- u64 tcpRtoMin;
- u64 tcpRtoMax;
- u64 tcpMaxConn;
- u64 tcpActiveOpens;
- u64 tcpPassiveOpens;
- u64 tcpAttemptFails;
- u64 tcpEstabResets;
- u64 tcpCurrEstab;
- u64 tcpInSegs;
- u64 tcpOutSegs;
- u64 tcpRetransSegs;
- u64 tcpInErrs;
- u64 tcpOutRsts;
-};
-
-union rdma_protocol_stats {
- struct ib_protocol_stats ib;
- struct iw_protocol_stats iw;
-};
+/**
+ * struct rdma_hw_stats
+ * @timestamp - Used by the core code to track when the last update was
+ * @lifespan - Used by the core code to determine how old the counters
+ * should be before being updated again. Stored in jiffies, defaults
+ * to 10 milliseconds; drivers can override the default by specifying
+ * their own value during their allocation routine.
+ * @names - Array of pointers to the static names used for the counters
+ * in the sysfs directory.
+ * @num_counters - How many hardware counters there are. If @names is
+ * shorter than this number, a kernel oops will result. Driver authors
+ * are encouraged to leave BUILD_BUG_ON(ARRAY_SIZE(@names) < num_counters)
+ * in their code to prevent this.
+ * @value - Array of u64 counters that are accessed by the sysfs code and
+ * filled in by the driver's get_hw_stats routine
+ */
+struct rdma_hw_stats {
+ unsigned long timestamp;
+ unsigned long lifespan;
+ const char * const *names;
+ int num_counters;
+ u64 value[];
+};
+
+#define RDMA_HW_STATS_DEFAULT_LIFESPAN 10
+/**
+ * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
+ * for drivers.
+ * @names - Array of static const char *
+ * @num_counters - How many elements in array
+ * @lifespan - How many milliseconds between updates
+ */
+static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
+ const char * const *names, int num_counters,
+ unsigned long lifespan)
+{
+ struct rdma_hw_stats *stats;
+
+ stats = kzalloc(sizeof(*stats) + num_counters * sizeof(u64),
+ GFP_KERNEL);
+ if (!stats)
+ return NULL;
+ stats->names = names;
+ stats->num_counters = num_counters;
+ stats->lifespan = msecs_to_jiffies(lifespan);
+
+ return stats;
+}
+
/* Define bits for the various functionality this port needs to be supported by
* the core.
@@ -1707,8 +1706,29 @@ struct ib_device {
struct iw_cm_verbs *iwcm;
- int (*get_protocol_stats)(struct ib_device *device,
- union rdma_protocol_stats *stats);
+ /**
+ * alloc_hw_stats - Allocate a struct rdma_hw_stats and fill in the
+ * driver initialized data. The struct is kfree()'ed by the sysfs
+ * core when the device is removed. A lifespan of -1 in the return
+ * struct tells the core to set a default lifespan.
+ */
+ struct rdma_hw_stats *(*alloc_hw_stats)(struct ib_device *device,
+ u8 port_num);
+ /**
+ * get_hw_stats - Fill in the counter value(s) in the stats struct.
+ * @index - The index in the value array we wish to have updated, or
+ * num_counters if we want all stats updated
+ * Return codes -
+ * < 0 - Error, no counters updated
+ * index - Updated the single counter pointed to by index
+ * num_counters - Updated all counters (will reset the timestamp
+ * and prevent further calls for lifespan milliseconds)
+ * Drivers are allowed to update all counters in lieu of just the
+ * one given in index at their option
+ */
+ int (*get_hw_stats)(struct ib_device *device,
+ struct rdma_hw_stats *stats,
+ u8 port, int index);
int (*query_device)(struct ib_device *device,
struct ib_device_attr *device_attr,
struct ib_udata *udata);
@@ -1926,6 +1946,8 @@ struct ib_device {
u8 node_type;
u8 phys_port_cnt;
struct ib_device_attr attrs;
+ struct attribute_group *hw_stats_ag;
+ struct rdma_hw_stats *hw_stats;
/**
* The following mandatory functions are used only at device
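
The driver side of the new interface, following the BUILD_BUG_ON pattern the documentation above recommends (all foo_* names hypothetical):

enum { FOO_NUM_COUNTERS = 2 };	/* hypothetical */

static const char * const foo_counter_names[] = {
	"rx_packets", "tx_packets",
};

static struct rdma_hw_stats *foo_alloc_hw_stats(struct ib_device *device,
						u8 port_num)
{
	BUILD_BUG_ON(ARRAY_SIZE(foo_counter_names) < FOO_NUM_COUNTERS);
	return rdma_alloc_hw_stats_struct(foo_counter_names,
					  FOO_NUM_COUNTERS,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}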
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index d57ceee90d26..16274e2133cd 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -149,15 +149,15 @@ struct rvt_driver_params {
int qpn_res_end;
int nports;
int npkeys;
- u8 qos_shift;
char cq_name[RVT_CQN_MAX];
int node;
- int max_rdma_atomic;
int psn_mask;
int psn_shift;
int psn_modify_mask;
u32 core_cap_flags;
u32 max_mad_size;
+ u8 qos_shift;
+ u8 max_rdma_atomic;
};
/* Protection domain */
@@ -426,6 +426,15 @@ static inline unsigned rvt_get_npkeys(struct rvt_dev_info *rdi)
}
/*
+ * Return the max atomic suitable for determining
+ * the size of the ack ring buffer in a QP.
+ */
+static inline unsigned int rvt_max_atomic(struct rvt_dev_info *rdi)
+{
+ return rdi->dparms.max_rdma_atomic + 1;
+}
+
+/*
* Return the indexed PKEY from the port PKEY table.
*/
static inline u16 rvt_get_pkey(struct rvt_dev_info *rdi,
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 0e1ff2abfe92..6d23b879416a 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -211,8 +211,6 @@ struct rvt_mmap_info {
unsigned size;
};
-#define RVT_MAX_RDMA_ATOMIC 16
-
/*
* This structure holds the information that the send tasklet needs
* to send a RDMA read response or atomic operation.
@@ -282,8 +280,7 @@ struct rvt_qp {
atomic_t refcount ____cacheline_aligned_in_smp;
wait_queue_head_t wait;
- struct rvt_ack_entry s_ack_queue[RVT_MAX_RDMA_ATOMIC + 1]
- ____cacheline_aligned_in_smp;
+ struct rvt_ack_entry *s_ack_queue;
struct rvt_sge_state s_rdma_read_sge;
spinlock_t r_lock ____cacheline_aligned_in_smp; /* used for APM */
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index c3371fa548cb..4ac24f5a3308 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -74,6 +74,7 @@ enum iscsit_transport_type {
ISCSI_IWARP_TCP = 3,
ISCSI_IWARP_SCTP = 4,
ISCSI_INFINIBAND = 5,
+ ISCSI_CXGBIT = 6,
};
/* RFC-3720 7.1.4 Standard Connection State Diagram for a Target */
@@ -890,4 +891,30 @@ static inline u32 session_get_next_ttt(struct iscsi_session *session)
}
extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
+
+static inline void iscsit_thread_check_cpumask(
+ struct iscsi_conn *conn,
+ struct task_struct *p,
+ int mode)
+{
+ /*
+ * mode == 1 signals iscsi_target_tx_thread() usage.
+ * mode == 0 signals iscsi_target_rx_thread() usage.
+ */
+ if (mode == 1) {
+ if (!conn->conn_tx_reset_cpumask)
+ return;
+ conn->conn_tx_reset_cpumask = 0;
+ } else {
+ if (!conn->conn_rx_reset_cpumask)
+ return;
+ conn->conn_rx_reset_cpumask = 0;
+ }
+ /*
+ * Update the CPU mask for this single kthread so that
+ * both TX and RX kthreads are scheduled to run on the
+ * same CPU.
+ */
+ set_cpus_allowed_ptr(p, conn->conn_cpumask);
+}
#endif /* ISCSI_TARGET_CORE_H */
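
How the transport threads would call the helper (a sketch; conn and the loop bodies belong to the target core):

/* In iscsi_target_rx_thread()'s main loop: */
iscsit_thread_check_cpumask(conn, current, 0);

/* In iscsi_target_tx_thread()'s main loop: */
iscsit_thread_check_cpumask(conn, current, 1);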
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index 90e37faa2ede..40ac7cd80150 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -6,6 +6,7 @@ struct iscsit_transport {
#define ISCSIT_TRANSPORT_NAME 16
char name[ISCSIT_TRANSPORT_NAME];
int transport_type;
+ bool rdma_shutdown;
int priv_size;
struct module *owner;
struct list_head t_node;
@@ -22,6 +23,13 @@ struct iscsit_transport {
int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *);
int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *);
void (*iscsit_aborted_task)(struct iscsi_conn *, struct iscsi_cmd *);
+ int (*iscsit_xmit_pdu)(struct iscsi_conn *, struct iscsi_cmd *,
+ struct iscsi_datain_req *, const void *, u32);
+ void (*iscsit_release_cmd)(struct iscsi_conn *, struct iscsi_cmd *);
+ void (*iscsit_get_rx_pdu)(struct iscsi_conn *);
+ int (*iscsit_validate_params)(struct iscsi_conn *);
+ void (*iscsit_get_r2t_ttt)(struct iscsi_conn *, struct iscsi_cmd *,
+ struct iscsi_r2t *);
enum target_prot_op (*iscsit_get_sup_prot_ops)(struct iscsi_conn *);
};
@@ -77,6 +85,18 @@ extern void iscsit_build_reject(struct iscsi_cmd *, struct iscsi_conn *,
extern int iscsit_build_logout_rsp(struct iscsi_cmd *, struct iscsi_conn *,
struct iscsi_logout_rsp *);
extern int iscsit_logout_post_handler(struct iscsi_cmd *, struct iscsi_conn *);
+extern int iscsit_queue_rsp(struct iscsi_conn *, struct iscsi_cmd *);
+extern void iscsit_aborted_task(struct iscsi_conn *, struct iscsi_cmd *);
+extern int iscsit_add_reject(struct iscsi_conn *, u8, unsigned char *);
+extern int iscsit_reject_cmd(struct iscsi_cmd *, u8, unsigned char *);
+extern int iscsit_handle_snack(struct iscsi_conn *, unsigned char *);
+extern void iscsit_build_datain_pdu(struct iscsi_cmd *, struct iscsi_conn *,
+ struct iscsi_datain *,
+ struct iscsi_data_rsp *, bool);
+extern int iscsit_build_r2ts_for_cmd(struct iscsi_conn *, struct iscsi_cmd *,
+ bool);
+extern int iscsit_immediate_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
+extern int iscsit_response_queue(struct iscsi_conn *, struct iscsi_cmd *, int);
/*
* From iscsi_target_device.c
*/
@@ -102,3 +122,24 @@ extern struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *, int);
extern int iscsit_sequence_cmd(struct iscsi_conn *, struct iscsi_cmd *,
unsigned char *, __be32);
extern void iscsit_release_cmd(struct iscsi_cmd *);
+extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
+extern void iscsit_add_cmd_to_immediate_queue(struct iscsi_cmd *,
+ struct iscsi_conn *, u8);
+
+/*
+ * From iscsi_target_nego.c
+ */
+extern int iscsi_target_check_login_request(struct iscsi_conn *,
+ struct iscsi_login *);
+
+/*
+ * From iscsi_target_login.c
+ */
+extern __printf(2, 3) int iscsi_change_param_sprintf(
+ struct iscsi_conn *, const char *, ...);
+
+/*
+ * From iscsi_target_parameters.c
+ */
+extern struct iscsi_param *iscsi_find_param_from_key(
+ char *, struct iscsi_param_list *);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 3e0dd86360a2..b316b44d03f3 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -536,7 +536,6 @@ struct se_node_acl {
char initiatorname[TRANSPORT_IQN_LEN];
/* Used to signal demo mode created ACL, disabled by default */
bool dynamic_node_acl;
- bool acl_stop:1;
u32 queue_depth;
u32 acl_index;
enum target_prot_type saved_prot_type;
@@ -603,7 +602,6 @@ struct se_session {
struct list_head sess_cmd_list;
struct list_head sess_wait_list;
spinlock_t sess_cmd_lock;
- struct kref sess_kref;
void *sess_cmd_map;
struct percpu_ida sess_tag_pool;
};
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 78d88f03b296..de44462a7680 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -50,10 +50,6 @@ struct target_core_fabric_ops {
*/
int (*check_stop_free)(struct se_cmd *);
void (*release_cmd)(struct se_cmd *);
- /*
- * Called with spin_lock_bh(struct se_portal_group->session_lock held.
- */
- int (*shutdown_session)(struct se_session *);
void (*close_session)(struct se_session *);
u32 (*sess_get_index)(struct se_session *);
/*
@@ -123,8 +119,6 @@ void __transport_register_session(struct se_portal_group *,
struct se_node_acl *, struct se_session *, void *);
void transport_register_session(struct se_portal_group *,
struct se_node_acl *, struct se_session *, void *);
-int target_get_session(struct se_session *);
-void target_put_session(struct se_session *);
ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *);
void transport_free_session(struct se_session *);
void target_put_nacl(struct se_node_acl *);
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 526fb3d2e43a..f28292d73ddb 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -108,7 +108,7 @@ TRACE_EVENT(kvm_ioapic_set_irq,
__entry->coalesced = coalesced;
),
- TP_printk("pin %u dst %x vec=%u (%s|%s|%s%s)%s",
+ TP_printk("pin %u dst %x vec %u (%s|%s|%s%s)%s",
__entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
__print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
(__entry->e & (1<<11)) ? "logical" : "physical",
@@ -129,7 +129,7 @@ TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
__entry->e = e;
),
- TP_printk("dst %x vec=%u (%s|%s|%s%s)",
+ TP_printk("dst %x vec %u (%s|%s|%s%s)",
(u8)(__entry->e >> 56), (u8)__entry->e,
__print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
(__entry->e & (1<<11)) ? "logical" : "physical",
@@ -151,7 +151,7 @@ TRACE_EVENT(kvm_msi_set_irq,
__entry->data = data;
),
- TP_printk("dst %u vec %x (%s|%s|%s%s)",
+ TP_printk("dst %u vec %u (%s|%s|%s%s)",
(u8)(__entry->address >> 12), (u8)__entry->data,
__print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
(__entry->address & (1<<2)) ? "logical" : "physical",
diff --git a/include/uapi/linux/nvme_ioctl.h b/include/uapi/linux/nvme_ioctl.h
index c4b2a3f90829..50ff21f748b6 100644
--- a/include/uapi/linux/nvme_ioctl.h
+++ b/include/uapi/linux/nvme_ioctl.h
@@ -61,5 +61,6 @@ struct nvme_passthru_cmd {
#define NVME_IOCTL_IO_CMD _IOWR('N', 0x43, struct nvme_passthru_cmd)
#define NVME_IOCTL_RESET _IO('N', 0x44)
#define NVME_IOCTL_SUBSYS_RESET _IO('N', 0x45)
+#define NVME_IOCTL_RESCAN _IO('N', 0x46)
#endif /* _UAPI_LINUX_NVME_IOCTL_H */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 43fc8d213472..36ce552cf6a9 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -862,6 +862,7 @@ enum perf_event_type {
};
#define PERF_MAX_STACK_DEPTH 127
+#define PERF_MAX_CONTEXTS_PER_STACK 8
enum perf_callchain_context {
PERF_CONTEXT_HV = (__u64)-32,
diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h
index a533cecab14f..98bebf8bef55 100644
--- a/include/uapi/rdma/hfi/hfi1_user.h
+++ b/include/uapi/rdma/hfi/hfi1_user.h
@@ -66,7 +66,7 @@
* The major version changes when data structures change in an incompatible
* way. The driver must be the same for initialization to succeed.
*/
-#define HFI1_USER_SWMAJOR 5
+#define HFI1_USER_SWMAJOR 6
/*
* Minor version differences are always compatible
@@ -75,7 +75,12 @@
* may not be implemented; the user code must deal with this if it
* cares, or it must abort after initialization reports the difference.
*/
-#define HFI1_USER_SWMINOR 0
+#define HFI1_USER_SWMINOR 1
+
+/*
+ * We will encode the major/minor inside a single 32bit version number.
+ */
+#define HFI1_SWMAJOR_SHIFT 16
/*
* Set of HW and driver capability/feature bits.
@@ -107,19 +112,6 @@
#define HFI1_RCVHDR_ENTSIZE_16 (1UL << 1)
#define HFI1_RCVDHR_ENTSIZE_32 (1UL << 2)
-/*
- * If the unit is specified via open, HFI choice is fixed. If port is
- * specified, it's also fixed. Otherwise we try to spread contexts
- * across ports and HFIs, using different algorithms. WITHIN is
- * the old default, prior to this mechanism.
- */
-#define HFI1_ALG_ACROSS 0 /* round robin contexts across HFIs, then
- * ports; this is the default */
-#define HFI1_ALG_WITHIN 1 /* use all contexts on an HFI (round robin
- * active ports within), then next HFI */
-#define HFI1_ALG_COUNT 2 /* number of algorithm choices */
-
-
/* User commands. */
#define HFI1_CMD_ASSIGN_CTXT 1 /* allocate HFI and context */
#define HFI1_CMD_CTXT_INFO 2 /* find out what resources we got */
@@ -127,7 +119,6 @@
#define HFI1_CMD_TID_UPDATE 4 /* update expected TID entries */
#define HFI1_CMD_TID_FREE 5 /* free expected TID entries */
#define HFI1_CMD_CREDIT_UPD 6 /* force an update of PIO credit */
-#define HFI1_CMD_SDMA_STATUS_UPD 7 /* force update of SDMA status ring */
#define HFI1_CMD_RECV_CTRL 8 /* control receipt of packets */
#define HFI1_CMD_POLL_TYPE 9 /* set the kind of polling we want */
@@ -135,13 +126,46 @@
#define HFI1_CMD_SET_PKEY 11 /* set context's pkey */
#define HFI1_CMD_CTXT_RESET 12 /* reset context's HW send context */
#define HFI1_CMD_TID_INVAL_READ 13 /* read TID cache invalidations */
-/* separate EPROM commands from normal PSM commands */
-#define HFI1_CMD_EP_INFO 64 /* read EPROM device ID */
-#define HFI1_CMD_EP_ERASE_CHIP 65 /* erase whole EPROM */
-/* range 66-74 no longer used */
-#define HFI1_CMD_EP_ERASE_RANGE 75 /* erase EPROM range */
-#define HFI1_CMD_EP_READ_RANGE 76 /* read EPROM range */
-#define HFI1_CMD_EP_WRITE_RANGE 77 /* write EPROM range */
+#define HFI1_CMD_GET_VERS 14 /* get the version of the user cdev */
+
+/*
+ * User IOCTLs cannot go above 128; if they do, see common.h and change the
+ * base for the snoop ioctl.
+ */
+#define IB_IOCTL_MAGIC 0x1b /* See Documentation/ioctl/ioctl-number.txt */
+
+/*
+ * Make the ioctls occupy the last 0xe0-0xff portion of the IB range
+ */
+#define __NUM(cmd) (HFI1_CMD_##cmd + 0xe0)
+
+struct hfi1_cmd;
+#define HFI1_IOCTL_ASSIGN_CTXT \
+ _IOWR(IB_IOCTL_MAGIC, __NUM(ASSIGN_CTXT), struct hfi1_user_info)
+#define HFI1_IOCTL_CTXT_INFO \
+ _IOW(IB_IOCTL_MAGIC, __NUM(CTXT_INFO), struct hfi1_ctxt_info)
+#define HFI1_IOCTL_USER_INFO \
+ _IOW(IB_IOCTL_MAGIC, __NUM(USER_INFO), struct hfi1_base_info)
+#define HFI1_IOCTL_TID_UPDATE \
+ _IOWR(IB_IOCTL_MAGIC, __NUM(TID_UPDATE), struct hfi1_tid_info)
+#define HFI1_IOCTL_TID_FREE \
+ _IOWR(IB_IOCTL_MAGIC, __NUM(TID_FREE), struct hfi1_tid_info)
+#define HFI1_IOCTL_CREDIT_UPD \
+ _IO(IB_IOCTL_MAGIC, __NUM(CREDIT_UPD))
+#define HFI1_IOCTL_RECV_CTRL \
+ _IOW(IB_IOCTL_MAGIC, __NUM(RECV_CTRL), int)
+#define HFI1_IOCTL_POLL_TYPE \
+ _IOW(IB_IOCTL_MAGIC, __NUM(POLL_TYPE), int)
+#define HFI1_IOCTL_ACK_EVENT \
+ _IOW(IB_IOCTL_MAGIC, __NUM(ACK_EVENT), unsigned long)
+#define HFI1_IOCTL_SET_PKEY \
+ _IOW(IB_IOCTL_MAGIC, __NUM(SET_PKEY), __u16)
+#define HFI1_IOCTL_CTXT_RESET \
+ _IO(IB_IOCTL_MAGIC, __NUM(CTXT_RESET))
+#define HFI1_IOCTL_TID_INVAL_READ \
+ _IOWR(IB_IOCTL_MAGIC, __NUM(TID_INVAL_READ), struct hfi1_tid_info)
+#define HFI1_IOCTL_GET_VERS \
+ _IOR(IB_IOCTL_MAGIC, __NUM(GET_VERS), int)
#define _HFI1_EVENT_FROZEN_BIT 0
#define _HFI1_EVENT_LINKDOWN_BIT 1
@@ -199,9 +223,7 @@ struct hfi1_user_info {
* Should be set to HFI1_USER_SWVERSION.
*/
__u32 userversion;
- __u16 pad;
- /* HFI selection algorithm, if unit has not selected */
- __u16 hfi1_alg;
+ __u32 pad;
/*
* If two or more processes wish to share a context, each process
* must set the subcontext_cnt and subcontext_id to the same
@@ -243,12 +265,6 @@ struct hfi1_tid_info {
__u32 length;
};
-struct hfi1_cmd {
- __u32 type; /* command type */
- __u32 len; /* length of struct pointed to by add */
- __u64 addr; /* pointer to user structure */
-};
-
enum hfi1_sdma_comp_state {
FREE = 0,
QUEUED,
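
With the write()-based commands replaced by ioctls, user space would open a context roughly like this sketch (device node name assumed; error handling abbreviated):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <rdma/hfi/hfi1_user.h>

int hfi1_open_and_assign(void)
{
	struct hfi1_user_info uinfo = {
		.userversion = (HFI1_USER_SWMAJOR << HFI1_SWMAJOR_SHIFT) |
			       HFI1_USER_SWMINOR,
	};
	int fd = open("/dev/hfi1_0", O_RDWR);	/* node name assumed */

	if (fd < 0 || ioctl(fd, HFI1_IOCTL_ASSIGN_CTXT, &uinfo) < 0)
		return -1;
	return fd;
}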
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 6e373d151cad..02fe8390c18f 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -135,10 +135,12 @@ enum {
* Local service operations:
* RESOLVE - The client requests the local service to resolve a path.
* SET_TIMEOUT - The local service requests the client to set the timeout.
+ * IP_RESOLVE - The client requests the local service to resolve an IP address to a GID.
*/
enum {
RDMA_NL_LS_OP_RESOLVE = 0,
RDMA_NL_LS_OP_SET_TIMEOUT,
+ RDMA_NL_LS_OP_IP_RESOLVE,
RDMA_NL_LS_NUM_OPS
};
@@ -176,6 +178,10 @@ struct rdma_ls_resolve_header {
__u8 path_use;
};
+struct rdma_ls_ip_resolve_header {
+ __u32 ifindex;
+};
+
/* Local service attribute type */
#define RDMA_NLA_F_MANDATORY (1 << 13)
#define RDMA_NLA_TYPE_MASK (~(NLA_F_NESTED | NLA_F_NET_BYTEORDER | \
@@ -193,6 +199,8 @@ struct rdma_ls_resolve_header {
* TCLASS u8
* PKEY u16 cpu
* QOS_CLASS u16 cpu
+ * IPV4 u32 BE
+ * IPV6 u8[16] BE
*/
enum {
LS_NLA_TYPE_UNSPEC = 0,
@@ -204,6 +212,8 @@ enum {
LS_NLA_TYPE_TCLASS,
LS_NLA_TYPE_PKEY,
LS_NLA_TYPE_QOS_CLASS,
+ LS_NLA_TYPE_IPV4,
+ LS_NLA_TYPE_IPV6,
LS_NLA_TYPE_MAX
};
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index c4cc1e40b35c..e4701a3c6331 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -116,6 +116,14 @@
#define SND_SOC_TPLG_STREAM_PLAYBACK 0
#define SND_SOC_TPLG_STREAM_CAPTURE 1
+/* vendor tuple types */
+#define SND_SOC_TPLG_TUPLE_TYPE_UUID 0
+#define SND_SOC_TPLG_TUPLE_TYPE_STRING 1
+#define SND_SOC_TPLG_TUPLE_TYPE_BOOL 2
+#define SND_SOC_TPLG_TUPLE_TYPE_BYTE 3
+#define SND_SOC_TPLG_TUPLE_TYPE_WORD 4
+#define SND_SOC_TPLG_TUPLE_TYPE_SHORT 5
+
/*
* Block Header.
* This header precedes all object and object arrays below.
@@ -132,6 +140,35 @@ struct snd_soc_tplg_hdr {
__le32 count; /* number of elements in block */
} __attribute__((packed));
+/* vendor tuple for uuid */
+struct snd_soc_tplg_vendor_uuid_elem {
+ __le32 token;
+ char uuid[16];
+} __attribute__((packed));
+
+/* vendor tuple for a bool/byte/short/word value */
+struct snd_soc_tplg_vendor_value_elem {
+ __le32 token;
+ __le32 value;
+} __attribute__((packed));
+
+/* vendor tuple for string */
+struct snd_soc_tplg_vendor_string_elem {
+ __le32 token;
+ char string[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+} __attribute__((packed));
+
+struct snd_soc_tplg_vendor_array {
+ __le32 size; /* size in bytes of the array, including all elements */
+ __le32 type; /* SND_SOC_TPLG_TUPLE_TYPE_ */
+ __le32 num_elems; /* number of elements in array */
+ union {
+ struct snd_soc_tplg_vendor_uuid_elem uuid[0];
+ struct snd_soc_tplg_vendor_value_elem value[0];
+ struct snd_soc_tplg_vendor_string_elem string[0];
+ };
+} __attribute__((packed));
+
/*
* Private data.
* All topology objects may have private data that can be used by the driver or
@@ -139,7 +176,10 @@ struct snd_soc_tplg_hdr {
*/
struct snd_soc_tplg_private {
__le32 size; /* in bytes of private data */
- char data[0];
+ union {
+ char data[0];
+ struct snd_soc_tplg_vendor_array array[0];
+ };
} __attribute__((packed));
/*
@@ -383,7 +423,7 @@ struct snd_soc_tplg_pcm {
__le32 size; /* in bytes of this structure */
char pcm_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
char dai_name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
- __le32 pcm_id; /* unique ID - used to match */
+ __le32 pcm_id; /* unique ID - used to match with DAI link */
__le32 dai_id; /* unique ID - used to match */
__le32 playback; /* supports playback mode */
__le32 capture; /* supports capture mode */
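
A sketch of building one WORD-type vendor tuple for the private data (the token and value are hypothetical; real tokens are defined by the vendor topology):

static struct snd_soc_tplg_vendor_array *make_word_tuple(void)
{
	size_t sz = sizeof(struct snd_soc_tplg_vendor_array) +
		    sizeof(struct snd_soc_tplg_vendor_value_elem);
	struct snd_soc_tplg_vendor_array *arr = kzalloc(sz, GFP_KERNEL);

	if (!arr)
		return NULL;
	arr->size = cpu_to_le32(sz);
	arr->type = cpu_to_le32(SND_SOC_TPLG_TUPLE_TYPE_WORD);
	arr->num_elems = cpu_to_le32(1);
	arr->value[0].token = cpu_to_le32(0x1234);	/* hypothetical */
	arr->value[0].value = cpu_to_le32(48000);
	return arr;
}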
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index ad66589f2ae6..3a2a79401789 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -16,6 +16,7 @@
#include <linux/videodev2.h>
#include <linux/bitmap.h>
#include <linux/fb.h>
+#include <linux/of.h>
#include <media/v4l2-mediabus.h>
#include <video/videomode.h>
@@ -345,6 +346,7 @@ struct ipu_client_platformdata {
int dc;
int dp;
int dma[2];
+ struct device_node *of_node;
};
#endif /* __DRM_IPU_H__ */
diff --git a/init/Kconfig b/init/Kconfig
index a9c4aefd5436..f755a602d4a1 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1306,6 +1306,17 @@ source "usr/Kconfig"
endif
+choice
+ prompt "Compiler optimization level"
+ default CC_OPTIMIZE_FOR_PERFORMANCE
+
+config CC_OPTIMIZE_FOR_PERFORMANCE
+ bool "Optimize for performance"
+ help
+ This is the default optimization level for the kernel, building
+ with the "-O2" compiler flag for best performance and most
+ helpful compile-time warnings.
+
config CC_OPTIMIZE_FOR_SIZE
bool "Optimize for size"
help
@@ -1314,6 +1325,8 @@ config CC_OPTIMIZE_FOR_SIZE
If unsure, say N.
+endchoice
+
config SYSCTL
bool
@@ -2049,6 +2062,22 @@ config MODULE_COMPRESS_XZ
endchoice
+config TRIM_UNUSED_KSYMS
+ bool "Trim unused exported kernel symbols"
+ depends on MODULES && !UNUSED_SYMBOLS
+ help
+ The kernel and some modules make many symbols available for
+ other modules to use via EXPORT_SYMBOL() and variants. Depending
+ on the set of modules being selected in your kernel configuration,
+ many of those exported symbols might never be used.
+
+ This option allows for unused exported symbols to be dropped from
+ the build. In turn, this provides the compiler more opportunities
+ (especially when using LTO) for optimizing the code and reducing
+ binary size. This might have some security advantages as well.
+
+ If unsure, say N.
+
endif # MODULES
config MODULES_TREE_LOOKUP
diff --git a/init/main.c b/init/main.c
index bc0f9e0bcf22..4c17fda5c2ff 100644
--- a/init/main.c
+++ b/init/main.c
@@ -607,6 +607,7 @@ asmlinkage __visible void __init start_kernel(void)
initrd_start = 0;
}
#endif
+ page_ext_init();
debug_objects_mem_init();
kmemleak_init();
setup_per_cpu_pageset();
@@ -1003,8 +1004,6 @@ static noinline void __init kernel_init_freeable(void)
sched_init_smp();
page_alloc_init_late();
- /* Initialize page ext after all struct pages are initializaed */
- page_ext_init();
do_basic_setup();
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index c8ee35287bfe..080a2dfb5800 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -136,7 +136,8 @@ u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
return -EINVAL;
- trace = get_perf_callchain(regs, init_nr, kernel, user, false, false);
+ trace = get_perf_callchain(regs, init_nr, kernel, user,
+ sysctl_perf_event_max_stack, false, false);
if (unlikely(!trace))
/* couldn't fetch the stack trace */
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index b9325e7dcba1..179ef4640964 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -19,11 +19,13 @@ struct callchain_cpus_entries {
};
int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
+int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;
static inline size_t perf_callchain_entry__sizeof(void)
{
return (sizeof(struct perf_callchain_entry) +
- sizeof(__u64) * sysctl_perf_event_max_stack);
+ sizeof(__u64) * (sysctl_perf_event_max_stack +
+ sysctl_perf_event_max_contexts_per_stack));
}
static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
@@ -32,12 +34,12 @@ static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;
-__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
}
-__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
}
@@ -176,14 +178,15 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
if (!kernel && !user)
return NULL;
- return get_perf_callchain(regs, 0, kernel, user, crosstask, true);
+ return get_perf_callchain(regs, 0, kernel, user, sysctl_perf_event_max_stack, crosstask, true);
}
struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
- bool crosstask, bool add_mark)
+ u32 max_stack, bool crosstask, bool add_mark)
{
struct perf_callchain_entry *entry;
+ struct perf_callchain_entry_ctx ctx;
int rctx;
entry = get_callchain_entry(&rctx);
@@ -193,12 +196,16 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
if (!entry)
goto exit_put;
- entry->nr = init_nr;
+ ctx.entry = entry;
+ ctx.max_stack = max_stack;
+ ctx.nr = entry->nr = init_nr;
+ ctx.contexts = 0;
+ ctx.contexts_maxed = false;
if (kernel && !user_mode(regs)) {
if (add_mark)
- perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
- perf_callchain_kernel(entry, regs);
+ perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
+ perf_callchain_kernel(&ctx, regs);
}
if (user) {
@@ -214,8 +221,8 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
goto exit_put;
if (add_mark)
- perf_callchain_store(entry, PERF_CONTEXT_USER);
- perf_callchain_user(entry, regs);
+ perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
+ perf_callchain_user(&ctx, regs);
}
}
@@ -225,10 +232,15 @@ exit_put:
return entry;
}
+/*
+ * Used for sysctl_perf_event_max_stack and
+ * sysctl_perf_event_max_contexts_per_stack.
+ */
int perf_event_max_stack_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
- int new_value = sysctl_perf_event_max_stack, ret;
+ int *value = table->data;
+ int new_value = *value, ret;
struct ctl_table new_table = *table;
new_table.data = &new_value;
@@ -240,7 +252,7 @@ int perf_event_max_stack_handler(struct ctl_table *table, int write,
if (atomic_read(&nr_callchain_events))
ret = -EBUSY;
else
- sysctl_perf_event_max_stack = new_value;
+ *value = new_value;
mutex_unlock(&callchain_mutex);
diff --git a/kernel/fork.c b/kernel/fork.c
index 47887bba944f..5c2c355aa97f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -736,6 +736,7 @@ void mmput(struct mm_struct *mm)
}
EXPORT_SYMBOL_GPL(mmput);
+#ifdef CONFIG_MMU
static void mmput_async_fn(struct work_struct *work)
{
struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
@@ -749,6 +750,7 @@ void mmput_async(struct mm_struct *mm)
schedule_work(&mm->async_put_work);
}
}
+#endif
/**
* set_mm_exe_file - change a reference to the mm's executable file
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index c92e44855ddd..1276aabaab55 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -37,6 +37,7 @@ config ARCH_HAS_GCOV_PROFILE_ALL
config GCOV_PROFILE_ALL
bool "Profile entire Kernel"
+ depends on !COMPILE_TEST
depends on GCOV_KERNEL
depends on ARCH_HAS_GCOV_PROFILE_ALL
default n
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index c817216c1615..2e853ad93a3a 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -173,6 +173,22 @@ void down_write_nested(struct rw_semaphore *sem, int subclass)
EXPORT_SYMBOL(down_write_nested);
+int __sched down_write_killable_nested(struct rw_semaphore *sem, int subclass)
+{
+ might_sleep();
+ rwsem_acquire(&sem->dep_map, subclass, 0, _RET_IP_);
+
+ if (LOCK_CONTENDED_RETURN(sem, __down_write_trylock, __down_write_killable)) {
+ rwsem_release(&sem->dep_map, 1, _RET_IP_);
+ return -EINTR;
+ }
+
+ rwsem_set_owner(sem);
+ return 0;
+}
+
+EXPORT_SYMBOL(down_write_killable_nested);
+
void up_read_non_owner(struct rw_semaphore *sem)
{
__up_read(sem);
diff --git a/kernel/pid.c b/kernel/pid.c
index 4d73a834c7e6..f66162f2359b 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -311,7 +311,7 @@ struct pid *alloc_pid(struct pid_namespace *ns)
pid->level = ns->level;
for (i = ns->level; i >= 0; i--) {
nr = alloc_pidmap(tmp);
- if (IS_ERR_VALUE(nr)) {
+ if (nr < 0) {
retval = nr;
goto out_free;
}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 404c0784b1fc..7f2cae4620c7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1768,13 +1768,15 @@ void sched_ttwu_pending(void)
cookie = lockdep_pin_lock(&rq->lock);
while (llist) {
+ int wake_flags = 0;
+
p = llist_entry(llist, struct task_struct, wake_entry);
llist = llist_next(llist);
- /*
- * See ttwu_queue(); we only call ttwu_queue_remote() when
- * its a x-cpu wakeup.
- */
- ttwu_do_activate(rq, p, WF_MIGRATED, cookie);
+
+ if (p->sched_remote_wakeup)
+ wake_flags = WF_MIGRATED;
+
+ ttwu_do_activate(rq, p, wake_flags, cookie);
}
lockdep_unpin_lock(&rq->lock, cookie);
@@ -1819,10 +1821,12 @@ void scheduler_ipi(void)
irq_exit();
}
-static void ttwu_queue_remote(struct task_struct *p, int cpu)
+static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
{
struct rq *rq = cpu_rq(cpu);
+ p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
+
if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
if (!set_nr_if_polling(rq->idle))
smp_send_reschedule(cpu);
@@ -1869,7 +1873,7 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
#if defined(CONFIG_SMP)
if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
sched_clock_cpu(cpu); /* sync clocks x-cpu */
- ttwu_queue_remote(p, cpu);
+ ttwu_queue_remote(p, cpu, wake_flags);
return;
}
#endif
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 154ae3a51e86..14c4aa25cc45 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -9,6 +9,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -388,7 +390,7 @@ static int sugov_init(struct cpufreq_policy *policy)
mutex_unlock(&global_tunables_lock);
sugov_policy_free(sg_policy);
- pr_err("cpufreq: schedutil governor initialization failed (error %d)\n", ret);
+ pr_err("initialization failed (error %d)\n", ret);
return ret;
}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 2effd84d83e3..87b2fc38398b 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1149,13 +1149,22 @@ static struct ctl_table kern_table[] = {
},
{
.procname = "perf_event_max_stack",
- .data = NULL, /* filled in by handler */
+ .data = &sysctl_perf_event_max_stack,
.maxlen = sizeof(sysctl_perf_event_max_stack),
.mode = 0644,
.proc_handler = perf_event_max_stack_handler,
.extra1 = &zero,
.extra2 = &six_hundred_forty_kb,
},
+ {
+ .procname = "perf_event_max_contexts_per_stack",
+ .data = &sysctl_perf_event_max_contexts_per_stack,
+ .maxlen = sizeof(sysctl_perf_event_max_contexts_per_stack),
+ .mode = 0644,
+ .proc_handler = perf_event_max_stack_handler,
+ .extra1 = &zero,
+ .extra2 = &one_thousand,
+ },
#endif
#ifdef CONFIG_KMEMCHECK
{
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e707ab3e1991..b9cfdbfae9aa 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1841,6 +1841,9 @@ config TEST_BITMAP
If unsure, say N.
+config TEST_UUID
+ tristate "Test functions located in the uuid module at runtime"
+
config TEST_RHASHTABLE
tristate "Perform selftest on resizable hash table"
default n
@@ -1849,6 +1852,17 @@ config TEST_RHASHTABLE
If unsure, say N.
+config TEST_HASH
+ tristate "Perform selftest on hash functions"
+ default n
+ help
+ Enable this option to test the kernel's integer (<linux/hash.h>)
+ and string (<linux/stringhash.h>) hash functions on boot
+ (or module load).
+
+ This is intended to help people writing architecture-specific
+ optimized versions. If unsure, say N.
+
endmenu # runtime tests
config PROVIDE_OHCI1394_DMA_INIT
diff --git a/lib/Makefile b/lib/Makefile
index 42b69185f963..ff6a7a6c6395 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -48,6 +48,7 @@ obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
obj-y += kstrtox.o
obj-$(CONFIG_TEST_BPF) += test_bpf.o
obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
+obj-$(CONFIG_TEST_HASH) += test_hash.o
obj-$(CONFIG_TEST_KASAN) += test_kasan.o
obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
obj-$(CONFIG_TEST_LKM) += test_module.o
@@ -57,6 +58,7 @@ obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o
obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o
obj-$(CONFIG_TEST_PRINTF) += test_printf.o
obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o
+obj-$(CONFIG_TEST_UUID) += test_uuid.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 4a1515f4b452..51a76af25c66 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -657,9 +657,9 @@ static struct dma_debug_entry *dma_entry_alloc(void)
spin_lock_irqsave(&free_entries_lock, flags);
if (list_empty(&free_entries)) {
- pr_err("DMA-API: debugging out of memory - disabling\n");
global_disable = true;
spin_unlock_irqrestore(&free_entries_lock, flags);
+ pr_err("DMA-API: debugging out of memory - disabling\n");
return NULL;
}
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 28cb4315fe57..0cd522753ff5 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -101,7 +101,7 @@
#define iterate_and_advance(i, n, v, I, B, K) { \
if (unlikely(i->count < n)) \
n = i->count; \
- if (n) { \
+ if (i->count) { \
size_t skip = i->iov_offset; \
if (unlikely(i->type & ITER_BVEC)) { \
const struct bio_vec *bvec; \
diff --git a/lib/test_hash.c b/lib/test_hash.c
new file mode 100644
index 000000000000..c9549c8b4909
--- /dev/null
+++ b/lib/test_hash.c
@@ -0,0 +1,250 @@
+/*
+ * Test cases for <linux/hash.h> and <linux/stringhash.h>
+ * This just verifies that various ways of computing a hash
+ * produce the same thing and that, where a k-bit hash value
+ * is requested, the result is of the requested size.
+ *
+ * We fill a buffer with a 255-byte null-terminated string,
+ * and use both full_name_hash() and hashlen_string() to hash the
+ * substrings from i to j, where 0 <= i < j < 256.
+ *
+ * The returned values are used to check that __hash_32() and
+ * __hash_32_generic() compute the same thing. Likewise hash_32()
+ * and hash_64().
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt "\n"
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/hash.h>
+#include <linux/stringhash.h>
+#include <linux/printk.h>
+
+/* 32-bit XORSHIFT generator. Seed must not be zero. */
+static u32 __init __attribute_const__
+xorshift(u32 seed)
+{
+ seed ^= seed << 13;
+ seed ^= seed >> 17;
+ seed ^= seed << 5;
+ return seed;
+}
+
+/* Given a non-zero x, returns a non-zero byte. */
+static u8 __init __attribute_const__
+mod255(u32 x)
+{
+ x = (x & 0xffff) + (x >> 16); /* 1 <= x <= 0x1fffe */
+ x = (x & 0xff) + (x >> 8); /* 1 <= x <= 0x2fd */
+ x = (x & 0xff) + (x >> 8); /* 1 <= x <= 0x100 */
+ x = (x & 0xff) + (x >> 8); /* 1 <= x <= 0xff */
+ return x;
+}
+
+/* Fill the buffer with non-zero bytes. */
+static void __init
+fill_buf(char *buf, size_t len, u32 seed)
+{
+ size_t i;
+
+ for (i = 0; i < len; i++) {
+ seed = xorshift(seed);
+ buf[i] = mod255(seed);
+ }
+}
+
+/*
+ * Test the various integer hash functions. h64 (or its low-order bits)
+ * is the integer to hash. hash_or accumulates the OR of the hash values,
+ * which are later checked to see that they cover all the requested bits.
+ *
+ * Because these functions (as opposed to the string hashes) are all
+ * inline, the code being tested is actually in the module, and you can
+ * recompile and re-test the module without rebooting.
+ */
+static bool __init
+test_int_hash(unsigned long long h64, u32 hash_or[2][33])
+{
+ int k;
+ u32 h0 = (u32)h64, h1, h2;
+
+ /* Test __hash_32 */
+ hash_or[0][0] |= h1 = __hash_32(h0);
+#ifdef HAVE_ARCH__HASH_32
+ hash_or[1][0] |= h2 = __hash_32_generic(h0);
+#if HAVE_ARCH__HASH_32 == 1
+ if (h1 != h2) {
+ pr_err("__hash_32(%#x) = %#x != __hash_32_generic() = %#x",
+ h0, h1, h2);
+ return false;
+ }
+#endif
+#endif
+
+ /* Test k = 1..32 bits */
+ for (k = 1; k <= 32; k++) {
+ u32 const m = ((u32)2 << (k-1)) - 1; /* Low k bits set */
+
+ /* Test hash_32 */
+ hash_or[0][k] |= h1 = hash_32(h0, k);
+ if (h1 > m) {
+ pr_err("hash_32(%#x, %d) = %#x > %#x", h0, k, h1, m);
+ return false;
+ }
+#ifdef HAVE_ARCH_HASH_32
+ h2 = hash_32_generic(h0, k);
+#if HAVE_ARCH_HASH_32 == 1
+ if (h1 != h2) {
+ pr_err("hash_32(%#x, %d) = %#x != hash_32_generic() "
+ " = %#x", h0, k, h1, h2);
+ return false;
+ }
+#else
+ if (h2 > m) {
+ pr_err("hash_32_generic(%#x, %d) = %#x > %#x",
+ h0, k, h1, m);
+ return false;
+ }
+#endif
+#endif
+ /* Test hash_64 */
+ hash_or[1][k] |= h1 = hash_64(h64, k);
+ if (h1 > m) {
+ pr_err("hash_64(%#llx, %d) = %#x > %#x", h64, k, h1, m);
+ return false;
+ }
+#ifdef HAVE_ARCH_HASH_64
+ h2 = hash_64_generic(h64, k);
+#if HAVE_ARCH_HASH_64 == 1
+ if (h1 != h2) {
+ pr_err("hash_64(%#llx, %d) = %#x != hash_64_generic() "
+ "= %#x", h64, k, h1, h2);
+ return false;
+ }
+#else
+ if (h2 > m) {
+ pr_err("hash_64_generic(%#llx, %d) = %#x > %#x",
+ h64, k, h1, m);
+ return false;
+ }
+#endif
+#endif
+ }
+
+ (void)h2; /* Suppress unused variable warning */
+ return true;
+}
+
+#define SIZE 256 /* Run time is cubic in SIZE */
+
+static int __init
+test_hash_init(void)
+{
+ char buf[SIZE+1];
+ u32 string_or = 0, hash_or[2][33] = { 0 };
+ unsigned tests = 0;
+ unsigned long long h64 = 0;
+ int i, j;
+
+ fill_buf(buf, SIZE, 1);
+
+ /* Test every possible non-empty substring in the buffer. */
+ for (j = SIZE; j > 0; --j) {
+ buf[j] = '\0';
+
+ for (i = 0; i <= j; i++) {
+ u64 hashlen = hashlen_string(buf+i);
+ u32 h0 = full_name_hash(buf+i, j-i);
+
+ /* Check that hashlen_string gets the length right */
+ if (hashlen_len(hashlen) != j-i) {
+ pr_err("hashlen_string(%d..%d) returned length"
+ " %u, expected %d",
+ i, j, hashlen_len(hashlen), j-i);
+ return -EINVAL;
+ }
+ /* Check that the hashes match */
+ if (hashlen_hash(hashlen) != h0) {
+ pr_err("hashlen_string(%d..%d) = %08x != "
+ "full_name_hash() = %08x",
+ i, j, hashlen_hash(hashlen), h0);
+ return -EINVAL;
+ }
+
+ string_or |= h0;
+ h64 = h64 << 32 | h0; /* For use with hash_64 */
+ if (!test_int_hash(h64, hash_or))
+ return -EINVAL;
+ tests++;
+ } /* i */
+ } /* j */
+
+ /* The OR of all the hash values should cover all the bits */
+ if (~string_or) {
+ pr_err("OR of all string hash results = %#x != %#x",
+ string_or, -1u);
+ return -EINVAL;
+ }
+ if (~hash_or[0][0]) {
+ pr_err("OR of all __hash_32 results = %#x != %#x",
+ hash_or[0][0], -1u);
+ return -EINVAL;
+ }
+#ifdef HAVE_ARCH__HASH_32
+#if HAVE_ARCH__HASH_32 != 1 /* Test is pointless if results match */
+ if (~hash_or[1][0]) {
+ pr_err("OR of all __hash_32_generic results = %#x != %#x",
+ hash_or[1][0], -1u);
+ return -EINVAL;
+ }
+#endif
+#endif
+
+ /* Likewise for all the i-bit hash values */
+ for (i = 1; i <= 32; i++) {
+ u32 const m = ((u32)2 << (i-1)) - 1; /* Low i bits set */
+
+ if (hash_or[0][i] != m) {
+ pr_err("OR of all hash_32(%d) results = %#x "
+ "(%#x expected)", i, hash_or[0][i], m);
+ return -EINVAL;
+ }
+ if (hash_or[1][i] != m) {
+ pr_err("OR of all hash_64(%d) results = %#x "
+ "(%#x expected)", i, hash_or[1][i], m);
+ return -EINVAL;
+ }
+ }
+
+ /* Issue notices about skipped tests. */
+#ifndef HAVE_ARCH__HASH_32
+ pr_info("__hash_32() has no arch implementation to test.");
+#elif HAVE_ARCH__HASH_32 != 1
+ pr_info("__hash_32() is arch-specific; not compared to generic.");
+#endif
+#ifndef HAVE_ARCH_HASH_32
+ pr_info("hash_32() has no arch implementation to test.");
+#elif HAVE_ARCH_HASH_32 != 1
+ pr_info("hash_32() is arch-specific; not compared to generic.");
+#endif
+#ifndef HAVE_ARCH_HASH_64
+ pr_info("hash_64() has no arch implementation to test.");
+#elif HAVE_ARCH_HASH_64 != 1
+ pr_info("hash_64() is arch-specific; not compared to generic.");
+#endif
+
+ pr_notice("%u tests passed.", tests);
+
+ return 0;
+}
+
+static void __exit test_hash_exit(void)
+{
+}
+
+module_init(test_hash_init); /* Does everything */
+module_exit(test_hash_exit); /* Does nothing */
+
+MODULE_LICENSE("GPL");
diff --git a/lib/test_uuid.c b/lib/test_uuid.c
new file mode 100644
index 000000000000..547d3127a3cf
--- /dev/null
+++ b/lib/test_uuid.c
@@ -0,0 +1,133 @@
+/*
+ * Test cases for lib/uuid.c module.
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/uuid.h>
+
+struct test_uuid_data {
+ const char *uuid;
+ uuid_le le;
+ uuid_be be;
+};
+
+static const struct test_uuid_data test_uuid_test_data[] = {
+ {
+ .uuid = "c33f4995-3701-450e-9fbf-206a2e98e576",
+ .le = UUID_LE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+ .be = UUID_BE(0xc33f4995, 0x3701, 0x450e, 0x9f, 0xbf, 0x20, 0x6a, 0x2e, 0x98, 0xe5, 0x76),
+ },
+ {
+ .uuid = "64b4371c-77c1-48f9-8221-29f054fc023b",
+ .le = UUID_LE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+ .be = UUID_BE(0x64b4371c, 0x77c1, 0x48f9, 0x82, 0x21, 0x29, 0xf0, 0x54, 0xfc, 0x02, 0x3b),
+ },
+ {
+ .uuid = "0cb4ddff-a545-4401-9d06-688af53e7f84",
+ .le = UUID_LE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+ .be = UUID_BE(0x0cb4ddff, 0xa545, 0x4401, 0x9d, 0x06, 0x68, 0x8a, 0xf5, 0x3e, 0x7f, 0x84),
+ },
+};
+
+static const char * const test_uuid_wrong_data[] = {
+ "c33f4995-3701-450e-9fbf206a2e98e576 ", /* no hyphen(s) */
+ "64b4371c-77c1-48f9-8221-29f054XX023b", /* invalid character(s) */
+ "0cb4ddff-a545-4401-9d06-688af53e", /* not enough data */
+};
+
+static unsigned total_tests __initdata;
+static unsigned failed_tests __initdata;
+
+static void __init test_uuid_failed(const char *prefix, bool wrong, bool be,
+ const char *data, const char *actual)
+{
+ pr_err("%s test #%u %s %s data: '%s'\n",
+ prefix,
+ total_tests,
+ wrong ? "passed on wrong" : "failed on",
+ be ? "BE" : "LE",
+ data);
+ if (actual && *actual)
+ pr_err("%s test #%u actual data: '%s'\n",
+ prefix,
+ total_tests,
+ actual);
+ failed_tests++;
+}
+
+static void __init test_uuid_test(const struct test_uuid_data *data)
+{
+ uuid_le le;
+ uuid_be be;
+ char buf[48];
+
+ /* LE */
+ total_tests++;
+ if (uuid_le_to_bin(data->uuid, &le))
+ test_uuid_failed("conversion", false, false, data->uuid, NULL);
+
+ total_tests++;
+ if (uuid_le_cmp(data->le, le)) {
+ sprintf(buf, "%pUl", &le);
+ test_uuid_failed("cmp", false, false, data->uuid, buf);
+ }
+
+ /* BE */
+ total_tests++;
+ if (uuid_be_to_bin(data->uuid, &be))
+ test_uuid_failed("conversion", false, true, data->uuid, NULL);
+
+ total_tests++;
+ if (uuid_be_cmp(data->be, be)) {
+ sprintf(buf, "%pUb", &be);
+ test_uuid_failed("cmp", false, true, data->uuid, buf);
+ }
+}
+
+static void __init test_uuid_wrong(const char *data)
+{
+ uuid_le le;
+ uuid_be be;
+
+ /* LE */
+ total_tests++;
+ if (!uuid_le_to_bin(data, &le))
+ test_uuid_failed("negative", true, false, data, NULL);
+
+ /* BE */
+ total_tests++;
+ if (!uuid_be_to_bin(data, &be))
+ test_uuid_failed("negative", true, true, data, NULL);
+}
+
+static int __init test_uuid_init(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(test_uuid_test_data); i++)
+ test_uuid_test(&test_uuid_test_data[i]);
+
+ for (i = 0; i < ARRAY_SIZE(test_uuid_wrong_data); i++)
+ test_uuid_wrong(test_uuid_wrong_data[i]);
+
+ if (failed_tests == 0)
+ pr_info("all %u tests passed\n", total_tests);
+ else
+ pr_err("failed %u out of %u tests\n", failed_tests, total_tests);
+
+ return failed_tests ? -EINVAL : 0;
+}
+module_init(test_uuid_init);
+
+static void __exit test_uuid_exit(void)
+{
+ /* do nothing */
+}
+module_exit(test_uuid_exit);
+
+MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/uuid.c b/lib/uuid.c
index e116ae5fa00f..37687af77ff8 100644
--- a/lib/uuid.c
+++ b/lib/uuid.c
@@ -106,8 +106,8 @@ static int __uuid_to_bin(const char *uuid, __u8 b[16], const u8 ei[16])
return -EINVAL;
for (i = 0; i < 16; i++) {
- int hi = hex_to_bin(uuid[si[i]] + 0);
- int lo = hex_to_bin(uuid[si[i]] + 1);
+ int hi = hex_to_bin(uuid[si[i] + 0]);
+ int lo = hex_to_bin(uuid[si[i] + 1]);
b[ei[i]] = (hi << 4) | lo;
}
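
The one-character fix above is easy to misread: the old code added 0/1 to the *character* rather than to the string index, so the low nibble parsed the following ASCII code point instead of the following digit. Spelled out:

        /* si[i] is the string offset of the two hex digits for byte i */
        int hi = hex_to_bin(uuid[si[i] + 0]);   /* first digit */
        int lo = hex_to_bin(uuid[si[i] + 1]);   /* second digit */

        /* the buggy hex_to_bin(uuid[si[i]] + 1) incremented the character:
         * '9' + 1 == ':' and 'f' + 1 == 'g', both rejected by hex_to_bin(),
         * while '0'..'8' silently parsed as the wrong digit */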
diff --git a/mm/Kconfig b/mm/Kconfig
index 2664c118b5d2..3e2daef3c946 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -648,7 +648,8 @@ config DEFERRED_STRUCT_PAGE_INIT
bool "Defer initialisation of struct pages to kthreads"
default n
depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
- depends on MEMORY_HOTPLUG
+ depends on NO_BOOTMEM && MEMORY_HOTPLUG
+ depends on !FLATMEM
help
Ordinarily all struct pages are initialised during early boot in a
single thread. On very large machines this can take a considerable
diff --git a/mm/cma.c b/mm/cma.c
index ea506eb18cd6..bd0e1412475e 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -183,7 +183,8 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
return -EINVAL;
/* ensure minimal alignment required by mm core */
- alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+ alignment = PAGE_SIZE <<
+ max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
/* alignment should be aligned with order_per_bit */
if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
@@ -266,8 +267,8 @@ int __init cma_declare_contiguous(phys_addr_t base,
* migratetype page by page allocator's buddy algorithm. In the case,
* you couldn't get a contiguous memory, which is not what we want.
*/
- alignment = max(alignment,
- (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
+ alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
+ max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
base = ALIGN(base, alignment);
size = ALIGN(size, alignment);
limit &= ~(alignment - 1);
diff --git a/mm/filemap.c b/mm/filemap.c
index 9665b1d4f318..00ae878b2a38 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -143,13 +143,15 @@ static void page_cache_tree_delete(struct address_space *mapping,
return;
/*
- * Track node that only contains shadow entries.
+ * Track node that only contains shadow entries. DAX mappings contain
+ * no shadow entries and may contain other exceptional entries so skip
+ * those.
*
* Avoid acquiring the list_lru lock if already tracked. The
* list_empty() test is safe as node->private_list is
* protected by mapping->tree_lock.
*/
- if (!workingset_node_pages(node) &&
+ if (!dax_mapping(mapping) && !workingset_node_pages(node) &&
list_empty(&node->private_list)) {
node->private_data = mapping;
list_lru_add(&workingset_shadow_nodes, &node->private_list);
@@ -580,14 +582,24 @@ static int page_cache_tree_insert(struct address_space *mapping,
if (!radix_tree_exceptional_entry(p))
return -EEXIST;
- if (WARN_ON(dax_mapping(mapping)))
- return -EINVAL;
-
- if (shadowp)
- *shadowp = p;
mapping->nrexceptional--;
- if (node)
- workingset_node_shadows_dec(node);
+ if (!dax_mapping(mapping)) {
+ if (shadowp)
+ *shadowp = p;
+ if (node)
+ workingset_node_shadows_dec(node);
+ } else {
+ /* DAX can replace empty locked entry with a hole */
+ WARN_ON_ONCE(p !=
+ (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
+ RADIX_DAX_ENTRY_LOCK));
+ /* DAX accounts exceptional entries as normal pages */
+ if (node)
+ workingset_node_pages_dec(node);
+ /* Wakeup waiters for exceptional entry lock */
+ dax_wake_mapping_entry_waiter(mapping, page->index,
+ false);
+ }
}
radix_tree_replace_slot(slot, page);
mapping->nrpages++;
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 7f7ac51d7faf..fb87923552ef 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -77,7 +77,6 @@ struct kasan_alloc_meta {
struct kasan_track track;
u32 state : 2; /* enum kasan_state */
u32 alloc_size : 30;
- u32 reserved;
};
struct qlist_node {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index cf428d7b9a03..925b431f3f03 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1108,6 +1108,8 @@ static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
limit = READ_ONCE(memcg->memsw.limit);
if (count <= limit)
margin = min(margin, limit - count);
+ else
+ margin = 0;
}
return margin;
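
The added else branch matters because the memsw counter may already be over its limit while the plain memory counter still has headroom; without it, the function reported the memory margin even though no further charge could succeed. A distilled userspace model of the corrected logic:

        #include <stdio.h>

        static unsigned long margin(unsigned long count, unsigned long limit,
                                    unsigned long sw_count,
                                    unsigned long sw_limit)
        {
                unsigned long m = count <= limit ? limit - count : 0;

                if (sw_count <= sw_limit) {
                        unsigned long m2 = sw_limit - sw_count;

                        if (m2 < m)
                                m = m2;
                } else {
                        m = 0;  /* memsw exhausted: no headroom at all */
                }
                return m;
        }

        int main(void)
        {
                /* memsw over its limit: the pre-fix code reported 50 */
                printf("%lu\n", margin(50, 100, 210, 200));     /* prints 0 */
                return 0;
        }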
@@ -1302,6 +1304,8 @@ static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
mem_cgroup_iter_break(memcg, iter);
if (chosen)
put_task_struct(chosen);
+ /* Set a dummy value to return "true". */
+ chosen = (void *) 1;
goto unlock;
case OOM_SCAN_OK:
break;
@@ -4305,24 +4309,6 @@ static int mem_cgroup_do_precharge(unsigned long count)
return 0;
}
-/**
- * get_mctgt_type - get target type of moving charge
- * @vma: the vma the pte to be checked belongs
- * @addr: the address corresponding to the pte to be checked
- * @ptent: the pte to be checked
- * @target: the pointer the target page or swap ent will be stored(can be NULL)
- *
- * Returns
- * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
- * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
- * move charge. if @target is not NULL, the page is stored in target->page
- * with extra refcnt got(Callers should handle it).
- * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
- * target for charge migration. if @target is not NULL, the entry is stored
- * in target->ent.
- *
- * Called with pte lock held.
- */
union mc_target {
struct page *page;
swp_entry_t ent;
@@ -4511,6 +4497,25 @@ out:
return ret;
}
+/**
+ * get_mctgt_type - get target type of moving charge
+ * @vma: the vma the pte to be checked belongs
+ * @addr: the address corresponding to the pte to be checked
+ * @ptent: the pte to be checked
+ * @target: the pointer the target page or swap ent will be stored(can be NULL)
+ *
+ * Returns
+ * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
+ * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
+ * move charge. if @target is not NULL, the page is stored in target->page
+ * with extra refcnt got(Callers should handle it).
+ * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
+ * target for charge migration. if @target is not NULL, the entry is stored
+ * in target->ent.
+ *
+ * Called with pte lock held.
+ */
static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
unsigned long addr, pte_t ptent, union mc_target *target)
{
diff --git a/mm/memory.c b/mm/memory.c
index a1b93d9e4449..15322b73636b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -63,6 +63,7 @@
#include <linux/dma-debug.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
+#include <linux/dax.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
@@ -2492,8 +2493,6 @@ void unmap_mapping_range(struct address_space *mapping,
if (details.last_index < details.first_index)
details.last_index = ULONG_MAX;
-
- /* DAX uses i_mmap_lock to serialise file truncate vs page fault */
i_mmap_lock_write(mapping);
if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
unmap_mapping_range_tree(&mapping->i_mmap, &details);
@@ -2825,7 +2824,8 @@ oom:
*/
static int __do_fault(struct vm_area_struct *vma, unsigned long address,
pgoff_t pgoff, unsigned int flags,
- struct page *cow_page, struct page **page)
+ struct page *cow_page, struct page **page,
+ void **entry)
{
struct vm_fault vmf;
int ret;
@@ -2840,8 +2840,10 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
ret = vma->vm_ops->fault(vma, &vmf);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret;
- if (!vmf.page)
- goto out;
+ if (ret & VM_FAULT_DAX_LOCKED) {
+ *entry = vmf.entry;
+ return ret;
+ }
if (unlikely(PageHWPoison(vmf.page))) {
if (ret & VM_FAULT_LOCKED)
@@ -2855,7 +2857,6 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
else
VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
- out:
*page = vmf.page;
return ret;
}
@@ -3048,7 +3049,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
pte_unmap_unlock(pte, ptl);
}
- ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page);
+ ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page, NULL);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret;
@@ -3071,6 +3072,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
{
struct page *fault_page, *new_page;
+ void *fault_entry;
struct mem_cgroup *memcg;
spinlock_t *ptl;
pte_t *pte;
@@ -3088,26 +3090,24 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return VM_FAULT_OOM;
}
- ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page);
+ ret = __do_fault(vma, address, pgoff, flags, new_page, &fault_page,
+ &fault_entry);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
goto uncharge_out;
- if (fault_page)
+ if (!(ret & VM_FAULT_DAX_LOCKED))
copy_user_highpage(new_page, fault_page, address, vma);
__SetPageUptodate(new_page);
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
if (unlikely(!pte_same(*pte, orig_pte))) {
pte_unmap_unlock(pte, ptl);
- if (fault_page) {
+ if (!(ret & VM_FAULT_DAX_LOCKED)) {
unlock_page(fault_page);
put_page(fault_page);
} else {
- /*
- * The fault handler has no page to lock, so it holds
- * i_mmap_lock for read to protect against truncate.
- */
- i_mmap_unlock_read(vma->vm_file->f_mapping);
+ dax_unlock_mapping_entry(vma->vm_file->f_mapping,
+ pgoff);
}
goto uncharge_out;
}
@@ -3115,15 +3115,11 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
mem_cgroup_commit_charge(new_page, memcg, false, false);
lru_cache_add_active_or_unevictable(new_page, vma);
pte_unmap_unlock(pte, ptl);
- if (fault_page) {
+ if (!(ret & VM_FAULT_DAX_LOCKED)) {
unlock_page(fault_page);
put_page(fault_page);
} else {
- /*
- * The fault handler has no page to lock, so it holds
- * i_mmap_lock for read to protect against truncate.
- */
- i_mmap_unlock_read(vma->vm_file->f_mapping);
+ dax_unlock_mapping_entry(vma->vm_file->f_mapping, pgoff);
}
return ret;
uncharge_out:
@@ -3143,7 +3139,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
int dirtied = 0;
int ret, tmp;
- ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page);
+ ret = __do_fault(vma, address, pgoff, flags, NULL, &fault_page, NULL);
if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
return ret;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index caf2a14c37ad..e3cbdcaff2a5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -263,7 +263,7 @@ static void register_page_bootmem_info_section(unsigned long start_pfn)
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
-void register_page_bootmem_info_node(struct pglist_data *pgdat)
+void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
{
unsigned long i, pfn, end_pfn, nr_pages;
int node = pgdat->node_id;
@@ -300,7 +300,7 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat)
* multiple nodes we check that this pfn does not already
* reside in some other nodes.
*/
- if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
+ if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
register_page_bootmem_info_section(pfn);
}
}
diff --git a/mm/mmap.c b/mm/mmap.c
index d3d9a94ca031..de2c1769cc68 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -168,7 +168,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
return next;
}
-static unsigned long do_brk(unsigned long addr, unsigned long len);
+static int do_brk(unsigned long addr, unsigned long len);
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
@@ -224,7 +224,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
goto out;
/* Ok, looks good - let it rip. */
- if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
+ if (do_brk(oldbrk, newbrk-oldbrk) < 0)
goto out;
set_brk:
@@ -2625,7 +2625,7 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
* anonymous maps. eventually we may be able to do some
* brk-specific accounting here.
*/
-static unsigned long do_brk(unsigned long addr, unsigned long len)
+static int do_brk(unsigned long addr, unsigned long len)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
@@ -2636,7 +2636,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
len = PAGE_ALIGN(len);
if (!len)
- return addr;
+ return 0;
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
@@ -2703,13 +2703,13 @@ out:
if (flags & VM_LOCKED)
mm->locked_vm += (len >> PAGE_SHIFT);
vma->vm_flags |= VM_SOFTDIRTY;
- return addr;
+ return 0;
}
-unsigned long vm_brk(unsigned long addr, unsigned long len)
+int vm_brk(unsigned long addr, unsigned long len)
{
struct mm_struct *mm = current->mm;
- unsigned long ret;
+ int ret;
bool populate;
if (down_write_killable(&mm->mmap_sem))
@@ -2718,7 +2718,7 @@ unsigned long vm_brk(unsigned long addr, unsigned long len)
ret = do_brk(addr, len);
populate = ((mm->def_flags & VM_LOCKED) != 0);
up_write(&mm->mmap_sem);
- if (populate)
+ if (populate && !ret)
mm_populate(addr, len);
return ret;
}
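
The signature change above exists because encoding success as "the address you asked for" is ambiguous: an errno folded into an unsigned long occupies the top 4095 values of the address space, so a legitimate high mapping and -ENOMEM can collide. A minimal demonstration of the overlap (userspace sketch, 64-bit assumed):

        #include <stdio.h>

        #define MAX_ERRNO       4095
        #define IS_ERR_VALUE(x) \
                ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

        int main(void)
        {
                unsigned long err = (unsigned long)-12;         /* -ENOMEM */
                unsigned long addr = 0xfffffffffffff800UL;      /* a high page */

                /* both classify as errors -- hence return 0/-errno instead */
                printf("%d %d\n", IS_ERR_VALUE(err), IS_ERR_VALUE(addr));
                return 0;
        }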
diff --git a/mm/nommu.c b/mm/nommu.c
index c8bd59a03c71..c2e58880207f 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1682,7 +1682,7 @@ void exit_mmap(struct mm_struct *mm)
}
}
-unsigned long vm_brk(unsigned long addr, unsigned long len)
+int vm_brk(unsigned long addr, unsigned long len)
{
return -ENOMEM;
}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 5bb2f7698ad7..dfb1ab61fb23 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -443,13 +443,29 @@ static bool __oom_reap_task(struct task_struct *tsk)
{
struct mmu_gather tlb;
struct vm_area_struct *vma;
- struct mm_struct *mm;
+ struct mm_struct *mm = NULL;
struct task_struct *p;
struct zap_details details = {.check_swap_entries = true,
.ignore_dirty = true};
bool ret = true;
/*
+ * We have to make sure to not race with the victim exit path
+ * and cause premature new oom victim selection:
+ * __oom_reap_task exit_mm
+ * atomic_inc_not_zero
+ * mmput
+ * atomic_dec_and_test
+ * exit_oom_victim
+ * [...]
+ * out_of_memory
+ * select_bad_process
+ * # no TIF_MEMDIE task selects new victim
+ * unmap_page_range # frees some memory
+ */
+ mutex_lock(&oom_lock);
+
+ /*
* Make sure we find the associated mm_struct even when the particular
* thread has already terminated and cleared its mm.
* We might have race with exit path so consider our work done if there
@@ -457,19 +473,19 @@ static bool __oom_reap_task(struct task_struct *tsk)
*/
p = find_lock_task_mm(tsk);
if (!p)
- return true;
+ goto unlock_oom;
mm = p->mm;
if (!atomic_inc_not_zero(&mm->mm_users)) {
task_unlock(p);
- return true;
+ goto unlock_oom;
}
task_unlock(p);
if (!down_read_trylock(&mm->mmap_sem)) {
ret = false;
- goto out;
+ goto unlock_oom;
}
tlb_gather_mmu(&tlb, mm, 0, -1);
@@ -511,13 +527,15 @@ static bool __oom_reap_task(struct task_struct *tsk)
* to release its memory.
*/
set_bit(MMF_OOM_REAPED, &mm->flags);
-out:
+unlock_oom:
+ mutex_unlock(&oom_lock);
/*
* Drop our reference but make sure the mmput slow path is called from a
* different context because we shouldn't risk we get stuck there and
* put the oom_reaper out of the way.
*/
- mmput_async(mm);
+ if (mm)
+ mmput_async(mm);
return ret;
}
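
The atomic_inc_not_zero(&mm->mm_users) call above is the usual "take a reference only if the object is still live" idiom; a plain increment could resurrect an mm whose last user is already inside mmput(). A userspace model of the primitive using C11 atomics:

        #include <stdatomic.h>
        #include <stdbool.h>

        /* succeed only while the count is non-zero; once it reaches zero
         * the release path owns the object and must not be raced with */
        static bool get_ref_not_zero(atomic_int *refs)
        {
                int old = atomic_load(refs);

                while (old != 0) {
                        if (atomic_compare_exchange_weak(refs, &old, old + 1))
                                return true;
                }
                return false;
        }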
@@ -611,8 +629,6 @@ void try_oom_reaper(struct task_struct *tsk)
if (!process_shares_mm(p, mm))
continue;
- if (same_thread_group(p, tsk))
- continue;
if (fatal_signal_pending(p))
continue;
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 2d864e64f7fe..44a4c029c8e7 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -390,8 +390,10 @@ void __init page_ext_init(void)
* We know some arch can have a nodes layout such as
* -------------pfn-------------->
* N0 | N1 | N2 | N0 | N1 | N2|....
+ *
+ * Take into account DEFERRED_STRUCT_PAGE_INIT.
*/
- if (pfn_to_nid(pfn) != nid)
+ if (early_pfn_to_nid(pfn) != nid)
continue;
if (init_section_page_ext(pfn, nid))
goto oom;
diff --git a/mm/rmap.c b/mm/rmap.c
index 8a839935b18c..0ea5d9071b32 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1098,6 +1098,8 @@ void page_move_anon_rmap(struct page *page,
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_VMA(!anon_vma, vma);
+ if (IS_ENABLED(CONFIG_DEBUG_VM) && PageTransHuge(page))
+ address &= HPAGE_PMD_MASK;
VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
diff --git a/mm/shmem.c b/mm/shmem.c
index e418a995427d..a36144909b28 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2645,10 +2645,11 @@ static int shmem_xattr_handler_get(const struct xattr_handler *handler,
}
static int shmem_xattr_handler_set(const struct xattr_handler *handler,
- struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+ struct dentry *unused, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
{
- struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
+ struct shmem_inode_info *info = SHMEM_I(inode);
name = xattr_full_name(handler, name);
return simple_xattr_set(&info->xattrs, name, value, size, flags);
diff --git a/mm/truncate.c b/mm/truncate.c
index b00272810871..4064f8f53daa 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -34,40 +34,38 @@ static void clear_exceptional_entry(struct address_space *mapping,
if (shmem_mapping(mapping))
return;
- spin_lock_irq(&mapping->tree_lock);
-
if (dax_mapping(mapping)) {
- if (radix_tree_delete_item(&mapping->page_tree, index, entry))
- mapping->nrexceptional--;
- } else {
- /*
- * Regular page slots are stabilized by the page lock even
- * without the tree itself locked. These unlocked entries
- * need verification under the tree lock.
- */
- if (!__radix_tree_lookup(&mapping->page_tree, index, &node,
- &slot))
- goto unlock;
- if (*slot != entry)
- goto unlock;
- radix_tree_replace_slot(slot, NULL);
- mapping->nrexceptional--;
- if (!node)
- goto unlock;
- workingset_node_shadows_dec(node);
- /*
- * Don't track node without shadow entries.
- *
- * Avoid acquiring the list_lru lock if already untracked.
- * The list_empty() test is safe as node->private_list is
- * protected by mapping->tree_lock.
- */
- if (!workingset_node_shadows(node) &&
- !list_empty(&node->private_list))
- list_lru_del(&workingset_shadow_nodes,
- &node->private_list);
- __radix_tree_delete_node(&mapping->page_tree, node);
+ dax_delete_mapping_entry(mapping, index);
+ return;
}
+ spin_lock_irq(&mapping->tree_lock);
+ /*
+ * Regular page slots are stabilized by the page lock even
+ * without the tree itself locked. These unlocked entries
+ * need verification under the tree lock.
+ */
+ if (!__radix_tree_lookup(&mapping->page_tree, index, &node,
+ &slot))
+ goto unlock;
+ if (*slot != entry)
+ goto unlock;
+ radix_tree_replace_slot(slot, NULL);
+ mapping->nrexceptional--;
+ if (!node)
+ goto unlock;
+ workingset_node_shadows_dec(node);
+ /*
+ * Don't track node without shadow entries.
+ *
+ * Avoid acquiring the list_lru lock if already untracked.
+ * The list_empty() test is safe as node->private_list is
+ * protected by mapping->tree_lock.
+ */
+ if (!workingset_node_shadows(node) &&
+ !list_empty(&node->private_list))
+ list_lru_del(&workingset_shadow_nodes,
+ &node->private_list);
+ __radix_tree_delete_node(&mapping->page_tree, node);
unlock:
spin_unlock_irq(&mapping->tree_lock);
}
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 72698db958e7..b6d4f258cb53 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -45,6 +45,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -483,16 +485,16 @@ static inline unsigned long zs_stat_get(struct size_class *class,
#ifdef CONFIG_ZSMALLOC_STAT
-static int __init zs_stat_init(void)
+static void __init zs_stat_init(void)
{
- if (!debugfs_initialized())
- return -ENODEV;
+ if (!debugfs_initialized()) {
+ pr_warn("debugfs not available, stat dir not created\n");
+ return;
+ }
zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
if (!zs_stat_root)
- return -ENOMEM;
-
- return 0;
+ pr_warn("debugfs 'zsmalloc' stat dir creation failed\n");
}
static void __exit zs_stat_exit(void)
@@ -577,8 +579,10 @@ static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
struct dentry *entry;
- if (!zs_stat_root)
+ if (!zs_stat_root) {
+ pr_warn("no root stat dir, not creating <%s> stat dir\n", name);
return;
+ }
entry = debugfs_create_dir(name, zs_stat_root);
if (!entry) {
@@ -592,7 +596,8 @@ static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
if (!entry) {
pr_warn("%s: debugfs file entry <%s> creation failed\n",
name, "classes");
- return;
+ debugfs_remove_recursive(pool->stat_dentry);
+ pool->stat_dentry = NULL;
}
}
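
The added rollback keeps stat_dentry consistent: either the whole per-pool debugfs subtree exists or none of it does, and the pointer is NULLed so later teardown paths never touch a half-built directory. The shape of the pattern, sketched (the file-ops name here is illustrative, not taken from the patch):

        entry = debugfs_create_dir(name, zs_stat_root);
        if (!entry)
                return;
        pool->stat_dentry = entry;

        entry = debugfs_create_file("classes", 0444, pool->stat_dentry,
                                    pool, &some_fops /* illustrative */);
        if (!entry) {
                /* roll back: a stat dir without its file is useless */
                debugfs_remove_recursive(pool->stat_dentry);
                pool->stat_dentry = NULL;
        }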
@@ -602,9 +607,8 @@ static void zs_pool_stat_destroy(struct zs_pool *pool)
}
#else /* CONFIG_ZSMALLOC_STAT */
-static int __init zs_stat_init(void)
+static void __init zs_stat_init(void)
{
- return 0;
}
static void __exit zs_stat_exit(void)
@@ -2011,17 +2015,10 @@ static int __init zs_init(void)
zpool_register_driver(&zs_zpool_driver);
#endif
- ret = zs_stat_init();
- if (ret) {
- pr_err("zs stat initialization failed\n");
- goto stat_fail;
- }
+ zs_stat_init();
+
return 0;
-stat_fail:
-#ifdef CONFIG_ZPOOL
- zpool_unregister_driver(&zs_zpool_driver);
-#endif
notifier_fail:
zs_unregister_cpu_notifier();
diff --git a/net/9p/client.c b/net/9p/client.c
index ea79ee9a7348..3fc94a49ccd5 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -518,10 +518,10 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
if (err)
goto out_err;
- if (p9_is_proto_dotu(c))
+ if (p9_is_proto_dotu(c) && ecode < 512)
err = -ecode;
- if (!err || !IS_ERR_VALUE(err)) {
+ if (!err) {
err = p9_errstr2errno(ename, strlen(ename));
p9_debug(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
@@ -605,10 +605,10 @@ static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req,
if (err)
goto out_err;
- if (p9_is_proto_dotu(c))
+ if (p9_is_proto_dotu(c) && ecode < 512)
err = -ecode;
- if (!err || !IS_ERR_VALUE(err)) {
+ if (!err) {
err = p9_errstr2errno(ename, strlen(ename));
p9_debug(P9_DEBUG_9P, "<<< RERROR (%d) %s\n",
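
The ecode < 512 guard in both hunks bounds the numeric error code to the sane errno range before negating it; anything larger (a bogus or foreign value from the server) now falls through to the string-based lookup instead of being smuggled through as a pseudo-errno. The resulting control flow, distilled:

        /* trust the numeric code only if it is a plausible errno */
        if (p9_is_proto_dotu(c) && ecode < 512)
                err = -ecode;
        if (!err)       /* no usable code: map the error string instead */
                err = p9_errstr2errno(ename, strlen(ename));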
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index dcc18c6f7cf9..55d2bfee16d7 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -651,7 +651,7 @@ EXPORT_SYMBOL(ceph_destroy_client);
/*
* true if we have the mon map (and have thus joined the cluster)
*/
-static int have_mon_and_osd_map(struct ceph_client *client)
+static bool have_mon_and_osd_map(struct ceph_client *client)
{
return client->monc.monmap && client->monc.monmap->epoch &&
client->osdc.osdmap && client->osdc.osdmap->epoch;
diff --git a/net/ceph/ceph_strings.c b/net/ceph/ceph_strings.c
index 139a9cb19b0c..3773a4fa11e3 100644
--- a/net/ceph/ceph_strings.c
+++ b/net/ceph/ceph_strings.c
@@ -27,6 +27,22 @@ __CEPH_FORALL_OSD_OPS(GENERATE_CASE)
}
}
+const char *ceph_osd_watch_op_name(int o)
+{
+ switch (o) {
+ case CEPH_OSD_WATCH_OP_UNWATCH:
+ return "unwatch";
+ case CEPH_OSD_WATCH_OP_WATCH:
+ return "watch";
+ case CEPH_OSD_WATCH_OP_RECONNECT:
+ return "reconnect";
+ case CEPH_OSD_WATCH_OP_PING:
+ return "ping";
+ default:
+ return "???";
+ }
+}
+
const char *ceph_osd_state_name(int s)
{
switch (s) {
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index b902fbc7863e..e77b04ca7802 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -54,24 +54,25 @@ static int osdmap_show(struct seq_file *s, void *p)
{
int i;
struct ceph_client *client = s->private;
- struct ceph_osdmap *map = client->osdc.osdmap;
+ struct ceph_osd_client *osdc = &client->osdc;
+ struct ceph_osdmap *map = osdc->osdmap;
struct rb_node *n;
if (map == NULL)
return 0;
- seq_printf(s, "epoch %d\n", map->epoch);
- seq_printf(s, "flags%s%s\n",
- (map->flags & CEPH_OSDMAP_NEARFULL) ? " NEARFULL" : "",
- (map->flags & CEPH_OSDMAP_FULL) ? " FULL" : "");
+ down_read(&osdc->lock);
+ seq_printf(s, "epoch %d flags 0x%x\n", map->epoch, map->flags);
for (n = rb_first(&map->pg_pools); n; n = rb_next(n)) {
- struct ceph_pg_pool_info *pool =
+ struct ceph_pg_pool_info *pi =
rb_entry(n, struct ceph_pg_pool_info, node);
- seq_printf(s, "pool %lld pg_num %u (%d) read_tier %lld write_tier %lld\n",
- pool->id, pool->pg_num, pool->pg_num_mask,
- pool->read_tier, pool->write_tier);
+ seq_printf(s, "pool %lld '%s' type %d size %d min_size %d pg_num %u pg_num_mask %d flags 0x%llx lfor %u read_tier %lld write_tier %lld\n",
+ pi->id, pi->name, pi->type, pi->size, pi->min_size,
+ pi->pg_num, pi->pg_num_mask, pi->flags,
+ pi->last_force_request_resend, pi->read_tier,
+ pi->write_tier);
}
for (i = 0; i < map->max_osd; i++) {
struct ceph_entity_addr *addr = &map->osd_addr[i];
@@ -103,6 +104,7 @@ static int osdmap_show(struct seq_file *s, void *p)
pg->pgid.seed, pg->primary_temp.osd);
}
+ up_read(&osdc->lock);
return 0;
}
@@ -126,6 +128,7 @@ static int monc_show(struct seq_file *s, void *p)
CEPH_SUBSCRIBE_ONETIME ? "" : "+"));
seq_putc(s, '\n');
}
+ seq_printf(s, "fs_cluster_id %d\n", monc->fs_cluster_id);
for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) {
__u16 op;
@@ -143,43 +146,113 @@ static int monc_show(struct seq_file *s, void *p)
return 0;
}
-static int osdc_show(struct seq_file *s, void *pp)
+static void dump_target(struct seq_file *s, struct ceph_osd_request_target *t)
{
- struct ceph_client *client = s->private;
- struct ceph_osd_client *osdc = &client->osdc;
- struct rb_node *p;
+ int i;
- mutex_lock(&osdc->request_mutex);
- for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
- struct ceph_osd_request *req;
- unsigned int i;
- int opcode;
+ seq_printf(s, "osd%d\t%llu.%x\t[", t->osd, t->pgid.pool, t->pgid.seed);
+ for (i = 0; i < t->up.size; i++)
+ seq_printf(s, "%s%d", (!i ? "" : ","), t->up.osds[i]);
+ seq_printf(s, "]/%d\t[", t->up.primary);
+ for (i = 0; i < t->acting.size; i++)
+ seq_printf(s, "%s%d", (!i ? "" : ","), t->acting.osds[i]);
+ seq_printf(s, "]/%d\t%*pE\t0x%x", t->acting.primary,
+ t->target_oid.name_len, t->target_oid.name, t->flags);
+ if (t->paused)
+ seq_puts(s, "\tP");
+}
- req = rb_entry(p, struct ceph_osd_request, r_node);
+static void dump_request(struct seq_file *s, struct ceph_osd_request *req)
+{
+ int i;
- seq_printf(s, "%lld\tosd%d\t%lld.%x\t", req->r_tid,
- req->r_osd ? req->r_osd->o_osd : -1,
- req->r_pgid.pool, req->r_pgid.seed);
+ seq_printf(s, "%llu\t", req->r_tid);
+ dump_target(s, &req->r_t);
- seq_printf(s, "%.*s", req->r_base_oid.name_len,
- req->r_base_oid.name);
+ seq_printf(s, "\t%d\t%u'%llu", req->r_attempts,
+ le32_to_cpu(req->r_replay_version.epoch),
+ le64_to_cpu(req->r_replay_version.version));
- if (req->r_reassert_version.epoch)
- seq_printf(s, "\t%u'%llu",
- (unsigned int)le32_to_cpu(req->r_reassert_version.epoch),
- le64_to_cpu(req->r_reassert_version.version));
- else
- seq_printf(s, "\t");
+ for (i = 0; i < req->r_num_ops; i++) {
+ struct ceph_osd_req_op *op = &req->r_ops[i];
+
+ seq_printf(s, "%s%s", (i == 0 ? "\t" : ","),
+ ceph_osd_op_name(op->op));
+ if (op->op == CEPH_OSD_OP_WATCH)
+ seq_printf(s, "-%s",
+ ceph_osd_watch_op_name(op->watch.op));
+ }
+
+ seq_putc(s, '\n');
+}
+
+static void dump_requests(struct seq_file *s, struct ceph_osd *osd)
+{
+ struct rb_node *n;
+
+ mutex_lock(&osd->lock);
+ for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
+ struct ceph_osd_request *req =
+ rb_entry(n, struct ceph_osd_request, r_node);
+
+ dump_request(s, req);
+ }
+
+ mutex_unlock(&osd->lock);
+}
- for (i = 0; i < req->r_num_ops; i++) {
- opcode = req->r_ops[i].op;
- seq_printf(s, "%s%s", (i == 0 ? "\t" : ","),
- ceph_osd_op_name(opcode));
- }
+static void dump_linger_request(struct seq_file *s,
+ struct ceph_osd_linger_request *lreq)
+{
+ seq_printf(s, "%llu\t", lreq->linger_id);
+ dump_target(s, &lreq->t);
+
+ seq_printf(s, "\t%u\t%s%s/%d\n", lreq->register_gen,
+ lreq->is_watch ? "W" : "N", lreq->committed ? "C" : "",
+ lreq->last_error);
+}
+
+static void dump_linger_requests(struct seq_file *s, struct ceph_osd *osd)
+{
+ struct rb_node *n;
+
+ mutex_lock(&osd->lock);
+ for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
+ struct ceph_osd_linger_request *lreq =
+ rb_entry(n, struct ceph_osd_linger_request, node);
+
+ dump_linger_request(s, lreq);
+ }
+
+ mutex_unlock(&osd->lock);
+}
- seq_printf(s, "\n");
+static int osdc_show(struct seq_file *s, void *pp)
+{
+ struct ceph_client *client = s->private;
+ struct ceph_osd_client *osdc = &client->osdc;
+ struct rb_node *n;
+
+ down_read(&osdc->lock);
+ seq_printf(s, "REQUESTS %d homeless %d\n",
+ atomic_read(&osdc->num_requests),
+ atomic_read(&osdc->num_homeless));
+ for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
+ struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
+
+ dump_requests(s, osd);
}
- mutex_unlock(&osdc->request_mutex);
+ dump_requests(s, &osdc->homeless_osd);
+
+ seq_puts(s, "LINGER REQUESTS\n");
+ for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
+ struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
+
+ dump_linger_requests(s, osd);
+ }
+ dump_linger_requests(s, &osdc->homeless_osd);
+
+ up_read(&osdc->lock);
return 0;
}
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index cf638c009cfa..37c38a7fb5c5 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -260,20 +260,26 @@ static void __send_subscribe(struct ceph_mon_client *monc)
BUG_ON(num < 1); /* monmap sub is always there */
ceph_encode_32(&p, num);
for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
- const char *s = ceph_sub_str[i];
+ char buf[32];
+ int len;
if (!monc->subs[i].want)
continue;
- dout("%s %s start %llu flags 0x%x\n", __func__, s,
+ len = sprintf(buf, "%s", ceph_sub_str[i]);
+ if (i == CEPH_SUB_MDSMAP &&
+ monc->fs_cluster_id != CEPH_FS_CLUSTER_ID_NONE)
+ len += sprintf(buf + len, ".%d", monc->fs_cluster_id);
+
+ dout("%s %s start %llu flags 0x%x\n", __func__, buf,
le64_to_cpu(monc->subs[i].item.start),
monc->subs[i].item.flags);
- ceph_encode_string(&p, end, s, strlen(s));
+ ceph_encode_string(&p, end, buf, len);
memcpy(p, &monc->subs[i].item, sizeof(monc->subs[i].item));
p += sizeof(monc->subs[i].item);
}
- BUG_ON(p != (end - 35 - (ARRAY_SIZE(monc->subs) - num) * 19));
+ BUG_ON(p > end);
msg->front.iov_len = p - msg->front.iov_base;
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
ceph_msg_revoke(msg);
@@ -376,19 +382,13 @@ void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch)
}
EXPORT_SYMBOL(ceph_monc_got_map);
-/*
- * Register interest in the next osdmap
- */
-void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc)
+void ceph_monc_renew_subs(struct ceph_mon_client *monc)
{
- dout("%s have %u\n", __func__, monc->subs[CEPH_SUB_OSDMAP].have);
mutex_lock(&monc->mutex);
- if (__ceph_monc_want_map(monc, CEPH_SUB_OSDMAP,
- monc->subs[CEPH_SUB_OSDMAP].have + 1, false))
- __send_subscribe(monc);
+ __send_subscribe(monc);
mutex_unlock(&monc->mutex);
}
-EXPORT_SYMBOL(ceph_monc_request_next_osdmap);
+EXPORT_SYMBOL(ceph_monc_renew_subs);
/*
* Wait for an osdmap with a given epoch.
@@ -478,51 +478,17 @@ out:
/*
* generic requests (currently statfs, mon_get_version)
*/
-static struct ceph_mon_generic_request *__lookup_generic_req(
- struct ceph_mon_client *monc, u64 tid)
-{
- struct ceph_mon_generic_request *req;
- struct rb_node *n = monc->generic_request_tree.rb_node;
-
- while (n) {
- req = rb_entry(n, struct ceph_mon_generic_request, node);
- if (tid < req->tid)
- n = n->rb_left;
- else if (tid > req->tid)
- n = n->rb_right;
- else
- return req;
- }
- return NULL;
-}
-
-static void __insert_generic_request(struct ceph_mon_client *monc,
- struct ceph_mon_generic_request *new)
-{
- struct rb_node **p = &monc->generic_request_tree.rb_node;
- struct rb_node *parent = NULL;
- struct ceph_mon_generic_request *req = NULL;
-
- while (*p) {
- parent = *p;
- req = rb_entry(parent, struct ceph_mon_generic_request, node);
- if (new->tid < req->tid)
- p = &(*p)->rb_left;
- else if (new->tid > req->tid)
- p = &(*p)->rb_right;
- else
- BUG();
- }
-
- rb_link_node(&new->node, parent, p);
- rb_insert_color(&new->node, &monc->generic_request_tree);
-}
+DEFINE_RB_FUNCS(generic_request, struct ceph_mon_generic_request, tid, node)
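
DEFINE_RB_FUNCS(generic_request, struct ceph_mon_generic_request, tid, node) presumably generates insert_generic_request(), lookup_generic_request() and erase_generic_request(), keyed on tid and linked through node, replacing the two hand-rolled helpers deleted above. Roughly what the generated insert helper is expected to look like (a sketch, not the exact macro expansion):

        static void insert_generic_request(struct rb_root *root,
                                           struct ceph_mon_generic_request *req)
        {
                struct rb_node **n = &root->rb_node, *parent = NULL;

                while (*n) {
                        struct ceph_mon_generic_request *cur =
                                rb_entry(*n, struct ceph_mon_generic_request,
                                         node);

                        parent = *n;
                        if (req->tid < cur->tid)
                                n = &(*n)->rb_left;
                        else if (req->tid > cur->tid)
                                n = &(*n)->rb_right;
                        else
                                BUG();  /* duplicate tid */
                }
                rb_link_node(&req->node, parent, n);
                rb_insert_color(&req->node, root);
        }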
static void release_generic_request(struct kref *kref)
{
struct ceph_mon_generic_request *req =
container_of(kref, struct ceph_mon_generic_request, kref);
+ dout("%s greq %p request %p reply %p\n", __func__, req, req->request,
+ req->reply);
+ WARN_ON(!RB_EMPTY_NODE(&req->node));
+
if (req->reply)
ceph_msg_put(req->reply);
if (req->request)
@@ -533,7 +499,8 @@ static void release_generic_request(struct kref *kref)
static void put_generic_request(struct ceph_mon_generic_request *req)
{
- kref_put(&req->kref, release_generic_request);
+ if (req)
+ kref_put(&req->kref, release_generic_request);
}
static void get_generic_request(struct ceph_mon_generic_request *req)
@@ -541,6 +508,103 @@ static void get_generic_request(struct ceph_mon_generic_request *req)
kref_get(&req->kref);
}
+static struct ceph_mon_generic_request *
+alloc_generic_request(struct ceph_mon_client *monc, gfp_t gfp)
+{
+ struct ceph_mon_generic_request *req;
+
+ req = kzalloc(sizeof(*req), gfp);
+ if (!req)
+ return NULL;
+
+ req->monc = monc;
+ kref_init(&req->kref);
+ RB_CLEAR_NODE(&req->node);
+ init_completion(&req->completion);
+
+ dout("%s greq %p\n", __func__, req);
+ return req;
+}
+
+static void register_generic_request(struct ceph_mon_generic_request *req)
+{
+ struct ceph_mon_client *monc = req->monc;
+
+ WARN_ON(req->tid);
+
+ get_generic_request(req);
+ req->tid = ++monc->last_tid;
+ insert_generic_request(&monc->generic_request_tree, req);
+}
+
+static void send_generic_request(struct ceph_mon_client *monc,
+ struct ceph_mon_generic_request *req)
+{
+ WARN_ON(!req->tid);
+
+ dout("%s greq %p tid %llu\n", __func__, req, req->tid);
+ req->request->hdr.tid = cpu_to_le64(req->tid);
+ ceph_con_send(&monc->con, ceph_msg_get(req->request));
+}
+
+static void __finish_generic_request(struct ceph_mon_generic_request *req)
+{
+ struct ceph_mon_client *monc = req->monc;
+
+ dout("%s greq %p tid %llu\n", __func__, req, req->tid);
+ erase_generic_request(&monc->generic_request_tree, req);
+
+ ceph_msg_revoke(req->request);
+ ceph_msg_revoke_incoming(req->reply);
+}
+
+static void finish_generic_request(struct ceph_mon_generic_request *req)
+{
+ __finish_generic_request(req);
+ put_generic_request(req);
+}
+
+static void complete_generic_request(struct ceph_mon_generic_request *req)
+{
+ if (req->complete_cb)
+ req->complete_cb(req);
+ else
+ complete_all(&req->completion);
+ put_generic_request(req);
+}
+
+void cancel_generic_request(struct ceph_mon_generic_request *req)
+{
+ struct ceph_mon_client *monc = req->monc;
+ struct ceph_mon_generic_request *lookup_req;
+
+ dout("%s greq %p tid %llu\n", __func__, req, req->tid);
+
+ mutex_lock(&monc->mutex);
+ lookup_req = lookup_generic_request(&monc->generic_request_tree,
+ req->tid);
+ if (lookup_req) {
+ WARN_ON(lookup_req != req);
+ finish_generic_request(req);
+ }
+
+ mutex_unlock(&monc->mutex);
+}
+
+static int wait_generic_request(struct ceph_mon_generic_request *req)
+{
+ int ret;
+
+ dout("%s greq %p tid %llu\n", __func__, req, req->tid);
+ ret = wait_for_completion_interruptible(&req->completion);
+ if (ret)
+ cancel_generic_request(req);
+ else
+ ret = req->result; /* completed */
+
+ return ret;
+}
+
static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
struct ceph_msg_header *hdr,
int *skip)
@@ -551,7 +615,7 @@ static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
struct ceph_msg *m;
mutex_lock(&monc->mutex);
- req = __lookup_generic_req(monc, tid);
+ req = lookup_generic_request(&monc->generic_request_tree, tid);
if (!req) {
dout("get_generic_reply %lld dne\n", tid);
*skip = 1;
@@ -570,42 +634,6 @@ static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
return m;
}
-static int __do_generic_request(struct ceph_mon_client *monc, u64 tid,
- struct ceph_mon_generic_request *req)
-{
- int err;
-
- /* register request */
- req->tid = tid != 0 ? tid : ++monc->last_tid;
- req->request->hdr.tid = cpu_to_le64(req->tid);
- __insert_generic_request(monc, req);
- monc->num_generic_requests++;
- ceph_con_send(&monc->con, ceph_msg_get(req->request));
- mutex_unlock(&monc->mutex);
-
- err = wait_for_completion_interruptible(&req->completion);
-
- mutex_lock(&monc->mutex);
- rb_erase(&req->node, &monc->generic_request_tree);
- monc->num_generic_requests--;
-
- if (!err)
- err = req->result;
- return err;
-}
-
-static int do_generic_request(struct ceph_mon_client *monc,
- struct ceph_mon_generic_request *req)
-{
- int err;
-
- mutex_lock(&monc->mutex);
- err = __do_generic_request(monc, 0, req);
- mutex_unlock(&monc->mutex);
-
- return err;
-}
-
/*
* statfs
*/
@@ -616,22 +644,24 @@ static void handle_statfs_reply(struct ceph_mon_client *monc,
struct ceph_mon_statfs_reply *reply = msg->front.iov_base;
u64 tid = le64_to_cpu(msg->hdr.tid);
+ dout("%s msg %p tid %llu\n", __func__, msg, tid);
+
if (msg->front.iov_len != sizeof(*reply))
goto bad;
- dout("handle_statfs_reply %p tid %llu\n", msg, tid);
mutex_lock(&monc->mutex);
- req = __lookup_generic_req(monc, tid);
- if (req) {
- *(struct ceph_statfs *)req->buf = reply->st;
- req->result = 0;
- get_generic_request(req);
+ req = lookup_generic_request(&monc->generic_request_tree, tid);
+ if (!req) {
+ mutex_unlock(&monc->mutex);
+ return;
}
+
+ req->result = 0;
+ *req->u.st = reply->st; /* struct */
+ __finish_generic_request(req);
mutex_unlock(&monc->mutex);
- if (req) {
- complete_all(&req->completion);
- put_generic_request(req);
- }
+
+ complete_generic_request(req);
return;
bad:
@@ -646,38 +676,38 @@ int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
{
struct ceph_mon_generic_request *req;
struct ceph_mon_statfs *h;
- int err;
+ int ret = -ENOMEM;
- req = kzalloc(sizeof(*req), GFP_NOFS);
+ req = alloc_generic_request(monc, GFP_NOFS);
if (!req)
- return -ENOMEM;
-
- kref_init(&req->kref);
- req->buf = buf;
- init_completion(&req->completion);
+ goto out;
- err = -ENOMEM;
req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS,
true);
if (!req->request)
goto out;
- req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, GFP_NOFS,
- true);
+
+ req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 64, GFP_NOFS, true);
if (!req->reply)
goto out;
+ req->u.st = buf;
+
+ mutex_lock(&monc->mutex);
+ register_generic_request(req);
/* fill out request */
h = req->request->front.iov_base;
h->monhdr.have_version = 0;
h->monhdr.session_mon = cpu_to_le16(-1);
h->monhdr.session_mon_tid = 0;
h->fsid = monc->monmap->fsid;
+ send_generic_request(monc, req);
+ mutex_unlock(&monc->mutex);
- err = do_generic_request(monc, req);
-
+ ret = wait_generic_request(req);
out:
put_generic_request(req);
- return err;
+ return ret;
}
EXPORT_SYMBOL(ceph_monc_do_statfs);
@@ -690,7 +720,7 @@ static void handle_get_version_reply(struct ceph_mon_client *monc,
void *end = p + msg->front_alloc_len;
u64 handle;
- dout("%s %p tid %llu\n", __func__, msg, tid);
+ dout("%s msg %p tid %llu\n", __func__, msg, tid);
ceph_decode_need(&p, end, 2*sizeof(u64), bad);
handle = ceph_decode_64(&p);
@@ -698,77 +728,111 @@ static void handle_get_version_reply(struct ceph_mon_client *monc,
goto bad;
mutex_lock(&monc->mutex);
- req = __lookup_generic_req(monc, handle);
- if (req) {
- *(u64 *)req->buf = ceph_decode_64(&p);
- req->result = 0;
- get_generic_request(req);
+ req = lookup_generic_request(&monc->generic_request_tree, handle);
+ if (!req) {
+ mutex_unlock(&monc->mutex);
+ return;
}
+
+ req->result = 0;
+ req->u.newest = ceph_decode_64(&p);
+ __finish_generic_request(req);
mutex_unlock(&monc->mutex);
- if (req) {
- complete_all(&req->completion);
- put_generic_request(req);
- }
+ complete_generic_request(req);
return;
+
bad:
pr_err("corrupt mon_get_version reply, tid %llu\n", tid);
ceph_msg_dump(msg);
}
-/*
- * Send MMonGetVersion and wait for the reply.
- *
- * @what: one of "mdsmap", "osdmap" or "monmap"
- */
-int ceph_monc_do_get_version(struct ceph_mon_client *monc, const char *what,
- u64 *newest)
+static struct ceph_mon_generic_request *
+__ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
+ ceph_monc_callback_t cb, u64 private_data)
{
struct ceph_mon_generic_request *req;
- void *p, *end;
- u64 tid;
- int err;
- req = kzalloc(sizeof(*req), GFP_NOFS);
+ req = alloc_generic_request(monc, GFP_NOIO);
if (!req)
- return -ENOMEM;
-
- kref_init(&req->kref);
- req->buf = newest;
- init_completion(&req->completion);
+ goto err_put_req;
req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION,
sizeof(u64) + sizeof(u32) + strlen(what),
- GFP_NOFS, true);
- if (!req->request) {
- err = -ENOMEM;
- goto out;
- }
+ GFP_NOIO, true);
+ if (!req->request)
+ goto err_put_req;
- req->reply = ceph_msg_new(CEPH_MSG_MON_GET_VERSION_REPLY, 1024,
- GFP_NOFS, true);
- if (!req->reply) {
- err = -ENOMEM;
- goto out;
- }
+ req->reply = ceph_msg_new(CEPH_MSG_MON_GET_VERSION_REPLY, 32, GFP_NOIO,
+ true);
+ if (!req->reply)
+ goto err_put_req;
- p = req->request->front.iov_base;
- end = p + req->request->front_alloc_len;
+ req->complete_cb = cb;
+ req->private_data = private_data;
- /* fill out request */
mutex_lock(&monc->mutex);
- tid = ++monc->last_tid;
- ceph_encode_64(&p, tid); /* handle */
- ceph_encode_string(&p, end, what, strlen(what));
+ register_generic_request(req);
+ {
+ void *p = req->request->front.iov_base;
+ void *const end = p + req->request->front_alloc_len;
+
+ ceph_encode_64(&p, req->tid); /* handle */
+ ceph_encode_string(&p, end, what, strlen(what));
+ WARN_ON(p != end);
+ }
+ send_generic_request(monc, req);
+ mutex_unlock(&monc->mutex);
- err = __do_generic_request(monc, tid, req);
+ return req;
- mutex_unlock(&monc->mutex);
-out:
+err_put_req:
put_generic_request(req);
- return err;
+ return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * Send MMonGetVersion and wait for the reply.
+ *
+ * @what: one of "mdsmap", "osdmap" or "monmap"
+ */
+int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
+ u64 *newest)
+{
+ struct ceph_mon_generic_request *req;
+ int ret;
+
+ req = __ceph_monc_get_version(monc, what, NULL, 0);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ ret = wait_generic_request(req);
+ if (!ret)
+ *newest = req->u.newest;
+
+ put_generic_request(req);
+ return ret;
}
-EXPORT_SYMBOL(ceph_monc_do_get_version);
+EXPORT_SYMBOL(ceph_monc_get_version);
+
+/*
+ * Send MMonGetVersion,
+ *
+ * @what: one of "mdsmap", "osdmap" or "monmap"
+ */
+int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what,
+ ceph_monc_callback_t cb, u64 private_data)
+{
+ struct ceph_mon_generic_request *req;
+
+ req = __ceph_monc_get_version(monc, what, cb, private_data);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ put_generic_request(req);
+ return 0;
+}
+EXPORT_SYMBOL(ceph_monc_get_version_async);
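
A hypothetical caller of the new async variant; consistent with the completion path above, the callback receives the finished request with result and u.newest already filled in:

        /* hypothetical usage sketch, not part of the patch */
        static void got_version(struct ceph_mon_generic_request *req)
        {
                if (req->result)
                        pr_err("get_version failed: %d\n", req->result);
                else
                        pr_info("newest osdmap epoch: %llu\n", req->u.newest);
        }

        ...
        err = ceph_monc_get_version_async(monc, "osdmap", got_version, 0);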
/*
* Resend pending generic requests.
@@ -890,7 +954,7 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
if (!monc->m_subscribe_ack)
goto out_auth;
- monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, GFP_NOFS,
+ monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 128, GFP_NOFS,
true);
if (!monc->m_subscribe)
goto out_subscribe_ack;
@@ -914,9 +978,10 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
INIT_DELAYED_WORK(&monc->delayed_work, delayed_work);
monc->generic_request_tree = RB_ROOT;
- monc->num_generic_requests = 0;
monc->last_tid = 0;
+ monc->fs_cluster_id = CEPH_FS_CLUSTER_ID_NONE;
+
return 0;
out_auth_reply:
@@ -954,6 +1019,8 @@ void ceph_monc_stop(struct ceph_mon_client *monc)
ceph_auth_destroy(monc->auth);
+ WARN_ON(!RB_EMPTY_ROOT(&monc->generic_request_tree));
+
ceph_msg_put(monc->m_auth);
ceph_msg_put(monc->m_auth_reply);
ceph_msg_put(monc->m_subscribe);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 40a53a70efdf..0160d7d09a1e 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -19,25 +19,12 @@
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>
-#define OSD_OP_FRONT_LEN 4096
#define OSD_OPREPLY_FRONT_LEN 512
static struct kmem_cache *ceph_osd_request_cache;
static const struct ceph_connection_operations osd_con_ops;
-static void __send_queued(struct ceph_osd_client *osdc);
-static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd);
-static void __register_request(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req);
-static void __unregister_request(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req);
-static void __unregister_linger_request(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req);
-static void __enqueue_request(struct ceph_osd_request *req);
-static void __send_request(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req);
-
/*
* Implement client access to distributed object storage cluster.
*
@@ -56,6 +43,52 @@ static void __send_request(struct ceph_osd_client *osdc,
* channel with an OSD is reset.
*/
+static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
+static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
+static void link_linger(struct ceph_osd *osd,
+ struct ceph_osd_linger_request *lreq);
+static void unlink_linger(struct ceph_osd *osd,
+ struct ceph_osd_linger_request *lreq);
+
+#if 1
+static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
+{
+ bool wrlocked = true;
+
+ if (unlikely(down_read_trylock(sem))) {
+ wrlocked = false;
+ up_read(sem);
+ }
+
+ return wrlocked;
+}
+static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
+{
+ WARN_ON(!rwsem_is_locked(&osdc->lock));
+}
+static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
+{
+ WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
+}
+static inline void verify_osd_locked(struct ceph_osd *osd)
+{
+ struct ceph_osd_client *osdc = osd->o_osdc;
+
+ WARN_ON(!(mutex_is_locked(&osd->lock) &&
+ rwsem_is_locked(&osdc->lock)) &&
+ !rwsem_is_wrlocked(&osdc->lock));
+}
+static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
+{
+ WARN_ON(!mutex_is_locked(&lreq->lock));
+}
+#else
+static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
+static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
+static inline void verify_osd_locked(struct ceph_osd *osd) { }
+static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
+#endif
+
/*
* calculate the mapping of a file extent onto an object, and fill out the
* request accordingly. shorten extent as necessary if it crosses an
@@ -144,14 +177,6 @@ osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);
-struct ceph_osd_data *
-osd_req_op_cls_response_data(struct ceph_osd_request *osd_req,
- unsigned int which)
-{
- return osd_req_op_data(osd_req, which, cls, response_data);
-}
-EXPORT_SYMBOL(osd_req_op_cls_response_data); /* ??? */
-
void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
unsigned int which, struct page **pages,
u64 length, u32 alignment,
@@ -218,6 +243,8 @@ void osd_req_op_cls_request_data_pagelist(
osd_data = osd_req_op_data(osd_req, which, cls, request_data);
ceph_osd_data_pagelist_init(osd_data, pagelist);
+ osd_req->r_ops[which].cls.indata_len += pagelist->length;
+ osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);
@@ -230,6 +257,8 @@ void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
osd_data = osd_req_op_data(osd_req, which, cls, request_data);
ceph_osd_data_pages_init(osd_data, pages, length, alignment,
pages_from_pool, own_pages);
+ osd_req->r_ops[which].cls.indata_len += length;
+ osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);
@@ -302,14 +331,76 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
case CEPH_OSD_OP_STAT:
ceph_osd_data_release(&op->raw_data_in);
break;
+ case CEPH_OSD_OP_NOTIFY_ACK:
+ ceph_osd_data_release(&op->notify_ack.request_data);
+ break;
+ case CEPH_OSD_OP_NOTIFY:
+ ceph_osd_data_release(&op->notify.request_data);
+ ceph_osd_data_release(&op->notify.response_data);
+ break;
default:
break;
}
}
/*
+ * Assumes @t is zero-initialized.
+ */
+static void target_init(struct ceph_osd_request_target *t)
+{
+ ceph_oid_init(&t->base_oid);
+ ceph_oloc_init(&t->base_oloc);
+ ceph_oid_init(&t->target_oid);
+ ceph_oloc_init(&t->target_oloc);
+
+ ceph_osds_init(&t->acting);
+ ceph_osds_init(&t->up);
+ t->size = -1;
+ t->min_size = -1;
+
+ t->osd = CEPH_HOMELESS_OSD;
+}
+
+static void target_copy(struct ceph_osd_request_target *dest,
+ const struct ceph_osd_request_target *src)
+{
+ ceph_oid_copy(&dest->base_oid, &src->base_oid);
+ ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
+ ceph_oid_copy(&dest->target_oid, &src->target_oid);
+ ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);
+
+ dest->pgid = src->pgid; /* struct */
+ dest->pg_num = src->pg_num;
+ dest->pg_num_mask = src->pg_num_mask;
+ ceph_osds_copy(&dest->acting, &src->acting);
+ ceph_osds_copy(&dest->up, &src->up);
+ dest->size = src->size;
+ dest->min_size = src->min_size;
+ dest->sort_bitwise = src->sort_bitwise;
+
+ dest->flags = src->flags;
+ dest->paused = src->paused;
+
+ dest->osd = src->osd;
+}
+
+static void target_destroy(struct ceph_osd_request_target *t)
+{
+ ceph_oid_destroy(&t->base_oid);
+ ceph_oid_destroy(&t->target_oid);
+}
+
+/*
* requests
*/
+static void request_release_checks(struct ceph_osd_request *req)
+{
+ WARN_ON(!RB_EMPTY_NODE(&req->r_node));
+ WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
+ WARN_ON(!list_empty(&req->r_unsafe_item));
+ WARN_ON(req->r_osd);
+}
+
static void ceph_osdc_release_request(struct kref *kref)
{
struct ceph_osd_request *req = container_of(kref,
@@ -318,24 +409,19 @@ static void ceph_osdc_release_request(struct kref *kref)
dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
req->r_request, req->r_reply);
- WARN_ON(!RB_EMPTY_NODE(&req->r_node));
- WARN_ON(!list_empty(&req->r_req_lru_item));
- WARN_ON(!list_empty(&req->r_osd_item));
- WARN_ON(!list_empty(&req->r_linger_item));
- WARN_ON(!list_empty(&req->r_linger_osd_item));
- WARN_ON(req->r_osd);
+ request_release_checks(req);
if (req->r_request)
ceph_msg_put(req->r_request);
- if (req->r_reply) {
- ceph_msg_revoke_incoming(req->r_reply);
+ if (req->r_reply)
ceph_msg_put(req->r_reply);
- }
for (which = 0; which < req->r_num_ops; which++)
osd_req_op_data_release(req, which);
+ target_destroy(&req->r_t);
ceph_put_snap_context(req->r_snapc);
+
if (req->r_mempool)
mempool_free(req, req->r_osdc->req_mempool);
else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
@@ -354,12 +440,66 @@ EXPORT_SYMBOL(ceph_osdc_get_request);
void ceph_osdc_put_request(struct ceph_osd_request *req)
{
- dout("%s %p (was %d)\n", __func__, req,
- atomic_read(&req->r_kref.refcount));
- kref_put(&req->r_kref, ceph_osdc_release_request);
+ if (req) {
+ dout("%s %p (was %d)\n", __func__, req,
+ atomic_read(&req->r_kref.refcount));
+ kref_put(&req->r_kref, ceph_osdc_release_request);
+ }
}
EXPORT_SYMBOL(ceph_osdc_put_request);
+static void request_init(struct ceph_osd_request *req)
+{
+ /* req only, each op is zeroed in _osd_req_op_init() */
+ memset(req, 0, sizeof(*req));
+
+ kref_init(&req->r_kref);
+ init_completion(&req->r_completion);
+ init_completion(&req->r_safe_completion);
+ RB_CLEAR_NODE(&req->r_node);
+ RB_CLEAR_NODE(&req->r_mc_node);
+ INIT_LIST_HEAD(&req->r_unsafe_item);
+
+ target_init(&req->r_t);
+}
+
+/*
+ * This is ugly, but it allows us to reuse linger registration and ping
+ * requests, keeping the structure of the code around send_linger{_ping}()
+ * reasonable. Setting up a min_nr=2 mempool for each linger request
+ * and dealing with copying ops (this blasts req only, watch op remains
+ * intact) isn't any better.
+ */
+static void request_reinit(struct ceph_osd_request *req)
+{
+ struct ceph_osd_client *osdc = req->r_osdc;
+ bool mempool = req->r_mempool;
+ unsigned int num_ops = req->r_num_ops;
+ u64 snapid = req->r_snapid;
+ struct ceph_snap_context *snapc = req->r_snapc;
+ bool linger = req->r_linger;
+ struct ceph_msg *request_msg = req->r_request;
+ struct ceph_msg *reply_msg = req->r_reply;
+
+ dout("%s req %p\n", __func__, req);
+ WARN_ON(atomic_read(&req->r_kref.refcount) != 1);
+ request_release_checks(req);
+
+ WARN_ON(atomic_read(&request_msg->kref.refcount) != 1);
+ WARN_ON(atomic_read(&reply_msg->kref.refcount) != 1);
+ target_destroy(&req->r_t);
+
+ request_init(req);
+ req->r_osdc = osdc;
+ req->r_mempool = mempool;
+ req->r_num_ops = num_ops;
+ req->r_snapid = snapid;
+ req->r_snapc = snapc;
+ req->r_linger = linger;
+ req->r_request = request_msg;
+ req->r_reply = reply_msg;
+}
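
A hedged sketch of the reuse pattern request_reinit() enables; the real call sites are send_linger() and send_linger_ping() further down, and the helper name here is made up:

	/* Illustrative only: resend a lingering request with a fresh tid. */
	static void resend_linger_req(struct ceph_osd_request *req)
	{
		if (req->r_osd)
			cancel_linger_request(req); /* unlink from its session */

		request_reinit(req);        /* back to just-allocated state */
		/* refill target/ops/callback, then link and send again */
	}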
+
struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
struct ceph_snap_context *snapc,
unsigned int num_ops,
@@ -367,8 +507,6 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
gfp_t gfp_flags)
{
struct ceph_osd_request *req;
- struct ceph_msg *msg;
- size_t msg_size;
if (use_mempool) {
BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
@@ -383,73 +521,65 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
if (unlikely(!req))
return NULL;
- /* req only, each op is zeroed in _osd_req_op_init() */
- memset(req, 0, sizeof(*req));
-
+ request_init(req);
req->r_osdc = osdc;
req->r_mempool = use_mempool;
req->r_num_ops = num_ops;
+ req->r_snapid = CEPH_NOSNAP;
+ req->r_snapc = ceph_get_snap_context(snapc);
- kref_init(&req->r_kref);
- init_completion(&req->r_completion);
- init_completion(&req->r_safe_completion);
- RB_CLEAR_NODE(&req->r_node);
- INIT_LIST_HEAD(&req->r_unsafe_item);
- INIT_LIST_HEAD(&req->r_linger_item);
- INIT_LIST_HEAD(&req->r_linger_osd_item);
- INIT_LIST_HEAD(&req->r_req_lru_item);
- INIT_LIST_HEAD(&req->r_osd_item);
-
- req->r_base_oloc.pool = -1;
- req->r_target_oloc.pool = -1;
+ dout("%s req %p\n", __func__, req);
+ return req;
+}
+EXPORT_SYMBOL(ceph_osdc_alloc_request);
- msg_size = OSD_OPREPLY_FRONT_LEN;
- if (num_ops > CEPH_OSD_SLAB_OPS) {
- /* ceph_osd_op and rval */
- msg_size += (num_ops - CEPH_OSD_SLAB_OPS) *
- (sizeof(struct ceph_osd_op) + 4);
- }
+int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
+{
+ struct ceph_osd_client *osdc = req->r_osdc;
+ struct ceph_msg *msg;
+ int msg_size;
- /* create reply message */
- if (use_mempool)
- msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
- else
- msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size,
- gfp_flags, true);
- if (!msg) {
- ceph_osdc_put_request(req);
- return NULL;
- }
- req->r_reply = msg;
+ WARN_ON(ceph_oid_empty(&req->r_base_oid));
+ /* create request message */
msg_size = 4 + 4 + 4; /* client_inc, osdmap_epoch, flags */
msg_size += 4 + 4 + 4 + 8; /* mtime, reassert_version */
msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */
msg_size += 1 + 8 + 4 + 4; /* pgid */
- msg_size += 4 + CEPH_MAX_OID_NAME_LEN; /* oid */
- msg_size += 2 + num_ops * sizeof(struct ceph_osd_op);
+ msg_size += 4 + req->r_base_oid.name_len; /* oid */
+ msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
msg_size += 8; /* snapid */
msg_size += 8; /* snap_seq */
- msg_size += 4 + 8 * (snapc ? snapc->num_snaps : 0); /* snaps */
+ msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
msg_size += 4; /* retry_attempt */
- /* create request message; allow space for oid */
- if (use_mempool)
+ if (req->r_mempool)
msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
else
- msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags, true);
- if (!msg) {
- ceph_osdc_put_request(req);
- return NULL;
- }
+ msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
+ if (!msg)
+ return -ENOMEM;
memset(msg->front.iov_base, 0, msg->front.iov_len);
-
req->r_request = msg;
- return req;
+ /* create reply message */
+ msg_size = OSD_OPREPLY_FRONT_LEN;
+ msg_size += req->r_base_oid.name_len;
+ msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);
+
+ if (req->r_mempool)
+ msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
+ else
+ msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
+ if (!msg)
+ return -ENOMEM;
+
+ req->r_reply = msg;
+
+ return 0;
}
-EXPORT_SYMBOL(ceph_osdc_alloc_request);
+EXPORT_SYMBOL(ceph_osdc_alloc_messages);
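
With allocation now split in two, a caller that names the object itself might look roughly like this (a sketch; the object name and the STAT op are made up for illustration):

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_printf(&req->r_base_oid, "%s", "example_obj"); /* hypothetical */
	osd_req_op_init(req, 0, CEPH_OSD_OP_STAT, 0);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO); /* sized from oid + ops */
	if (ret) {
		ceph_osdc_put_request(req);
		return ret;
	}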
static bool osd_req_opcode_valid(u16 opcode)
{
@@ -587,8 +717,6 @@ void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);
- op->cls.argc = 0; /* currently unused */
-
op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
@@ -627,21 +755,19 @@ int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
}
EXPORT_SYMBOL(osd_req_op_xattr_init);
-void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
- unsigned int which, u16 opcode,
- u64 cookie, u64 version, int flag)
+/*
+ * @watch_opcode: CEPH_OSD_WATCH_OP_*
+ */
+static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
+ u64 cookie, u8 watch_opcode)
{
- struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
- opcode, 0);
-
- BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH);
+ struct ceph_osd_req_op *op;
+ op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
op->watch.cookie = cookie;
- op->watch.ver = version;
- if (opcode == CEPH_OSD_OP_WATCH && flag)
- op->watch.flag = (u8)1;
+ op->watch.op = watch_opcode;
+ op->watch.gen = 0;
}
-EXPORT_SYMBOL(osd_req_op_watch_init);
void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
unsigned int which,
@@ -686,16 +812,9 @@ static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
}
}
-static u64 osd_req_encode_op(struct ceph_osd_request *req,
- struct ceph_osd_op *dst, unsigned int which)
+static u32 osd_req_encode_op(struct ceph_osd_op *dst,
+ const struct ceph_osd_req_op *src)
{
- struct ceph_osd_req_op *src;
- struct ceph_osd_data *osd_data;
- u64 request_data_len = 0;
- u64 data_length;
-
- BUG_ON(which >= req->r_num_ops);
- src = &req->r_ops[which];
if (WARN_ON(!osd_req_opcode_valid(src->op))) {
pr_err("unrecognized osd opcode %d\n", src->op);
@@ -704,57 +823,36 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
switch (src->op) {
case CEPH_OSD_OP_STAT:
- osd_data = &src->raw_data_in;
- ceph_osdc_msg_data_add(req->r_reply, osd_data);
break;
case CEPH_OSD_OP_READ:
case CEPH_OSD_OP_WRITE:
case CEPH_OSD_OP_WRITEFULL:
case CEPH_OSD_OP_ZERO:
case CEPH_OSD_OP_TRUNCATE:
- if (src->op == CEPH_OSD_OP_WRITE ||
- src->op == CEPH_OSD_OP_WRITEFULL)
- request_data_len = src->extent.length;
dst->extent.offset = cpu_to_le64(src->extent.offset);
dst->extent.length = cpu_to_le64(src->extent.length);
dst->extent.truncate_size =
cpu_to_le64(src->extent.truncate_size);
dst->extent.truncate_seq =
cpu_to_le32(src->extent.truncate_seq);
- osd_data = &src->extent.osd_data;
- if (src->op == CEPH_OSD_OP_WRITE ||
- src->op == CEPH_OSD_OP_WRITEFULL)
- ceph_osdc_msg_data_add(req->r_request, osd_data);
- else
- ceph_osdc_msg_data_add(req->r_reply, osd_data);
break;
case CEPH_OSD_OP_CALL:
dst->cls.class_len = src->cls.class_len;
dst->cls.method_len = src->cls.method_len;
- osd_data = &src->cls.request_info;
- ceph_osdc_msg_data_add(req->r_request, osd_data);
- BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGELIST);
- request_data_len = osd_data->pagelist->length;
-
- osd_data = &src->cls.request_data;
- data_length = ceph_osd_data_length(osd_data);
- if (data_length) {
- BUG_ON(osd_data->type == CEPH_OSD_DATA_TYPE_NONE);
- dst->cls.indata_len = cpu_to_le32(data_length);
- ceph_osdc_msg_data_add(req->r_request, osd_data);
- src->indata_len += data_length;
- request_data_len += data_length;
- }
- osd_data = &src->cls.response_data;
- ceph_osdc_msg_data_add(req->r_reply, osd_data);
+ dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
break;
case CEPH_OSD_OP_STARTSYNC:
break;
- case CEPH_OSD_OP_NOTIFY_ACK:
case CEPH_OSD_OP_WATCH:
dst->watch.cookie = cpu_to_le64(src->watch.cookie);
- dst->watch.ver = cpu_to_le64(src->watch.ver);
- dst->watch.flag = src->watch.flag;
+ dst->watch.ver = cpu_to_le64(0);
+ dst->watch.op = src->watch.op;
+ dst->watch.gen = cpu_to_le32(src->watch.gen);
+ break;
+ case CEPH_OSD_OP_NOTIFY_ACK:
+ break;
+ case CEPH_OSD_OP_NOTIFY:
+ dst->notify.cookie = cpu_to_le64(src->notify.cookie);
break;
case CEPH_OSD_OP_SETALLOCHINT:
dst->alloc_hint.expected_object_size =
@@ -768,9 +866,6 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
dst->xattr.cmp_op = src->xattr.cmp_op;
dst->xattr.cmp_mode = src->xattr.cmp_mode;
- osd_data = &src->xattr.osd_data;
- ceph_osdc_msg_data_add(req->r_request, osd_data);
- request_data_len = osd_data->pagelist->length;
break;
case CEPH_OSD_OP_CREATE:
case CEPH_OSD_OP_DELETE:
@@ -787,7 +882,7 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req,
dst->flags = cpu_to_le32(src->flags);
dst->payload_len = cpu_to_le32(src->indata_len);
- return request_data_len;
+ return src->indata_len;
}
/*
@@ -824,17 +919,15 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
GFP_NOFS);
- if (!req)
- return ERR_PTR(-ENOMEM);
-
- req->r_flags = flags;
+ if (!req) {
+ r = -ENOMEM;
+ goto fail;
+ }
/* calculate max write size */
r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
- if (r < 0) {
- ceph_osdc_put_request(req);
- return ERR_PTR(r);
- }
+ if (r)
+ goto fail;
if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
osd_req_op_init(req, which, opcode, 0);
@@ -854,194 +947,71 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
truncate_size, truncate_seq);
}
+ req->r_flags = flags;
req->r_base_oloc.pool = ceph_file_layout_pg_pool(*layout);
+ ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);
- snprintf(req->r_base_oid.name, sizeof(req->r_base_oid.name),
- "%llx.%08llx", vino.ino, objnum);
- req->r_base_oid.name_len = strlen(req->r_base_oid.name);
+ req->r_snapid = vino.snap;
+ if (flags & CEPH_OSD_FLAG_WRITE)
+ req->r_data_offset = off;
+
+ r = ceph_osdc_alloc_messages(req, GFP_NOFS);
+ if (r)
+ goto fail;
return req;
+
+fail:
+ ceph_osdc_put_request(req);
+ return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
/*
* We keep osd requests in an rbtree, sorted by ->r_tid.
*/
-static void __insert_request(struct ceph_osd_client *osdc,
- struct ceph_osd_request *new)
-{
- struct rb_node **p = &osdc->requests.rb_node;
- struct rb_node *parent = NULL;
- struct ceph_osd_request *req = NULL;
-
- while (*p) {
- parent = *p;
- req = rb_entry(parent, struct ceph_osd_request, r_node);
- if (new->r_tid < req->r_tid)
- p = &(*p)->rb_left;
- else if (new->r_tid > req->r_tid)
- p = &(*p)->rb_right;
- else
- BUG();
- }
-
- rb_link_node(&new->r_node, parent, p);
- rb_insert_color(&new->r_node, &osdc->requests);
-}
-
-static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
- u64 tid)
-{
- struct ceph_osd_request *req;
- struct rb_node *n = osdc->requests.rb_node;
-
- while (n) {
- req = rb_entry(n, struct ceph_osd_request, r_node);
- if (tid < req->r_tid)
- n = n->rb_left;
- else if (tid > req->r_tid)
- n = n->rb_right;
- else
- return req;
- }
- return NULL;
-}
+DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
+DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
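
Assuming DEFINE_RB_FUNCS mirrors the open-coded helpers it replaces, the first macro above generates roughly the following (a sketch of the expansion, not the exact one):

	/* Approximate expansion of DEFINE_RB_FUNCS(request, ...): */
	static void insert_request(struct rb_root *root,
				   struct ceph_osd_request *req);
	static void erase_request(struct rb_root *root,
				  struct ceph_osd_request *req);
	static struct ceph_osd_request *lookup_request(struct rb_root *root,
						       u64 tid);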
-static struct ceph_osd_request *
-__lookup_request_ge(struct ceph_osd_client *osdc,
- u64 tid)
+static bool osd_homeless(struct ceph_osd *osd)
{
- struct ceph_osd_request *req;
- struct rb_node *n = osdc->requests.rb_node;
-
- while (n) {
- req = rb_entry(n, struct ceph_osd_request, r_node);
- if (tid < req->r_tid) {
- if (!n->rb_left)
- return req;
- n = n->rb_left;
- } else if (tid > req->r_tid) {
- n = n->rb_right;
- } else {
- return req;
- }
- }
- return NULL;
+ return osd->o_osd == CEPH_HOMELESS_OSD;
}
-static void __kick_linger_request(struct ceph_osd_request *req)
+static bool osd_registered(struct ceph_osd *osd)
{
- struct ceph_osd_client *osdc = req->r_osdc;
- struct ceph_osd *osd = req->r_osd;
-
- /*
- * Linger requests need to be resent with a new tid to avoid
- * the dup op detection logic on the OSDs. Achieve this with
- * a re-register dance instead of open-coding.
- */
- ceph_osdc_get_request(req);
- if (!list_empty(&req->r_linger_item))
- __unregister_linger_request(osdc, req);
- else
- __unregister_request(osdc, req);
- __register_request(osdc, req);
- ceph_osdc_put_request(req);
-
- /*
- * Unless request has been registered as both normal and
- * lingering, __unregister{,_linger}_request clears r_osd.
- * However, here we need to preserve r_osd to make sure we
- * requeue on the same OSD.
- */
- WARN_ON(req->r_osd || !osd);
- req->r_osd = osd;
+ verify_osdc_locked(osd->o_osdc);
- dout("%s requeueing %p tid %llu\n", __func__, req, req->r_tid);
- __enqueue_request(req);
+ return !RB_EMPTY_NODE(&osd->o_node);
}
/*
- * Resubmit requests pending on the given osd.
+ * Assumes @osd is zero-initialized.
*/
-static void __kick_osd_requests(struct ceph_osd_client *osdc,
- struct ceph_osd *osd)
+static void osd_init(struct ceph_osd *osd)
{
- struct ceph_osd_request *req, *nreq;
- LIST_HEAD(resend);
- LIST_HEAD(resend_linger);
- int err;
-
- dout("%s osd%d\n", __func__, osd->o_osd);
- err = __reset_osd(osdc, osd);
- if (err)
- return;
-
- /*
- * Build up a list of requests to resend by traversing the
- * osd's list of requests. Requests for a given object are
- * sent in tid order, and that is also the order they're
- * kept on this list. Therefore all requests that are in
- * flight will be found first, followed by all requests that
- * have not yet been sent. And to resend requests while
- * preserving this order we will want to put any sent
- * requests back on the front of the osd client's unsent
- * list.
- *
- * So we build a separate ordered list of already-sent
- * requests for the affected osd and splice it onto the
- * front of the osd client's unsent list. Once we've seen a
- * request that has not yet been sent we're done. Those
- * requests are already sitting right where they belong.
- */
- list_for_each_entry(req, &osd->o_requests, r_osd_item) {
- if (!req->r_sent)
- break;
-
- if (!req->r_linger) {
- dout("%s requeueing %p tid %llu\n", __func__, req,
- req->r_tid);
- list_move_tail(&req->r_req_lru_item, &resend);
- req->r_flags |= CEPH_OSD_FLAG_RETRY;
- } else {
- list_move_tail(&req->r_req_lru_item, &resend_linger);
- }
- }
- list_splice(&resend, &osdc->req_unsent);
-
- /*
- * Both registered and not yet registered linger requests are
- * enqueued with a new tid on the same OSD. We add/move them
- * to req_unsent/o_requests at the end to keep things in tid
- * order.
- */
- list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,
- r_linger_osd_item) {
- WARN_ON(!list_empty(&req->r_req_lru_item));
- __kick_linger_request(req);
- }
-
- list_for_each_entry_safe(req, nreq, &resend_linger, r_req_lru_item)
- __kick_linger_request(req);
+ atomic_set(&osd->o_ref, 1);
+ RB_CLEAR_NODE(&osd->o_node);
+ osd->o_requests = RB_ROOT;
+ osd->o_linger_requests = RB_ROOT;
+ INIT_LIST_HEAD(&osd->o_osd_lru);
+ INIT_LIST_HEAD(&osd->o_keepalive_item);
+ osd->o_incarnation = 1;
+ mutex_init(&osd->lock);
}
-/*
- * If the osd connection drops, we need to resubmit all requests.
- */
-static void osd_reset(struct ceph_connection *con)
+static void osd_cleanup(struct ceph_osd *osd)
{
- struct ceph_osd *osd = con->private;
- struct ceph_osd_client *osdc;
-
- if (!osd)
- return;
- dout("osd_reset osd%d\n", osd->o_osd);
- osdc = osd->o_osdc;
- down_read(&osdc->map_sem);
- mutex_lock(&osdc->request_mutex);
- __kick_osd_requests(osdc, osd);
- __send_queued(osdc);
- mutex_unlock(&osdc->request_mutex);
- up_read(&osdc->map_sem);
+ WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
+ WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
+ WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
+ WARN_ON(!list_empty(&osd->o_osd_lru));
+ WARN_ON(!list_empty(&osd->o_keepalive_item));
+
+ if (osd->o_auth.authorizer) {
+ WARN_ON(osd_homeless(osd));
+ ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
+ }
}
/*
@@ -1051,22 +1021,15 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
struct ceph_osd *osd;
- osd = kzalloc(sizeof(*osd), GFP_NOFS);
- if (!osd)
- return NULL;
+ WARN_ON(onum == CEPH_HOMELESS_OSD);
- atomic_set(&osd->o_ref, 1);
+ osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
+ osd_init(osd);
osd->o_osdc = osdc;
osd->o_osd = onum;
- RB_CLEAR_NODE(&osd->o_node);
- INIT_LIST_HEAD(&osd->o_requests);
- INIT_LIST_HEAD(&osd->o_linger_requests);
- INIT_LIST_HEAD(&osd->o_osd_lru);
- osd->o_incarnation = 1;
ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
- INIT_LIST_HEAD(&osd->o_keepalive_item);
return osd;
}
@@ -1087,114 +1050,115 @@ static void put_osd(struct ceph_osd *osd)
dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
atomic_read(&osd->o_ref) - 1);
if (atomic_dec_and_test(&osd->o_ref)) {
- if (osd->o_auth.authorizer)
- ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
+ osd_cleanup(osd);
kfree(osd);
}
}
-/*
- * remove an osd from our map
- */
-static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
-{
- dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
- WARN_ON(!list_empty(&osd->o_requests));
- WARN_ON(!list_empty(&osd->o_linger_requests));
+DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)
- list_del_init(&osd->o_osd_lru);
- rb_erase(&osd->o_node, &osdc->osds);
- RB_CLEAR_NODE(&osd->o_node);
-}
-
-static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
+static void __move_osd_to_lru(struct ceph_osd *osd)
{
- dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
-
- if (!RB_EMPTY_NODE(&osd->o_node)) {
- ceph_con_close(&osd->o_con);
- __remove_osd(osdc, osd);
- put_osd(osd);
- }
-}
-
-static void remove_all_osds(struct ceph_osd_client *osdc)
-{
- dout("%s %p\n", __func__, osdc);
- mutex_lock(&osdc->request_mutex);
- while (!RB_EMPTY_ROOT(&osdc->osds)) {
- struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
- struct ceph_osd, o_node);
- remove_osd(osdc, osd);
- }
- mutex_unlock(&osdc->request_mutex);
-}
+ struct ceph_osd_client *osdc = osd->o_osdc;
-static void __move_osd_to_lru(struct ceph_osd_client *osdc,
- struct ceph_osd *osd)
-{
- dout("%s %p\n", __func__, osd);
+ dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
BUG_ON(!list_empty(&osd->o_osd_lru));
+ spin_lock(&osdc->osd_lru_lock);
list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
+ spin_unlock(&osdc->osd_lru_lock);
+
osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}
-static void maybe_move_osd_to_lru(struct ceph_osd_client *osdc,
- struct ceph_osd *osd)
+static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
- dout("%s %p\n", __func__, osd);
-
- if (list_empty(&osd->o_requests) &&
- list_empty(&osd->o_linger_requests))
- __move_osd_to_lru(osdc, osd);
+ if (RB_EMPTY_ROOT(&osd->o_requests) &&
+ RB_EMPTY_ROOT(&osd->o_linger_requests))
+ __move_osd_to_lru(osd);
}
static void __remove_osd_from_lru(struct ceph_osd *osd)
{
- dout("__remove_osd_from_lru %p\n", osd);
+ struct ceph_osd_client *osdc = osd->o_osdc;
+
+ dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
+
+ spin_lock(&osdc->osd_lru_lock);
if (!list_empty(&osd->o_osd_lru))
list_del_init(&osd->o_osd_lru);
+ spin_unlock(&osdc->osd_lru_lock);
}
-static void remove_old_osds(struct ceph_osd_client *osdc)
+/*
+ * Close the connection and assign any leftover requests to the
+ * homeless session.
+ */
+static void close_osd(struct ceph_osd *osd)
{
- struct ceph_osd *osd, *nosd;
+ struct ceph_osd_client *osdc = osd->o_osdc;
+ struct rb_node *n;
- dout("__remove_old_osds %p\n", osdc);
- mutex_lock(&osdc->request_mutex);
- list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
- if (time_before(jiffies, osd->lru_ttl))
- break;
- remove_osd(osdc, osd);
+ verify_osdc_wrlocked(osdc);
+ dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
+
+ ceph_con_close(&osd->o_con);
+
+ for (n = rb_first(&osd->o_requests); n; ) {
+ struct ceph_osd_request *req =
+ rb_entry(n, struct ceph_osd_request, r_node);
+
+ n = rb_next(n); /* unlink_request() */
+
+ dout(" reassigning req %p tid %llu\n", req, req->r_tid);
+ unlink_request(osd, req);
+ link_request(&osdc->homeless_osd, req);
+ }
+ for (n = rb_first(&osd->o_linger_requests); n; ) {
+ struct ceph_osd_linger_request *lreq =
+ rb_entry(n, struct ceph_osd_linger_request, node);
+
+ n = rb_next(n); /* unlink_linger() */
+
+ dout(" reassigning lreq %p linger_id %llu\n", lreq,
+ lreq->linger_id);
+ unlink_linger(osd, lreq);
+ link_linger(&osdc->homeless_osd, lreq);
}
- mutex_unlock(&osdc->request_mutex);
+
+ __remove_osd_from_lru(osd);
+ erase_osd(&osdc->osds, osd);
+ put_osd(osd);
}
/*
* reset osd connect
*/
-static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
+static int reopen_osd(struct ceph_osd *osd)
{
struct ceph_entity_addr *peer_addr;
- dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
- if (list_empty(&osd->o_requests) &&
- list_empty(&osd->o_linger_requests)) {
- remove_osd(osdc, osd);
+ dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
+
+ if (RB_EMPTY_ROOT(&osd->o_requests) &&
+ RB_EMPTY_ROOT(&osd->o_linger_requests)) {
+ close_osd(osd);
return -ENODEV;
}
- peer_addr = &osdc->osdmap->osd_addr[osd->o_osd];
+ peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
!ceph_con_opened(&osd->o_con)) {
- struct ceph_osd_request *req;
+ struct rb_node *n;
dout("osd addr hasn't changed and connection never opened, "
"letting msgr retry\n");
/* touch each r_stamp for handle_timeout()'s benefit */
- list_for_each_entry(req, &osd->o_requests, r_osd_item)
+ for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
+ struct ceph_osd_request *req =
+ rb_entry(n, struct ceph_osd_request, r_node);
req->r_stamp = jiffies;
+ }
return -EAGAIN;
}
@@ -1206,455 +1170,1370 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
return 0;
}
-static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
+static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
+ bool wrlocked)
{
- struct rb_node **p = &osdc->osds.rb_node;
- struct rb_node *parent = NULL;
- struct ceph_osd *osd = NULL;
+ struct ceph_osd *osd;
- dout("__insert_osd %p osd%d\n", new, new->o_osd);
- while (*p) {
- parent = *p;
- osd = rb_entry(parent, struct ceph_osd, o_node);
- if (new->o_osd < osd->o_osd)
- p = &(*p)->rb_left;
- else if (new->o_osd > osd->o_osd)
- p = &(*p)->rb_right;
- else
- BUG();
+ if (wrlocked)
+ verify_osdc_wrlocked(osdc);
+ else
+ verify_osdc_locked(osdc);
+
+ if (o != CEPH_HOMELESS_OSD)
+ osd = lookup_osd(&osdc->osds, o);
+ else
+ osd = &osdc->homeless_osd;
+ if (!osd) {
+ if (!wrlocked)
+ return ERR_PTR(-EAGAIN);
+
+ osd = create_osd(osdc, o);
+ insert_osd(&osdc->osds, osd);
+ ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
+ &osdc->osdmap->osd_addr[osd->o_osd]);
}
- rb_link_node(&new->o_node, parent, p);
- rb_insert_color(&new->o_node, &osdc->osds);
+ dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
+ return osd;
}
-static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
+/*
+ * Create request <-> OSD session relation.
+ *
+ * @req has to be assigned a tid, @osd may be homeless.
+ */
+static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
- struct ceph_osd *osd;
- struct rb_node *n = osdc->osds.rb_node;
-
- while (n) {
- osd = rb_entry(n, struct ceph_osd, o_node);
- if (o < osd->o_osd)
- n = n->rb_left;
- else if (o > osd->o_osd)
- n = n->rb_right;
- else
- return osd;
- }
- return NULL;
+ verify_osd_locked(osd);
+ WARN_ON(!req->r_tid || req->r_osd);
+ dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
+ req, req->r_tid);
+
+ if (!osd_homeless(osd))
+ __remove_osd_from_lru(osd);
+ else
+ atomic_inc(&osd->o_osdc->num_homeless);
+
+ get_osd(osd);
+ insert_request(&osd->o_requests, req);
+ req->r_osd = osd;
}
-static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
+static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
- schedule_delayed_work(&osdc->timeout_work,
- osdc->client->options->osd_keepalive_timeout);
+ verify_osd_locked(osd);
+ WARN_ON(req->r_osd != osd);
+ dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
+ req, req->r_tid);
+
+ req->r_osd = NULL;
+ erase_request(&osd->o_requests, req);
+ put_osd(osd);
+
+ if (!osd_homeless(osd))
+ maybe_move_osd_to_lru(osd);
+ else
+ atomic_dec(&osd->o_osdc->num_homeless);
}
-static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
+static bool __pool_full(struct ceph_pg_pool_info *pi)
{
- cancel_delayed_work(&osdc->timeout_work);
+ return pi->flags & CEPH_POOL_FLAG_FULL;
}
-/*
- * Register request, assign tid. If this is the first request, set up
- * the timeout event.
- */
-static void __register_request(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req)
+static bool have_pool_full(struct ceph_osd_client *osdc)
{
- req->r_tid = ++osdc->last_tid;
- req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
- dout("__register_request %p tid %lld\n", req, req->r_tid);
- __insert_request(osdc, req);
- ceph_osdc_get_request(req);
- osdc->num_requests++;
- if (osdc->num_requests == 1) {
- dout(" first request, scheduling timeout\n");
- __schedule_osd_timeout(osdc);
+ struct rb_node *n;
+
+ for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
+ struct ceph_pg_pool_info *pi =
+ rb_entry(n, struct ceph_pg_pool_info, node);
+
+ if (__pool_full(pi))
+ return true;
}
+
+ return false;
+}
+
+static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
+{
+ struct ceph_pg_pool_info *pi;
+
+ pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
+ if (!pi)
+ return false;
+
+ return __pool_full(pi);
}
/*
- * called under osdc->request_mutex
+ * Returns whether a request should be blocked from being sent
+ * based on the current osdmap and osd_client settings.
*/
-static void __unregister_request(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req)
+static bool target_should_be_paused(struct ceph_osd_client *osdc,
+ const struct ceph_osd_request_target *t,
+ struct ceph_pg_pool_info *pi)
{
- if (RB_EMPTY_NODE(&req->r_node)) {
- dout("__unregister_request %p tid %lld not registered\n",
- req, req->r_tid);
- return;
+ bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
+ bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
+ ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+ __pool_full(pi);
+
+ WARN_ON(pi->id != t->base_oloc.pool);
+ return (t->flags & CEPH_OSD_FLAG_READ && pauserd) ||
+ (t->flags & CEPH_OSD_FLAG_WRITE && pausewr);
+}
+
+enum calc_target_result {
+ CALC_TARGET_NO_ACTION = 0,
+ CALC_TARGET_NEED_RESEND,
+ CALC_TARGET_POOL_DNE,
+};
+
+static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
+ struct ceph_osd_request_target *t,
+ u32 *last_force_resend,
+ bool any_change)
+{
+ struct ceph_pg_pool_info *pi;
+ struct ceph_pg pgid, last_pgid;
+ struct ceph_osds up, acting;
+ bool force_resend = false;
+ bool need_check_tiering = false;
+ bool need_resend = false;
+ bool sort_bitwise = ceph_osdmap_flag(osdc->osdmap,
+ CEPH_OSDMAP_SORTBITWISE);
+ enum calc_target_result ct_res;
+ int ret;
+
+ pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
+ if (!pi) {
+ t->osd = CEPH_HOMELESS_OSD;
+ ct_res = CALC_TARGET_POOL_DNE;
+ goto out;
}
- dout("__unregister_request %p tid %lld\n", req, req->r_tid);
- rb_erase(&req->r_node, &osdc->requests);
- RB_CLEAR_NODE(&req->r_node);
- osdc->num_requests--;
+ if (osdc->osdmap->epoch == pi->last_force_request_resend) {
+ if (last_force_resend &&
+ *last_force_resend < pi->last_force_request_resend) {
+ *last_force_resend = pi->last_force_request_resend;
+ force_resend = true;
+ } else if (!last_force_resend) {
+ force_resend = true;
+ }
+ }
+ if (ceph_oid_empty(&t->target_oid) || force_resend) {
+ ceph_oid_copy(&t->target_oid, &t->base_oid);
+ need_check_tiering = true;
+ }
+ if (ceph_oloc_empty(&t->target_oloc) || force_resend) {
+ ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
+ need_check_tiering = true;
+ }
- if (req->r_osd) {
- /* make sure the original request isn't in flight. */
- ceph_msg_revoke(req->r_request);
+ if (need_check_tiering &&
+ (t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
+ if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
+ t->target_oloc.pool = pi->read_tier;
+ if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
+ t->target_oloc.pool = pi->write_tier;
+ }
- list_del_init(&req->r_osd_item);
- maybe_move_osd_to_lru(osdc, req->r_osd);
- if (list_empty(&req->r_linger_osd_item))
- req->r_osd = NULL;
+ ret = ceph_object_locator_to_pg(osdc->osdmap, &t->target_oid,
+ &t->target_oloc, &pgid);
+ if (ret) {
+ WARN_ON(ret != -ENOENT);
+ t->osd = CEPH_HOMELESS_OSD;
+ ct_res = CALC_TARGET_POOL_DNE;
+ goto out;
+ }
+ last_pgid.pool = pgid.pool;
+ last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);
+
+ ceph_pg_to_up_acting_osds(osdc->osdmap, &pgid, &up, &acting);
+ if (any_change &&
+ ceph_is_new_interval(&t->acting,
+ &acting,
+ &t->up,
+ &up,
+ t->size,
+ pi->size,
+ t->min_size,
+ pi->min_size,
+ t->pg_num,
+ pi->pg_num,
+ t->sort_bitwise,
+ sort_bitwise,
+ &last_pgid))
+ force_resend = true;
+
+ if (t->paused && !target_should_be_paused(osdc, t, pi)) {
+ t->paused = false;
+ need_resend = true;
}
- list_del_init(&req->r_req_lru_item);
- ceph_osdc_put_request(req);
+ if (ceph_pg_compare(&t->pgid, &pgid) ||
+ ceph_osds_changed(&t->acting, &acting, any_change) ||
+ force_resend) {
+ t->pgid = pgid; /* struct */
+ ceph_osds_copy(&t->acting, &acting);
+ ceph_osds_copy(&t->up, &up);
+ t->size = pi->size;
+ t->min_size = pi->min_size;
+ t->pg_num = pi->pg_num;
+ t->pg_num_mask = pi->pg_num_mask;
+ t->sort_bitwise = sort_bitwise;
+
+ t->osd = acting.primary;
+ need_resend = true;
+ }
+
+ ct_res = need_resend ? CALC_TARGET_NEED_RESEND : CALC_TARGET_NO_ACTION;
+out:
+ dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
+ return ct_res;
+}
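
The caller contract implied by calc_target_result, sketched here for reference (compare __submit_request() below):

	switch (calc_target(osdc, &req->r_t, &req->r_last_force_resend, false)) {
	case CALC_TARGET_NO_ACTION:
		break;                  /* mapping unchanged, nothing to do */
	case CALC_TARGET_NEED_RESEND:
		/* (re)send to the OSD now named in req->r_t.osd */
		break;
	case CALC_TARGET_POOL_DNE:
		send_map_check(req);    /* the pool may have been deleted */
		break;
	}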
+
+static void setup_request_data(struct ceph_osd_request *req,
+ struct ceph_msg *msg)
+{
+ u32 data_len = 0;
+ int i;
+
+ if (!list_empty(&msg->data))
+ return;
+
+ WARN_ON(msg->data_length);
+ for (i = 0; i < req->r_num_ops; i++) {
+ struct ceph_osd_req_op *op = &req->r_ops[i];
+
+ switch (op->op) {
+ /* request */
+ case CEPH_OSD_OP_WRITE:
+ case CEPH_OSD_OP_WRITEFULL:
+ WARN_ON(op->indata_len != op->extent.length);
+ ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
+ break;
+ case CEPH_OSD_OP_SETXATTR:
+ case CEPH_OSD_OP_CMPXATTR:
+ WARN_ON(op->indata_len != op->xattr.name_len +
+ op->xattr.value_len);
+ ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
+ break;
+ case CEPH_OSD_OP_NOTIFY_ACK:
+ ceph_osdc_msg_data_add(msg,
+ &op->notify_ack.request_data);
+ break;
+
+ /* reply */
+ case CEPH_OSD_OP_STAT:
+ ceph_osdc_msg_data_add(req->r_reply,
+ &op->raw_data_in);
+ break;
+ case CEPH_OSD_OP_READ:
+ ceph_osdc_msg_data_add(req->r_reply,
+ &op->extent.osd_data);
+ break;
+
+ /* both */
+ case CEPH_OSD_OP_CALL:
+ WARN_ON(op->indata_len != op->cls.class_len +
+ op->cls.method_len +
+ op->cls.indata_len);
+ ceph_osdc_msg_data_add(msg, &op->cls.request_info);
+ /* optional, can be NONE */
+ ceph_osdc_msg_data_add(msg, &op->cls.request_data);
+ /* optional, can be NONE */
+ ceph_osdc_msg_data_add(req->r_reply,
+ &op->cls.response_data);
+ break;
+ case CEPH_OSD_OP_NOTIFY:
+ ceph_osdc_msg_data_add(msg,
+ &op->notify.request_data);
+ ceph_osdc_msg_data_add(req->r_reply,
+ &op->notify.response_data);
+ break;
+ }
+
+ data_len += op->indata_len;
+ }
+
+ WARN_ON(data_len != msg->data_length);
+}
+
+static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg)
+{
+ void *p = msg->front.iov_base;
+ void *const end = p + msg->front_alloc_len;
+ u32 data_len = 0;
+ int i;
+
+ if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
+ /* snapshots aren't writeable */
+ WARN_ON(req->r_snapid != CEPH_NOSNAP);
+ } else {
+ WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
+ req->r_data_offset || req->r_snapc);
+ }
+
+ setup_request_data(req, msg);
+
+ ceph_encode_32(&p, 1); /* client_inc, always 1 */
+ ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
+ ceph_encode_32(&p, req->r_flags);
+ ceph_encode_timespec(p, &req->r_mtime);
+ p += sizeof(struct ceph_timespec);
+ /* aka reassert_version */
+ memcpy(p, &req->r_replay_version, sizeof(req->r_replay_version));
+ p += sizeof(req->r_replay_version);
+
+ /* oloc */
+ ceph_encode_8(&p, 4);
+ ceph_encode_8(&p, 4);
+ ceph_encode_32(&p, 8 + 4 + 4);
+ ceph_encode_64(&p, req->r_t.target_oloc.pool);
+ ceph_encode_32(&p, -1); /* preferred */
+ ceph_encode_32(&p, 0); /* key len */
+
+ /* pgid */
+ ceph_encode_8(&p, 1);
+ ceph_encode_64(&p, req->r_t.pgid.pool);
+ ceph_encode_32(&p, req->r_t.pgid.seed);
+ ceph_encode_32(&p, -1); /* preferred */
+
+ /* oid */
+ ceph_encode_32(&p, req->r_t.target_oid.name_len);
+ memcpy(p, req->r_t.target_oid.name, req->r_t.target_oid.name_len);
+ p += req->r_t.target_oid.name_len;
- if (osdc->num_requests == 0) {
- dout(" no requests, canceling timeout\n");
- __cancel_osd_timeout(osdc);
+ /* ops, can imply data */
+ ceph_encode_16(&p, req->r_num_ops);
+ for (i = 0; i < req->r_num_ops; i++) {
+ data_len += osd_req_encode_op(p, &req->r_ops[i]);
+ p += sizeof(struct ceph_osd_op);
}
+
+ ceph_encode_64(&p, req->r_snapid); /* snapid */
+ if (req->r_snapc) {
+ ceph_encode_64(&p, req->r_snapc->seq);
+ ceph_encode_32(&p, req->r_snapc->num_snaps);
+ for (i = 0; i < req->r_snapc->num_snaps; i++)
+ ceph_encode_64(&p, req->r_snapc->snaps[i]);
+ } else {
+ ceph_encode_64(&p, 0); /* snap_seq */
+ ceph_encode_32(&p, 0); /* snaps len */
+ }
+
+ ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
+
+ BUG_ON(p > end);
+ msg->front.iov_len = p - msg->front.iov_base;
+ msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
+ msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
+ msg->hdr.data_len = cpu_to_le32(data_len);
+ /*
+ * The header "data_off" is a hint to the receiver allowing it
+ * to align received data into its buffers such that there's no
+ * need to re-copy it before writing it to disk (direct I/O).
+ */
+ msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
+
+ dout("%s req %p oid %*pE oid_len %d front %zu data %u\n", __func__,
+ req, req->r_t.target_oid.name_len, req->r_t.target_oid.name,
+ req->r_t.target_oid.name_len, msg->front.iov_len, data_len);
}
/*
- * Cancel a previously queued request message
+ * @req has to be assigned a tid and registered.
*/
-static void __cancel_request(struct ceph_osd_request *req)
+static void send_request(struct ceph_osd_request *req)
{
- if (req->r_sent && req->r_osd) {
+ struct ceph_osd *osd = req->r_osd;
+
+ verify_osd_locked(osd);
+ WARN_ON(osd->o_osd != req->r_t.osd);
+
+ /*
+ * We may have a previously queued request message hanging
+ * around. Cancel it to avoid corrupting the msgr.
+ */
+ if (req->r_sent)
ceph_msg_revoke(req->r_request);
- req->r_sent = 0;
+
+ req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
+ if (req->r_attempts)
+ req->r_flags |= CEPH_OSD_FLAG_RETRY;
+ else
+ WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);
+
+ encode_request(req, req->r_request);
+
+ dout("%s req %p tid %llu to pg %llu.%x osd%d flags 0x%x attempt %d\n",
+ __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
+ req->r_t.osd, req->r_flags, req->r_attempts);
+
+ req->r_t.paused = false;
+ req->r_stamp = jiffies;
+ req->r_attempts++;
+
+ req->r_sent = osd->o_incarnation;
+ req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
+ ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
+}
+
+static void maybe_request_map(struct ceph_osd_client *osdc)
+{
+ bool continuous = false;
+
+ verify_osdc_locked(osdc);
+ WARN_ON(!osdc->osdmap->epoch);
+
+ if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+ ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) ||
+ ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) {
+ dout("%s osdc %p continuous\n", __func__, osdc);
+ continuous = true;
+ } else {
+ dout("%s osdc %p onetime\n", __func__, osdc);
}
+
+ if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
+ osdc->osdmap->epoch + 1, continuous))
+ ceph_monc_renew_subs(&osdc->client->monc);
}
-static void __register_linger_request(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req)
+static void send_map_check(struct ceph_osd_request *req);
+
+static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
{
- dout("%s %p tid %llu\n", __func__, req, req->r_tid);
- WARN_ON(!req->r_linger);
+ struct ceph_osd_client *osdc = req->r_osdc;
+ struct ceph_osd *osd;
+ enum calc_target_result ct_res;
+ bool need_send = false;
+ bool promoted = false;
+
+ WARN_ON(req->r_tid || req->r_got_reply);
+ dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
+
+again:
+ ct_res = calc_target(osdc, &req->r_t, &req->r_last_force_resend, false);
+ if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
+ goto promote;
+
+ osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
+ if (IS_ERR(osd)) {
+ WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
+ goto promote;
+ }
+
+ if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
+ ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) {
+ dout("req %p pausewr\n", req);
+ req->r_t.paused = true;
+ maybe_request_map(osdc);
+ } else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
+ ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) {
+ dout("req %p pauserd\n", req);
+ req->r_t.paused = true;
+ maybe_request_map(osdc);
+ } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
+ !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
+ CEPH_OSD_FLAG_FULL_FORCE)) &&
+ (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+ pool_full(osdc, req->r_t.base_oloc.pool))) {
+ dout("req %p full/pool_full\n", req);
+ pr_warn_ratelimited("FULL or reached pool quota\n");
+ req->r_t.paused = true;
+ maybe_request_map(osdc);
+ } else if (!osd_homeless(osd)) {
+ need_send = true;
+ } else {
+ maybe_request_map(osdc);
+ }
+
+ mutex_lock(&osd->lock);
+ /*
+ * Assign the tid atomically with send_request() to protect
+ * multiple writes to the same object from racing with each
+ * other, resulting in out of order ops on the OSDs.
+ */
+ req->r_tid = atomic64_inc_return(&osdc->last_tid);
+ link_request(osd, req);
+ if (need_send)
+ send_request(req);
+ mutex_unlock(&osd->lock);
+ if (ct_res == CALC_TARGET_POOL_DNE)
+ send_map_check(req);
+
+ if (promoted)
+ downgrade_write(&osdc->lock);
+ return;
+
+promote:
+ up_read(&osdc->lock);
+ down_write(&osdc->lock);
+ wrlocked = true;
+ promoted = true;
+ goto again;
+}
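
A short reading of the promote: path above (an interpretation, not authoritative):

	/*
	 * __submit_request() is entered with osdc->lock held for read.
	 * Creating an OSD session or handling a vanished pool needs the
	 * write lock, so we retry under it and then downgrade_write(),
	 * keeping the caller's eventual up_read() correctly paired.
	 */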
+
+static void account_request(struct ceph_osd_request *req)
+{
+ unsigned int mask = CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK;
+
+ if (req->r_flags & CEPH_OSD_FLAG_READ) {
+ WARN_ON(req->r_flags & mask);
+ req->r_flags |= CEPH_OSD_FLAG_ACK;
+ } else if (req->r_flags & CEPH_OSD_FLAG_WRITE)
+ WARN_ON(!(req->r_flags & mask));
+ else
+ WARN_ON(1);
+
+ WARN_ON(req->r_unsafe_callback && (req->r_flags & mask) != mask);
+ atomic_inc(&req->r_osdc->num_requests);
+}
+
+static void submit_request(struct ceph_osd_request *req, bool wrlocked)
+{
ceph_osdc_get_request(req);
- list_add_tail(&req->r_linger_item, &osdc->req_linger);
- if (req->r_osd)
- list_add_tail(&req->r_linger_osd_item,
- &req->r_osd->o_linger_requests);
+ account_request(req);
+ __submit_request(req, wrlocked);
}
-static void __unregister_linger_request(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req)
+static void __finish_request(struct ceph_osd_request *req)
{
- WARN_ON(!req->r_linger);
+ struct ceph_osd_client *osdc = req->r_osdc;
+ struct ceph_osd *osd = req->r_osd;
- if (list_empty(&req->r_linger_item)) {
- dout("%s %p tid %llu not registered\n", __func__, req,
- req->r_tid);
+ verify_osd_locked(osd);
+ dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
+
+ WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
+ unlink_request(osd, req);
+ atomic_dec(&osdc->num_requests);
+
+ /*
+ * If an OSD has failed or returned and a request has been sent
+ * twice, it's possible to get a reply and end up here while the
+ * request message is queued for delivery. We will ignore the
+ * reply, so not a big deal, but better to try and catch it.
+ */
+ ceph_msg_revoke(req->r_request);
+ ceph_msg_revoke_incoming(req->r_reply);
+}
+
+static void finish_request(struct ceph_osd_request *req)
+{
+ __finish_request(req);
+ ceph_osdc_put_request(req);
+}
+
+static void __complete_request(struct ceph_osd_request *req)
+{
+ if (req->r_callback)
+ req->r_callback(req);
+ else
+ complete_all(&req->r_completion);
+}
+
+/*
+ * Note that this is open-coded in handle_reply(), which has to deal
+ * with ack vs commit, dup acks, etc.
+ */
+static void complete_request(struct ceph_osd_request *req, int err)
+{
+ dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
+
+ req->r_result = err;
+ __finish_request(req);
+ __complete_request(req);
+ complete_all(&req->r_safe_completion);
+ ceph_osdc_put_request(req);
+}
+
+static void cancel_map_check(struct ceph_osd_request *req)
+{
+ struct ceph_osd_client *osdc = req->r_osdc;
+ struct ceph_osd_request *lookup_req;
+
+ verify_osdc_wrlocked(osdc);
+
+ lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
+ if (!lookup_req)
return;
+
+ WARN_ON(lookup_req != req);
+ erase_request_mc(&osdc->map_checks, req);
+ ceph_osdc_put_request(req);
+}
+
+static void cancel_request(struct ceph_osd_request *req)
+{
+ dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
+
+ cancel_map_check(req);
+ finish_request(req);
+}
+
+static void check_pool_dne(struct ceph_osd_request *req)
+{
+ struct ceph_osd_client *osdc = req->r_osdc;
+ struct ceph_osdmap *map = osdc->osdmap;
+
+ verify_osdc_wrlocked(osdc);
+ WARN_ON(!map->epoch);
+
+ if (req->r_attempts) {
+ /*
+ * We sent a request earlier, which means that
+ * previously the pool existed, and now it does not
+ * (i.e., it was deleted).
+ */
+ req->r_map_dne_bound = map->epoch;
+ dout("%s req %p tid %llu pool disappeared\n", __func__, req,
+ req->r_tid);
+ } else {
+ dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
+ req, req->r_tid, req->r_map_dne_bound, map->epoch);
}
- dout("%s %p tid %llu\n", __func__, req, req->r_tid);
- list_del_init(&req->r_linger_item);
+ if (req->r_map_dne_bound) {
+ if (map->epoch >= req->r_map_dne_bound) {
+ /* we had a new enough map */
+ pr_info_ratelimited("tid %llu pool does not exist\n",
+ req->r_tid);
+ complete_request(req, -ENOENT);
+ }
+ } else {
+ send_map_check(req);
+ }
+}
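
A hedged timeline for r_map_dne_bound, as read from the code above:

	/*
	 *   epoch N    request sent, so the pool existed at N
	 *   epoch N+k  calc_target() returns CALC_TARGET_POOL_DNE;
	 *              r_map_dne_bound = N+k ("gone by this epoch")
	 *   map >= N+k pool is known deleted; complete with -ENOENT
	 */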
- if (req->r_osd) {
- list_del_init(&req->r_linger_osd_item);
- maybe_move_osd_to_lru(osdc, req->r_osd);
- if (list_empty(&req->r_osd_item))
- req->r_osd = NULL;
+static void map_check_cb(struct ceph_mon_generic_request *greq)
+{
+ struct ceph_osd_client *osdc = &greq->monc->client->osdc;
+ struct ceph_osd_request *req;
+ u64 tid = greq->private_data;
+
+ WARN_ON(greq->result || !greq->u.newest);
+
+ down_write(&osdc->lock);
+ req = lookup_request_mc(&osdc->map_checks, tid);
+ if (!req) {
+ dout("%s tid %llu dne\n", __func__, tid);
+ goto out_unlock;
}
+
+ dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
+ req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
+ if (!req->r_map_dne_bound)
+ req->r_map_dne_bound = greq->u.newest;
+ erase_request_mc(&osdc->map_checks, req);
+ check_pool_dne(req);
+
ceph_osdc_put_request(req);
+out_unlock:
+ up_write(&osdc->lock);
}
-void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req)
+static void send_map_check(struct ceph_osd_request *req)
{
- if (!req->r_linger) {
- dout("set_request_linger %p\n", req);
- req->r_linger = 1;
+ struct ceph_osd_client *osdc = req->r_osdc;
+ struct ceph_osd_request *lookup_req;
+ int ret;
+
+ verify_osdc_wrlocked(osdc);
+
+ lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
+ if (lookup_req) {
+ WARN_ON(lookup_req != req);
+ return;
}
+
+ ceph_osdc_get_request(req);
+ insert_request_mc(&osdc->map_checks, req);
+ ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
+ map_check_cb, req->r_tid);
+ WARN_ON(ret);
}
-EXPORT_SYMBOL(ceph_osdc_set_request_linger);
/*
- * Returns whether a request should be blocked from being sent
- * based on the current osdmap and osd_client settings.
- *
- * Caller should hold map_sem for read.
+ * lingering requests, watch/notify v2 infrastructure
*/
-static bool __req_should_be_paused(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req)
+static void linger_release(struct kref *kref)
{
- bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
- bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
- ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
- return (req->r_flags & CEPH_OSD_FLAG_READ && pauserd) ||
- (req->r_flags & CEPH_OSD_FLAG_WRITE && pausewr);
+ struct ceph_osd_linger_request *lreq =
+ container_of(kref, struct ceph_osd_linger_request, kref);
+
+ dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
+ lreq->reg_req, lreq->ping_req);
+ WARN_ON(!RB_EMPTY_NODE(&lreq->node));
+ WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
+ WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
+ WARN_ON(!list_empty(&lreq->scan_item));
+ WARN_ON(!list_empty(&lreq->pending_lworks));
+ WARN_ON(lreq->osd);
+
+ if (lreq->reg_req)
+ ceph_osdc_put_request(lreq->reg_req);
+ if (lreq->ping_req)
+ ceph_osdc_put_request(lreq->ping_req);
+ target_destroy(&lreq->t);
+ kfree(lreq);
}
+static void linger_put(struct ceph_osd_linger_request *lreq)
+{
+ if (lreq)
+ kref_put(&lreq->kref, linger_release);
+}
+
+static struct ceph_osd_linger_request *
+linger_get(struct ceph_osd_linger_request *lreq)
+{
+ kref_get(&lreq->kref);
+ return lreq;
+}
+
+static struct ceph_osd_linger_request *
+linger_alloc(struct ceph_osd_client *osdc)
+{
+ struct ceph_osd_linger_request *lreq;
+
+ lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
+ if (!lreq)
+ return NULL;
+
+ kref_init(&lreq->kref);
+ mutex_init(&lreq->lock);
+ RB_CLEAR_NODE(&lreq->node);
+ RB_CLEAR_NODE(&lreq->osdc_node);
+ RB_CLEAR_NODE(&lreq->mc_node);
+ INIT_LIST_HEAD(&lreq->scan_item);
+ INIT_LIST_HEAD(&lreq->pending_lworks);
+ init_completion(&lreq->reg_commit_wait);
+ init_completion(&lreq->notify_finish_wait);
+
+ lreq->osdc = osdc;
+ target_init(&lreq->t);
+
+ dout("%s lreq %p\n", __func__, lreq);
+ return lreq;
+}
+
+DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
+DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
+DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
+
/*
- * Calculate mapping of a request to a PG. Takes tiering into account.
+ * Create linger request <-> OSD session relation.
+ *
+ * @lreq has to be registered, @osd may be homeless.
*/
-static int __calc_request_pg(struct ceph_osdmap *osdmap,
- struct ceph_osd_request *req,
- struct ceph_pg *pg_out)
+static void link_linger(struct ceph_osd *osd,
+ struct ceph_osd_linger_request *lreq)
{
- bool need_check_tiering;
+ verify_osd_locked(osd);
+ WARN_ON(!lreq->linger_id || lreq->osd);
+ dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
+ osd->o_osd, lreq, lreq->linger_id);
- need_check_tiering = false;
- if (req->r_target_oloc.pool == -1) {
- req->r_target_oloc = req->r_base_oloc; /* struct */
- need_check_tiering = true;
+ if (!osd_homeless(osd))
+ __remove_osd_from_lru(osd);
+ else
+ atomic_inc(&osd->o_osdc->num_homeless);
+
+ get_osd(osd);
+ insert_linger(&osd->o_linger_requests, lreq);
+ lreq->osd = osd;
+}
+
+static void unlink_linger(struct ceph_osd *osd,
+ struct ceph_osd_linger_request *lreq)
+{
+ verify_osd_locked(osd);
+ WARN_ON(lreq->osd != osd);
+ dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
+ osd->o_osd, lreq, lreq->linger_id);
+
+ lreq->osd = NULL;
+ erase_linger(&osd->o_linger_requests, lreq);
+ put_osd(osd);
+
+ if (!osd_homeless(osd))
+ maybe_move_osd_to_lru(osd);
+ else
+ atomic_dec(&osd->o_osdc->num_homeless);
+}
+
+static bool __linger_registered(struct ceph_osd_linger_request *lreq)
+{
+ verify_osdc_locked(lreq->osdc);
+
+ return !RB_EMPTY_NODE(&lreq->osdc_node);
+}
+
+static bool linger_registered(struct ceph_osd_linger_request *lreq)
+{
+ struct ceph_osd_client *osdc = lreq->osdc;
+ bool registered;
+
+ down_read(&osdc->lock);
+ registered = __linger_registered(lreq);
+ up_read(&osdc->lock);
+
+ return registered;
+}
+
+static void linger_register(struct ceph_osd_linger_request *lreq)
+{
+ struct ceph_osd_client *osdc = lreq->osdc;
+
+ verify_osdc_wrlocked(osdc);
+ WARN_ON(lreq->linger_id);
+
+ linger_get(lreq);
+ lreq->linger_id = ++osdc->last_linger_id;
+ insert_linger_osdc(&osdc->linger_requests, lreq);
+}
+
+static void linger_unregister(struct ceph_osd_linger_request *lreq)
+{
+ struct ceph_osd_client *osdc = lreq->osdc;
+
+ verify_osdc_wrlocked(osdc);
+
+ erase_linger_osdc(&osdc->linger_requests, lreq);
+ linger_put(lreq);
+}
+
+static void cancel_linger_request(struct ceph_osd_request *req)
+{
+ struct ceph_osd_linger_request *lreq = req->r_priv;
+
+ WARN_ON(!req->r_linger);
+ cancel_request(req);
+ linger_put(lreq);
+}
+
+struct linger_work {
+ struct work_struct work;
+ struct ceph_osd_linger_request *lreq;
+ struct list_head pending_item;
+ unsigned long queued_stamp;
+
+ union {
+ struct {
+ u64 notify_id;
+ u64 notifier_id;
+ void *payload; /* points into @msg front */
+ size_t payload_len;
+
+ struct ceph_msg *msg; /* for ceph_msg_put() */
+ } notify;
+ struct {
+ int err;
+ } error;
+ };
+};
+
+static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
+ work_func_t workfn)
+{
+ struct linger_work *lwork;
+
+ lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
+ if (!lwork)
+ return NULL;
+
+ INIT_WORK(&lwork->work, workfn);
+ INIT_LIST_HEAD(&lwork->pending_item);
+ lwork->lreq = linger_get(lreq);
+
+ return lwork;
+}
+
+static void lwork_free(struct linger_work *lwork)
+{
+ struct ceph_osd_linger_request *lreq = lwork->lreq;
+
+ mutex_lock(&lreq->lock);
+ list_del(&lwork->pending_item);
+ mutex_unlock(&lreq->lock);
+
+ linger_put(lreq);
+ kfree(lwork);
+}
+
+static void lwork_queue(struct linger_work *lwork)
+{
+ struct ceph_osd_linger_request *lreq = lwork->lreq;
+ struct ceph_osd_client *osdc = lreq->osdc;
+
+ verify_lreq_locked(lreq);
+ WARN_ON(!list_empty(&lwork->pending_item));
+
+ lwork->queued_stamp = jiffies;
+ list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
+ queue_work(osdc->notify_wq, &lwork->work);
+}
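
The lwork helpers compose as sketched below; some_workfn is a placeholder (compare queue_watch_error() and do_watch_error()):

	lwork = lwork_alloc(lreq, some_workfn);  /* takes a linger ref */
	if (lwork) {
		/* fill in lwork->notify or lwork->error here */
		lwork_queue(lwork);              /* runs on osdc->notify_wq */
	}
	/* the work function must end with lwork_free(lwork) to drop the ref */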
+
+static void do_watch_notify(struct work_struct *w)
+{
+ struct linger_work *lwork = container_of(w, struct linger_work, work);
+ struct ceph_osd_linger_request *lreq = lwork->lreq;
+
+ if (!linger_registered(lreq)) {
+ dout("%s lreq %p not registered\n", __func__, lreq);
+ goto out;
}
- if (req->r_target_oid.name_len == 0) {
- ceph_oid_copy(&req->r_target_oid, &req->r_base_oid);
- need_check_tiering = true;
+
+ WARN_ON(!lreq->is_watch);
+ dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
+ __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
+ lwork->notify.payload_len);
+ lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
+ lwork->notify.notifier_id, lwork->notify.payload,
+ lwork->notify.payload_len);
+
+out:
+ ceph_msg_put(lwork->notify.msg);
+ lwork_free(lwork);
+}
+
+static void do_watch_error(struct work_struct *w)
+{
+ struct linger_work *lwork = container_of(w, struct linger_work, work);
+ struct ceph_osd_linger_request *lreq = lwork->lreq;
+
+ if (!linger_registered(lreq)) {
+ dout("%s lreq %p not registered\n", __func__, lreq);
+ goto out;
}
- if (need_check_tiering &&
- (req->r_flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
- struct ceph_pg_pool_info *pi;
-
- pi = ceph_pg_pool_by_id(osdmap, req->r_target_oloc.pool);
- if (pi) {
- if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
- pi->read_tier >= 0)
- req->r_target_oloc.pool = pi->read_tier;
- if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
- pi->write_tier >= 0)
- req->r_target_oloc.pool = pi->write_tier;
+ dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
+ lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
+
+out:
+ lwork_free(lwork);
+}
+
+static void queue_watch_error(struct ceph_osd_linger_request *lreq)
+{
+ struct linger_work *lwork;
+
+ lwork = lwork_alloc(lreq, do_watch_error);
+ if (!lwork) {
+ pr_err("failed to allocate error-lwork\n");
+ return;
+ }
+
+ lwork->error.err = lreq->last_error;
+ lwork_queue(lwork);
+}
+
+static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
+ int result)
+{
+ if (!completion_done(&lreq->reg_commit_wait)) {
+ lreq->reg_commit_error = (result <= 0 ? result : 0);
+ complete_all(&lreq->reg_commit_wait);
+ }
+}
+
+static void linger_commit_cb(struct ceph_osd_request *req)
+{
+ struct ceph_osd_linger_request *lreq = req->r_priv;
+
+ mutex_lock(&lreq->lock);
+ dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
+ lreq->linger_id, req->r_result);
+ WARN_ON(!__linger_registered(lreq));
+ linger_reg_commit_complete(lreq, req->r_result);
+ lreq->committed = true;
+
+ if (!lreq->is_watch) {
+ struct ceph_osd_data *osd_data =
+ osd_req_op_data(req, 0, notify, response_data);
+ void *p = page_address(osd_data->pages[0]);
+
+ WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
+ osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
+
+ /* make note of the notify_id */
+ if (req->r_ops[0].outdata_len >= sizeof(u64)) {
+ lreq->notify_id = ceph_decode_64(&p);
+ dout("lreq %p notify_id %llu\n", lreq,
+ lreq->notify_id);
+ } else {
+ dout("lreq %p no notify_id\n", lreq);
}
- /* !pi is caught in ceph_oloc_oid_to_pg() */
}
- return ceph_oloc_oid_to_pg(osdmap, &req->r_target_oloc,
- &req->r_target_oid, pg_out);
+ mutex_unlock(&lreq->lock);
+ linger_put(lreq);
}
-static void __enqueue_request(struct ceph_osd_request *req)
+static int normalize_watch_error(int err)
{
- struct ceph_osd_client *osdc = req->r_osdc;
+ /*
+ * Translate ENOENT -> ENOTCONN so that a delete->disconnection
+ * notification and a failure to reconnect because we raced with
+ * the delete appear the same to the user.
+ */
+ if (err == -ENOENT)
+ err = -ENOTCONN;
+
+ return err;
+}
+
+static void linger_reconnect_cb(struct ceph_osd_request *req)
+{
+ struct ceph_osd_linger_request *lreq = req->r_priv;
+
+ mutex_lock(&lreq->lock);
+ dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
+ lreq, lreq->linger_id, req->r_result, lreq->last_error);
+ if (req->r_result < 0) {
+ if (!lreq->last_error) {
+ lreq->last_error = normalize_watch_error(req->r_result);
+ queue_watch_error(lreq);
+ }
+ }
- dout("%s %p tid %llu to osd%d\n", __func__, req, req->r_tid,
- req->r_osd ? req->r_osd->o_osd : -1);
+ mutex_unlock(&lreq->lock);
+ linger_put(lreq);
+}
+
+static void send_linger(struct ceph_osd_linger_request *lreq)
+{
+ struct ceph_osd_request *req = lreq->reg_req;
+ struct ceph_osd_req_op *op = &req->r_ops[0];
- if (req->r_osd) {
- __remove_osd_from_lru(req->r_osd);
- list_add_tail(&req->r_osd_item, &req->r_osd->o_requests);
- list_move_tail(&req->r_req_lru_item, &osdc->req_unsent);
+ verify_osdc_wrlocked(req->r_osdc);
+ dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
+
+ if (req->r_osd)
+ cancel_linger_request(req);
+
+ request_reinit(req);
+ ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
+ ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
+ req->r_flags = lreq->t.flags;
+ req->r_mtime = lreq->mtime;
+
+ mutex_lock(&lreq->lock);
+ if (lreq->is_watch && lreq->committed) {
+ WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
+ op->watch.cookie != lreq->linger_id);
+ op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
+ op->watch.gen = ++lreq->register_gen;
+ dout("lreq %p reconnect register_gen %u\n", lreq,
+ op->watch.gen);
+ req->r_callback = linger_reconnect_cb;
} else {
- list_move_tail(&req->r_req_lru_item, &osdc->req_notarget);
+ if (!lreq->is_watch)
+ lreq->notify_id = 0;
+ else
+ WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
+ dout("lreq %p register\n", lreq);
+ req->r_callback = linger_commit_cb;
}
+ mutex_unlock(&lreq->lock);
+
+ req->r_priv = linger_get(lreq);
+ req->r_linger = true;
+
+ submit_request(req, true);
}
-/*
- * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
- * (as needed), and set the request r_osd appropriately. If there is
- * no up osd, set r_osd to NULL. Move the request to the appropriate list
- * (unsent, homeless) or leave on in-flight lru.
- *
- * Return 0 if unchanged, 1 if changed, or negative on error.
- *
- * Caller should hold map_sem for read and request_mutex.
- */
-static int __map_request(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req, int force_resend)
+static void linger_ping_cb(struct ceph_osd_request *req)
{
- struct ceph_pg pgid;
- int acting[CEPH_PG_MAX_SIZE];
- int num, o;
- int err;
- bool was_paused;
-
- dout("map_request %p tid %lld\n", req, req->r_tid);
-
- err = __calc_request_pg(osdc->osdmap, req, &pgid);
- if (err) {
- list_move(&req->r_req_lru_item, &osdc->req_notarget);
- return err;
- }
- req->r_pgid = pgid;
-
- num = ceph_calc_pg_acting(osdc->osdmap, pgid, acting, &o);
- if (num < 0)
- num = 0;
-
- was_paused = req->r_paused;
- req->r_paused = __req_should_be_paused(osdc, req);
- if (was_paused && !req->r_paused)
- force_resend = 1;
-
- if ((!force_resend &&
- req->r_osd && req->r_osd->o_osd == o &&
- req->r_sent >= req->r_osd->o_incarnation &&
- req->r_num_pg_osds == num &&
- memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
- (req->r_osd == NULL && o == -1) ||
- req->r_paused)
- return 0; /* no change */
-
- dout("map_request tid %llu pgid %lld.%x osd%d (was osd%d)\n",
- req->r_tid, pgid.pool, pgid.seed, o,
- req->r_osd ? req->r_osd->o_osd : -1);
-
- /* record full pg acting set */
- memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num);
- req->r_num_pg_osds = num;
-
- if (req->r_osd) {
- __cancel_request(req);
- list_del_init(&req->r_osd_item);
- list_del_init(&req->r_linger_osd_item);
- req->r_osd = NULL;
- }
-
- req->r_osd = __lookup_osd(osdc, o);
- if (!req->r_osd && o >= 0) {
- err = -ENOMEM;
- req->r_osd = create_osd(osdc, o);
- if (!req->r_osd) {
- list_move(&req->r_req_lru_item, &osdc->req_notarget);
- goto out;
+ struct ceph_osd_linger_request *lreq = req->r_priv;
+
+ mutex_lock(&lreq->lock);
+ dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
+ __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
+ lreq->last_error);
+ if (lreq->register_gen == req->r_ops[0].watch.gen) {
+ if (!req->r_result) {
+ lreq->watch_valid_thru = lreq->ping_sent;
+ } else if (!lreq->last_error) {
+ lreq->last_error = normalize_watch_error(req->r_result);
+ queue_watch_error(lreq);
}
+ } else {
+ dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
+ lreq->register_gen, req->r_ops[0].watch.gen);
+ }
- dout("map_request osd %p is osd%d\n", req->r_osd, o);
- __insert_osd(osdc, req->r_osd);
+ mutex_unlock(&lreq->lock);
+ linger_put(lreq);
+}
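+
+/*
+ * Illustrative timeline for the register_gen check above:
+ *
+ *   ping sent with gen 1     register_gen == 1
+ *   watch reconnects         register_gen == 2
+ *   pong for gen 1 arrives   ignored as an old pong
+ *   pong for gen 2 arrives   watch_valid_thru = ping_sent
+ *
+ * This is why every ping is stamped with the current register_gen in
+ * send_linger_ping() below.
+ */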
+
+static void send_linger_ping(struct ceph_osd_linger_request *lreq)
+{
+ struct ceph_osd_client *osdc = lreq->osdc;
+ struct ceph_osd_request *req = lreq->ping_req;
+ struct ceph_osd_req_op *op = &req->r_ops[0];
- ceph_con_open(&req->r_osd->o_con,
- CEPH_ENTITY_TYPE_OSD, o,
- &osdc->osdmap->osd_addr[o]);
+ if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) {
+ dout("%s PAUSERD\n", __func__);
+ return;
}
- __enqueue_request(req);
- err = 1; /* osd or pg changed */
+ lreq->ping_sent = jiffies;
+ dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
+ __func__, lreq, lreq->linger_id, lreq->ping_sent,
+ lreq->register_gen);
-out:
- return err;
+ if (req->r_osd)
+ cancel_linger_request(req);
+
+ request_reinit(req);
+ target_copy(&req->r_t, &lreq->t);
+
+ WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
+ op->watch.cookie != lreq->linger_id ||
+ op->watch.op != CEPH_OSD_WATCH_OP_PING);
+ op->watch.gen = lreq->register_gen;
+ req->r_callback = linger_ping_cb;
+ req->r_priv = linger_get(lreq);
+ req->r_linger = true;
+
+ ceph_osdc_get_request(req);
+ account_request(req);
+ req->r_tid = atomic64_inc_return(&osdc->last_tid);
+ link_request(lreq->osd, req);
+ send_request(req);
}
-/*
- * caller should hold map_sem (for read) and request_mutex
- */
-static void __send_request(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req)
+static void linger_submit(struct ceph_osd_linger_request *lreq)
{
- void *p;
+ struct ceph_osd_client *osdc = lreq->osdc;
+ struct ceph_osd *osd;
- dout("send_request %p tid %llu to osd%d flags %d pg %lld.%x\n",
- req, req->r_tid, req->r_osd->o_osd, req->r_flags,
- (unsigned long long)req->r_pgid.pool, req->r_pgid.seed);
+ calc_target(osdc, &lreq->t, &lreq->last_force_resend, false);
+ osd = lookup_create_osd(osdc, lreq->t.osd, true);
+ link_linger(osd, lreq);
- /* fill in message content that changes each time we send it */
- put_unaligned_le32(osdc->osdmap->epoch, req->r_request_osdmap_epoch);
- put_unaligned_le32(req->r_flags, req->r_request_flags);
- put_unaligned_le64(req->r_target_oloc.pool, req->r_request_pool);
- p = req->r_request_pgid;
- ceph_encode_64(&p, req->r_pgid.pool);
- ceph_encode_32(&p, req->r_pgid.seed);
- put_unaligned_le64(1, req->r_request_attempts); /* FIXME */
- memcpy(req->r_request_reassert_version, &req->r_reassert_version,
- sizeof(req->r_reassert_version));
+ send_linger(lreq);
+}
- req->r_stamp = jiffies;
- list_move_tail(&req->r_req_lru_item, &osdc->req_lru);
+static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
+{
+ struct ceph_osd_client *osdc = lreq->osdc;
+ struct ceph_osd_linger_request *lookup_lreq;
- ceph_msg_get(req->r_request); /* send consumes a ref */
+ verify_osdc_wrlocked(osdc);
- req->r_sent = req->r_osd->o_incarnation;
+ lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
+ lreq->linger_id);
+ if (!lookup_lreq)
+ return;
- ceph_con_send(&req->r_osd->o_con, req->r_request);
+ WARN_ON(lookup_lreq != lreq);
+ erase_linger_mc(&osdc->linger_map_checks, lreq);
+ linger_put(lreq);
}
/*
- * Send any requests in the queue (req_unsent).
+ * @lreq has to be both registered and linked.
*/
-static void __send_queued(struct ceph_osd_client *osdc)
+static void __linger_cancel(struct ceph_osd_linger_request *lreq)
+{
+ if (lreq->is_watch && lreq->ping_req->r_osd)
+ cancel_linger_request(lreq->ping_req);
+ if (lreq->reg_req->r_osd)
+ cancel_linger_request(lreq->reg_req);
+ cancel_linger_map_check(lreq);
+ unlink_linger(lreq->osd, lreq);
+ linger_unregister(lreq);
+}
+
+static void linger_cancel(struct ceph_osd_linger_request *lreq)
{
- struct ceph_osd_request *req, *tmp;
+ struct ceph_osd_client *osdc = lreq->osdc;
- dout("__send_queued\n");
- list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item)
- __send_request(osdc, req);
+ down_write(&osdc->lock);
+ if (__linger_registered(lreq))
+ __linger_cancel(lreq);
+ up_write(&osdc->lock);
}
-/*
- * Caller should hold map_sem for read and request_mutex.
- */
-static int __ceph_osdc_start_request(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req,
- bool nofail)
-{
- int rc;
-
- __register_request(osdc, req);
- req->r_sent = 0;
- req->r_got_reply = 0;
- rc = __map_request(osdc, req, 0);
- if (rc < 0) {
- if (nofail) {
- dout("osdc_start_request failed map, "
- " will retry %lld\n", req->r_tid);
- rc = 0;
- } else {
- __unregister_request(osdc, req);
- }
- return rc;
+static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
+
+static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
+{
+ struct ceph_osd_client *osdc = lreq->osdc;
+ struct ceph_osdmap *map = osdc->osdmap;
+
+ verify_osdc_wrlocked(osdc);
+ WARN_ON(!map->epoch);
+
+ if (lreq->register_gen) {
+ lreq->map_dne_bound = map->epoch;
+ dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
+ lreq, lreq->linger_id);
+ } else {
+ dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
+ __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
+ map->epoch);
}
- if (req->r_osd == NULL) {
- dout("send_request %p no up osds in pg\n", req);
- ceph_monc_request_next_osdmap(&osdc->client->monc);
+ if (lreq->map_dne_bound) {
+ if (map->epoch >= lreq->map_dne_bound) {
+ /* we had a new enough map */
+ pr_info("linger_id %llu pool does not exist\n",
+ lreq->linger_id);
+ linger_reg_commit_complete(lreq, -ENOENT);
+ __linger_cancel(lreq);
+ }
} else {
- __send_queued(osdc);
+ send_linger_map_check(lreq);
}
+}
- return 0;
+static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
+{
+ struct ceph_osd_client *osdc = &greq->monc->client->osdc;
+ struct ceph_osd_linger_request *lreq;
+ u64 linger_id = greq->private_data;
+
+ WARN_ON(greq->result || !greq->u.newest);
+
+ down_write(&osdc->lock);
+ lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
+ if (!lreq) {
+ dout("%s linger_id %llu dne\n", __func__, linger_id);
+ goto out_unlock;
+ }
+
+ dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
+ __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
+ greq->u.newest);
+ if (!lreq->map_dne_bound)
+ lreq->map_dne_bound = greq->u.newest;
+ erase_linger_mc(&osdc->linger_map_checks, lreq);
+ check_linger_pool_dne(lreq);
+
+ linger_put(lreq);
+out_unlock:
+ up_write(&osdc->lock);
+}
+
+static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
+{
+ struct ceph_osd_client *osdc = lreq->osdc;
+ struct ceph_osd_linger_request *lookup_lreq;
+ int ret;
+
+ verify_osdc_wrlocked(osdc);
+
+ lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
+ lreq->linger_id);
+ if (lookup_lreq) {
+ WARN_ON(lookup_lreq != lreq);
+ return;
+ }
+
+ linger_get(lreq);
+ insert_linger_mc(&osdc->linger_map_checks, lreq);
+ ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
+ linger_map_check_cb, lreq->linger_id);
+ WARN_ON(ret);
+}
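+
+/*
+ * Note: at most one map check is in flight per lreq -- linger_map_checks
+ * is keyed by linger_id.  linger_map_check_cb() records the newest
+ * osdmap epoch as map_dne_bound; once our map reaches that epoch
+ * without the pool reappearing, check_linger_pool_dne() declares the
+ * pool gone and fails the watch with -ENOENT.
+ */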
+
+static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
+{
+ int ret;
+
+ dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
+ ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
+ return ret ?: lreq->reg_commit_error;
+}
+
+static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
+{
+ int ret;
+
+ dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
+ ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
+ return ret ?: lreq->notify_finish_error;
}
/*
- * Timeout callback, called every N seconds when 1 or more osd
- * requests has been active for more than N seconds. When this
- * happens, we ping all OSDs with requests who have timed out to
- * ensure any communications channel reset is detected. Reset the
- * request timeouts another N seconds in the future as we go.
- * Reschedule the timeout event another N seconds in future (unless
- * there are no open requests).
+ * Timeout callback, called every N seconds.  When one or more OSD
+ * requests have been active for more than N seconds, we send a
+ * keepalive (tag + timestamp) to their OSDs to ensure any
+ * communications channel reset is detected.
*/
static void handle_timeout(struct work_struct *work)
{
struct ceph_osd_client *osdc =
container_of(work, struct ceph_osd_client, timeout_work.work);
struct ceph_options *opts = osdc->client->options;
- struct ceph_osd_request *req;
- struct ceph_osd *osd;
- struct list_head slow_osds;
- dout("timeout\n");
- down_read(&osdc->map_sem);
-
- ceph_monc_request_next_osdmap(&osdc->client->monc);
+ unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
+ LIST_HEAD(slow_osds);
+ struct rb_node *n, *p;
- mutex_lock(&osdc->request_mutex);
+ dout("%s osdc %p\n", __func__, osdc);
+ down_write(&osdc->lock);
/*
* ping osds that are a bit slow. this ensures that if there
* is a break in the TCP connection we will notice, and reopen
* a connection with that osd (from the fault callback).
*/
- INIT_LIST_HEAD(&slow_osds);
- list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
- if (time_before(jiffies,
- req->r_stamp + opts->osd_keepalive_timeout))
- break;
+ for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
+ struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
+ bool found = false;
+
+ for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
+ struct ceph_osd_request *req =
+ rb_entry(p, struct ceph_osd_request, r_node);
+
+ if (time_before(req->r_stamp, cutoff)) {
+ dout(" req %p tid %llu on osd%d is laggy\n",
+ req, req->r_tid, osd->o_osd);
+ found = true;
+ }
+ }
+ for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
+ struct ceph_osd_linger_request *lreq =
+ rb_entry(p, struct ceph_osd_linger_request, node);
+
+ dout(" lreq %p linger_id %llu is served by osd%d\n",
+ lreq, lreq->linger_id, osd->o_osd);
+ found = true;
+
+ mutex_lock(&lreq->lock);
+ if (lreq->is_watch && lreq->committed && !lreq->last_error)
+ send_linger_ping(lreq);
+ mutex_unlock(&lreq->lock);
+ }
- osd = req->r_osd;
- BUG_ON(!osd);
- dout(" tid %llu is slow, will send keepalive on osd%d\n",
- req->r_tid, osd->o_osd);
- list_move_tail(&osd->o_keepalive_item, &slow_osds);
+ if (found)
+ list_move_tail(&osd->o_keepalive_item, &slow_osds);
}
+
+ if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
+ maybe_request_map(osdc);
+
while (!list_empty(&slow_osds)) {
- osd = list_entry(slow_osds.next, struct ceph_osd,
- o_keepalive_item);
+ struct ceph_osd *osd = list_first_entry(&slow_osds,
+ struct ceph_osd,
+ o_keepalive_item);
list_del_init(&osd->o_keepalive_item);
ceph_con_keepalive(&osd->o_con);
}
- __schedule_osd_timeout(osdc);
- __send_queued(osdc);
- mutex_unlock(&osdc->request_mutex);
- up_read(&osdc->map_sem);
+ up_write(&osdc->lock);
+ schedule_delayed_work(&osdc->timeout_work,
+ osdc->client->options->osd_keepalive_timeout);
}
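+
+/*
+ * Example of the cutoff above (illustrative numbers): with a 5 second
+ * osd_keepalive_timeout, a request stamped more than 5 seconds ago
+ * satisfies time_before(req->r_stamp, cutoff) and its OSD gets a
+ * keepalive on this timer run; OSDs serving committed watches are
+ * additionally pinged via send_linger_ping().
+ */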
static void handle_osds_timeout(struct work_struct *work)
@@ -1663,12 +2542,20 @@ static void handle_osds_timeout(struct work_struct *work)
container_of(work, struct ceph_osd_client,
osds_timeout_work.work);
unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
+ struct ceph_osd *osd, *nosd;
- dout("osds timeout\n");
- down_read(&osdc->map_sem);
- remove_old_osds(osdc);
- up_read(&osdc->map_sem);
+ dout("%s osdc %p\n", __func__, osdc);
+ down_write(&osdc->lock);
+ list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
+ if (time_before(jiffies, osd->lru_ttl))
+ break;
+ WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
+ WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
+ close_osd(osd);
+ }
+
+ up_write(&osdc->lock);
schedule_delayed_work(&osdc->osds_timeout_work,
round_jiffies_relative(delay));
}
@@ -1776,107 +2663,76 @@ e_inval:
goto out;
}
-static void complete_request(struct ceph_osd_request *req)
-{
- complete_all(&req->r_safe_completion); /* fsync waiter */
-}
+struct MOSDOpReply {
+ struct ceph_pg pgid;
+ u64 flags;
+ int result;
+ u32 epoch;
+ int num_ops;
+ u32 outdata_len[CEPH_OSD_MAX_OPS];
+ s32 rval[CEPH_OSD_MAX_OPS];
+ int retry_attempt;
+ struct ceph_eversion replay_version;
+ u64 user_version;
+ struct ceph_request_redirect redirect;
+};
-/*
- * handle osd op reply. either call the callback if it is specified,
- * or do the completion to wake up the waiting thread.
- */
-static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg)
+static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
{
- void *p, *end;
- struct ceph_osd_request *req;
- struct ceph_request_redirect redir;
- u64 tid;
- int object_len;
- unsigned int numops;
- int payload_len, flags;
- s32 result;
- s32 retry_attempt;
- struct ceph_pg pg;
- int err;
- u32 reassert_epoch;
- u64 reassert_version;
- u32 osdmap_epoch;
- int already_completed;
- u32 bytes;
+ void *p = msg->front.iov_base;
+ void *const end = p + msg->front.iov_len;
+ u16 version = le16_to_cpu(msg->hdr.version);
+ struct ceph_eversion bad_replay_version;
u8 decode_redir;
- unsigned int i;
-
- tid = le64_to_cpu(msg->hdr.tid);
- dout("handle_reply %p tid %llu\n", msg, tid);
+ u32 len;
+ int ret;
+ int i;
- p = msg->front.iov_base;
- end = p + msg->front.iov_len;
+ ceph_decode_32_safe(&p, end, len, e_inval);
+ ceph_decode_need(&p, end, len, e_inval);
+ p += len; /* skip oid */
- ceph_decode_need(&p, end, 4, bad);
- object_len = ceph_decode_32(&p);
- ceph_decode_need(&p, end, object_len, bad);
- p += object_len;
+ ret = ceph_decode_pgid(&p, end, &m->pgid);
+ if (ret)
+ return ret;
- err = ceph_decode_pgid(&p, end, &pg);
- if (err)
- goto bad;
+ ceph_decode_64_safe(&p, end, m->flags, e_inval);
+ ceph_decode_32_safe(&p, end, m->result, e_inval);
+ ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
+ memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
+ p += sizeof(bad_replay_version);
+ ceph_decode_32_safe(&p, end, m->epoch, e_inval);
- ceph_decode_need(&p, end, 8 + 4 + 4 + 8 + 4, bad);
- flags = ceph_decode_64(&p);
- result = ceph_decode_32(&p);
- reassert_epoch = ceph_decode_32(&p);
- reassert_version = ceph_decode_64(&p);
- osdmap_epoch = ceph_decode_32(&p);
-
- /* lookup */
- down_read(&osdc->map_sem);
- mutex_lock(&osdc->request_mutex);
- req = __lookup_request(osdc, tid);
- if (req == NULL) {
- dout("handle_reply tid %llu dne\n", tid);
- goto bad_mutex;
- }
- ceph_osdc_get_request(req);
+ ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
+ if (m->num_ops > ARRAY_SIZE(m->outdata_len))
+ goto e_inval;
- dout("handle_reply %p tid %llu req %p result %d\n", msg, tid,
- req, result);
-
- ceph_decode_need(&p, end, 4, bad_put);
- numops = ceph_decode_32(&p);
- if (numops > CEPH_OSD_MAX_OPS)
- goto bad_put;
- if (numops != req->r_num_ops)
- goto bad_put;
- payload_len = 0;
- ceph_decode_need(&p, end, numops * sizeof(struct ceph_osd_op), bad_put);
- for (i = 0; i < numops; i++) {
+ ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
+ e_inval);
+ for (i = 0; i < m->num_ops; i++) {
struct ceph_osd_op *op = p;
- int len;
- len = le32_to_cpu(op->payload_len);
- req->r_ops[i].outdata_len = len;
- dout(" op %d has %d bytes\n", i, len);
- payload_len += len;
+ m->outdata_len[i] = le32_to_cpu(op->payload_len);
p += sizeof(*op);
}
- bytes = le32_to_cpu(msg->hdr.data_len);
- if (payload_len != bytes) {
- pr_warn("sum of op payload lens %d != data_len %d\n",
- payload_len, bytes);
- goto bad_put;
- }
- ceph_decode_need(&p, end, 4 + numops * 4, bad_put);
- retry_attempt = ceph_decode_32(&p);
- for (i = 0; i < numops; i++)
- req->r_ops[i].rval = ceph_decode_32(&p);
+ ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
+ for (i = 0; i < m->num_ops; i++)
+ ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
- if (le16_to_cpu(msg->hdr.version) >= 6) {
- p += 8 + 4; /* skip replay_version */
- p += 8; /* skip user_version */
+ if (version >= 5) {
+ ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
+ memcpy(&m->replay_version, p, sizeof(m->replay_version));
+ p += sizeof(m->replay_version);
+ ceph_decode_64_safe(&p, end, m->user_version, e_inval);
+ } else {
+ m->replay_version = bad_replay_version; /* struct */
+ m->user_version = le64_to_cpu(m->replay_version.version);
+ }
- if (le16_to_cpu(msg->hdr.version) >= 7)
- ceph_decode_8_safe(&p, end, decode_redir, bad_put);
+ if (version >= 6) {
+ if (version >= 7)
+ ceph_decode_8_safe(&p, end, decode_redir, e_inval);
else
decode_redir = 1;
} else {
@@ -1884,228 +2740,410 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg)
}
if (decode_redir) {
- err = ceph_redirect_decode(&p, end, &redir);
- if (err)
- goto bad_put;
+ ret = ceph_redirect_decode(&p, end, &m->redirect);
+ if (ret)
+ return ret;
} else {
- redir.oloc.pool = -1;
+ ceph_oloc_init(&m->redirect.oloc);
}
- if (redir.oloc.pool != -1) {
- dout("redirect pool %lld\n", redir.oloc.pool);
-
- __unregister_request(osdc, req);
-
- req->r_target_oloc = redir.oloc; /* struct */
+ return 0;
- /*
- * Start redirect requests with nofail=true. If
- * mapping fails, request will end up on the notarget
- * list, waiting for the new osdmap (which can take
- * a while), even though the original request mapped
- * successfully. In the future we might want to follow
- * original request's nofail setting here.
- */
- err = __ceph_osdc_start_request(osdc, req, true);
- BUG_ON(err);
+e_inval:
+ return -EINVAL;
+}
- goto out_unlock;
- }
+/*
+ * We are done with @req if
+ * - @m is a safe reply, or
+ * - @m is an unsafe reply and we didn't want a safe one
+ */
+static bool done_request(const struct ceph_osd_request *req,
+ const struct MOSDOpReply *m)
+{
+ return (m->result < 0 ||
+ (m->flags & CEPH_OSD_FLAG_ONDISK) ||
+ !(req->r_flags & CEPH_OSD_FLAG_ONDISK));
+}
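+
+/*
+ * Examples: a read (no CEPH_OSD_FLAG_ONDISK in r_flags) is done on its
+ * first reply; a write that requested ONDISK is done only when the
+ * reply carries CEPH_OSD_FLAG_ONDISK; an error reply (m->result < 0)
+ * finishes the request immediately regardless of flags.
+ */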
- already_completed = req->r_got_reply;
- if (!req->r_got_reply) {
- req->r_result = result;
- dout("handle_reply result %d bytes %d\n", req->r_result,
- bytes);
- if (req->r_result == 0)
- req->r_result = bytes;
+/*
+ * handle osd op reply. either call the callback if it is specified,
+ * or do the completion to wake up the waiting thread.
+ *
+ * ->r_unsafe_callback is set?      yes                     no
+ *
+ * first reply is OK (needed        r_cb/r_completion,      r_cb/r_completion,
+ * any or needed/got safe)          r_safe_completion       r_safe_completion
+ *
+ * first reply is unsafe            r_unsafe_cb(true)       (nothing)
+ *
+ * when we get the safe reply       r_unsafe_cb(false),     r_cb/r_completion,
+ *                                  r_safe_completion       r_safe_completion
+ */
+static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
+{
+ struct ceph_osd_client *osdc = osd->o_osdc;
+ struct ceph_osd_request *req;
+ struct MOSDOpReply m;
+ u64 tid = le64_to_cpu(msg->hdr.tid);
+ u32 data_len = 0;
+ bool already_acked;
+ int ret;
+ int i;
- /* in case this is a write and we need to replay, */
- req->r_reassert_version.epoch = cpu_to_le32(reassert_epoch);
- req->r_reassert_version.version = cpu_to_le64(reassert_version);
+ dout("%s msg %p tid %llu\n", __func__, msg, tid);
- req->r_got_reply = 1;
- } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
- dout("handle_reply tid %llu dup ack\n", tid);
- goto out_unlock;
+ down_read(&osdc->lock);
+ if (!osd_registered(osd)) {
+ dout("%s osd%d unknown\n", __func__, osd->o_osd);
+ goto out_unlock_osdc;
}
+ WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
- dout("handle_reply tid %llu flags %d\n", tid, flags);
+ mutex_lock(&osd->lock);
+ req = lookup_request(&osd->o_requests, tid);
+ if (!req) {
+ dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
+ goto out_unlock_session;
+ }
- if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK))
- __register_linger_request(osdc, req);
+ ret = decode_MOSDOpReply(msg, &m);
+ if (ret) {
+ pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
+ req->r_tid, ret);
+ ceph_msg_dump(msg);
+ goto fail_request;
+ }
+ dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
+ __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
+ m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
+ le64_to_cpu(m.replay_version.version), m.user_version);
+
+ if (m.retry_attempt >= 0) {
+ if (m.retry_attempt != req->r_attempts - 1) {
+ dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
+ req, req->r_tid, m.retry_attempt,
+ req->r_attempts - 1);
+ goto out_unlock_session;
+ }
+ } else {
+ WARN_ON(1); /* MOSDOpReply v4 is assumed */
+ }
- /* either this is a read, or we got the safe response */
- if (result < 0 ||
- (flags & CEPH_OSD_FLAG_ONDISK) ||
- ((flags & CEPH_OSD_FLAG_WRITE) == 0))
- __unregister_request(osdc, req);
+ if (!ceph_oloc_empty(&m.redirect.oloc)) {
+ dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
+ m.redirect.oloc.pool);
+ unlink_request(osd, req);
+ mutex_unlock(&osd->lock);
+
+ ceph_oloc_copy(&req->r_t.target_oloc, &m.redirect.oloc);
+ req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
+ req->r_tid = 0;
+ __submit_request(req, false);
+ goto out_unlock_osdc;
+ }
- mutex_unlock(&osdc->request_mutex);
- up_read(&osdc->map_sem);
+ if (m.num_ops != req->r_num_ops) {
+ pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
+ req->r_num_ops, req->r_tid);
+ goto fail_request;
+ }
+ for (i = 0; i < req->r_num_ops; i++) {
+ dout(" req %p tid %llu op %d rval %d len %u\n", req,
+ req->r_tid, i, m.rval[i], m.outdata_len[i]);
+ req->r_ops[i].rval = m.rval[i];
+ req->r_ops[i].outdata_len = m.outdata_len[i];
+ data_len += m.outdata_len[i];
+ }
+ if (data_len != le32_to_cpu(msg->hdr.data_len)) {
+ pr_err("sum of lens %u != %u for tid %llu\n", data_len,
+ le32_to_cpu(msg->hdr.data_len), req->r_tid);
+ goto fail_request;
+ }
+ dout("%s req %p tid %llu acked %d result %d data_len %u\n", __func__,
+ req, req->r_tid, req->r_got_reply, m.result, data_len);
+
+ already_acked = req->r_got_reply;
+ if (!already_acked) {
+ req->r_result = m.result ?: data_len;
+ req->r_replay_version = m.replay_version; /* struct */
+ req->r_got_reply = true;
+ } else if (!(m.flags & CEPH_OSD_FLAG_ONDISK)) {
+ dout("req %p tid %llu dup ack\n", req, req->r_tid);
+ goto out_unlock_session;
+ }
- if (!already_completed) {
- if (req->r_unsafe_callback &&
- result >= 0 && !(flags & CEPH_OSD_FLAG_ONDISK))
- req->r_unsafe_callback(req, true);
- if (req->r_callback)
- req->r_callback(req, msg);
- else
- complete_all(&req->r_completion);
+ if (done_request(req, &m)) {
+ __finish_request(req);
+ if (req->r_linger) {
+ WARN_ON(req->r_unsafe_callback);
+ dout("req %p tid %llu cb (locked)\n", req, req->r_tid);
+ __complete_request(req);
+ }
}
- if (flags & CEPH_OSD_FLAG_ONDISK) {
- if (req->r_unsafe_callback && already_completed)
+ mutex_unlock(&osd->lock);
+ up_read(&osdc->lock);
+
+ if (done_request(req, &m)) {
+ if (already_acked && req->r_unsafe_callback) {
+ dout("req %p tid %llu safe-cb\n", req, req->r_tid);
req->r_unsafe_callback(req, false);
- complete_request(req);
+ } else if (!req->r_linger) {
+ dout("req %p tid %llu cb\n", req, req->r_tid);
+ __complete_request(req);
+ }
+ } else {
+ if (req->r_unsafe_callback) {
+ dout("req %p tid %llu unsafe-cb\n", req, req->r_tid);
+ req->r_unsafe_callback(req, true);
+ } else {
+ WARN_ON(1);
+ }
}
+ if (m.flags & CEPH_OSD_FLAG_ONDISK)
+ complete_all(&req->r_safe_completion);
-out:
- dout("req=%p req->r_linger=%d\n", req, req->r_linger);
ceph_osdc_put_request(req);
return;
-out_unlock:
- mutex_unlock(&osdc->request_mutex);
- up_read(&osdc->map_sem);
- goto out;
-bad_put:
- req->r_result = -EIO;
- __unregister_request(osdc, req);
- if (req->r_callback)
- req->r_callback(req, msg);
- else
- complete_all(&req->r_completion);
- complete_request(req);
- ceph_osdc_put_request(req);
-bad_mutex:
- mutex_unlock(&osdc->request_mutex);
- up_read(&osdc->map_sem);
-bad:
- pr_err("corrupt osd_op_reply got %d %d\n",
- (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len));
- ceph_msg_dump(msg);
+fail_request:
+ complete_request(req, -EIO);
+out_unlock_session:
+ mutex_unlock(&osd->lock);
+out_unlock_osdc:
+ up_read(&osdc->lock);
}
-static void reset_changed_osds(struct ceph_osd_client *osdc)
+static void set_pool_was_full(struct ceph_osd_client *osdc)
{
- struct rb_node *p, *n;
+ struct rb_node *n;
- dout("%s %p\n", __func__, osdc);
- for (p = rb_first(&osdc->osds); p; p = n) {
- struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);
+ for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
+ struct ceph_pg_pool_info *pi =
+ rb_entry(n, struct ceph_pg_pool_info, node);
- n = rb_next(p);
- if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
- memcmp(&osd->o_con.peer_addr,
- ceph_osd_addr(osdc->osdmap,
- osd->o_osd),
- sizeof(struct ceph_entity_addr)) != 0)
- __reset_osd(osdc, osd);
+ pi->was_full = __pool_full(pi);
}
}
+static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
+{
+ struct ceph_pg_pool_info *pi;
+
+ pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
+ if (!pi)
+ return false;
+
+ return pi->was_full && !__pool_full(pi);
+}
+
+static enum calc_target_result
+recalc_linger_target(struct ceph_osd_linger_request *lreq)
+{
+ struct ceph_osd_client *osdc = lreq->osdc;
+ enum calc_target_result ct_res;
+
+ ct_res = calc_target(osdc, &lreq->t, &lreq->last_force_resend, true);
+ if (ct_res == CALC_TARGET_NEED_RESEND) {
+ struct ceph_osd *osd;
+
+ osd = lookup_create_osd(osdc, lreq->t.osd, true);
+ if (osd != lreq->osd) {
+ unlink_linger(lreq->osd, lreq);
+ link_linger(osd, lreq);
+ }
+ }
+
+ return ct_res;
+}
+
/*
- * Requeue requests whose mapping to an OSD has changed. If requests map to
- * no osd, request a new map.
- *
- * Caller should hold map_sem for read.
+ * Requeue requests whose mapping to an OSD has changed.
*/
-static void kick_requests(struct ceph_osd_client *osdc, bool force_resend,
- bool force_resend_writes)
+static void scan_requests(struct ceph_osd *osd,
+ bool force_resend,
+ bool cleared_full,
+ bool check_pool_cleared_full,
+ struct rb_root *need_resend,
+ struct list_head *need_resend_linger)
{
- struct ceph_osd_request *req, *nreq;
- struct rb_node *p;
- int needmap = 0;
- int err;
- bool force_resend_req;
+ struct ceph_osd_client *osdc = osd->o_osdc;
+ struct rb_node *n;
+ bool force_resend_writes;
+
+ for (n = rb_first(&osd->o_linger_requests); n; ) {
+ struct ceph_osd_linger_request *lreq =
+ rb_entry(n, struct ceph_osd_linger_request, node);
+ enum calc_target_result ct_res;
+
+ n = rb_next(n); /* recalc_linger_target() */
+
+ dout("%s lreq %p linger_id %llu\n", __func__, lreq,
+ lreq->linger_id);
+ ct_res = recalc_linger_target(lreq);
+ switch (ct_res) {
+ case CALC_TARGET_NO_ACTION:
+ force_resend_writes = cleared_full ||
+ (check_pool_cleared_full &&
+ pool_cleared_full(osdc, lreq->t.base_oloc.pool));
+ if (!force_resend && !force_resend_writes)
+ break;
+
+ /* fall through */
+ case CALC_TARGET_NEED_RESEND:
+ cancel_linger_map_check(lreq);
+ /*
+ * scan_requests() for the previous epoch(s)
+ * may have already added it to the list, since
+ * it's not unlinked here.
+ */
+ if (list_empty(&lreq->scan_item))
+ list_add_tail(&lreq->scan_item, need_resend_linger);
+ break;
+ case CALC_TARGET_POOL_DNE:
+ check_linger_pool_dne(lreq);
+ break;
+ }
+ }
- dout("kick_requests %s %s\n", force_resend ? " (force resend)" : "",
- force_resend_writes ? " (force resend writes)" : "");
- mutex_lock(&osdc->request_mutex);
- for (p = rb_first(&osdc->requests); p; ) {
- req = rb_entry(p, struct ceph_osd_request, r_node);
- p = rb_next(p);
+ for (n = rb_first(&osd->o_requests); n; ) {
+ struct ceph_osd_request *req =
+ rb_entry(n, struct ceph_osd_request, r_node);
+ enum calc_target_result ct_res;
+
+ n = rb_next(n); /* unlink_request(), check_pool_dne() */
+
+ dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
+ ct_res = calc_target(osdc, &req->r_t,
+ &req->r_last_force_resend, false);
+ switch (ct_res) {
+ case CALC_TARGET_NO_ACTION:
+ force_resend_writes = cleared_full ||
+ (check_pool_cleared_full &&
+ pool_cleared_full(osdc, req->r_t.base_oloc.pool));
+ if (!force_resend &&
+ (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
+ !force_resend_writes))
+ break;
+
+ /* fall through */
+ case CALC_TARGET_NEED_RESEND:
+ cancel_map_check(req);
+ unlink_request(osd, req);
+ insert_request(need_resend, req);
+ break;
+ case CALC_TARGET_POOL_DNE:
+ check_pool_dne(req);
+ break;
+ }
+ }
+}
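+
+/*
+ * In short: CALC_TARGET_NO_ACTION resends only when forced (an osdmap
+ * epoch was skipped, or a full/pause condition cleared for the pool),
+ * CALC_TARGET_NEED_RESEND always queues the request for resend, and
+ * CALC_TARGET_POOL_DNE starts (or completes) a map check to decide
+ * whether the pool is really gone.
+ */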
+static int handle_one_map(struct ceph_osd_client *osdc,
+ void *p, void *end, bool incremental,
+ struct rb_root *need_resend,
+ struct list_head *need_resend_linger)
+{
+ struct ceph_osdmap *newmap;
+ struct rb_node *n;
+ bool skipped_map = false;
+ bool was_full;
+
+ was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
+ set_pool_was_full(osdc);
+
+ if (incremental)
+ newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
+ else
+ newmap = ceph_osdmap_decode(&p, end);
+ if (IS_ERR(newmap))
+ return PTR_ERR(newmap);
+
+ if (newmap != osdc->osdmap) {
/*
- * For linger requests that have not yet been
- * registered, move them to the linger list; they'll
- * be sent to the osd in the loop below. Unregister
- * the request before re-registering it as a linger
- * request to ensure the __map_request() below
- * will decide it needs to be sent.
+ * Preserve ->was_full before destroying the old map.
+ * For pools that weren't in the old map, ->was_full
+ * should be false.
*/
- if (req->r_linger && list_empty(&req->r_linger_item)) {
- dout("%p tid %llu restart on osd%d\n",
- req, req->r_tid,
- req->r_osd ? req->r_osd->o_osd : -1);
- ceph_osdc_get_request(req);
- __unregister_request(osdc, req);
- __register_linger_request(osdc, req);
- ceph_osdc_put_request(req);
- continue;
+ for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
+ struct ceph_pg_pool_info *pi =
+ rb_entry(n, struct ceph_pg_pool_info, node);
+ struct ceph_pg_pool_info *old_pi;
+
+ old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
+ if (old_pi)
+ pi->was_full = old_pi->was_full;
+ else
+ WARN_ON(pi->was_full);
}
- force_resend_req = force_resend ||
- (force_resend_writes &&
- req->r_flags & CEPH_OSD_FLAG_WRITE);
- err = __map_request(osdc, req, force_resend_req);
- if (err < 0)
- continue; /* error */
- if (req->r_osd == NULL) {
- dout("%p tid %llu maps to no osd\n", req, req->r_tid);
- needmap++; /* request a newer map */
- } else if (err > 0) {
- if (!req->r_linger) {
- dout("%p tid %llu requeued on osd%d\n", req,
- req->r_tid,
- req->r_osd ? req->r_osd->o_osd : -1);
- req->r_flags |= CEPH_OSD_FLAG_RETRY;
- }
+ if (osdc->osdmap->epoch &&
+ osdc->osdmap->epoch + 1 < newmap->epoch) {
+ WARN_ON(incremental);
+ skipped_map = true;
}
+
+ ceph_osdmap_destroy(osdc->osdmap);
+ osdc->osdmap = newmap;
}
- list_for_each_entry_safe(req, nreq, &osdc->req_linger,
- r_linger_item) {
- dout("linger req=%p req->r_osd=%p\n", req, req->r_osd);
-
- err = __map_request(osdc, req,
- force_resend || force_resend_writes);
- dout("__map_request returned %d\n", err);
- if (err < 0)
- continue; /* hrm! */
- if (req->r_osd == NULL || err > 0) {
- if (req->r_osd == NULL) {
- dout("lingering %p tid %llu maps to no osd\n",
- req, req->r_tid);
- /*
- * A homeless lingering request makes
- * no sense, as it's job is to keep
- * a particular OSD connection open.
- * Request a newer map and kick the
- * request, knowing that it won't be
- * resent until we actually get a map
- * that can tell us where to send it.
- */
- needmap++;
- }
+ was_full &= !ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
+ scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
+ need_resend, need_resend_linger);
+
+ for (n = rb_first(&osdc->osds); n; ) {
+ struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
+
+ n = rb_next(n); /* close_osd() */
+
+ scan_requests(osd, skipped_map, was_full, true, need_resend,
+ need_resend_linger);
+ if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
+ memcmp(&osd->o_con.peer_addr,
+ ceph_osd_addr(osdc->osdmap, osd->o_osd),
+ sizeof(struct ceph_entity_addr)))
+ close_osd(osd);
+ }
- dout("kicking lingering %p tid %llu osd%d\n", req,
- req->r_tid, req->r_osd ? req->r_osd->o_osd : -1);
- __register_request(osdc, req);
- __unregister_linger_request(osdc, req);
+ return 0;
+}
+
+static void kick_requests(struct ceph_osd_client *osdc,
+ struct rb_root *need_resend,
+ struct list_head *need_resend_linger)
+{
+ struct ceph_osd_linger_request *lreq, *nlreq;
+ struct rb_node *n;
+
+ for (n = rb_first(need_resend); n; ) {
+ struct ceph_osd_request *req =
+ rb_entry(n, struct ceph_osd_request, r_node);
+ struct ceph_osd *osd;
+
+ n = rb_next(n);
+ erase_request(need_resend, req); /* before link_request() */
+
+ WARN_ON(req->r_osd);
+ calc_target(osdc, &req->r_t, NULL, false);
+ osd = lookup_create_osd(osdc, req->r_t.osd, true);
+ link_request(osd, req);
+ if (!req->r_linger) {
+ if (!osd_homeless(osd) && !req->r_t.paused)
+ send_request(req);
+ } else {
+ cancel_linger_request(req);
}
}
- reset_changed_osds(osdc);
- mutex_unlock(&osdc->request_mutex);
- if (needmap) {
- dout("%d requests for down osds, need new map\n", needmap);
- ceph_monc_request_next_osdmap(&osdc->client->monc);
+ list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
+ if (!osd_homeless(lreq->osd))
+ send_linger(lreq);
+
+ list_del_init(&lreq->scan_item);
}
}
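+
+/*
+ * Two-phase resend: scan_requests() collects affected requests while
+ * the maps are processed -- plain requests are unlinked into
+ * @need_resend, lingers stay linked and are queued on
+ * @need_resend_linger -- and kick_requests() then re-links and resends
+ * them.  Linger register/ping requests are resent via send_linger()
+ * rather than as plain requests, so reconnect handling applies.
+ */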
-
/*
* Process updated osd map.
*
@@ -2115,27 +3153,31 @@ static void kick_requests(struct ceph_osd_client *osdc, bool force_resend,
*/
void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
{
- void *p, *end, *next;
+ void *p = msg->front.iov_base;
+ void *const end = p + msg->front.iov_len;
u32 nr_maps, maplen;
u32 epoch;
- struct ceph_osdmap *newmap = NULL, *oldmap;
- int err;
struct ceph_fsid fsid;
- bool was_full;
+ struct rb_root need_resend = RB_ROOT;
+ LIST_HEAD(need_resend_linger);
+ bool handled_incremental = false;
+ bool was_pauserd, was_pausewr;
+ bool pauserd, pausewr;
+ int err;
- dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
- p = msg->front.iov_base;
- end = p + msg->front.iov_len;
+ dout("%s have %u\n", __func__, osdc->osdmap->epoch);
+ down_write(&osdc->lock);
/* verify fsid */
ceph_decode_need(&p, end, sizeof(fsid), bad);
ceph_decode_copy(&p, &fsid, sizeof(fsid));
if (ceph_check_fsid(osdc->client, &fsid) < 0)
- return;
+ goto bad;
- down_write(&osdc->map_sem);
-
- was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
+ was_pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
+ was_pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
+ ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+ have_pool_full(osdc);
/* incremental maps */
ceph_decode_32_safe(&p, end, nr_maps, bad);
@@ -2145,34 +3187,23 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
epoch = ceph_decode_32(&p);
maplen = ceph_decode_32(&p);
ceph_decode_need(&p, end, maplen, bad);
- next = p + maplen;
- if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
+ if (osdc->osdmap->epoch &&
+ osdc->osdmap->epoch + 1 == epoch) {
dout("applying incremental map %u len %d\n",
epoch, maplen);
- newmap = osdmap_apply_incremental(&p, next,
- osdc->osdmap,
- &osdc->client->msgr);
- if (IS_ERR(newmap)) {
- err = PTR_ERR(newmap);
+ err = handle_one_map(osdc, p, p + maplen, true,
+ &need_resend, &need_resend_linger);
+ if (err)
goto bad;
- }
- BUG_ON(!newmap);
- if (newmap != osdc->osdmap) {
- ceph_osdmap_destroy(osdc->osdmap);
- osdc->osdmap = newmap;
- }
- was_full = was_full ||
- ceph_osdmap_flag(osdc->osdmap,
- CEPH_OSDMAP_FULL);
- kick_requests(osdc, 0, was_full);
+ handled_incremental = true;
} else {
dout("ignoring incremental map %u len %d\n",
epoch, maplen);
}
- p = next;
+ p += maplen;
nr_maps--;
}
- if (newmap)
+ if (handled_incremental)
goto done;
/* full maps */
@@ -2186,455 +3217,647 @@ void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
if (nr_maps > 1) {
dout("skipping non-latest full map %u len %d\n",
epoch, maplen);
- } else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
+ } else if (osdc->osdmap->epoch >= epoch) {
dout("skipping full map %u len %d, "
"older than our %u\n", epoch, maplen,
osdc->osdmap->epoch);
} else {
- int skipped_map = 0;
-
dout("taking full map %u len %d\n", epoch, maplen);
- newmap = ceph_osdmap_decode(&p, p+maplen);
- if (IS_ERR(newmap)) {
- err = PTR_ERR(newmap);
+ err = handle_one_map(osdc, p, p + maplen, false,
+ &need_resend, &need_resend_linger);
+ if (err)
goto bad;
- }
- BUG_ON(!newmap);
- oldmap = osdc->osdmap;
- osdc->osdmap = newmap;
- if (oldmap) {
- if (oldmap->epoch + 1 < newmap->epoch)
- skipped_map = 1;
- ceph_osdmap_destroy(oldmap);
- }
- was_full = was_full ||
- ceph_osdmap_flag(osdc->osdmap,
- CEPH_OSDMAP_FULL);
- kick_requests(osdc, skipped_map, was_full);
}
p += maplen;
nr_maps--;
}
- if (!osdc->osdmap)
- goto bad;
done:
- downgrade_write(&osdc->map_sem);
- ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
- osdc->osdmap->epoch);
-
/*
* subscribe to subsequent osdmap updates if full to ensure
* we find out when we are no longer full and stop returning
* ENOSPC.
*/
- if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
- ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) ||
- ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR))
- ceph_monc_request_next_osdmap(&osdc->client->monc);
-
- mutex_lock(&osdc->request_mutex);
- __send_queued(osdc);
- mutex_unlock(&osdc->request_mutex);
- up_read(&osdc->map_sem);
+ pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
+ pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
+ ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
+ have_pool_full(osdc);
+ if (was_pauserd || was_pausewr || pauserd || pausewr)
+ maybe_request_map(osdc);
+
+ kick_requests(osdc, &need_resend, &need_resend_linger);
+
+ ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
+ osdc->osdmap->epoch);
+ up_write(&osdc->lock);
wake_up_all(&osdc->client->auth_wq);
return;
bad:
pr_err("osdc handle_map corrupt msg\n");
ceph_msg_dump(msg);
- up_write(&osdc->map_sem);
+ up_write(&osdc->lock);
}
/*
- * watch/notify callback event infrastructure
- *
- * These callbacks are used both for watch and notify operations.
+ * Resubmit requests pending on the given osd.
*/
-static void __release_event(struct kref *kref)
+static void kick_osd_requests(struct ceph_osd *osd)
{
- struct ceph_osd_event *event =
- container_of(kref, struct ceph_osd_event, kref);
+ struct rb_node *n;
- dout("__release_event %p\n", event);
- kfree(event);
-}
+ for (n = rb_first(&osd->o_requests); n; ) {
+ struct ceph_osd_request *req =
+ rb_entry(n, struct ceph_osd_request, r_node);
-static void get_event(struct ceph_osd_event *event)
-{
- kref_get(&event->kref);
-}
+ n = rb_next(n); /* cancel_linger_request() */
-void ceph_osdc_put_event(struct ceph_osd_event *event)
-{
- kref_put(&event->kref, __release_event);
+ if (!req->r_linger) {
+ if (!req->r_t.paused)
+ send_request(req);
+ } else {
+ cancel_linger_request(req);
+ }
+ }
+ for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
+ struct ceph_osd_linger_request *lreq =
+ rb_entry(n, struct ceph_osd_linger_request, node);
+
+ send_linger(lreq);
+ }
}
-EXPORT_SYMBOL(ceph_osdc_put_event);
-static void __insert_event(struct ceph_osd_client *osdc,
- struct ceph_osd_event *new)
+/*
+ * If the osd connection drops, we need to resubmit all requests.
+ */
+static void osd_fault(struct ceph_connection *con)
{
- struct rb_node **p = &osdc->event_tree.rb_node;
- struct rb_node *parent = NULL;
- struct ceph_osd_event *event = NULL;
+ struct ceph_osd *osd = con->private;
+ struct ceph_osd_client *osdc = osd->o_osdc;
- while (*p) {
- parent = *p;
- event = rb_entry(parent, struct ceph_osd_event, node);
- if (new->cookie < event->cookie)
- p = &(*p)->rb_left;
- else if (new->cookie > event->cookie)
- p = &(*p)->rb_right;
- else
- BUG();
+ dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
+
+ down_write(&osdc->lock);
+ if (!osd_registered(osd)) {
+ dout("%s osd%d unknown\n", __func__, osd->o_osd);
+ goto out_unlock;
}
- rb_link_node(&new->node, parent, p);
- rb_insert_color(&new->node, &osdc->event_tree);
+ if (!reopen_osd(osd))
+ kick_osd_requests(osd);
+ maybe_request_map(osdc);
+
+out_unlock:
+ up_write(&osdc->lock);
}
-static struct ceph_osd_event *__find_event(struct ceph_osd_client *osdc,
- u64 cookie)
+/*
+ * Process osd watch notifications
+ */
+static void handle_watch_notify(struct ceph_osd_client *osdc,
+ struct ceph_msg *msg)
{
- struct rb_node **p = &osdc->event_tree.rb_node;
- struct rb_node *parent = NULL;
- struct ceph_osd_event *event = NULL;
+ void *p = msg->front.iov_base;
+ void *const end = p + msg->front.iov_len;
+ struct ceph_osd_linger_request *lreq;
+ struct linger_work *lwork;
+ u8 proto_ver, opcode;
+ u64 cookie, notify_id;
+ u64 notifier_id = 0;
+ s32 return_code = 0;
+ void *payload = NULL;
+ u32 payload_len = 0;
- while (*p) {
- parent = *p;
- event = rb_entry(parent, struct ceph_osd_event, node);
- if (cookie < event->cookie)
- p = &(*p)->rb_left;
- else if (cookie > event->cookie)
- p = &(*p)->rb_right;
- else
- return event;
+ ceph_decode_8_safe(&p, end, proto_ver, bad);
+ ceph_decode_8_safe(&p, end, opcode, bad);
+ ceph_decode_64_safe(&p, end, cookie, bad);
+ p += 8; /* skip ver */
+ ceph_decode_64_safe(&p, end, notify_id, bad);
+
+ if (proto_ver >= 1) {
+ ceph_decode_32_safe(&p, end, payload_len, bad);
+ ceph_decode_need(&p, end, payload_len, bad);
+ payload = p;
+ p += payload_len;
}
- return NULL;
-}
-static void __remove_event(struct ceph_osd_event *event)
-{
- struct ceph_osd_client *osdc = event->osdc;
+ if (le16_to_cpu(msg->hdr.version) >= 2)
+ ceph_decode_32_safe(&p, end, return_code, bad);
- if (!RB_EMPTY_NODE(&event->node)) {
- dout("__remove_event removed %p\n", event);
- rb_erase(&event->node, &osdc->event_tree);
- ceph_osdc_put_event(event);
+ if (le16_to_cpu(msg->hdr.version) >= 3)
+ ceph_decode_64_safe(&p, end, notifier_id, bad);
+
+ down_read(&osdc->lock);
+ lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
+ if (!lreq) {
+ dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
+ cookie);
+ goto out_unlock_osdc;
+ }
+
+ mutex_lock(&lreq->lock);
+ dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
+ opcode, cookie, lreq, lreq->is_watch);
+ if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
+ if (!lreq->last_error) {
+ lreq->last_error = -ENOTCONN;
+ queue_watch_error(lreq);
+ }
+ } else if (!lreq->is_watch) {
+ /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
+ if (lreq->notify_id && lreq->notify_id != notify_id) {
+ dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
+ lreq->notify_id, notify_id);
+ } else if (!completion_done(&lreq->notify_finish_wait)) {
+ struct ceph_msg_data *data =
+ list_first_entry_or_null(&msg->data,
+ struct ceph_msg_data,
+ links);
+
+ if (data) {
+ if (lreq->preply_pages) {
+ WARN_ON(data->type !=
+ CEPH_MSG_DATA_PAGES);
+ *lreq->preply_pages = data->pages;
+ *lreq->preply_len = data->length;
+ } else {
+ ceph_release_page_vector(data->pages,
+ calc_pages_for(0, data->length));
+ }
+ }
+ lreq->notify_finish_error = return_code;
+ complete_all(&lreq->notify_finish_wait);
+ }
} else {
- dout("__remove_event didn't remove %p\n", event);
+ /* CEPH_WATCH_EVENT_NOTIFY */
+ lwork = lwork_alloc(lreq, do_watch_notify);
+ if (!lwork) {
+ pr_err("failed to allocate notify-lwork\n");
+ goto out_unlock_lreq;
+ }
+
+ lwork->notify.notify_id = notify_id;
+ lwork->notify.notifier_id = notifier_id;
+ lwork->notify.payload = payload;
+ lwork->notify.payload_len = payload_len;
+ lwork->notify.msg = ceph_msg_get(msg);
+ lwork_queue(lwork);
}
+
+out_unlock_lreq:
+ mutex_unlock(&lreq->lock);
+out_unlock_osdc:
+ up_read(&osdc->lock);
+ return;
+
+bad:
+ pr_err("osdc handle_watch_notify corrupt msg\n");
}
-int ceph_osdc_create_event(struct ceph_osd_client *osdc,
- void (*event_cb)(u64, u64, u8, void *),
- void *data, struct ceph_osd_event **pevent)
+/*
+ * Register request, send initial attempt.
+ */
+int ceph_osdc_start_request(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req,
+ bool nofail)
{
- struct ceph_osd_event *event;
-
- event = kmalloc(sizeof(*event), GFP_NOIO);
- if (!event)
- return -ENOMEM;
+ down_read(&osdc->lock);
+ submit_request(req, false);
+ up_read(&osdc->lock);
- dout("create_event %p\n", event);
- event->cb = event_cb;
- event->one_shot = 0;
- event->data = data;
- event->osdc = osdc;
- INIT_LIST_HEAD(&event->osd_node);
- RB_CLEAR_NODE(&event->node);
- kref_init(&event->kref); /* one ref for us */
- kref_get(&event->kref); /* one ref for the caller */
-
- spin_lock(&osdc->event_lock);
- event->cookie = ++osdc->event_count;
- __insert_event(osdc, event);
- spin_unlock(&osdc->event_lock);
-
- *pevent = event;
return 0;
}
-EXPORT_SYMBOL(ceph_osdc_create_event);
+EXPORT_SYMBOL(ceph_osdc_start_request);
-void ceph_osdc_cancel_event(struct ceph_osd_event *event)
+/*
+ * Unregister a registered request. The request is not completed (i.e.
+ * no callbacks or wakeups) - higher layers are supposed to know what
+ * they are canceling.
+ */
+void ceph_osdc_cancel_request(struct ceph_osd_request *req)
{
- struct ceph_osd_client *osdc = event->osdc;
+ struct ceph_osd_client *osdc = req->r_osdc;
- dout("cancel_event %p\n", event);
- spin_lock(&osdc->event_lock);
- __remove_event(event);
- spin_unlock(&osdc->event_lock);
- ceph_osdc_put_event(event); /* caller's */
+ down_write(&osdc->lock);
+ if (req->r_osd)
+ cancel_request(req);
+ up_write(&osdc->lock);
}
-EXPORT_SYMBOL(ceph_osdc_cancel_event);
-
+EXPORT_SYMBOL(ceph_osdc_cancel_request);
-static void do_event_work(struct work_struct *work)
+/*
+ * @timeout: in jiffies, 0 means "wait forever"
+ */
+static int wait_request_timeout(struct ceph_osd_request *req,
+ unsigned long timeout)
{
- struct ceph_osd_event_work *event_work =
- container_of(work, struct ceph_osd_event_work, work);
- struct ceph_osd_event *event = event_work->event;
- u64 ver = event_work->ver;
- u64 notify_id = event_work->notify_id;
- u8 opcode = event_work->opcode;
+ long left;
- dout("do_event_work completing %p\n", event);
- event->cb(ver, notify_id, opcode, event->data);
- dout("do_event_work completed %p\n", event);
- ceph_osdc_put_event(event);
- kfree(event_work);
+ dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
+ left = wait_for_completion_killable_timeout(&req->r_completion,
+ ceph_timeout_jiffies(timeout));
+ if (left <= 0) {
+ left = left ?: -ETIMEDOUT;
+ ceph_osdc_cancel_request(req);
+
+ /* kludge - need to wake ceph_osdc_sync() */
+ complete_all(&req->r_safe_completion);
+ } else {
+ left = req->r_result; /* completed */
+ }
+
+ return left;
}
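+
+/*
+ * Hypothetical caller sketch (illustration only):
+ *
+ *	ceph_osdc_start_request(osdc, req, false);
+ *	ret = wait_request_timeout(req, 0);	// 0 -> wait forever
+ *
+ * ceph_timeout_jiffies() maps a 0 timeout to MAX_SCHEDULE_TIMEOUT; on
+ * timeout (-ETIMEDOUT) or a fatal signal the request is canceled.
+ */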
+/*
+ * wait for a request to complete
+ */
+int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req)
+{
+ return wait_request_timeout(req, 0);
+}
+EXPORT_SYMBOL(ceph_osdc_wait_request);
/*
- * Process osd watch notifications
+ * sync - wait for all in-flight requests to flush. avoid starvation.
*/
-static void handle_watch_notify(struct ceph_osd_client *osdc,
- struct ceph_msg *msg)
+void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
- void *p, *end;
- u8 proto_ver;
- u64 cookie, ver, notify_id;
- u8 opcode;
- struct ceph_osd_event *event;
- struct ceph_osd_event_work *event_work;
+ struct rb_node *n, *p;
+ u64 last_tid = atomic64_read(&osdc->last_tid);
- p = msg->front.iov_base;
- end = p + msg->front.iov_len;
+again:
+ down_read(&osdc->lock);
+ for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
+ struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
- ceph_decode_8_safe(&p, end, proto_ver, bad);
- ceph_decode_8_safe(&p, end, opcode, bad);
- ceph_decode_64_safe(&p, end, cookie, bad);
- ceph_decode_64_safe(&p, end, ver, bad);
- ceph_decode_64_safe(&p, end, notify_id, bad);
+ mutex_lock(&osd->lock);
+ for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
+ struct ceph_osd_request *req =
+ rb_entry(p, struct ceph_osd_request, r_node);
+
+ if (req->r_tid > last_tid)
+ break;
+
+ if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
+ continue;
- spin_lock(&osdc->event_lock);
- event = __find_event(osdc, cookie);
- if (event) {
- BUG_ON(event->one_shot);
- get_event(event);
- }
- spin_unlock(&osdc->event_lock);
- dout("handle_watch_notify cookie %lld ver %lld event %p\n",
- cookie, ver, event);
- if (event) {
- event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
- if (!event_work) {
- pr_err("couldn't allocate event_work\n");
- ceph_osdc_put_event(event);
- return;
+ ceph_osdc_get_request(req);
+ mutex_unlock(&osd->lock);
+ up_read(&osdc->lock);
+ dout("%s waiting on req %p tid %llu last_tid %llu\n",
+ __func__, req, req->r_tid, last_tid);
+ wait_for_completion(&req->r_safe_completion);
+ ceph_osdc_put_request(req);
+ goto again;
}
- INIT_WORK(&event_work->work, do_event_work);
- event_work->event = event;
- event_work->ver = ver;
- event_work->notify_id = notify_id;
- event_work->opcode = opcode;
- queue_work(osdc->notify_wq, &event_work->work);
+ mutex_unlock(&osd->lock);
}
- return;
+ up_read(&osdc->lock);
+ dout("%s done last_tid %llu\n", __func__, last_tid);
+}
+EXPORT_SYMBOL(ceph_osdc_sync);
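+
+/*
+ * Starvation avoidance: last_tid is sampled once, up front, so only
+ * writes with tid <= last_tid are waited on -- requests submitted after
+ * the sync started are not.  The scan restarts from the top after each
+ * wait because both locks are dropped while waiting.
+ */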
-bad:
- pr_err("osdc handle_watch_notify corrupt msg\n");
+static struct ceph_osd_request *
+alloc_linger_request(struct ceph_osd_linger_request *lreq)
+{
+ struct ceph_osd_request *req;
+
+ req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
+ if (!req)
+ return NULL;
+
+ ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
+ ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
+
+ if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
+ ceph_osdc_put_request(req);
+ return NULL;
+ }
+
+ return req;
}
/*
- * build new request AND message
- *
+ * Returns a handle, caller owns a ref.
*/
-void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off,
- struct ceph_snap_context *snapc, u64 snap_id,
- struct timespec *mtime)
-{
- struct ceph_msg *msg = req->r_request;
- void *p;
- size_t msg_size;
- int flags = req->r_flags;
- u64 data_len;
- unsigned int i;
-
- req->r_snapid = snap_id;
- req->r_snapc = ceph_get_snap_context(snapc);
-
- /* encode request */
- msg->hdr.version = cpu_to_le16(4);
-
- p = msg->front.iov_base;
- ceph_encode_32(&p, 1); /* client_inc is always 1 */
- req->r_request_osdmap_epoch = p;
- p += 4;
- req->r_request_flags = p;
- p += 4;
- if (req->r_flags & CEPH_OSD_FLAG_WRITE)
- ceph_encode_timespec(p, mtime);
- p += sizeof(struct ceph_timespec);
- req->r_request_reassert_version = p;
- p += sizeof(struct ceph_eversion); /* will get filled in */
-
- /* oloc */
- ceph_encode_8(&p, 4);
- ceph_encode_8(&p, 4);
- ceph_encode_32(&p, 8 + 4 + 4);
- req->r_request_pool = p;
- p += 8;
- ceph_encode_32(&p, -1); /* preferred */
- ceph_encode_32(&p, 0); /* key len */
+struct ceph_osd_linger_request *
+ceph_osdc_watch(struct ceph_osd_client *osdc,
+ struct ceph_object_id *oid,
+ struct ceph_object_locator *oloc,
+ rados_watchcb2_t wcb,
+ rados_watcherrcb_t errcb,
+ void *data)
+{
+ struct ceph_osd_linger_request *lreq;
+ int ret;
- ceph_encode_8(&p, 1);
- req->r_request_pgid = p;
- p += 8 + 4;
- ceph_encode_32(&p, -1); /* preferred */
+ lreq = linger_alloc(osdc);
+ if (!lreq)
+ return ERR_PTR(-ENOMEM);
- /* oid */
- ceph_encode_32(&p, req->r_base_oid.name_len);
- memcpy(p, req->r_base_oid.name, req->r_base_oid.name_len);
- dout("oid '%.*s' len %d\n", req->r_base_oid.name_len,
- req->r_base_oid.name, req->r_base_oid.name_len);
- p += req->r_base_oid.name_len;
-
- /* ops--can imply data */
- ceph_encode_16(&p, (u16)req->r_num_ops);
- data_len = 0;
- for (i = 0; i < req->r_num_ops; i++) {
- data_len += osd_req_encode_op(req, p, i);
- p += sizeof(struct ceph_osd_op);
+ lreq->is_watch = true;
+ lreq->wcb = wcb;
+ lreq->errcb = errcb;
+ lreq->data = data;
+ lreq->watch_valid_thru = jiffies;
+
+ ceph_oid_copy(&lreq->t.base_oid, oid);
+ ceph_oloc_copy(&lreq->t.base_oloc, oloc);
+ lreq->t.flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
+ lreq->mtime = CURRENT_TIME;
+
+ lreq->reg_req = alloc_linger_request(lreq);
+ if (!lreq->reg_req) {
+ ret = -ENOMEM;
+ goto err_put_lreq;
}
- /* snaps */
- ceph_encode_64(&p, req->r_snapid);
- ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0);
- ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0);
- if (req->r_snapc) {
- for (i = 0; i < snapc->num_snaps; i++) {
- ceph_encode_64(&p, req->r_snapc->snaps[i]);
- }
+ lreq->ping_req = alloc_linger_request(lreq);
+ if (!lreq->ping_req) {
+ ret = -ENOMEM;
+ goto err_put_lreq;
}
- req->r_request_attempts = p;
- p += 4;
-
- /* data */
- if (flags & CEPH_OSD_FLAG_WRITE) {
- u16 data_off;
-
- /*
- * The header "data_off" is a hint to the receiver
- * allowing it to align received data into its
- * buffers such that there's no need to re-copy
- * it before writing it to disk (direct I/O).
- */
- data_off = (u16) (off & 0xffff);
- req->r_request->hdr.data_off = cpu_to_le16(data_off);
+ down_write(&osdc->lock);
+ linger_register(lreq); /* before osd_req_op_* */
+ osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
+ CEPH_OSD_WATCH_OP_WATCH);
+ osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
+ CEPH_OSD_WATCH_OP_PING);
+ linger_submit(lreq);
+ up_write(&osdc->lock);
+
+ ret = linger_reg_commit_wait(lreq);
+ if (ret) {
+ linger_cancel(lreq);
+ goto err_put_lreq;
}
- req->r_request->hdr.data_len = cpu_to_le32(data_len);
- BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
- msg_size = p - msg->front.iov_base;
- msg->front.iov_len = msg_size;
- msg->hdr.front_len = cpu_to_le32(msg_size);
+ return lreq;
- dout("build_request msg_size was %d\n", (int)msg_size);
+err_put_lreq:
+ linger_put(lreq);
+ return ERR_PTR(ret);
}
-EXPORT_SYMBOL(ceph_osdc_build_request);
+EXPORT_SYMBOL(ceph_osdc_watch);
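+
+/*
+ * Hypothetical usage sketch (callback names invented):
+ *
+ *	handle = ceph_osdc_watch(osdc, &oid, &oloc, my_wcb, my_errcb, p);
+ *	if (IS_ERR(handle))
+ *		return PTR_ERR(handle);
+ *	...
+ *	ceph_osdc_unwatch(osdc, handle);	// releases the ref
+ */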
/*
- * Register request, send initial attempt.
+ * Releases a ref.
+ *
+ * Times out after mount_timeout to preserve rbd unmap behaviour
+ * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
+ * with mount_timeout").
*/
-int ceph_osdc_start_request(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req,
- bool nofail)
+int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
+ struct ceph_osd_linger_request *lreq)
{
- int rc;
+ struct ceph_options *opts = osdc->client->options;
+ struct ceph_osd_request *req;
+ int ret;
- down_read(&osdc->map_sem);
- mutex_lock(&osdc->request_mutex);
+ req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
+ if (!req)
+ return -ENOMEM;
+
+ ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
+ ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
+ req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
+ req->r_mtime = CURRENT_TIME;
+ osd_req_op_watch_init(req, 0, lreq->linger_id,
+ CEPH_OSD_WATCH_OP_UNWATCH);
- rc = __ceph_osdc_start_request(osdc, req, nofail);
+ ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
+ if (ret)
+ goto out_put_req;
- mutex_unlock(&osdc->request_mutex);
- up_read(&osdc->map_sem);
+ ceph_osdc_start_request(osdc, req, false);
+ linger_cancel(lreq);
+ linger_put(lreq);
+ ret = wait_request_timeout(req, opts->mount_timeout);
- return rc;
+out_put_req:
+ ceph_osdc_put_request(req);
+ return ret;
}
-EXPORT_SYMBOL(ceph_osdc_start_request);
+EXPORT_SYMBOL(ceph_osdc_unwatch);
-/*
- * Unregister a registered request. The request is not completed (i.e.
- * no callbacks or wakeups) - higher layers are supposed to know what
- * they are canceling.
- */
-void ceph_osdc_cancel_request(struct ceph_osd_request *req)
+static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
+ u64 notify_id, u64 cookie, void *payload,
+ size_t payload_len)
{
- struct ceph_osd_client *osdc = req->r_osdc;
+ struct ceph_osd_req_op *op;
+ struct ceph_pagelist *pl;
+ int ret;
- mutex_lock(&osdc->request_mutex);
- if (req->r_linger)
- __unregister_linger_request(osdc, req);
- __unregister_request(osdc, req);
- mutex_unlock(&osdc->request_mutex);
+ op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
+
+ pl = kmalloc(sizeof(*pl), GFP_NOIO);
+ if (!pl)
+ return -ENOMEM;
+
+ ceph_pagelist_init(pl);
+ ret = ceph_pagelist_encode_64(pl, notify_id);
+ ret |= ceph_pagelist_encode_64(pl, cookie);
+ if (payload) {
+ ret |= ceph_pagelist_encode_32(pl, payload_len);
+ ret |= ceph_pagelist_append(pl, payload, payload_len);
+ } else {
+ ret |= ceph_pagelist_encode_32(pl, 0);
+ }
+ if (ret) {
+ ceph_pagelist_release(pl);
+ return -ENOMEM;
+ }
- dout("%s %p tid %llu canceled\n", __func__, req, req->r_tid);
+ ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
+ op->indata_len = pl->length;
+ return 0;
}
-EXPORT_SYMBOL(ceph_osdc_cancel_request);
-/*
- * wait for a request to complete
- */
-int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
- struct ceph_osd_request *req)
+int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
+ struct ceph_object_id *oid,
+ struct ceph_object_locator *oloc,
+ u64 notify_id,
+ u64 cookie,
+ void *payload,
+ size_t payload_len)
{
- int rc;
+ struct ceph_osd_request *req;
+ int ret;
- dout("%s %p tid %llu\n", __func__, req, req->r_tid);
+ req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
+ if (!req)
+ return -ENOMEM;
- rc = wait_for_completion_interruptible(&req->r_completion);
- if (rc < 0) {
- dout("%s %p tid %llu interrupted\n", __func__, req, req->r_tid);
- ceph_osdc_cancel_request(req);
- complete_request(req);
- return rc;
+ ceph_oid_copy(&req->r_base_oid, oid);
+ ceph_oloc_copy(&req->r_base_oloc, oloc);
+ req->r_flags = CEPH_OSD_FLAG_READ;
+
+ ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
+ if (ret)
+ goto out_put_req;
+
+ ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
+ payload_len);
+ if (ret)
+ goto out_put_req;
+
+ ceph_osdc_start_request(osdc, req, false);
+ ret = ceph_osdc_wait_request(osdc, req);
+
+out_put_req:
+ ceph_osdc_put_request(req);
+ return ret;
+}
+EXPORT_SYMBOL(ceph_osdc_notify_ack);
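
For reference, the NOTIFY_ACK request_data pagelist assembled above has this little-endian layout (derived directly from the encode calls; there is no separate wire spec in this patch):

    offset  size  field
         0     8  notify_id
         8     8  cookie
        16     4  payload_len (0 when no payload)
        20   var  payload bytes
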
+
+static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
+ u64 cookie, u32 prot_ver, u32 timeout,
+ void *payload, size_t payload_len)
+{
+ struct ceph_osd_req_op *op;
+ struct ceph_pagelist *pl;
+ int ret;
+
+ op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
+ op->notify.cookie = cookie;
+
+ pl = kmalloc(sizeof(*pl), GFP_NOIO);
+ if (!pl)
+ return -ENOMEM;
+
+ ceph_pagelist_init(pl);
+ ret = ceph_pagelist_encode_32(pl, prot_ver);
+ ret |= ceph_pagelist_encode_32(pl, timeout);
+ ret |= ceph_pagelist_encode_32(pl, payload_len);
+ ret |= ceph_pagelist_append(pl, payload, payload_len);
+ if (ret) {
+ ceph_pagelist_release(pl);
+ return -ENOMEM;
}
- dout("%s %p tid %llu result %d\n", __func__, req, req->r_tid,
- req->r_result);
- return req->r_result;
+ ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
+ op->indata_len = pl->length;
+ return 0;
}
-EXPORT_SYMBOL(ceph_osdc_wait_request);
/*
- * sync - wait for all in-flight requests to flush. avoid starvation.
+ * @timeout: in seconds
+ *
+ * @preply_{pages,len} are initialized both on success and error.
+ * The caller is responsible for:
+ *
+ * ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
*/
-void ceph_osdc_sync(struct ceph_osd_client *osdc)
+int ceph_osdc_notify(struct ceph_osd_client *osdc,
+ struct ceph_object_id *oid,
+ struct ceph_object_locator *oloc,
+ void *payload,
+ size_t payload_len,
+ u32 timeout,
+ struct page ***preply_pages,
+ size_t *preply_len)
{
- struct ceph_osd_request *req;
- u64 last_tid, next_tid = 0;
+ struct ceph_osd_linger_request *lreq;
+ struct page **pages;
+ int ret;
- mutex_lock(&osdc->request_mutex);
- last_tid = osdc->last_tid;
- while (1) {
- req = __lookup_request_ge(osdc, next_tid);
- if (!req)
- break;
- if (req->r_tid > last_tid)
- break;
+ WARN_ON(!timeout);
+ if (preply_pages) {
+ *preply_pages = NULL;
+ *preply_len = 0;
+ }
- next_tid = req->r_tid + 1;
- if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
- continue;
+ lreq = linger_alloc(osdc);
+ if (!lreq)
+ return -ENOMEM;
- ceph_osdc_get_request(req);
- mutex_unlock(&osdc->request_mutex);
- dout("sync waiting on tid %llu (last is %llu)\n",
- req->r_tid, last_tid);
- wait_for_completion(&req->r_safe_completion);
- mutex_lock(&osdc->request_mutex);
- ceph_osdc_put_request(req);
+ lreq->preply_pages = preply_pages;
+ lreq->preply_len = preply_len;
+
+ ceph_oid_copy(&lreq->t.base_oid, oid);
+ ceph_oloc_copy(&lreq->t.base_oloc, oloc);
+ lreq->t.flags = CEPH_OSD_FLAG_READ;
+
+ lreq->reg_req = alloc_linger_request(lreq);
+ if (!lreq->reg_req) {
+ ret = -ENOMEM;
+ goto out_put_lreq;
}
- mutex_unlock(&osdc->request_mutex);
- dout("sync done (thru tid %llu)\n", last_tid);
+
+ /* for notify_id */
+ pages = ceph_alloc_page_vector(1, GFP_NOIO);
+ if (IS_ERR(pages)) {
+ ret = PTR_ERR(pages);
+ goto out_put_lreq;
+ }
+
+ down_write(&osdc->lock);
+ linger_register(lreq); /* before osd_req_op_* */
+ ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
+ timeout, payload, payload_len);
+ if (ret) {
+ linger_unregister(lreq);
+ up_write(&osdc->lock);
+ ceph_release_page_vector(pages, 1);
+ goto out_put_lreq;
+ }
+ ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
+ response_data),
+ pages, PAGE_SIZE, 0, false, true);
+ linger_submit(lreq);
+ up_write(&osdc->lock);
+
+ ret = linger_reg_commit_wait(lreq);
+ if (!ret)
+ ret = linger_notify_finish_wait(lreq);
+ else
+ dout("lreq %p failed to initiate notify %d\n", lreq, ret);
+
+ linger_cancel(lreq);
+out_put_lreq:
+ linger_put(lreq);
+ return ret;
+}
+EXPORT_SYMBOL(ceph_osdc_notify);
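
Because @preply_{pages,len} are filled in on error as well, a caller can release the reply unconditionally. A hypothetical notifier, assuming a small constant payload and a 5-second timeout:

    static int my_notify(struct ceph_osd_client *osdc,
                         struct ceph_object_id *oid,
                         struct ceph_object_locator *oloc)
    {
            static const char payload[] = "notify";
            struct page **reply_pages;
            size_t reply_len;
            int ret;

            ret = ceph_osdc_notify(osdc, oid, oloc, (void *)payload,
                                   sizeof(payload), 5,
                                   &reply_pages, &reply_len);

            /* ... inspect reply_pages on success ... */

            /* initialized on both success and error - safe to release */
            ceph_release_page_vector(reply_pages,
                                     calc_pages_for(0, reply_len));
            return ret;
    }
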
+
+/*
+ * Return the number of milliseconds since the watch was last
+ * confirmed, or an error. If there is an error, the watch is no
+ * longer valid, and should be destroyed with ceph_osdc_unwatch().
+ */
+int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
+ struct ceph_osd_linger_request *lreq)
+{
+ unsigned long stamp, age;
+ int ret;
+
+ down_read(&osdc->lock);
+ mutex_lock(&lreq->lock);
+ stamp = lreq->watch_valid_thru;
+ if (!list_empty(&lreq->pending_lworks)) {
+ struct linger_work *lwork =
+ list_first_entry(&lreq->pending_lworks,
+ struct linger_work,
+ pending_item);
+
+ if (time_before(lwork->queued_stamp, stamp))
+ stamp = lwork->queued_stamp;
+ }
+ age = jiffies - stamp;
+ dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
+ lreq, lreq->linger_id, age, lreq->last_error);
+ /* we are truncating to msecs, so return a safe upper bound */
+ ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
+
+ mutex_unlock(&lreq->lock);
+ up_read(&osdc->lock);
+ return ret;
}
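
A hypothetical keepalive check built on top of this (the 30-second threshold is illustrative, not from this patch):

    static bool my_watch_alive(struct ceph_osd_client *osdc,
                               struct ceph_osd_linger_request *handle)
    {
            int ret = ceph_osdc_watch_check(osdc, handle);

            /* < 0: watch is invalid, destroy with ceph_osdc_unwatch() */
            /* > 0: ms since the watch was last confirmed alive */
            return ret >= 0 && ret < 30 * MSEC_PER_SEC;
    }
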
-EXPORT_SYMBOL(ceph_osdc_sync);
/*
* Call all pending notify callbacks - for use after a watch is
@@ -2646,6 +3869,13 @@ void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
}
EXPORT_SYMBOL(ceph_osdc_flush_notifies);
+void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
+{
+ down_read(&osdc->lock);
+ maybe_request_map(osdc);
+ up_read(&osdc->lock);
+}
+EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
/*
* init, shutdown
@@ -2656,43 +3886,35 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
dout("init\n");
osdc->client = client;
- osdc->osdmap = NULL;
- init_rwsem(&osdc->map_sem);
- init_completion(&osdc->map_waiters);
- osdc->last_requested_map = 0;
- mutex_init(&osdc->request_mutex);
- osdc->last_tid = 0;
+ init_rwsem(&osdc->lock);
osdc->osds = RB_ROOT;
INIT_LIST_HEAD(&osdc->osd_lru);
- osdc->requests = RB_ROOT;
- INIT_LIST_HEAD(&osdc->req_lru);
- INIT_LIST_HEAD(&osdc->req_unsent);
- INIT_LIST_HEAD(&osdc->req_notarget);
- INIT_LIST_HEAD(&osdc->req_linger);
- osdc->num_requests = 0;
+ spin_lock_init(&osdc->osd_lru_lock);
+ osd_init(&osdc->homeless_osd);
+ osdc->homeless_osd.o_osdc = osdc;
+ osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
+ osdc->linger_requests = RB_ROOT;
+ osdc->map_checks = RB_ROOT;
+ osdc->linger_map_checks = RB_ROOT;
INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
- spin_lock_init(&osdc->event_lock);
- osdc->event_tree = RB_ROOT;
- osdc->event_count = 0;
-
- schedule_delayed_work(&osdc->osds_timeout_work,
- round_jiffies_relative(osdc->client->options->osd_idle_ttl));
err = -ENOMEM;
+ osdc->osdmap = ceph_osdmap_alloc();
+ if (!osdc->osdmap)
+ goto out;
+
osdc->req_mempool = mempool_create_slab_pool(10,
ceph_osd_request_cache);
if (!osdc->req_mempool)
- goto out;
+ goto out_map;
err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
- OSD_OP_FRONT_LEN, 10, true,
- "osd_op");
+ PAGE_SIZE, 10, true, "osd_op");
if (err < 0)
goto out_mempool;
err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
- OSD_OPREPLY_FRONT_LEN, 10, true,
- "osd_op_reply");
+ PAGE_SIZE, 10, true, "osd_op_reply");
if (err < 0)
goto out_msgpool;
@@ -2701,6 +3923,11 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
if (!osdc->notify_wq)
goto out_msgpool_reply;
+ schedule_delayed_work(&osdc->timeout_work,
+ osdc->client->options->osd_keepalive_timeout);
+ schedule_delayed_work(&osdc->osds_timeout_work,
+ round_jiffies_relative(osdc->client->options->osd_idle_ttl));
+
return 0;
out_msgpool_reply:
@@ -2709,6 +3936,8 @@ out_msgpool:
ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
mempool_destroy(osdc->req_mempool);
+out_map:
+ ceph_osdmap_destroy(osdc->osdmap);
out:
return err;
}
@@ -2719,11 +3948,25 @@ void ceph_osdc_stop(struct ceph_osd_client *osdc)
destroy_workqueue(osdc->notify_wq);
cancel_delayed_work_sync(&osdc->timeout_work);
cancel_delayed_work_sync(&osdc->osds_timeout_work);
- if (osdc->osdmap) {
- ceph_osdmap_destroy(osdc->osdmap);
- osdc->osdmap = NULL;
+
+ down_write(&osdc->lock);
+ while (!RB_EMPTY_ROOT(&osdc->osds)) {
+ struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
+ struct ceph_osd, o_node);
+ close_osd(osd);
}
- remove_all_osds(osdc);
+ up_write(&osdc->lock);
+ WARN_ON(atomic_read(&osdc->homeless_osd.o_ref) != 1);
+ osd_cleanup(&osdc->homeless_osd);
+
+ WARN_ON(!list_empty(&osdc->osd_lru));
+ WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
+ WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
+ WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
+ WARN_ON(atomic_read(&osdc->num_requests));
+ WARN_ON(atomic_read(&osdc->num_homeless));
+
+ ceph_osdmap_destroy(osdc->osdmap);
mempool_destroy(osdc->req_mempool);
ceph_msgpool_destroy(&osdc->msgpool_op);
ceph_msgpool_destroy(&osdc->msgpool_op_reply);
@@ -2752,15 +3995,12 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc,
return PTR_ERR(req);
/* it may be a short read due to an object boundary */
-
osd_req_op_extent_osd_data_pages(req, 0,
pages, *plen, page_align, false, false);
dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
off, *plen, *plen, page_align);
- ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);
-
rc = ceph_osdc_start_request(osdc, req, false);
if (!rc)
rc = ceph_osdc_wait_request(osdc, req);
@@ -2786,7 +4026,6 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
int rc = 0;
int page_align = off & ~PAGE_MASK;
- BUG_ON(vino.snap != CEPH_NOSNAP); /* snapshots aren't writeable */
req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
CEPH_OSD_OP_WRITE,
CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
@@ -2800,8 +4039,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
false, false);
dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
- ceph_osdc_build_request(req, off, snapc, CEPH_NOSNAP, mtime);
-
+ req->r_mtime = *mtime;
rc = ceph_osdc_start_request(osdc, req, true);
if (!rc)
rc = ceph_osdc_wait_request(osdc, req);
@@ -2841,19 +4079,15 @@ EXPORT_SYMBOL(ceph_osdc_cleanup);
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
struct ceph_osd *osd = con->private;
- struct ceph_osd_client *osdc;
+ struct ceph_osd_client *osdc = osd->o_osdc;
int type = le16_to_cpu(msg->hdr.type);
- if (!osd)
- goto out;
- osdc = osd->o_osdc;
-
switch (type) {
case CEPH_MSG_OSD_MAP:
ceph_osdc_handle_map(osdc, msg);
break;
case CEPH_MSG_OSD_OPREPLY:
- handle_reply(osdc, msg);
+ handle_reply(osd, msg);
break;
case CEPH_MSG_WATCH_NOTIFY:
handle_watch_notify(osdc, msg);
@@ -2863,7 +4097,7 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
pr_err("received unknown message type %d %s\n", type,
ceph_msg_type_name(type));
}
-out:
+
ceph_msg_put(msg);
}
@@ -2878,21 +4112,27 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
{
struct ceph_osd *osd = con->private;
struct ceph_osd_client *osdc = osd->o_osdc;
- struct ceph_msg *m;
+ struct ceph_msg *m = NULL;
struct ceph_osd_request *req;
int front_len = le32_to_cpu(hdr->front_len);
int data_len = le32_to_cpu(hdr->data_len);
- u64 tid;
+ u64 tid = le64_to_cpu(hdr->tid);
- tid = le64_to_cpu(hdr->tid);
- mutex_lock(&osdc->request_mutex);
- req = __lookup_request(osdc, tid);
+ down_read(&osdc->lock);
+ if (!osd_registered(osd)) {
+ dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
+ *skip = 1;
+ goto out_unlock_osdc;
+ }
+ WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));
+
+ mutex_lock(&osd->lock);
+ req = lookup_request(&osd->o_requests, tid);
if (!req) {
dout("%s osd%d tid %llu unknown, skipping\n", __func__,
osd->o_osd, tid);
- m = NULL;
*skip = 1;
- goto out;
+ goto out_unlock_session;
}
ceph_msg_revoke_incoming(req->r_reply);
@@ -2904,7 +4144,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
false);
if (!m)
- goto out;
+ goto out_unlock_session;
ceph_msg_put(req->r_reply);
req->r_reply = m;
}
@@ -2915,14 +4155,49 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
req->r_reply->data_length);
m = NULL;
*skip = 1;
- goto out;
+ goto out_unlock_session;
}
m = ceph_msg_get(req->r_reply);
dout("get_reply tid %lld %p\n", tid, m);
-out:
- mutex_unlock(&osdc->request_mutex);
+out_unlock_session:
+ mutex_unlock(&osd->lock);
+out_unlock_osdc:
+ up_read(&osdc->lock);
+ return m;
+}
+
+/*
+ * TODO: switch to a msg-owned pagelist
+ */
+static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
+{
+ struct ceph_msg *m;
+ int type = le16_to_cpu(hdr->type);
+ u32 front_len = le32_to_cpu(hdr->front_len);
+ u32 data_len = le32_to_cpu(hdr->data_len);
+
+ m = ceph_msg_new(type, front_len, GFP_NOIO, false);
+ if (!m)
+ return NULL;
+
+ if (data_len) {
+ struct page **pages;
+ struct ceph_osd_data osd_data;
+
+ pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
+ GFP_NOIO);
+ if (IS_ERR(pages)) {
+ ceph_msg_put(m);
+ return NULL;
+ }
+
+ ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
+ false);
+ ceph_osdc_msg_data_add(m, &osd_data);
+ }
+
return m;
}
@@ -2932,18 +4207,17 @@ static struct ceph_msg *alloc_msg(struct ceph_connection *con,
{
struct ceph_osd *osd = con->private;
int type = le16_to_cpu(hdr->type);
- int front = le32_to_cpu(hdr->front_len);
*skip = 0;
switch (type) {
case CEPH_MSG_OSD_MAP:
case CEPH_MSG_WATCH_NOTIFY:
- return ceph_msg_new(type, front, GFP_NOFS, false);
+ return alloc_msg_with_page_vector(hdr);
case CEPH_MSG_OSD_OPREPLY:
return get_reply(con, hdr, skip);
default:
- pr_info("alloc_msg unexpected msg type %d from osd%d\n", type,
- osd->o_osd);
+ pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
+ osd->o_osd, type);
*skip = 1;
return NULL;
}
@@ -3047,5 +4321,5 @@ static const struct ceph_connection_operations osd_con_ops = {
.alloc_msg = alloc_msg,
.sign_message = osd_sign_message,
.check_message_signature = osd_check_message_signature,
- .fault = osd_reset,
+ .fault = osd_fault,
};
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 243574c8cf33..cde52e94732f 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -380,23 +380,24 @@ bad:
return ERR_PTR(err);
}
-/*
- * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
- * to a set of osds) and primary_temp (explicit primary setting)
- */
-static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
+int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs)
{
- if (l.pool < r.pool)
+ if (lhs->pool < rhs->pool)
return -1;
- if (l.pool > r.pool)
+ if (lhs->pool > rhs->pool)
return 1;
- if (l.seed < r.seed)
+ if (lhs->seed < rhs->seed)
return -1;
- if (l.seed > r.seed)
+ if (lhs->seed > rhs->seed)
return 1;
+
return 0;
}
+/*
+ * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
+ * to a set of osds) and primary_temp (explicit primary setting)
+ */
static int __insert_pg_mapping(struct ceph_pg_mapping *new,
struct rb_root *root)
{
@@ -409,7 +410,7 @@ static int __insert_pg_mapping(struct ceph_pg_mapping *new,
while (*p) {
parent = *p;
pg = rb_entry(parent, struct ceph_pg_mapping, node);
- c = pgid_cmp(new->pgid, pg->pgid);
+ c = ceph_pg_compare(&new->pgid, &pg->pgid);
if (c < 0)
p = &(*p)->rb_left;
else if (c > 0)
@@ -432,7 +433,7 @@ static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
while (n) {
pg = rb_entry(n, struct ceph_pg_mapping, node);
- c = pgid_cmp(pgid, pg->pgid);
+ c = ceph_pg_compare(&pgid, &pg->pgid);
if (c < 0) {
n = n->rb_left;
} else if (c > 0) {
@@ -596,7 +597,9 @@ static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
*p += 4; /* skip crash_replay_interval */
if (ev >= 7)
- *p += 1; /* skip min_size */
+ pi->min_size = ceph_decode_8(p);
+ else
+ pi->min_size = pi->size - pi->size / 2;
if (ev >= 8)
*p += 8 + 8; /* skip quota_max_* */
@@ -616,6 +619,50 @@ static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
pi->write_tier = -1;
}
+ if (ev >= 10) {
+ /* skip properties */
+ num = ceph_decode_32(p);
+ while (num--) {
+ len = ceph_decode_32(p);
+ *p += len; /* key */
+ len = ceph_decode_32(p);
+ *p += len; /* val */
+ }
+ }
+
+ if (ev >= 11) {
+ /* skip hit_set_params */
+ *p += 1 + 1; /* versions */
+ len = ceph_decode_32(p);
+ *p += len;
+
+ *p += 4; /* skip hit_set_period */
+ *p += 4; /* skip hit_set_count */
+ }
+
+ if (ev >= 12)
+ *p += 4; /* skip stripe_width */
+
+ if (ev >= 13) {
+ *p += 8; /* skip target_max_bytes */
+ *p += 8; /* skip target_max_objects */
+ *p += 4; /* skip cache_target_dirty_ratio_micro */
+ *p += 4; /* skip cache_target_full_ratio_micro */
+ *p += 4; /* skip cache_min_flush_age */
+ *p += 4; /* skip cache_min_evict_age */
+ }
+
+ if (ev >= 14) {
+ /* skip erasure_code_profile */
+ len = ceph_decode_32(p);
+ *p += len;
+ }
+
+ if (ev >= 15)
+ pi->last_force_request_resend = ceph_decode_32(p);
+ else
+ pi->last_force_request_resend = 0;
+
/* ignore the rest */
*p = pool_end;
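
Note that the pre-v7 default computed above, size - size / 2, is integer shorthand for ceil(size / 2): a 3-replica pool defaults to min_size 2 and a 2-replica pool to 1, matching what newer encodings carry explicitly.
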
@@ -660,6 +707,23 @@ bad:
/*
* osd map
*/
+struct ceph_osdmap *ceph_osdmap_alloc(void)
+{
+ struct ceph_osdmap *map;
+
+ map = kzalloc(sizeof(*map), GFP_NOIO);
+ if (!map)
+ return NULL;
+
+ map->pg_pools = RB_ROOT;
+ map->pool_max = -1;
+ map->pg_temp = RB_ROOT;
+ map->primary_temp = RB_ROOT;
+ mutex_init(&map->crush_scratch_mutex);
+
+ return map;
+}
+
void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
dout("osdmap_destroy %p\n", map);
@@ -1183,14 +1247,10 @@ struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
struct ceph_osdmap *map;
int ret;
- map = kzalloc(sizeof(*map), GFP_NOFS);
+ map = ceph_osdmap_alloc();
if (!map)
return ERR_PTR(-ENOMEM);
- map->pg_temp = RB_ROOT;
- map->primary_temp = RB_ROOT;
- mutex_init(&map->crush_scratch_mutex);
-
ret = osdmap_decode(p, end, map);
if (ret) {
ceph_osdmap_destroy(map);
@@ -1204,8 +1264,7 @@ struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
* decode and apply an incremental map update.
*/
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
- struct ceph_osdmap *map,
- struct ceph_messenger *msgr)
+ struct ceph_osdmap *map)
{
struct crush_map *newcrush = NULL;
struct ceph_fsid fsid;
@@ -1381,8 +1440,252 @@ bad:
return ERR_PTR(err);
}
+void ceph_oid_copy(struct ceph_object_id *dest,
+ const struct ceph_object_id *src)
+{
+ WARN_ON(!ceph_oid_empty(dest));
+
+ if (src->name != src->inline_name) {
+ /* very rare, see ceph_object_id definition */
+ dest->name = kmalloc(src->name_len + 1,
+ GFP_NOIO | __GFP_NOFAIL);
+ }
+
+ memcpy(dest->name, src->name, src->name_len + 1);
+ dest->name_len = src->name_len;
+}
+EXPORT_SYMBOL(ceph_oid_copy);
+
+static __printf(2, 0)
+int oid_printf_vargs(struct ceph_object_id *oid, const char *fmt, va_list ap)
+{
+ int len;
+
+ WARN_ON(!ceph_oid_empty(oid));
+
+ len = vsnprintf(oid->inline_name, sizeof(oid->inline_name), fmt, ap);
+ if (len >= sizeof(oid->inline_name))
+ return len;
+
+ oid->name_len = len;
+ return 0;
+}
+
+/*
+ * If oid doesn't fit into inline buffer, BUG.
+ */
+void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ BUG_ON(oid_printf_vargs(oid, fmt, ap));
+ va_end(ap);
+}
+EXPORT_SYMBOL(ceph_oid_printf);
+
+static __printf(3, 0)
+int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp,
+ const char *fmt, va_list ap)
+{
+ va_list aq;
+ int len;
+
+ va_copy(aq, ap);
+ len = oid_printf_vargs(oid, fmt, aq);
+ va_end(aq);
+
+ if (len) {
+ char *external_name;
+
+ external_name = kmalloc(len + 1, gfp);
+ if (!external_name)
+ return -ENOMEM;
+
+ oid->name = external_name;
+ WARN_ON(vsnprintf(oid->name, len + 1, fmt, ap) != len);
+ oid->name_len = len;
+ }
+
+ return 0;
+}
+
+/*
+ * If oid doesn't fit into inline buffer, allocate.
+ */
+int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
+ const char *fmt, ...)
+{
+ va_list ap;
+ int ret;
+
+ va_start(ap, fmt);
+ ret = oid_aprintf_vargs(oid, gfp, fmt, ap);
+ va_end(ap);
+
+ return ret;
+}
+EXPORT_SYMBOL(ceph_oid_aprintf);
+
+void ceph_oid_destroy(struct ceph_object_id *oid)
+{
+ if (oid->name != oid->inline_name)
+ kfree(oid->name);
+}
+EXPORT_SYMBOL(ceph_oid_destroy);
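
The name-buffer management above means a ceph_object_id must be initialized before the first *printf and destroyed after last use; short names stay in inline_name, long ones spill to kmalloc. A sketch (ceph_oid_init() is assumed from the accompanying header change, and image_id is a placeholder):

    struct ceph_object_id oid;
    int ret;

    ceph_oid_init(&oid);            /* name points at inline_name */
    ret = ceph_oid_aprintf(&oid, GFP_NOIO, "rbd_header.%s", image_id);
    if (ret)
            return ret;             /* external name allocation failed */

    /* ... use oid ... */

    ceph_oid_destroy(&oid);         /* kfree()s only an external name */
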
+
+/*
+ * osds only
+ */
+static bool __osds_equal(const struct ceph_osds *lhs,
+ const struct ceph_osds *rhs)
+{
+ if (lhs->size == rhs->size &&
+ !memcmp(lhs->osds, rhs->osds, rhs->size * sizeof(rhs->osds[0])))
+ return true;
+
+ return false;
+}
+
+/*
+ * osds + primary
+ */
+static bool osds_equal(const struct ceph_osds *lhs,
+ const struct ceph_osds *rhs)
+{
+ if (__osds_equal(lhs, rhs) &&
+ lhs->primary == rhs->primary)
+ return true;
+
+ return false;
+}
+
+static bool osds_valid(const struct ceph_osds *set)
+{
+ /* non-empty set */
+ if (set->size > 0 && set->primary >= 0)
+ return true;
+
+ /* empty can_shift_osds set */
+ if (!set->size && set->primary == -1)
+ return true;
+
+ /* empty !can_shift_osds set - all NONE */
+ if (set->size > 0 && set->primary == -1) {
+ int i;
+
+ for (i = 0; i < set->size; i++) {
+ if (set->osds[i] != CRUSH_ITEM_NONE)
+ break;
+ }
+ if (i == set->size)
+ return true;
+ }
+
+ return false;
+}
+
+void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src)
+{
+ memcpy(dest->osds, src->osds, src->size * sizeof(src->osds[0]));
+ dest->size = src->size;
+ dest->primary = src->primary;
+}
+
+static bool is_split(const struct ceph_pg *pgid,
+ u32 old_pg_num,
+ u32 new_pg_num)
+{
+ int old_bits = calc_bits_of(old_pg_num);
+ int old_mask = (1 << old_bits) - 1;
+ int n;
+
+ WARN_ON(pgid->seed >= old_pg_num);
+ if (new_pg_num <= old_pg_num)
+ return false;
+
+ for (n = 1; ; n++) {
+ int next_bit = n << (old_bits - 1);
+ u32 s = next_bit | pgid->seed;
+
+ if (s < old_pg_num || s == pgid->seed)
+ continue;
+ if (s >= new_pg_num)
+ break;
+
+ s = ceph_stable_mod(s, old_pg_num, old_mask);
+ if (s == pgid->seed)
+ return true;
+ }
+
+ return false;
+}
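
A worked example: growing a pool from pg_num 4 to 8 and asking about seed 1, old_bits is 3 and old_mask is 7; at n = 1 the candidate child is s = 4 | 1 = 5, which is neither below old_pg_num nor at or above new_pg_num, and ceph_stable_mod(5, 4, 7) folds it back to 1, so PG 1 is indeed split (into 1 and 5).
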
+
+bool ceph_is_new_interval(const struct ceph_osds *old_acting,
+ const struct ceph_osds *new_acting,
+ const struct ceph_osds *old_up,
+ const struct ceph_osds *new_up,
+ int old_size,
+ int new_size,
+ int old_min_size,
+ int new_min_size,
+ u32 old_pg_num,
+ u32 new_pg_num,
+ bool old_sort_bitwise,
+ bool new_sort_bitwise,
+ const struct ceph_pg *pgid)
+{
+ return !osds_equal(old_acting, new_acting) ||
+ !osds_equal(old_up, new_up) ||
+ old_size != new_size ||
+ old_min_size != new_min_size ||
+ is_split(pgid, old_pg_num, new_pg_num) ||
+ old_sort_bitwise != new_sort_bitwise;
+}
+
+static int calc_pg_rank(int osd, const struct ceph_osds *acting)
+{
+ int i;
+
+ for (i = 0; i < acting->size; i++) {
+ if (acting->osds[i] == osd)
+ return i;
+ }
+
+ return -1;
+}
+
+static bool primary_changed(const struct ceph_osds *old_acting,
+ const struct ceph_osds *new_acting)
+{
+ if (!old_acting->size && !new_acting->size)
+ return false; /* both still empty */
+ if (!old_acting->size ^ !new_acting->size)
+ return true; /* was empty, now not, or vice versa */
+ if (old_acting->primary != new_acting->primary)
+ return true; /* primary changed */
+
+ if (calc_pg_rank(old_acting->primary, old_acting) !=
+ calc_pg_rank(new_acting->primary, new_acting))
+ return true;
+
+ return false; /* same primary (tho replicas may have changed) */
+}
+
+bool ceph_osds_changed(const struct ceph_osds *old_acting,
+ const struct ceph_osds *new_acting,
+ bool any_change)
+{
+ if (primary_changed(old_acting, new_acting))
+ return true;
+
+ if (any_change && !__osds_equal(old_acting, new_acting))
+ return true;
+
+ return false;
+}
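
As an example of the rank check: acting [1,2,3] with primary 1 changing to [2,1,3] with primary 1 keeps the same primary OSD, but its rank moves from 0 to 1, so primary_changed() - and therefore ceph_osds_changed() - still reports an interval-relevant change even though membership is identical.
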
/*
* calculate file layout from given offset, length.
@@ -1455,30 +1758,71 @@ invalid:
EXPORT_SYMBOL(ceph_calc_file_object_mapping);
/*
- * Calculate mapping of a (oloc, oid) pair to a PG. Should only be
- * called with target's (oloc, oid), since tiering isn't taken into
- * account.
+ * Map an object into a PG.
+ *
+ * Should only be called with target_oid and target_oloc (as opposed to
+ * base_oid and base_oloc), since tiering isn't taken into account.
*/
-int ceph_oloc_oid_to_pg(struct ceph_osdmap *osdmap,
- struct ceph_object_locator *oloc,
- struct ceph_object_id *oid,
- struct ceph_pg *pg_out)
+int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
+ struct ceph_object_id *oid,
+ struct ceph_object_locator *oloc,
+ struct ceph_pg *raw_pgid)
{
struct ceph_pg_pool_info *pi;
- pi = __lookup_pg_pool(&osdmap->pg_pools, oloc->pool);
+ pi = ceph_pg_pool_by_id(osdmap, oloc->pool);
if (!pi)
- return -EIO;
+ return -ENOENT;
- pg_out->pool = oloc->pool;
- pg_out->seed = ceph_str_hash(pi->object_hash, oid->name,
- oid->name_len);
+ raw_pgid->pool = oloc->pool;
+ raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
+ oid->name_len);
- dout("%s '%.*s' pgid %llu.%x\n", __func__, oid->name_len, oid->name,
- pg_out->pool, pg_out->seed);
+ dout("%s %*pE -> raw_pgid %llu.%x\n", __func__, oid->name_len,
+ oid->name, raw_pgid->pool, raw_pgid->seed);
return 0;
}
-EXPORT_SYMBOL(ceph_oloc_oid_to_pg);
+EXPORT_SYMBOL(ceph_object_locator_to_pg);
+
+/*
+ * Map a raw PG (full precision ps) into an actual PG.
+ */
+static void raw_pg_to_pg(struct ceph_pg_pool_info *pi,
+ const struct ceph_pg *raw_pgid,
+ struct ceph_pg *pgid)
+{
+ pgid->pool = raw_pgid->pool;
+ pgid->seed = ceph_stable_mod(raw_pgid->seed, pi->pg_num,
+ pi->pg_num_mask);
+}
+
+/*
+ * Map a raw PG (full precision ps) into a placement ps (placement
+ * seed). Include pool id in that value so that different pools don't
+ * use the same seeds.
+ */
+static u32 raw_pg_to_pps(struct ceph_pg_pool_info *pi,
+ const struct ceph_pg *raw_pgid)
+{
+ if (pi->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
+ /* hash pool id and seed so that pool PGs do not overlap */
+ return crush_hash32_2(CRUSH_HASH_RJENKINS1,
+ ceph_stable_mod(raw_pgid->seed,
+ pi->pgp_num,
+ pi->pgp_num_mask),
+ raw_pgid->pool);
+ } else {
+ /*
+ * legacy behavior: add ps and pool together. this is
+ * not a great approach because the PGs from each pool
+ * will overlap on top of each other: 0.5 == 1.4 ==
+ * 2.3 == ...
+ */
+ return ceph_stable_mod(raw_pgid->seed, pi->pgp_num,
+ pi->pgp_num_mask) +
+ (unsigned)raw_pgid->pool;
+ }
+}
static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
int *result, int result_max,
@@ -1497,84 +1841,92 @@ static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
}
/*
- * Calculate raw (crush) set for given pgid.
+ * Calculate raw set (CRUSH output) for given PG. The result may
+ * contain nonexistent OSDs. ->primary is undefined for a raw set.
*
- * Return raw set length, or error.
+ * Placement seed (CRUSH input) is returned through @ppps.
*/
-static int pg_to_raw_osds(struct ceph_osdmap *osdmap,
- struct ceph_pg_pool_info *pool,
- struct ceph_pg pgid, u32 pps, int *osds)
+static void pg_to_raw_osds(struct ceph_osdmap *osdmap,
+ struct ceph_pg_pool_info *pi,
+ const struct ceph_pg *raw_pgid,
+ struct ceph_osds *raw,
+ u32 *ppps)
{
+ u32 pps = raw_pg_to_pps(pi, raw_pgid);
int ruleno;
int len;
- /* crush */
- ruleno = crush_find_rule(osdmap->crush, pool->crush_ruleset,
- pool->type, pool->size);
+ ceph_osds_init(raw);
+ if (ppps)
+ *ppps = pps;
+
+ ruleno = crush_find_rule(osdmap->crush, pi->crush_ruleset, pi->type,
+ pi->size);
if (ruleno < 0) {
pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n",
- pgid.pool, pool->crush_ruleset, pool->type,
- pool->size);
- return -ENOENT;
+ pi->id, pi->crush_ruleset, pi->type, pi->size);
+ return;
}
- len = do_crush(osdmap, ruleno, pps, osds,
- min_t(int, pool->size, CEPH_PG_MAX_SIZE),
+ len = do_crush(osdmap, ruleno, pps, raw->osds,
+ min_t(int, pi->size, ARRAY_SIZE(raw->osds)),
osdmap->osd_weight, osdmap->max_osd);
if (len < 0) {
pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n",
- len, ruleno, pgid.pool, pool->crush_ruleset,
- pool->type, pool->size);
- return len;
+ len, ruleno, pi->id, pi->crush_ruleset, pi->type,
+ pi->size);
+ return;
}
- return len;
+ raw->size = len;
}
/*
- * Given raw set, calculate up set and up primary.
+ * Given raw set, calculate up set and up primary. By definition of an
+ * up set, the result won't contain nonexistent or down OSDs.
*
- * Return up set length. *primary is set to up primary osd id, or -1
- * if up set is empty.
+ * This is done in-place - on return @set is the up set. If it's
+ * empty, ->primary will remain undefined.
*/
-static int raw_to_up_osds(struct ceph_osdmap *osdmap,
- struct ceph_pg_pool_info *pool,
- int *osds, int len, int *primary)
+static void raw_to_up_osds(struct ceph_osdmap *osdmap,
+ struct ceph_pg_pool_info *pi,
+ struct ceph_osds *set)
{
- int up_primary = -1;
int i;
- if (ceph_can_shift_osds(pool)) {
+ /* ->primary is undefined for a raw set */
+ BUG_ON(set->primary != -1);
+
+ if (ceph_can_shift_osds(pi)) {
int removed = 0;
- for (i = 0; i < len; i++) {
- if (ceph_osd_is_down(osdmap, osds[i])) {
+ /* shift left */
+ for (i = 0; i < set->size; i++) {
+ if (ceph_osd_is_down(osdmap, set->osds[i])) {
removed++;
continue;
}
if (removed)
- osds[i - removed] = osds[i];
+ set->osds[i - removed] = set->osds[i];
}
-
- len -= removed;
- if (len > 0)
- up_primary = osds[0];
+ set->size -= removed;
+ if (set->size > 0)
+ set->primary = set->osds[0];
} else {
- for (i = len - 1; i >= 0; i--) {
- if (ceph_osd_is_down(osdmap, osds[i]))
- osds[i] = CRUSH_ITEM_NONE;
+ /* set down/dne devices to NONE */
+ for (i = set->size - 1; i >= 0; i--) {
+ if (ceph_osd_is_down(osdmap, set->osds[i]))
+ set->osds[i] = CRUSH_ITEM_NONE;
else
- up_primary = osds[i];
+ set->primary = set->osds[i];
}
}
-
- *primary = up_primary;
- return len;
}
-static void apply_primary_affinity(struct ceph_osdmap *osdmap, u32 pps,
- struct ceph_pg_pool_info *pool,
- int *osds, int len, int *primary)
+static void apply_primary_affinity(struct ceph_osdmap *osdmap,
+ struct ceph_pg_pool_info *pi,
+ u32 pps,
+ struct ceph_osds *up)
{
int i;
int pos = -1;
@@ -1586,8 +1938,8 @@ static void apply_primary_affinity(struct ceph_osdmap *osdmap, u32 pps,
if (!osdmap->osd_primary_affinity)
return;
- for (i = 0; i < len; i++) {
- int osd = osds[i];
+ for (i = 0; i < up->size; i++) {
+ int osd = up->osds[i];
if (osd != CRUSH_ITEM_NONE &&
osdmap->osd_primary_affinity[osd] !=
@@ -1595,7 +1947,7 @@ static void apply_primary_affinity(struct ceph_osdmap *osdmap, u32 pps,
break;
}
}
- if (i == len)
+ if (i == up->size)
return;
/*
@@ -1603,8 +1955,8 @@ static void apply_primary_affinity(struct ceph_osdmap *osdmap, u32 pps,
* osd into the hash/rng so that a proportional fraction of an
* osd's pgs get rejected as primary.
*/
- for (i = 0; i < len; i++) {
- int osd = osds[i];
+ for (i = 0; i < up->size; i++) {
+ int osd = up->osds[i];
u32 aff;
if (osd == CRUSH_ITEM_NONE)
@@ -1629,135 +1981,110 @@ static void apply_primary_affinity(struct ceph_osdmap *osdmap, u32 pps,
if (pos < 0)
return;
- *primary = osds[pos];
+ up->primary = up->osds[pos];
- if (ceph_can_shift_osds(pool) && pos > 0) {
+ if (ceph_can_shift_osds(pi) && pos > 0) {
/* move the new primary to the front */
for (i = pos; i > 0; i--)
- osds[i] = osds[i - 1];
- osds[0] = *primary;
+ up->osds[i] = up->osds[i - 1];
+ up->osds[0] = up->primary;
}
}
/*
- * Given up set, apply pg_temp and primary_temp mappings.
+ * Get pg_temp and primary_temp mappings for given PG.
*
- * Return acting set length. *primary is set to acting primary osd id,
- * or -1 if acting set is empty.
+ * Note that a PG may have none, only pg_temp, only primary_temp or
+ * both pg_temp and primary_temp mappings. This means @temp isn't
+ * always a valid OSD set on return: in the "only primary_temp" case,
+ * @temp will have its ->primary >= 0 but ->size == 0.
*/
-static int apply_temps(struct ceph_osdmap *osdmap,
- struct ceph_pg_pool_info *pool, struct ceph_pg pgid,
- int *osds, int len, int *primary)
+static void get_temp_osds(struct ceph_osdmap *osdmap,
+ struct ceph_pg_pool_info *pi,
+ const struct ceph_pg *raw_pgid,
+ struct ceph_osds *temp)
{
+ struct ceph_pg pgid;
struct ceph_pg_mapping *pg;
- int temp_len;
- int temp_primary;
int i;
- /* raw_pg -> pg */
- pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num,
- pool->pg_num_mask);
+ raw_pg_to_pg(pi, raw_pgid, &pgid);
+ ceph_osds_init(temp);
/* pg_temp? */
pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
if (pg) {
- temp_len = 0;
- temp_primary = -1;
-
for (i = 0; i < pg->pg_temp.len; i++) {
if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
- if (ceph_can_shift_osds(pool))
+ if (ceph_can_shift_osds(pi))
continue;
- else
- osds[temp_len++] = CRUSH_ITEM_NONE;
+
+ temp->osds[temp->size++] = CRUSH_ITEM_NONE;
} else {
- osds[temp_len++] = pg->pg_temp.osds[i];
+ temp->osds[temp->size++] = pg->pg_temp.osds[i];
}
}
/* apply pg_temp's primary */
- for (i = 0; i < temp_len; i++) {
- if (osds[i] != CRUSH_ITEM_NONE) {
- temp_primary = osds[i];
+ for (i = 0; i < temp->size; i++) {
+ if (temp->osds[i] != CRUSH_ITEM_NONE) {
+ temp->primary = temp->osds[i];
break;
}
}
- } else {
- temp_len = len;
- temp_primary = *primary;
}
/* primary_temp? */
pg = __lookup_pg_mapping(&osdmap->primary_temp, pgid);
if (pg)
- temp_primary = pg->primary_temp.osd;
-
- *primary = temp_primary;
- return temp_len;
+ temp->primary = pg->primary_temp.osd;
}
/*
- * Calculate acting set for given pgid.
+ * Map a PG to its acting set as well as its up set.
*
- * Return acting set length, or error. *primary is set to acting
- * primary osd id, or -1 if acting set is empty or on error.
+ * Acting set is used for data mapping purposes, while up set can be
+ * recorded for detecting interval changes and deciding whether to
+ * resend a request.
*/
-int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
- int *osds, int *primary)
+void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
+ const struct ceph_pg *raw_pgid,
+ struct ceph_osds *up,
+ struct ceph_osds *acting)
{
- struct ceph_pg_pool_info *pool;
+ struct ceph_pg_pool_info *pi;
u32 pps;
- int len;
- pool = __lookup_pg_pool(&osdmap->pg_pools, pgid.pool);
- if (!pool) {
- *primary = -1;
- return -ENOENT;
+ pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool);
+ if (!pi) {
+ ceph_osds_init(up);
+ ceph_osds_init(acting);
+ goto out;
}
- if (pool->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
- /* hash pool id and seed so that pool PGs do not overlap */
- pps = crush_hash32_2(CRUSH_HASH_RJENKINS1,
- ceph_stable_mod(pgid.seed, pool->pgp_num,
- pool->pgp_num_mask),
- pgid.pool);
- } else {
- /*
- * legacy behavior: add ps and pool together. this is
- * not a great approach because the PGs from each pool
- * will overlap on top of each other: 0.5 == 1.4 ==
- * 2.3 == ...
- */
- pps = ceph_stable_mod(pgid.seed, pool->pgp_num,
- pool->pgp_num_mask) +
- (unsigned)pgid.pool;
- }
-
- len = pg_to_raw_osds(osdmap, pool, pgid, pps, osds);
- if (len < 0) {
- *primary = -1;
- return len;
+ pg_to_raw_osds(osdmap, pi, raw_pgid, up, &pps);
+ raw_to_up_osds(osdmap, pi, up);
+ apply_primary_affinity(osdmap, pi, pps, up);
+ get_temp_osds(osdmap, pi, raw_pgid, acting);
+ if (!acting->size) {
+ memcpy(acting->osds, up->osds, up->size * sizeof(up->osds[0]));
+ acting->size = up->size;
+ if (acting->primary == -1)
+ acting->primary = up->primary;
}
-
- len = raw_to_up_osds(osdmap, pool, osds, len, primary);
-
- apply_primary_affinity(osdmap, pps, pool, osds, len, primary);
-
- len = apply_temps(osdmap, pool, pgid, osds, len, primary);
-
- return len;
+out:
+ WARN_ON(!osds_valid(up) || !osds_valid(acting));
}
/*
- * Return primary osd for given pgid, or -1 if none.
+ * Return acting primary for given PG, or -1 if none.
*/
-int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
+int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
+ const struct ceph_pg *raw_pgid)
{
- int osds[CEPH_PG_MAX_SIZE];
- int primary;
-
- ceph_calc_pg_acting(osdmap, pgid, osds, &primary);
+ struct ceph_osds up, acting;
- return primary;
+ ceph_pg_to_up_acting_osds(osdmap, raw_pgid, &up, &acting);
+ return acting.primary;
}
-EXPORT_SYMBOL(ceph_calc_pg_primary);
+EXPORT_SYMBOL(ceph_pg_to_acting_primary);
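
End to end, the mapping now composes as object -> raw PG -> up/acting sets. A condensed sketch of what a client does (hypothetical wrapper, error handling trimmed):

    static int my_object_to_primary(struct ceph_osdmap *map,
                                    struct ceph_object_id *oid,
                                    struct ceph_object_locator *oloc)
    {
            struct ceph_pg raw_pgid;
            struct ceph_osds up, acting;
            int ret;

            ret = ceph_object_locator_to_pg(map, oid, oloc, &raw_pgid);
            if (ret)
                    return ret;     /* -ENOENT: pool doesn't exist */

            /* CRUSH raw set -> up set -> affinity -> pg/primary_temp */
            ceph_pg_to_up_acting_osds(map, &raw_pgid, &up, &acting);
            return acting.primary;  /* -1 if the acting set is empty */
    }
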
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 02f53674dc39..040ff627c18a 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -543,7 +543,7 @@ rpcauth_cache_enforce_limit(void)
*/
struct rpc_cred *
rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
- int flags)
+ int flags, gfp_t gfp)
{
LIST_HEAD(free);
struct rpc_cred_cache *cache = auth->au_credcache;
@@ -580,7 +580,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
if (flags & RPCAUTH_LOOKUP_RCU)
return ERR_PTR(-ECHILD);
- new = auth->au_ops->crcreate(auth, acred, flags);
+ new = auth->au_ops->crcreate(auth, acred, flags, gfp);
if (IS_ERR(new)) {
cred = new;
goto out;
@@ -703,8 +703,7 @@ rpcauth_bindcred(struct rpc_task *task, struct rpc_cred *cred, int flags)
new = rpcauth_bind_new_cred(task, lookupflags);
if (IS_ERR(new))
return PTR_ERR(new);
- if (req->rq_cred != NULL)
- put_rpccred(req->rq_cred);
+ put_rpccred(req->rq_cred);
req->rq_cred = new;
return 0;
}
@@ -712,6 +711,8 @@ rpcauth_bindcred(struct rpc_task *task, struct rpc_cred *cred, int flags)
void
put_rpccred(struct rpc_cred *cred)
{
+ if (cred == NULL)
+ return;
/* Fast path for unhashed credentials */
if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) == 0) {
if (atomic_dec_and_test(&cred->cr_count))
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index 41248b1820c7..54dd3fdead54 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -38,6 +38,13 @@ struct rpc_cred *rpc_lookup_cred(void)
}
EXPORT_SYMBOL_GPL(rpc_lookup_cred);
+struct rpc_cred *
+rpc_lookup_generic_cred(struct auth_cred *acred, int flags, gfp_t gfp)
+{
+ return rpcauth_lookup_credcache(&generic_auth, acred, flags, gfp);
+}
+EXPORT_SYMBOL_GPL(rpc_lookup_generic_cred);
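
The gfp argument exists so that callers on reclaim or writeback paths can avoid recursing into filesystem or block I/O; a hypothetical caller in such a context might look like this (a sketch only, not a caller added by this patch):

    struct auth_cred acred = {
            .uid = current_fsuid(),
            .gid = current_fsgid(),
    };
    struct rpc_cred *cred;

    /* e.g. swap-over-NFS writeback: must not recurse into FS/IO */
    cred = rpc_lookup_generic_cred(&acred, 0, GFP_NOIO);
    if (IS_ERR(cred))
            return PTR_ERR(cred);
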
+
struct rpc_cred *rpc_lookup_cred_nonblock(void)
{
return rpcauth_lookupcred(&generic_auth, RPCAUTH_LOOKUP_RCU);
@@ -77,15 +84,15 @@ static struct rpc_cred *generic_bind_cred(struct rpc_task *task,
static struct rpc_cred *
generic_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
- return rpcauth_lookup_credcache(&generic_auth, acred, flags);
+ return rpcauth_lookup_credcache(&generic_auth, acred, flags, GFP_KERNEL);
}
static struct rpc_cred *
-generic_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
+generic_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t gfp)
{
struct generic_cred *gcred;
- gcred = kmalloc(sizeof(*gcred), GFP_KERNEL);
+ gcred = kmalloc(sizeof(*gcred), gfp);
if (gcred == NULL)
return ERR_PTR(-ENOMEM);
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 15612ffa8d57..e64ae93d5b4f 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1299,11 +1299,11 @@ gss_destroy_cred(struct rpc_cred *cred)
static struct rpc_cred *
gss_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
- return rpcauth_lookup_credcache(auth, acred, flags);
+ return rpcauth_lookup_credcache(auth, acred, flags, GFP_NOFS);
}
static struct rpc_cred *
-gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
+gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t gfp)
{
struct gss_auth *gss_auth = container_of(auth, struct gss_auth, rpc_auth);
struct gss_cred *cred = NULL;
@@ -1313,7 +1313,7 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
__func__, from_kuid(&init_user_ns, acred->uid),
auth->au_flavor);
- if (!(cred = kzalloc(sizeof(*cred), GFP_NOFS)))
+ if (!(cred = kzalloc(sizeof(*cred), gfp)))
goto out_err;
rpcauth_init_cred(&cred->gc_base, acred, auth, &gss_credops);
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 0d3dd364c22f..9f65452b7cbc 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -52,11 +52,11 @@ unx_destroy(struct rpc_auth *auth)
static struct rpc_cred *
unx_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
- return rpcauth_lookup_credcache(auth, acred, flags);
+ return rpcauth_lookup_credcache(auth, acred, flags, GFP_NOFS);
}
static struct rpc_cred *
-unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
+unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags, gfp_t gfp)
{
struct unx_cred *cred;
unsigned int groups = 0;
@@ -66,7 +66,7 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
from_kuid(&init_user_ns, acred->uid),
from_kgid(&init_user_ns, acred->gid));
- if (!(cred = kmalloc(sizeof(*cred), GFP_NOFS)))
+ if (!(cred = kmalloc(sizeof(*cred), gfp)))
return ERR_PTR(-ENOMEM);
rpcauth_init_cred(&cred->uc_base, acred, auth, &unix_credops);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 7e0c9bf22df8..06b4df9faaa1 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1414,6 +1414,23 @@ size_t rpc_max_payload(struct rpc_clnt *clnt)
EXPORT_SYMBOL_GPL(rpc_max_payload);
/**
+ * rpc_max_bc_payload - Get maximum backchannel payload size, in bytes
+ * @clnt: RPC client to query
+ */
+size_t rpc_max_bc_payload(struct rpc_clnt *clnt)
+{
+ struct rpc_xprt *xprt;
+ size_t ret;
+
+ rcu_read_lock();
+ xprt = rcu_dereference(clnt->cl_xprt);
+ ret = xprt->ops->bc_maxpayload(xprt);
+ rcu_read_unlock();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rpc_max_bc_payload);
+
+/**
* rpc_get_timeout - Get timeout for transport in units of HZ
* @clnt: RPC client to query
*/
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 6bdb3865212d..c4f3cc0c0775 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -797,6 +797,8 @@ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
xdr_set_iov(xdr, buf->head, buf->len);
else if (buf->page_len != 0)
xdr_set_page_base(xdr, 0, buf->len);
+ else
+ xdr_set_iov(xdr, buf->head, buf->len);
if (p != NULL && p > xdr->p && xdr->end >= p) {
xdr->nwords -= p - xdr->p;
xdr->p = p;
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 2dcd7640eeb5..87762d976b63 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -192,6 +192,22 @@ int xprt_rdma_bc_up(struct svc_serv *serv, struct net *net)
}
/**
+ * xprt_rdma_bc_maxpayload - Return maximum backchannel message size
+ * @xprt: transport
+ *
+ * Returns maximum size, in bytes, of a backchannel message
+ */
+size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
+{
+ struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
+ struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
+ size_t maxmsg;
+
+ maxmsg = min_t(unsigned int, cdata->inline_rsize, cdata->inline_wsize);
+ return maxmsg - RPCRDMA_HDRLEN_MIN;
+}
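
As a rough worked example (assuming RPCRDMA_HDRLEN_MIN is the 28-byte fixed RPC-over-RDMA header; check the header definition for the authoritative value): with inline_rsize == inline_wsize == 1024, the backchannel payload ceiling comes out to 1024 - 28 = 996 bytes.
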
+
+/**
* rpcrdma_bc_marshal_reply - Send backwards direction reply
* @rqst: buffer containing RPC reply data
*
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index b289e106540b..6326ebe8b595 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -35,10 +35,71 @@
/* Maximum scatter/gather per FMR */
#define RPCRDMA_MAX_FMR_SGES (64)
+static struct workqueue_struct *fmr_recovery_wq;
+
+#define FMR_RECOVERY_WQ_FLAGS (WQ_UNBOUND)
+
+int
+fmr_alloc_recovery_wq(void)
+{
+ fmr_recovery_wq = alloc_workqueue("fmr_recovery", FMR_RECOVERY_WQ_FLAGS, 0);
+ return !fmr_recovery_wq ? -ENOMEM : 0;
+}
+
+void
+fmr_destroy_recovery_wq(void)
+{
+ struct workqueue_struct *wq;
+
+ if (!fmr_recovery_wq)
+ return;
+
+ wq = fmr_recovery_wq;
+ fmr_recovery_wq = NULL;
+ destroy_workqueue(wq);
+}
+
+static int
+__fmr_unmap(struct rpcrdma_mw *mw)
+{
+ LIST_HEAD(l);
+
+ list_add(&mw->fmr.fmr->list, &l);
+ return ib_unmap_fmr(&l);
+}
+
+/* Deferred reset of a single FMR. Generate a fresh rkey by
+ * replacing the MR. There's no recovery if this fails.
+ */
+static void
+__fmr_recovery_worker(struct work_struct *work)
+{
+ struct rpcrdma_mw *mw = container_of(work, struct rpcrdma_mw,
+ mw_work);
+ struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
+
+ __fmr_unmap(mw);
+ rpcrdma_put_mw(r_xprt, mw);
+ return;
+}
+
+/* A broken MR was discovered in a context that can't sleep.
+ * Defer recovery to the recovery worker.
+ */
+static void
+__fmr_queue_recovery(struct rpcrdma_mw *mw)
+{
+ INIT_WORK(&mw->mw_work, __fmr_recovery_worker);
+ queue_work(fmr_recovery_wq, &mw->mw_work);
+}
+
static int
fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
struct rpcrdma_create_data_internal *cdata)
{
+ rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1,
+ RPCRDMA_MAX_DATA_SEGS /
+ RPCRDMA_MAX_FMR_SGES));
return 0;
}
@@ -48,7 +109,7 @@ static size_t
fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
- rpcrdma_max_segments(r_xprt) * RPCRDMA_MAX_FMR_SGES);
+ RPCRDMA_MAX_HDR_SEGS * RPCRDMA_MAX_FMR_SGES);
}
static int
@@ -89,6 +150,7 @@ fmr_op_init(struct rpcrdma_xprt *r_xprt)
if (IS_ERR(r->fmr.fmr))
goto out_fmr_err;
+ r->mw_xprt = r_xprt;
list_add(&r->mw_list, &buf->rb_mws);
list_add(&r->mw_all, &buf->rb_all);
}
@@ -104,15 +166,6 @@ out:
return rc;
}
-static int
-__fmr_unmap(struct rpcrdma_mw *r)
-{
- LIST_HEAD(l);
-
- list_add(&r->fmr.fmr->list, &l);
- return ib_unmap_fmr(&l);
-}
-
/* Use the ib_map_phys_fmr() verb to register a memory region
* for remote access via RDMA READ or RDMA WRITE.
*/
@@ -183,15 +236,10 @@ static void
__fmr_dma_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
{
struct ib_device *device = r_xprt->rx_ia.ri_device;
- struct rpcrdma_mw *mw = seg->rl_mw;
int nsegs = seg->mr_nsegs;
- seg->rl_mw = NULL;
-
while (nsegs--)
rpcrdma_unmap_one(device, seg++);
-
- rpcrdma_put_mw(r_xprt, mw);
}
/* Invalidate all memory regions that were registered for "req".
@@ -234,42 +282,50 @@ fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
seg = &req->rl_segments[i];
__fmr_dma_unmap(r_xprt, seg);
+ rpcrdma_put_mw(r_xprt, seg->rl_mw);
i += seg->mr_nsegs;
seg->mr_nsegs = 0;
+ seg->rl_mw = NULL;
}
req->rl_nchunks = 0;
}
-/* Use the ib_unmap_fmr() verb to prevent further remote
- * access via RDMA READ or RDMA WRITE.
+/* Use a slow, safe mechanism to invalidate all memory regions
+ * that were registered for "req".
+ *
+ * In the asynchronous case, DMA unmapping occurs first here
+ * because the rpcrdma_mr_seg is released immediately after this
+ * call. Its contents won't be available in __fmr_dma_unmap later.
+ * FIXME.
*/
-static int
-fmr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
+static void
+fmr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
+ bool sync)
{
- struct rpcrdma_ia *ia = &r_xprt->rx_ia;
- struct rpcrdma_mr_seg *seg1 = seg;
- struct rpcrdma_mw *mw = seg1->rl_mw;
- int rc, nsegs = seg->mr_nsegs;
+ struct rpcrdma_mr_seg *seg;
+ struct rpcrdma_mw *mw;
+ unsigned int i;
- dprintk("RPC: %s: FMR %p\n", __func__, mw);
+ for (i = 0; req->rl_nchunks; req->rl_nchunks--) {
+ seg = &req->rl_segments[i];
+ mw = seg->rl_mw;
- seg1->rl_mw = NULL;
- while (seg1->mr_nsegs--)
- rpcrdma_unmap_one(ia->ri_device, seg++);
- rc = __fmr_unmap(mw);
- if (rc)
- goto out_err;
- rpcrdma_put_mw(r_xprt, mw);
- return nsegs;
+ if (sync) {
+ /* ORDER: invalidate the FMR before DMA-unmapping its pages */
+ __fmr_unmap(mw);
+ __fmr_dma_unmap(r_xprt, seg);
+ rpcrdma_put_mw(r_xprt, mw);
+ } else {
+ __fmr_dma_unmap(r_xprt, seg);
+ __fmr_queue_recovery(mw);
+ }
-out_err:
- /* The FMR is abandoned, but remains in rb_all. fmr_op_destroy
- * will attempt to release it when the transport is destroyed.
- */
- dprintk("RPC: %s: ib_unmap_fmr status %i\n", __func__, rc);
- return nsegs;
+ i += seg->mr_nsegs;
+ seg->mr_nsegs = 0;
+ seg->rl_mw = NULL;
+ }
}
static void
@@ -295,7 +351,7 @@ fmr_op_destroy(struct rpcrdma_buffer *buf)
const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
.ro_map = fmr_op_map,
.ro_unmap_sync = fmr_op_unmap_sync,
- .ro_unmap = fmr_op_unmap,
+ .ro_unmap_safe = fmr_op_unmap_safe,
.ro_open = fmr_op_open,
.ro_maxpages = fmr_op_maxpages,
.ro_init = fmr_op_init,
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 94c3fa910b85..c0947544babe 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -98,6 +98,47 @@ frwr_destroy_recovery_wq(void)
destroy_workqueue(wq);
}
+static int
+__frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
+{
+ struct rpcrdma_frmr *f = &r->frmr;
+ int rc;
+
+ rc = ib_dereg_mr(f->fr_mr);
+ if (rc) {
+ pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
+ rc, r);
+ return rc;
+ }
+
+ f->fr_mr = ib_alloc_mr(ia->ri_pd, IB_MR_TYPE_MEM_REG,
+ ia->ri_max_frmr_depth);
+ if (IS_ERR(f->fr_mr)) {
+ pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
+ PTR_ERR(f->fr_mr), r);
+ return PTR_ERR(f->fr_mr);
+ }
+
+ dprintk("RPC: %s: recovered FRMR %p\n", __func__, r);
+ f->fr_state = FRMR_IS_INVALID;
+ return 0;
+}
+
+static void
+__frwr_reset_and_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
+{
+ struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+ struct rpcrdma_frmr *f = &mw->frmr;
+ int rc;
+
+ rc = __frwr_reset_mr(ia, mw);
+ ib_dma_unmap_sg(ia->ri_device, f->fr_sg, f->fr_nents, f->fr_dir);
+ if (rc)
+ return;
+
+ rpcrdma_put_mw(r_xprt, mw);
+}
+
/* Deferred reset of a single FRMR. Generate a fresh rkey by
* replacing the MR.
*
@@ -109,26 +150,10 @@ static void
__frwr_recovery_worker(struct work_struct *work)
{
struct rpcrdma_mw *r = container_of(work, struct rpcrdma_mw,
- frmr.fr_work);
- struct rpcrdma_xprt *r_xprt = r->frmr.fr_xprt;
- unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth;
- struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
-
- if (ib_dereg_mr(r->frmr.fr_mr))
- goto out_fail;
+ mw_work);
- r->frmr.fr_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, depth);
- if (IS_ERR(r->frmr.fr_mr))
- goto out_fail;
-
- dprintk("RPC: %s: recovered FRMR %p\n", __func__, r);
- r->frmr.fr_state = FRMR_IS_INVALID;
- rpcrdma_put_mw(r_xprt, r);
+ __frwr_reset_and_unmap(r->mw_xprt, r);
return;
-
-out_fail:
- pr_warn("RPC: %s: FRMR %p unrecovered\n",
- __func__, r);
}
/* A broken MR was discovered in a context that can't sleep.
@@ -137,8 +162,8 @@ out_fail:
static void
__frwr_queue_recovery(struct rpcrdma_mw *r)
{
- INIT_WORK(&r->frmr.fr_work, __frwr_recovery_worker);
- queue_work(frwr_recovery_wq, &r->frmr.fr_work);
+ INIT_WORK(&r->mw_work, __frwr_recovery_worker);
+ queue_work(frwr_recovery_wq, &r->mw_work);
}
static int
@@ -152,11 +177,11 @@ __frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device,
if (IS_ERR(f->fr_mr))
goto out_mr_err;
- f->sg = kcalloc(depth, sizeof(*f->sg), GFP_KERNEL);
- if (!f->sg)
+ f->fr_sg = kcalloc(depth, sizeof(*f->fr_sg), GFP_KERNEL);
+ if (!f->fr_sg)
goto out_list_err;
- sg_init_table(f->sg, depth);
+ sg_init_table(f->fr_sg, depth);
init_completion(&f->fr_linv_done);
@@ -185,7 +210,7 @@ __frwr_release(struct rpcrdma_mw *r)
if (rc)
dprintk("RPC: %s: ib_dereg_mr status %i\n",
__func__, rc);
- kfree(r->frmr.sg);
+ kfree(r->frmr.fr_sg);
}
static int
@@ -231,6 +256,9 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
depth;
}
+ rpcrdma_set_max_header_sizes(ia, cdata, max_t(unsigned int, 1,
+ RPCRDMA_MAX_DATA_SEGS /
+ ia->ri_max_frmr_depth));
return 0;
}
@@ -243,7 +271,7 @@ frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
- rpcrdma_max_segments(r_xprt) * ia->ri_max_frmr_depth);
+ RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frmr_depth);
}
static void
@@ -350,9 +378,9 @@ frwr_op_init(struct rpcrdma_xprt *r_xprt)
return rc;
}
+ r->mw_xprt = r_xprt;
list_add(&r->mw_list, &buf->rb_mws);
list_add(&r->mw_all, &buf->rb_all);
- r->frmr.fr_xprt = r_xprt;
}
return 0;
@@ -396,12 +424,12 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
for (i = 0; i < nsegs;) {
if (seg->mr_page)
- sg_set_page(&frmr->sg[i],
+ sg_set_page(&frmr->fr_sg[i],
seg->mr_page,
seg->mr_len,
offset_in_page(seg->mr_offset));
else
- sg_set_buf(&frmr->sg[i], seg->mr_offset,
+ sg_set_buf(&frmr->fr_sg[i], seg->mr_offset,
seg->mr_len);
++seg;
@@ -412,25 +440,26 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
break;
}
- frmr->sg_nents = i;
+ frmr->fr_nents = i;
+ frmr->fr_dir = direction;
- dma_nents = ib_dma_map_sg(device, frmr->sg, frmr->sg_nents, direction);
+ dma_nents = ib_dma_map_sg(device, frmr->fr_sg, frmr->fr_nents, direction);
if (!dma_nents) {
pr_err("RPC: %s: failed to dma map sg %p sg_nents %u\n",
- __func__, frmr->sg, frmr->sg_nents);
+ __func__, frmr->fr_sg, frmr->fr_nents);
return -ENOMEM;
}
- n = ib_map_mr_sg(mr, frmr->sg, frmr->sg_nents, NULL, PAGE_SIZE);
- if (unlikely(n != frmr->sg_nents)) {
+ n = ib_map_mr_sg(mr, frmr->fr_sg, frmr->fr_nents, NULL, PAGE_SIZE);
+ if (unlikely(n != frmr->fr_nents)) {
pr_err("RPC: %s: failed to map mr %p (%u/%u)\n",
- __func__, frmr->fr_mr, n, frmr->sg_nents);
+ __func__, frmr->fr_mr, n, frmr->fr_nents);
rc = n < 0 ? n : -EINVAL;
goto out_senderr;
}
dprintk("RPC: %s: Using frmr %p to map %u segments (%u bytes)\n",
- __func__, mw, frmr->sg_nents, mr->length);
+ __func__, mw, frmr->fr_nents, mr->length);
key = (u8)(mr->rkey & 0x000000FF);
ib_update_fast_reg_key(mr, ++key);
@@ -452,18 +481,16 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
if (rc)
goto out_senderr;
- seg1->mr_dir = direction;
seg1->rl_mw = mw;
seg1->mr_rkey = mr->rkey;
seg1->mr_base = mr->iova;
- seg1->mr_nsegs = frmr->sg_nents;
+ seg1->mr_nsegs = frmr->fr_nents;
seg1->mr_len = mr->length;
- return frmr->sg_nents;
+ return frmr->fr_nents;
out_senderr:
dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc);
- ib_dma_unmap_sg(device, frmr->sg, dma_nents, direction);
__frwr_queue_recovery(mw);
return rc;
}
@@ -487,24 +514,6 @@ __frwr_prepare_linv_wr(struct rpcrdma_mr_seg *seg)
return invalidate_wr;
}
-static void
-__frwr_dma_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
- int rc)
-{
- struct ib_device *device = r_xprt->rx_ia.ri_device;
- struct rpcrdma_mw *mw = seg->rl_mw;
- struct rpcrdma_frmr *f = &mw->frmr;
-
- seg->rl_mw = NULL;
-
- ib_dma_unmap_sg(device, f->sg, f->sg_nents, seg->mr_dir);
-
- if (!rc)
- rpcrdma_put_mw(r_xprt, mw);
- else
- __frwr_queue_recovery(mw);
-}
-
/* Invalidate all memory regions that were registered for "req".
*
* Sleeps until it is safe for the host CPU to access the
@@ -518,6 +527,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
struct rpcrdma_mr_seg *seg;
unsigned int i, nchunks;
struct rpcrdma_frmr *f;
+ struct rpcrdma_mw *mw;
int rc;
dprintk("RPC: %s: req %p\n", __func__, req);
@@ -558,11 +568,8 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
* unless ri_id->qp is a valid pointer.
*/
rc = ib_post_send(ia->ri_id->qp, invalidate_wrs, &bad_wr);
- if (rc) {
- pr_warn("%s: ib_post_send failed %i\n", __func__, rc);
- rdma_disconnect(ia->ri_id);
- goto unmap;
- }
+ if (rc)
+ goto reset_mrs;
wait_for_completion(&f->fr_linv_done);
@@ -572,56 +579,65 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
unmap:
for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
seg = &req->rl_segments[i];
+ mw = seg->rl_mw;
+ seg->rl_mw = NULL;
+ f = &mw->frmr;
- __frwr_dma_unmap(r_xprt, seg, rc);
+ ib_dma_unmap_sg(ia->ri_device, f->fr_sg, f->fr_nents,
+ f->fr_dir);
+ rpcrdma_put_mw(r_xprt, mw);
i += seg->mr_nsegs;
seg->mr_nsegs = 0;
}
req->rl_nchunks = 0;
-}
+ return;
-/* Post a LOCAL_INV Work Request to prevent further remote access
- * via RDMA READ or RDMA WRITE.
- */
-static int
-frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
-{
- struct rpcrdma_mr_seg *seg1 = seg;
- struct rpcrdma_ia *ia = &r_xprt->rx_ia;
- struct rpcrdma_mw *mw = seg1->rl_mw;
- struct rpcrdma_frmr *frmr = &mw->frmr;
- struct ib_send_wr *invalidate_wr, *bad_wr;
- int rc, nsegs = seg->mr_nsegs;
+reset_mrs:
+ pr_warn("%s: ib_post_send failed %i\n", __func__, rc);
- dprintk("RPC: %s: FRMR %p\n", __func__, mw);
+ /* Find and reset the MRs in the LOCAL_INV WRs that did not
+ * get posted. This is synchronous, and slow.
+ */
+ for (i = 0, nchunks = req->rl_nchunks; nchunks; nchunks--) {
+ seg = &req->rl_segments[i];
+ mw = seg->rl_mw;
+ f = &mw->frmr;
- seg1->rl_mw = NULL;
- frmr->fr_state = FRMR_IS_INVALID;
- invalidate_wr = &mw->frmr.fr_invwr;
+ if (f->fr_mr->rkey == bad_wr->ex.invalidate_rkey) {
+ __frwr_reset_mr(ia, mw);
+ bad_wr = bad_wr->next;
+ }
- memset(invalidate_wr, 0, sizeof(*invalidate_wr));
- frmr->fr_cqe.done = frwr_wc_localinv;
- invalidate_wr->wr_cqe = &frmr->fr_cqe;
- invalidate_wr->opcode = IB_WR_LOCAL_INV;
- invalidate_wr->ex.invalidate_rkey = frmr->fr_mr->rkey;
- DECR_CQCOUNT(&r_xprt->rx_ep);
+ i += seg->mr_nsegs;
+ }
+ goto unmap;
+}
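
The reset_mrs path above leans on a verbs contract: when ib_post_send() fails, bad_wr points at the first WR in the chain that was not posted, and nothing after it was posted either, so comparing each MR's rkey against the bad_wr chain identifies exactly the MRs that still need a synchronous reset. A minimal user-space sketch of that matching walk (hypothetical types, not the verbs API):

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical stand-in for the chained LOCAL_INV ib_send_wr's */
    struct send_wr {
            struct send_wr *next;
            uint32_t invalidate_rkey;
    };

    /* Reset only the MRs whose LOCAL_INV WR was never posted: these
     * are exactly the WRs reachable from bad_wr, in posting order.
     */
    static void reset_unposted(const uint32_t *rkeys, int n,
                               struct send_wr *bad_wr)
    {
            int i;

            for (i = 0; i < n; i++) {
                    if (bad_wr && rkeys[i] == bad_wr->invalidate_rkey) {
                            printf("reset MR with rkey 0x%08x\n",
                                   (unsigned int)rkeys[i]);
                            bad_wr = bad_wr->next;
                    }
            }
    }

    int main(void)
    {
            uint32_t rkeys[3] = { 0x10, 0x20, 0x30 };
            struct send_wr w3 = { NULL, 0x30 };
            struct send_wr w2 = { &w3, 0x20 };

            /* Suppose the post failed starting at the second WR */
            reset_unposted(rkeys, 3, &w2);  /* resets 0x20 and 0x30 only */
            return 0;
    }
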
- ib_dma_unmap_sg(ia->ri_device, frmr->sg, frmr->sg_nents, seg1->mr_dir);
- read_lock(&ia->ri_qplock);
- rc = ib_post_send(ia->ri_id->qp, invalidate_wr, &bad_wr);
- read_unlock(&ia->ri_qplock);
- if (rc)
- goto out_err;
+/* Use a slow, safe mechanism to invalidate all memory regions
+ * that were registered for "req".
+ */
+static void
+frwr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
+ bool sync)
+{
+ struct rpcrdma_mr_seg *seg;
+ struct rpcrdma_mw *mw;
+ unsigned int i;
- rpcrdma_put_mw(r_xprt, mw);
- return nsegs;
+ for (i = 0; req->rl_nchunks; req->rl_nchunks--) {
+ seg = &req->rl_segments[i];
+ mw = seg->rl_mw;
-out_err:
- dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc);
- __frwr_queue_recovery(mw);
- return nsegs;
+ if (sync)
+ __frwr_reset_and_unmap(r_xprt, mw);
+ else
+ __frwr_queue_recovery(mw);
+
+ i += seg->mr_nsegs;
+ seg->mr_nsegs = 0;
+ seg->rl_mw = NULL;
+ }
}
static void
@@ -643,7 +659,7 @@ frwr_op_destroy(struct rpcrdma_buffer *buf)
const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
.ro_map = frwr_op_map,
.ro_unmap_sync = frwr_op_unmap_sync,
- .ro_unmap = frwr_op_unmap,
+ .ro_unmap_safe = frwr_op_unmap_safe,
.ro_open = frwr_op_open,
.ro_maxpages = frwr_op_maxpages,
.ro_init = frwr_op_init,
diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c
index 481b9b6f4a15..3750596cc432 100644
--- a/net/sunrpc/xprtrdma/physical_ops.c
+++ b/net/sunrpc/xprtrdma/physical_ops.c
@@ -36,8 +36,11 @@ physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
__func__, PTR_ERR(mr));
return -ENOMEM;
}
-
ia->ri_dma_mr = mr;
+
+ rpcrdma_set_max_header_sizes(ia, cdata, min_t(unsigned int,
+ RPCRDMA_MAX_DATA_SEGS,
+ RPCRDMA_MAX_HDR_SEGS));
return 0;
}
@@ -47,7 +50,7 @@ static size_t
physical_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
- rpcrdma_max_segments(r_xprt));
+ RPCRDMA_MAX_HDR_SEGS);
}
static int
@@ -71,17 +74,6 @@ physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
return 1;
}
-/* Unmap a memory region, but leave it registered.
- */
-static int
-physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
-{
- struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-
- rpcrdma_unmap_one(ia->ri_device, seg);
- return 1;
-}
-
/* DMA unmap all memory regions that were mapped for "req".
*/
static void
@@ -94,6 +86,25 @@ physical_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
rpcrdma_unmap_one(device, &req->rl_segments[i++]);
}
+/* Use a slow, safe mechanism to invalidate all memory regions
+ * that were registered for "req".
+ *
+ * For physical memory registration, there is no good way to
+ * fence a single MR that has been advertised to the server. The
+ * client has already handed the server an R_key that cannot be
+ * invalidated and is shared by all MRs on this connection.
+ * Tearing down the PD might be the only safe choice, but it's
+ * not clear that a freshly acquired DMA R_key would differ from
+ * the one used by the PD that was just destroyed.
+ * FIXME.
+ */
+static void
+physical_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
+ bool sync)
+{
+ physical_op_unmap_sync(r_xprt, req);
+}
+
static void
physical_op_destroy(struct rpcrdma_buffer *buf)
{
@@ -102,7 +113,7 @@ physical_op_destroy(struct rpcrdma_buffer *buf)
const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
.ro_map = physical_op_map,
.ro_unmap_sync = physical_op_unmap_sync,
- .ro_unmap = physical_op_unmap,
+ .ro_unmap_safe = physical_op_unmap_safe,
.ro_open = physical_op_open,
.ro_maxpages = physical_op_maxpages,
.ro_init = physical_op_init,
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 888823bb6dae..35a81096e83d 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -61,26 +61,84 @@ enum rpcrdma_chunktype {
rpcrdma_replych
};
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static const char transfertypes[][12] = {
- "pure inline", /* no chunks */
- " read chunk", /* some argument via rdma read */
- "*read chunk", /* entire request via rdma read */
- "write chunk", /* some result via rdma write */
+ "inline", /* no chunks */
+ "read list", /* some argument via rdma read */
+ "*read list", /* entire request via rdma read */
+ "write list", /* some result via rdma write */
"reply chunk" /* entire reply via rdma write */
};
-#endif
+
+/* Returns size of largest RPC-over-RDMA header in a Call message
+ *
+ * The largest Call header contains a full-size Read list and a
+ * minimal Reply chunk.
+ */
+static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
+{
+ unsigned int size;
+
+ /* Fixed header fields and list discriminators */
+ size = RPCRDMA_HDRLEN_MIN;
+
+ /* Maximum Read list size */
+ maxsegs += 2; /* segment for head and tail buffers */
+ size += maxsegs * sizeof(struct rpcrdma_read_chunk);
+
+ /* Minimal Reply chunk size */
+ size += sizeof(__be32); /* segment count */
+ size += sizeof(struct rpcrdma_segment);
+ size += sizeof(__be32); /* list discriminator */
+
+ dprintk("RPC: %s: max call header size = %u\n",
+ __func__, size);
+ return size;
+}
+
+/* Returns size of largest RPC-over-RDMA header in a Reply message
+ *
+ * There is only one Write list or one Reply chunk per Reply
+ * message. The larger list is the Write list.
+ */
+static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
+{
+ unsigned int size;
+
+ /* Fixed header fields and list discriminators */
+ size = RPCRDMA_HDRLEN_MIN;
+
+ /* Maximum Write list size */
+ maxsegs += 2; /* segment for head and tail buffers */
+ size += sizeof(__be32); /* segment count */
+ size += maxsegs * sizeof(struct rpcrdma_segment);
+ size += sizeof(__be32); /* list discriminator */
+
+ dprintk("RPC: %s: max reply header size = %u\n",
+ __func__, size);
+ return size;
+}
+
+void rpcrdma_set_max_header_sizes(struct rpcrdma_ia *ia,
+ struct rpcrdma_create_data_internal *cdata,
+ unsigned int maxsegs)
+{
+ ia->ri_max_inline_write = cdata->inline_wsize -
+ rpcrdma_max_call_header_size(maxsegs);
+ ia->ri_max_inline_read = cdata->inline_rsize -
+ rpcrdma_max_reply_header_size(maxsegs);
+}
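
To sanity-check the two estimators above with concrete numbers, here is a stand-alone sketch. RPCRDMA_HDRLEN_MIN = 28 bytes, 16 bytes per XDR segment (HLOO) and 24 bytes per read chunk (discriminator + position + HLOO) are assumptions derived from the RPC-over-RDMA XDR layout, not values taken from this patch:

    #include <stdio.h>

    #define HDRLEN_MIN      28  /* xid, vers, credits, proc + 3 discriminators */
    #define SEGMENT_SIZE    16  /* handle (4) + length (4) + offset (8) */
    #define READ_CHUNK_SIZE 24  /* discriminator (4) + position (4) + HLOO (16) */

    int main(void)
    {
            unsigned int maxsegs = 8 + 2;   /* RPCRDMA_MAX_HDR_SEGS + head/tail */
            unsigned int call, reply;

            /* Largest Call header: full Read list + minimal Reply chunk */
            call = HDRLEN_MIN;
            call += maxsegs * READ_CHUNK_SIZE;
            call += 4 + SEGMENT_SIZE + 4;

            /* Largest Reply header: a full Write list */
            reply = HDRLEN_MIN;
            reply += 4 + maxsegs * SEGMENT_SIZE + 4;

            /* Prints 292 and 196: both fit well under the smallest
             * inline threshold of 1024 bytes.
             */
            printf("max call hdr %u, max reply hdr %u\n", call, reply);
            return 0;
    }
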
/* The client can send a request inline as long as the RPCRDMA header
* plus the RPC call fit under the transport's inline limit. If the
* combined call message size exceeds that limit, the client must use
* the read chunk list for this operation.
*/
-static bool rpcrdma_args_inline(struct rpc_rqst *rqst)
+static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
+ struct rpc_rqst *rqst)
{
- unsigned int callsize = RPCRDMA_HDRLEN_MIN + rqst->rq_snd_buf.len;
+ struct rpcrdma_ia *ia = &r_xprt->rx_ia;
- return callsize <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);
+ return rqst->rq_snd_buf.len <= ia->ri_max_inline_write;
}
/* The client can't know how large the actual reply will be. Thus it
@@ -89,11 +147,12 @@ static bool rpcrdma_args_inline(struct rpc_rqst *rqst)
* limit, the client must provide a write list or a reply chunk for
* this request.
*/
-static bool rpcrdma_results_inline(struct rpc_rqst *rqst)
+static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
+ struct rpc_rqst *rqst)
{
- unsigned int repsize = RPCRDMA_HDRLEN_MIN + rqst->rq_rcv_buf.buflen;
+ struct rpcrdma_ia *ia = &r_xprt->rx_ia;
- return repsize <= RPCRDMA_INLINE_READ_THRESHOLD(rqst);
+ return rqst->rq_rcv_buf.buflen <= ia->ri_max_inline_read;
}
static int
@@ -226,23 +285,16 @@ rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
return n;
}
-/*
- * Create read/write chunk lists, and reply chunks, for RDMA
- *
- * Assume check against THRESHOLD has been done, and chunks are required.
- * Assume only encoding one list entry for read|write chunks. The NFSv3
- * protocol is simple enough to allow this as it only has a single "bulk
- * result" in each procedure - complicated NFSv4 COMPOUNDs are not. (The
- * RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.)
- *
- * When used for a single reply chunk (which is a special write
- * chunk used for the entire reply, rather than just the data), it
- * is used primarily for READDIR and READLINK which would otherwise
- * be severely size-limited by a small rdma inline read max. The server
- * response will come back as an RDMA Write, followed by a message
- * of type RDMA_NOMSG carrying the xid and length. As a result, reply
- * chunks do not provide data alignment, however they do not require
- * "fixup" (moving the response to the upper layer buffer) either.
+static inline __be32 *
+xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr_seg *seg)
+{
+ *iptr++ = cpu_to_be32(seg->mr_rkey);
+ *iptr++ = cpu_to_be32(seg->mr_len);
+ return xdr_encode_hyper(iptr, seg->mr_base);
+}
+
+/* XDR-encode the Read list. Supports encoding a list of read
+ * segments that belong to a single read chunk.
*
* Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
*
@@ -250,131 +302,190 @@ rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
* N elements, position P (same P for all chunks of same arg!):
* 1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
*
+ * Returns a pointer to the XDR word in the RDMA header following
+ * the end of the Read list, or an error pointer.
+ */
+static __be32 *
+rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
+ struct rpcrdma_req *req, struct rpc_rqst *rqst,
+ __be32 *iptr, enum rpcrdma_chunktype rtype)
+{
+ struct rpcrdma_mr_seg *seg = req->rl_nextseg;
+ unsigned int pos;
+ int n, nsegs;
+
+ if (rtype == rpcrdma_noch) {
+ *iptr++ = xdr_zero; /* item not present */
+ return iptr;
+ }
+
+ pos = rqst->rq_snd_buf.head[0].iov_len;
+ if (rtype == rpcrdma_areadch)
+ pos = 0;
+ nsegs = rpcrdma_convert_iovs(&rqst->rq_snd_buf, pos, rtype, seg,
+ RPCRDMA_MAX_SEGS - req->rl_nchunks);
+ if (nsegs < 0)
+ return ERR_PTR(nsegs);
+
+ do {
+ n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, false);
+ if (n <= 0)
+ return ERR_PTR(n);
+
+ *iptr++ = xdr_one; /* item present */
+
+ /* All read segments in this chunk
+ * have the same "position".
+ */
+ *iptr++ = cpu_to_be32(pos);
+ iptr = xdr_encode_rdma_segment(iptr, seg);
+
+ dprintk("RPC: %5u %s: read segment pos %u "
+ "%d@0x%016llx:0x%08x (%s)\n",
+ rqst->rq_task->tk_pid, __func__, pos,
+ seg->mr_len, (unsigned long long)seg->mr_base,
+ seg->mr_rkey, n < nsegs ? "more" : "last");
+
+ r_xprt->rx_stats.read_chunk_count++;
+ req->rl_nchunks++;
+ seg += n;
+ nsegs -= n;
+ } while (nsegs);
+ req->rl_nextseg = seg;
+
+ /* Finish Read list */
+ *iptr++ = xdr_zero; /* Next item not present */
+ return iptr;
+}
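
To make the 1 - PHLOO - 0 shape concrete, a user-space sketch that lays out a single-segment Read list the same way; the types are hypothetical stand-ins, htonl() plays the role of cpu_to_be32(), and the two-word store mimics xdr_encode_hyper():

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>  /* htonl() */

    struct seg { uint32_t rkey; uint32_t len; uint64_t base; };

    static uint32_t *encode_segment(uint32_t *p, const struct seg *s)
    {
            *p++ = htonl(s->rkey);
            *p++ = htonl(s->len);
            *p++ = htonl((uint32_t)(s->base >> 32)); /* xdr_encode_hyper() */
            *p++ = htonl((uint32_t)s->base);
            return p;
    }

    int main(void)
    {
            uint32_t buf[8], *p = buf;
            struct seg s = { 0x1234, 256, 0x10000 };

            *p++ = htonl(1);        /* xdr_one: item present */
            *p++ = htonl(0);        /* position: Position Zero Read chunk */
            p = encode_segment(p, &s);
            *p++ = 0;               /* xdr_zero: next item not present */

            /* 7 XDR words: 24 bytes of read segment + 4-byte terminator */
            printf("Read list is %u XDR words\n", (unsigned int)(p - buf));
            return 0;
    }
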
+
+/* XDR-encode the Write list. Supports encoding a list containing
+ * one array of plain segments that belong to a single write chunk.
+ *
+ * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
+ *
* Write chunklist (a list of (one) counted array):
* N elements:
* 1 - N - HLOO - HLOO - ... - HLOO - 0
*
+ * Returns a pointer to the XDR word in the RDMA header following
+ * the end of the Write list, or an error pointer.
+ */
+static __be32 *
+rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
+ struct rpc_rqst *rqst, __be32 *iptr,
+ enum rpcrdma_chunktype wtype)
+{
+ struct rpcrdma_mr_seg *seg = req->rl_nextseg;
+ int n, nsegs, nchunks;
+ __be32 *segcount;
+
+ if (wtype != rpcrdma_writech) {
+ *iptr++ = xdr_zero; /* no Write list present */
+ return iptr;
+ }
+
+ nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf,
+ rqst->rq_rcv_buf.head[0].iov_len,
+ wtype, seg,
+ RPCRDMA_MAX_SEGS - req->rl_nchunks);
+ if (nsegs < 0)
+ return ERR_PTR(nsegs);
+
+ *iptr++ = xdr_one; /* Write list present */
+ segcount = iptr++; /* save location of segment count */
+
+ nchunks = 0;
+ do {
+ n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, true);
+ if (n <= 0)
+ return ERR_PTR(n);
+
+ iptr = xdr_encode_rdma_segment(iptr, seg);
+
+ dprintk("RPC: %5u %s: write segment "
+ "%d@0x%016llx:0x%08x (%s)\n",
+ rqst->rq_task->tk_pid, __func__,
+ seg->mr_len, (unsigned long long)seg->mr_base,
+ seg->mr_rkey, n < nsegs ? "more" : "last");
+
+ r_xprt->rx_stats.write_chunk_count++;
+ r_xprt->rx_stats.total_rdma_request += seg->mr_len;
+ req->rl_nchunks++;
+ nchunks++;
+ seg += n;
+ nsegs -= n;
+ } while (nsegs);
+ req->rl_nextseg = seg;
+
+ /* Update count of segments in this Write chunk */
+ *segcount = cpu_to_be32(nchunks);
+
+ /* Finish Write list */
+ *iptr++ = xdr_zero; /* Next item not present */
+ return iptr;
+}
+
+/* XDR-encode the Reply chunk. Supports encoding an array of plain
+ * segments that belong to a single write (reply) chunk.
+ *
+ * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
+ *
* Reply chunk (a counted array):
* N elements:
* 1 - N - HLOO - HLOO - ... - HLOO
*
- * Returns positive RPC/RDMA header size, or negative errno.
+ * Returns a pointer to the XDR word in the RDMA header following
+ * the end of the Reply chunk, or an error pointer.
*/
-
-static ssize_t
-rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
- struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
+static __be32 *
+rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
+ struct rpcrdma_req *req, struct rpc_rqst *rqst,
+ __be32 *iptr, enum rpcrdma_chunktype wtype)
{
- struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
- struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
- int n, nsegs, nchunks = 0;
- unsigned int pos;
- struct rpcrdma_mr_seg *seg = req->rl_segments;
- struct rpcrdma_read_chunk *cur_rchunk = NULL;
- struct rpcrdma_write_array *warray = NULL;
- struct rpcrdma_write_chunk *cur_wchunk = NULL;
- __be32 *iptr = headerp->rm_body.rm_chunks;
- int (*map)(struct rpcrdma_xprt *, struct rpcrdma_mr_seg *, int, bool);
-
- if (type == rpcrdma_readch || type == rpcrdma_areadch) {
- /* a read chunk - server will RDMA Read our memory */
- cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
- } else {
- /* a write or reply chunk - server will RDMA Write our memory */
- *iptr++ = xdr_zero; /* encode a NULL read chunk list */
- if (type == rpcrdma_replych)
- *iptr++ = xdr_zero; /* a NULL write chunk list */
- warray = (struct rpcrdma_write_array *) iptr;
- cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
- }
+ struct rpcrdma_mr_seg *seg = req->rl_nextseg;
+ int n, nsegs, nchunks;
+ __be32 *segcount;
- if (type == rpcrdma_replych || type == rpcrdma_areadch)
- pos = 0;
- else
- pos = target->head[0].iov_len;
+ if (wtype != rpcrdma_replych) {
+ *iptr++ = xdr_zero; /* no Reply chunk present */
+ return iptr;
+ }
- nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
+ nsegs = rpcrdma_convert_iovs(&rqst->rq_rcv_buf, 0, wtype, seg,
+ RPCRDMA_MAX_SEGS - req->rl_nchunks);
if (nsegs < 0)
- return nsegs;
+ return ERR_PTR(nsegs);
- map = r_xprt->rx_ia.ri_ops->ro_map;
+ *iptr++ = xdr_one; /* Reply chunk present */
+ segcount = iptr++; /* save location of segment count */
+
+ nchunks = 0;
do {
- n = map(r_xprt, seg, nsegs, cur_wchunk != NULL);
+ n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, true);
if (n <= 0)
- goto out;
- if (cur_rchunk) { /* read */
- cur_rchunk->rc_discrim = xdr_one;
- /* all read chunks have the same "position" */
- cur_rchunk->rc_position = cpu_to_be32(pos);
- cur_rchunk->rc_target.rs_handle =
- cpu_to_be32(seg->mr_rkey);
- cur_rchunk->rc_target.rs_length =
- cpu_to_be32(seg->mr_len);
- xdr_encode_hyper(
- (__be32 *)&cur_rchunk->rc_target.rs_offset,
- seg->mr_base);
- dprintk("RPC: %s: read chunk "
- "elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
- seg->mr_len, (unsigned long long)seg->mr_base,
- seg->mr_rkey, pos, n < nsegs ? "more" : "last");
- cur_rchunk++;
- r_xprt->rx_stats.read_chunk_count++;
- } else { /* write/reply */
- cur_wchunk->wc_target.rs_handle =
- cpu_to_be32(seg->mr_rkey);
- cur_wchunk->wc_target.rs_length =
- cpu_to_be32(seg->mr_len);
- xdr_encode_hyper(
- (__be32 *)&cur_wchunk->wc_target.rs_offset,
- seg->mr_base);
- dprintk("RPC: %s: %s chunk "
- "elem %d@0x%llx:0x%x (%s)\n", __func__,
- (type == rpcrdma_replych) ? "reply" : "write",
- seg->mr_len, (unsigned long long)seg->mr_base,
- seg->mr_rkey, n < nsegs ? "more" : "last");
- cur_wchunk++;
- if (type == rpcrdma_replych)
- r_xprt->rx_stats.reply_chunk_count++;
- else
- r_xprt->rx_stats.write_chunk_count++;
- r_xprt->rx_stats.total_rdma_request += seg->mr_len;
- }
+ return ERR_PTR(n);
+
+ iptr = xdr_encode_rdma_segment(iptr, seg);
+
+ dprintk("RPC: %5u %s: reply segment "
+ "%d@0x%016llx:0x%08x (%s)\n",
+ rqst->rq_task->tk_pid, __func__,
+ seg->mr_len, (unsigned long long)seg->mr_base,
+ seg->mr_rkey, n < nsegs ? "more" : "last");
+
+ r_xprt->rx_stats.reply_chunk_count++;
+ r_xprt->rx_stats.total_rdma_request += seg->mr_len;
+ req->rl_nchunks++;
nchunks++;
seg += n;
nsegs -= n;
} while (nsegs);
+ req->rl_nextseg = seg;
- /* success. all failures return above */
- req->rl_nchunks = nchunks;
-
- /*
- * finish off header. If write, marshal discrim and nchunks.
- */
- if (cur_rchunk) {
- iptr = (__be32 *) cur_rchunk;
- *iptr++ = xdr_zero; /* finish the read chunk list */
- *iptr++ = xdr_zero; /* encode a NULL write chunk list */
- *iptr++ = xdr_zero; /* encode a NULL reply chunk */
- } else {
- warray->wc_discrim = xdr_one;
- warray->wc_nchunks = cpu_to_be32(nchunks);
- iptr = (__be32 *) cur_wchunk;
- if (type == rpcrdma_writech) {
- *iptr++ = xdr_zero; /* finish the write chunk list */
- *iptr++ = xdr_zero; /* encode a NULL reply chunk */
- }
- }
-
- /*
- * Return header size.
- */
- return (unsigned char *)iptr - (unsigned char *)headerp;
+ /* Update count of segments in the Reply chunk */
+ *segcount = cpu_to_be32(nchunks);
-out:
- for (pos = 0; nchunks--;)
- pos += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt,
- &req->rl_segments[pos]);
- return n;
+ return iptr;
}
/*
@@ -440,13 +551,10 @@ static void rpcrdma_inline_pullup(struct rpc_rqst *rqst)
* Marshal a request: the primary job of this routine is to choose
* the transfer modes. See comments below.
*
- * Uses multiple RDMA IOVs for a request:
- * [0] -- RPC RDMA header, which uses memory from the *start* of the
- * preregistered buffer that already holds the RPC data in
- * its middle.
- * [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
- * [2] -- optional padding.
- * [3] -- if padded, header only in [1] and data here.
+ * Prepares up to two IOVs per Call message:
+ *
+ * [0] -- RPC RDMA header
+ * [1] -- the RPC header/data
*
* Returns zero on success, otherwise a negative errno.
*/
@@ -457,24 +565,17 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
struct rpc_xprt *xprt = rqst->rq_xprt;
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
- char *base;
- size_t rpclen;
- ssize_t hdrlen;
enum rpcrdma_chunktype rtype, wtype;
struct rpcrdma_msg *headerp;
+ ssize_t hdrlen;
+ size_t rpclen;
+ __be32 *iptr;
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
return rpcrdma_bc_marshal_reply(rqst);
#endif
- /*
- * rpclen gets amount of data in first buffer, which is the
- * pre-registered buffer.
- */
- base = rqst->rq_svec[0].iov_base;
- rpclen = rqst->rq_svec[0].iov_len;
-
headerp = rdmab_to_msg(req->rl_rdmabuf);
/* don't byte-swap XID, it's already done in request */
headerp->rm_xid = rqst->rq_xid;
@@ -485,15 +586,16 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
/*
* Chunks needed for results?
*
- * o Read ops return data as write chunk(s), header as inline.
* o If the expected result is under the inline threshold, all ops
* return as inline.
+ * o Large read ops return data as write chunk(s), header as
+ * inline.
* o Large non-read ops return as a single reply chunk.
*/
- if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
- wtype = rpcrdma_writech;
- else if (rpcrdma_results_inline(rqst))
+ if (rpcrdma_results_inline(r_xprt, rqst))
wtype = rpcrdma_noch;
+ else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
+ wtype = rpcrdma_writech;
else
wtype = rpcrdma_replych;
@@ -511,10 +613,14 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
* that both has a data payload, and whose non-data arguments
* by themselves are larger than the inline threshold.
*/
- if (rpcrdma_args_inline(rqst)) {
+ if (rpcrdma_args_inline(r_xprt, rqst)) {
rtype = rpcrdma_noch;
+ rpcrdma_inline_pullup(rqst);
+ rpclen = rqst->rq_svec[0].iov_len;
} else if (rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
rtype = rpcrdma_readch;
+ rpclen = rqst->rq_svec[0].iov_len;
+ rpclen += rpcrdma_tail_pullup(&rqst->rq_snd_buf);
} else {
r_xprt->rx_stats.nomsg_call_count++;
headerp->rm_type = htonl(RDMA_NOMSG);
@@ -522,57 +628,50 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
rpclen = 0;
}
- /* The following simplification is not true forever */
- if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
- wtype = rpcrdma_noch;
- if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) {
- dprintk("RPC: %s: cannot marshal multiple chunk lists\n",
- __func__);
- return -EIO;
- }
-
- hdrlen = RPCRDMA_HDRLEN_MIN;
-
- /*
- * Pull up any extra send data into the preregistered buffer.
- * When padding is in use and applies to the transfer, insert
- * it and change the message type.
+ /* This implementation supports the following combinations
+ * of chunk lists in one RPC-over-RDMA Call message:
+ *
+ * - Read list
+ * - Write list
+ * - Reply chunk
+ * - Read list + Reply chunk
+ *
+ * It might not yet support the following combinations:
+ *
+ * - Read list + Write list
+ *
+ * It does not support the following combinations:
+ *
+ * - Write list + Reply chunk
+ * - Read list + Write list + Reply chunk
+ *
+ * This implementation supports only a single chunk in each
+ * Read or Write list. Thus for example the client cannot
+ * send a Call message with a Position Zero Read chunk and a
+ * regular Read chunk at the same time.
*/
- if (rtype == rpcrdma_noch) {
-
- rpcrdma_inline_pullup(rqst);
-
- headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
- headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
- headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
- /* new length after pullup */
- rpclen = rqst->rq_svec[0].iov_len;
- } else if (rtype == rpcrdma_readch)
- rpclen += rpcrdma_tail_pullup(&rqst->rq_snd_buf);
- if (rtype != rpcrdma_noch) {
- hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
- headerp, rtype);
- wtype = rtype; /* simplify dprintk */
-
- } else if (wtype != rpcrdma_noch) {
- hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
- headerp, wtype);
- }
- if (hdrlen < 0)
- return hdrlen;
+ req->rl_nchunks = 0;
+ req->rl_nextseg = req->rl_segments;
+ iptr = headerp->rm_body.rm_chunks;
+ iptr = rpcrdma_encode_read_list(r_xprt, req, rqst, iptr, rtype);
+ if (IS_ERR(iptr))
+ goto out_unmap;
+ iptr = rpcrdma_encode_write_list(r_xprt, req, rqst, iptr, wtype);
+ if (IS_ERR(iptr))
+ goto out_unmap;
+ iptr = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, iptr, wtype);
+ if (IS_ERR(iptr))
+ goto out_unmap;
+ hdrlen = (unsigned char *)iptr - (unsigned char *)headerp;
+
+ if (hdrlen + rpclen > RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
+ goto out_overflow;
+
+ dprintk("RPC: %5u %s: %s/%s: hdrlen %zd rpclen %zd\n",
+ rqst->rq_task->tk_pid, __func__,
+ transfertypes[rtype], transfertypes[wtype],
+ hdrlen, rpclen);
- dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd"
- " headerp 0x%p base 0x%p lkey 0x%x\n",
- __func__, transfertypes[wtype], hdrlen, rpclen,
- headerp, base, rdmab_lkey(req->rl_rdmabuf));
-
- /*
- * initialize send_iov's - normally only two: rdma chunk header and
- * single preregistered RPC header buffer, but if padding is present,
- * then use a preregistered (and zeroed) pad buffer between the RPC
- * header and any write data. In all non-rdma cases, any following
- * data has been copied into the RPC header buffer.
- */
req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
req->rl_send_iov[0].length = hdrlen;
req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);
@@ -587,6 +686,18 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
req->rl_niovs = 2;
return 0;
+
+out_overflow:
+ pr_err("rpcrdma: send overflow: hdrlen %zd rpclen %zu %s/%s\n",
+ hdrlen, rpclen, transfertypes[rtype], transfertypes[wtype]);
+ /* Terminate this RPC. Chunks registered above will be
+ * released by xprt_release -> xprt_rdma_free.
+ */
+ return -EIO;
+
+out_unmap:
+ r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, false);
+ return PTR_ERR(iptr);
}
/*
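
rpcrdma_marshal_req() now threads a single cursor through the three chunk-list encoders; each returns either the advanced cursor or an ERR_PTR, so one IS_ERR() test after each call is enough to route to out_unmap. A compact user-space imitation of that control flow (hypothetical encoder, simplified ERR_PTR()/IS_ERR()):

    #include <errno.h>
    #include <stdio.h>
    #include <stdint.h>

    /* Simplified stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers */
    #define ERR_PTR(err)    ((void *)(long)(err))
    #define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-4095)

    /* Each encoder either advances the cursor past what it emitted or
     * returns an error pointer -- the convention used above.
     */
    static uint32_t *encode_list(uint32_t *p, const uint32_t *end, int present)
    {
            if (p >= end)
                    return ERR_PTR(-ENOSPC);
            *p++ = present ? 1 : 0; /* xdr_one / xdr_zero discriminator */
            return p;
    }

    int main(void)
    {
            uint32_t hdr[16], *p = hdr;
            const uint32_t *end = hdr + 16;

            p = encode_list(p, end, 1);     /* Read list */
            if (IS_ERR(p))
                    return 1;
            p = encode_list(p, end, 0);     /* Write list */
            if (IS_ERR(p))
                    return 1;
            p = encode_list(p, end, 0);     /* Reply chunk */
            if (IS_ERR(p))
                    return 1;

            printf("chunk lists consumed %u XDR words\n",
                   (unsigned int)(p - hdr));
            return 0;
    }
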
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index b1b009f10ea3..99d2e5b72726 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -73,6 +73,8 @@ static unsigned int xprt_rdma_memreg_strategy = RPCRDMA_FRMR;
static unsigned int min_slot_table_size = RPCRDMA_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPCRDMA_MAX_SLOT_TABLE;
+static unsigned int min_inline_size = RPCRDMA_MIN_INLINE;
+static unsigned int max_inline_size = RPCRDMA_MAX_INLINE;
static unsigned int zero;
static unsigned int max_padding = PAGE_SIZE;
static unsigned int min_memreg = RPCRDMA_BOUNCEBUFFERS;
@@ -96,6 +98,8 @@ static struct ctl_table xr_tunables_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
+ .extra1 = &min_inline_size,
+ .extra2 = &max_inline_size,
},
{
.procname = "rdma_max_inline_write",
@@ -103,6 +107,8 @@ static struct ctl_table xr_tunables_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
+ .extra1 = &min_inline_size,
+ .extra2 = &max_inline_size,
},
{
.procname = "rdma_inline_write_padding",
@@ -508,6 +514,7 @@ xprt_rdma_allocate(struct rpc_task *task, size_t size)
out:
dprintk("RPC: %s: size %zd, request 0x%p\n", __func__, size, req);
req->rl_connect_cookie = 0; /* our reserved value */
+ req->rl_task = task;
return req->rl_sendbuf->rg_base;
out_rdmabuf:
@@ -564,7 +571,6 @@ xprt_rdma_free(void *buffer)
struct rpcrdma_req *req;
struct rpcrdma_xprt *r_xprt;
struct rpcrdma_regbuf *rb;
- int i;
if (buffer == NULL)
return;
@@ -578,11 +584,8 @@ xprt_rdma_free(void *buffer)
dprintk("RPC: %s: called on 0x%p\n", __func__, req->rl_reply);
- for (i = 0; req->rl_nchunks;) {
- --req->rl_nchunks;
- i += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt,
- &req->rl_segments[i]);
- }
+ r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req,
+ !RPC_IS_ASYNC(req->rl_task));
rpcrdma_buffer_put(req);
}
@@ -707,6 +710,7 @@ static struct rpc_xprt_ops xprt_rdma_procs = {
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
.bc_setup = xprt_rdma_bc_setup,
.bc_up = xprt_rdma_bc_up,
+ .bc_maxpayload = xprt_rdma_bc_maxpayload,
.bc_free_rqst = xprt_rdma_bc_free_rqst,
.bc_destroy = xprt_rdma_bc_destroy,
#endif
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index f5ed9f982cd7..b044d98a1370 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -203,15 +203,6 @@ out_fail:
goto out_schedule;
}
-static void
-rpcrdma_flush_cqs(struct rpcrdma_ep *ep)
-{
- struct ib_wc wc;
-
- while (ib_poll_cq(ep->rep_attr.recv_cq, 1, &wc) > 0)
- rpcrdma_receive_wc(NULL, &wc);
-}
-
static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
@@ -374,23 +365,6 @@ out:
}
/*
- * Drain any cq, prior to teardown.
- */
-static void
-rpcrdma_clean_cq(struct ib_cq *cq)
-{
- struct ib_wc wc;
- int count = 0;
-
- while (1 == ib_poll_cq(cq, 1, &wc))
- ++count;
-
- if (count)
- dprintk("RPC: %s: flushed %d events (last 0x%x)\n",
- __func__, count, wc.opcode);
-}
-
-/*
* Exported functions.
*/
@@ -459,7 +433,6 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
dprintk("RPC: %s: memory registration strategy is '%s'\n",
__func__, ia->ri_ops->ro_displayname);
- rwlock_init(&ia->ri_qplock);
return 0;
out3:
@@ -515,7 +488,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
__func__);
return -ENOMEM;
}
- max_qp_wr = ia->ri_device->attrs.max_qp_wr - RPCRDMA_BACKWARD_WRS;
+ max_qp_wr = ia->ri_device->attrs.max_qp_wr - RPCRDMA_BACKWARD_WRS - 1;
/* check provider's send/recv wr limits */
if (cdata->max_requests > max_qp_wr)
@@ -526,11 +499,13 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
ep->rep_attr.srq = NULL;
ep->rep_attr.cap.max_send_wr = cdata->max_requests;
ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
+ ep->rep_attr.cap.max_send_wr += 1; /* drain cqe */
rc = ia->ri_ops->ro_open(ia, ep, cdata);
if (rc)
return rc;
ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
+ ep->rep_attr.cap.max_recv_wr += 1; /* drain cqe */
ep->rep_attr.cap.max_send_sge = RPCRDMA_MAX_IOVS;
ep->rep_attr.cap.max_recv_sge = 1;
ep->rep_attr.cap.max_inline_data = 0;
@@ -578,6 +553,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
ep->rep_attr.recv_cq = recvcq;
/* Initialize cma parameters */
+ memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));
/* RPC/RDMA does not use private data */
ep->rep_remote_cma.private_data = NULL;
@@ -591,7 +567,16 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
ep->rep_remote_cma.responder_resources =
ia->ri_device->attrs.max_qp_rd_atom;
- ep->rep_remote_cma.retry_count = 7;
+ /* Limit transport retries so client can detect server
+ * GID changes quickly. RPC layer handles re-establishing
+ * transport connection and retransmission.
+ */
+ ep->rep_remote_cma.retry_count = 6;
+
+ /* RPC-over-RDMA handles its own flow control. In addition,
+ * make all RNR NAKs visible so we know that RPC-over-RDMA
+ * flow control is working correctly (no NAKs should be seen).
+ */
ep->rep_remote_cma.flow_control = 0;
ep->rep_remote_cma.rnr_retry_count = 0;
@@ -622,13 +607,8 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
cancel_delayed_work_sync(&ep->rep_connect_worker);
- if (ia->ri_id->qp)
- rpcrdma_ep_disconnect(ep, ia);
-
- rpcrdma_clean_cq(ep->rep_attr.recv_cq);
- rpcrdma_clean_cq(ep->rep_attr.send_cq);
-
if (ia->ri_id->qp) {
+ rpcrdma_ep_disconnect(ep, ia);
rdma_destroy_qp(ia->ri_id);
ia->ri_id->qp = NULL;
}
@@ -659,7 +639,6 @@ retry:
dprintk("RPC: %s: reconnecting...\n", __func__);
rpcrdma_ep_disconnect(ep, ia);
- rpcrdma_flush_cqs(ep);
xprt = container_of(ia, struct rpcrdma_xprt, rx_ia);
id = rpcrdma_create_id(xprt, ia,
@@ -692,10 +671,8 @@ retry:
goto out;
}
- write_lock(&ia->ri_qplock);
old = ia->ri_id;
ia->ri_id = id;
- write_unlock(&ia->ri_qplock);
rdma_destroy_qp(old);
rpcrdma_destroy_id(old);
@@ -785,7 +762,6 @@ rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
int rc;
- rpcrdma_flush_cqs(ep);
rc = rdma_disconnect(ia->ri_id);
if (!rc) {
/* returns without wait if not connected */
@@ -797,6 +773,8 @@ rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
dprintk("RPC: %s: rdma_disconnect %i\n", __func__, rc);
ep->rep_connected = rc;
}
+
+ ib_drain_qp(ia->ri_id->qp);
}
struct rpcrdma_req *
@@ -1271,25 +1249,3 @@ out_rc:
rpcrdma_recv_buffer_put(rep);
return rc;
}
-
-/* How many chunk list items fit within our inline buffers?
- */
-unsigned int
-rpcrdma_max_segments(struct rpcrdma_xprt *r_xprt)
-{
- struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
- int bytes, segments;
-
- bytes = min_t(unsigned int, cdata->inline_wsize, cdata->inline_rsize);
- bytes -= RPCRDMA_HDRLEN_MIN;
- if (bytes < sizeof(struct rpcrdma_segment) * 2) {
- pr_warn("RPC: %s: inline threshold too small\n",
- __func__);
- return 0;
- }
-
- segments = 1 << (fls(bytes / sizeof(struct rpcrdma_segment)) - 1);
- dprintk("RPC: %s: max chunk list size = %d segments\n",
- __func__, segments);
- return segments;
-}
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 2ebc743cb96f..95cdc66225ee 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -65,7 +65,6 @@
*/
struct rpcrdma_ia {
const struct rpcrdma_memreg_ops *ri_ops;
- rwlock_t ri_qplock;
struct ib_device *ri_device;
struct rdma_cm_id *ri_id;
struct ib_pd *ri_pd;
@@ -73,6 +72,8 @@ struct rpcrdma_ia {
struct completion ri_done;
int ri_async_rc;
unsigned int ri_max_frmr_depth;
+ unsigned int ri_max_inline_write;
+ unsigned int ri_max_inline_read;
struct ib_qp_attr ri_qp_attr;
struct ib_qp_init_attr ri_qp_init_attr;
};
@@ -144,6 +145,26 @@ rdmab_to_msg(struct rpcrdma_regbuf *rb)
#define RPCRDMA_DEF_GFP (GFP_NOIO | __GFP_NOWARN)
+/* To ensure a transport can always make forward progress,
+ * the number of RDMA segments allowed in header chunk lists
+ * is capped at 8. This prevents less-capable devices and
+ * memory registrations from overrunning the Send buffer
+ * while building chunk lists.
+ *
+ * Elements of the Read list take up more room than the
+ * Write list or Reply chunk. 8 read segments means the Read
+ * list (or Write list or Reply chunk) cannot consume more
+ * than
+ *
+ * ((8 + 2) * read segment size) + 1 XDR word, or 244 bytes.
+ *
+ * And the fixed part of the header is another 24 bytes.
+ *
+ * The smallest inline threshold is 1024 bytes, ensuring that
+ * at least 750 bytes are available for RPC messages.
+ */
+#define RPCRDMA_MAX_HDR_SEGS (8)
+
/*
* struct rpcrdma_rep -- this structure encapsulates state required to recv
 * and complete a reply, asynchronously. It needs several pieces of
@@ -162,7 +183,9 @@ rdmab_to_msg(struct rpcrdma_regbuf *rb)
*/
#define RPCRDMA_MAX_DATA_SEGS ((1 * 1024 * 1024) / PAGE_SIZE)
-#define RPCRDMA_MAX_SEGS (RPCRDMA_MAX_DATA_SEGS + 2) /* head+tail = 2 */
+
+/* data segments + head/tail for Call + head/tail for Reply */
+#define RPCRDMA_MAX_SEGS (RPCRDMA_MAX_DATA_SEGS + 4)
struct rpcrdma_buffer;
@@ -198,14 +221,13 @@ enum rpcrdma_frmr_state {
};
struct rpcrdma_frmr {
- struct scatterlist *sg;
- int sg_nents;
+ struct scatterlist *fr_sg;
+ int fr_nents;
+ enum dma_data_direction fr_dir;
struct ib_mr *fr_mr;
struct ib_cqe fr_cqe;
enum rpcrdma_frmr_state fr_state;
struct completion fr_linv_done;
- struct work_struct fr_work;
- struct rpcrdma_xprt *fr_xprt;
union {
struct ib_reg_wr fr_regwr;
struct ib_send_wr fr_invwr;
@@ -222,6 +244,8 @@ struct rpcrdma_mw {
struct rpcrdma_fmr fmr;
struct rpcrdma_frmr frmr;
};
+ struct work_struct mw_work;
+ struct rpcrdma_xprt *mw_xprt;
struct list_head mw_list;
struct list_head mw_all;
};
@@ -270,12 +294,14 @@ struct rpcrdma_req {
unsigned int rl_niovs;
unsigned int rl_nchunks;
unsigned int rl_connect_cookie;
+ struct rpc_task *rl_task;
struct rpcrdma_buffer *rl_buffer;
struct rpcrdma_rep *rl_reply;/* holder for reply buffer */
struct ib_sge rl_send_iov[RPCRDMA_MAX_IOVS];
struct rpcrdma_regbuf *rl_rdmabuf;
struct rpcrdma_regbuf *rl_sendbuf;
struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];
+ struct rpcrdma_mr_seg *rl_nextseg;
struct ib_cqe rl_cqe;
struct list_head rl_all;
@@ -372,8 +398,8 @@ struct rpcrdma_memreg_ops {
struct rpcrdma_mr_seg *, int, bool);
void (*ro_unmap_sync)(struct rpcrdma_xprt *,
struct rpcrdma_req *);
- int (*ro_unmap)(struct rpcrdma_xprt *,
- struct rpcrdma_mr_seg *);
+ void (*ro_unmap_safe)(struct rpcrdma_xprt *,
+ struct rpcrdma_req *, bool);
int (*ro_open)(struct rpcrdma_ia *,
struct rpcrdma_ep *,
struct rpcrdma_create_data_internal *);
@@ -456,7 +482,6 @@ struct rpcrdma_regbuf *rpcrdma_alloc_regbuf(struct rpcrdma_ia *,
void rpcrdma_free_regbuf(struct rpcrdma_ia *,
struct rpcrdma_regbuf *);
-unsigned int rpcrdma_max_segments(struct rpcrdma_xprt *);
int rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *, unsigned int);
int frwr_alloc_recovery_wq(void);
@@ -519,6 +544,9 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *);
* RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
*/
int rpcrdma_marshal_req(struct rpc_rqst *);
+void rpcrdma_set_max_header_sizes(struct rpcrdma_ia *,
+ struct rpcrdma_create_data_internal *,
+ unsigned int);
/* RPC/RDMA module init - xprtrdma/transport.c
*/
@@ -534,6 +562,7 @@ void xprt_rdma_cleanup(void);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
int xprt_rdma_bc_setup(struct rpc_xprt *, unsigned int);
int xprt_rdma_bc_up(struct svc_serv *, struct net *);
+size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *);
int rpcrdma_bc_post_recv(struct rpcrdma_xprt *, unsigned int);
void rpcrdma_bc_receive_call(struct rpcrdma_xprt *, struct rpcrdma_rep *);
int rpcrdma_bc_marshal_reply(struct rpc_rqst *);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index b90c5397b5e1..2d3e0c42361e 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1364,6 +1364,11 @@ static int xs_tcp_bc_up(struct svc_serv *serv, struct net *net)
return ret;
return 0;
}
+
+static size_t xs_tcp_bc_maxpayload(struct rpc_xprt *xprt)
+{
+ return PAGE_SIZE;
+}
#else
static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
struct xdr_skb_reader *desc)
@@ -2661,6 +2666,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
#ifdef CONFIG_SUNRPC_BACKCHANNEL
.bc_setup = xprt_setup_bc,
.bc_up = xs_tcp_bc_up,
+ .bc_maxpayload = xs_tcp_bc_maxpayload,
.bc_free_rqst = xprt_free_bc_rqst,
.bc_destroy = xprt_destroy_bc,
#endif
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index b2ab2a92a375..0f82314621f2 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -7,6 +7,7 @@ quote := "
squote := '
empty :=
space := $(empty) $(empty)
+space_escape := _-_SPACE_-_
###
# Name of target with a '.' as filename prefix. foo/bar.o => foo/.bar.o
@@ -226,10 +227,10 @@ objectify = $(foreach o,$(1),$(if $(filter /%,$(o)),$(o),$(obj)/$(o)))
# See Documentation/kbuild/makefiles.txt for more info
ifneq ($(KBUILD_NOCMDDEP),1)
-# Check if both arguments has same arguments. Result is empty string if equal.
-# User may override this check using make KBUILD_NOCMDDEP=1
-arg-check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \
- $(filter-out $(cmd_$@), $(cmd_$(1))) )
+# Check if both arguments are the same including their order. Result is empty
+# string if equal. User may override this check using make KBUILD_NOCMDDEP=1
+arg-check = $(filter-out $(subst $(space),$(space_escape),$(strip $(cmd_$@))), \
+ $(subst $(space),$(space_escape),$(strip $(cmd_$1))))
else
arg-check = $(if $(strip $(cmd_$@)),,1)
endif
@@ -256,10 +257,42 @@ if_changed = $(if $(strip $(any-prereq) $(arg-check)), \
# Execute the command and also postprocess generated .d dependencies file.
if_changed_dep = $(if $(strip $(any-prereq) $(arg-check) ), \
@set -e; \
+ $(cmd_and_fixdep), @:)
+
+ifndef CONFIG_TRIM_UNUSED_KSYMS
+
+cmd_and_fixdep = \
$(echo-cmd) $(cmd_$(1)); \
scripts/basic/fixdep $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp;\
rm -f $(depfile); \
- mv -f $(dot-target).tmp $(dot-target).cmd, @:)
+ mv -f $(dot-target).tmp $(dot-target).cmd;
+
+else
+
+# Filter out exported kernel symbol names from the preprocessor output.
+# See also __KSYM_DEPS__ in include/linux/export.h.
+# We disable the depfile generation here, so as not to overwrite the existing
+# depfile while fixdep is parsing it.
+flags_nodeps = $(filter-out -Wp$(comma)-M%, $($(1)))
+ksym_dep_filter = \
+ case "$(1)" in \
+ cc_*_c|cpp_i_c) \
+ $(CPP) $(call flags_nodeps,c_flags) -D__KSYM_DEPS__ $< ;; \
+ as_*_S|cpp_s_S) \
+ $(CPP) $(call flags_nodeps,a_flags) -D__KSYM_DEPS__ $< ;; \
+ boot*|build*|*cpp_lds_S|dtc|host*|vdso*) : ;; \
+ *) echo "Don't know how to preprocess $(1)" >&2; false ;; \
+ esac | tr ";" "\n" | sed -rn 's/^.*=== __KSYM_(.*) ===.*$$/KSYM_\1/p'
+
+cmd_and_fixdep = \
+ $(echo-cmd) $(cmd_$(1)); \
+ $(ksym_dep_filter) | \
+ scripts/basic/fixdep -e $(depfile) $@ '$(make-cmd)' \
+ > $(dot-target).tmp; \
+ rm -f $(depfile); \
+ mv -f $(dot-target).tmp $(dot-target).cmd;
+
+endif
# Usage: $(call if_changed_rule,foo)
# Will check if $(cmd_foo) or any of the prerequisites changed,
@@ -341,8 +374,6 @@ endif
#
###############################################################################
#
-space_escape := %%%SPACE%%%
-#
define config_filename
ifneq ($$(CONFIG_$(1)),"")
$(1)_FILENAME := $$(subst \\,\,$$(subst \$$(quote),$$(quote),$$(subst $$(space_escape),\$$(space),$$(patsubst "%",%,$$(subst $$(space),$$(space_escape),$$(CONFIG_$(1)))))))
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index e1bc1907090e..0d1ca5bf42fb 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -152,11 +152,11 @@ cmd_cc_s_c = $(CC) $(c_flags) $(DISABLE_LTO) -fverbose-asm -S -o $@ $<
$(obj)/%.s: $(src)/%.c FORCE
$(call if_changed_dep,cc_s_c)
-quiet_cmd_cc_i_c = CPP $(quiet_modtag) $@
-cmd_cc_i_c = $(CPP) $(c_flags) -o $@ $<
+quiet_cmd_cpp_i_c = CPP $(quiet_modtag) $@
+cmd_cpp_i_c = $(CPP) $(c_flags) -o $@ $<
$(obj)/%.i: $(src)/%.c FORCE
- $(call if_changed_dep,cc_i_c)
+ $(call if_changed_dep,cpp_i_c)
cmd_gensymtypes = \
$(CPP) -D__GENKSYMS__ $(c_flags) $< | \
@@ -266,26 +266,24 @@ endif # CONFIG_STACK_VALIDATION
define rule_cc_o_c
$(call echo-cmd,checksrc) $(cmd_checksrc) \
- $(call echo-cmd,cc_o_c) $(cmd_cc_o_c); \
+ $(call cmd_and_fixdep,cc_o_c) \
$(cmd_modversions) \
- $(cmd_objtool) \
- $(call echo-cmd,record_mcount) \
- $(cmd_record_mcount) \
- scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,cc_o_c)' > \
- $(dot-target).tmp; \
- rm -f $(depfile); \
- mv -f $(dot-target).tmp $(dot-target).cmd
+ $(cmd_objtool) \
+ $(call echo-cmd,record_mcount) $(cmd_record_mcount)
endef
define rule_as_o_S
- $(call echo-cmd,as_o_S) $(cmd_as_o_S); \
- $(cmd_objtool) \
- scripts/basic/fixdep $(depfile) $@ '$(call make-cmd,as_o_S)' > \
- $(dot-target).tmp; \
- rm -f $(depfile); \
- mv -f $(dot-target).tmp $(dot-target).cmd
+ $(call cmd_and_fixdep,as_o_S) \
+ $(cmd_objtool)
endef
+# List module undefined symbols (or empty line if not enabled)
+ifdef CONFIG_TRIM_UNUSED_KSYMS
+cmd_undef_syms = $(NM) $@ | sed -n 's/^ \+U //p' | xargs echo
+else
+cmd_undef_syms = echo
+endif
+
# Built-in and composite module parts
$(obj)/%.o: $(src)/%.c $(recordmcount_source) $(objtool_obj) FORCE
$(call cmd,force_checksrc)
@@ -296,7 +294,8 @@ $(obj)/%.o: $(src)/%.c $(recordmcount_source) $(objtool_obj) FORCE
$(single-used-m): $(obj)/%.o: $(src)/%.c $(recordmcount_source) $(objtool_obj) FORCE
$(call cmd,force_checksrc)
$(call if_changed_rule,cc_o_c)
- @{ echo $(@:.o=.ko); echo $@; } > $(MODVERDIR)/$(@F:.o=.mod)
+ @{ echo $(@:.o=.ko); echo $@; \
+ $(cmd_undef_syms); } > $(MODVERDIR)/$(@F:.o=.mod)
quiet_cmd_cc_lst_c = MKLST $@
cmd_cc_lst_c = $(CC) $(c_flags) -g -c -o $*.o $< && \
@@ -314,11 +313,11 @@ modkern_aflags := $(KBUILD_AFLAGS_KERNEL) $(AFLAGS_KERNEL)
$(real-objs-m) : modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE)
$(real-objs-m:.o=.s): modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE)
-quiet_cmd_as_s_S = CPP $(quiet_modtag) $@
-cmd_as_s_S = $(CPP) $(a_flags) -o $@ $<
+quiet_cmd_cpp_s_S = CPP $(quiet_modtag) $@
+cmd_cpp_s_S = $(CPP) $(a_flags) -o $@ $<
$(obj)/%.s: $(src)/%.S FORCE
- $(call if_changed_dep,as_s_S)
+ $(call if_changed_dep,cpp_s_S)
quiet_cmd_as_o_S = AS $(quiet_modtag) $@
cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $<
@@ -426,7 +425,8 @@ $(call multi_depend, $(multi-used-y), .o, -objs -y)
$(multi-used-m): FORCE
$(call if_changed,link_multi-m)
- @{ echo $(@:.o=.ko); echo $(link_multi_deps); } > $(MODVERDIR)/$(@F:.o=.mod)
+ @{ echo $(@:.o=.ko); echo $(link_multi_deps); \
+ $(cmd_undef_syms); } > $(MODVERDIR)/$(@F:.o=.mod)
$(call multi_depend, $(multi-used-m), .o, -objs -y -m)
targets += $(multi-used-y) $(multi-used-m)
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index f9e47a70509c..53449a6ff6aa 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -24,6 +24,7 @@ warning-1 += $(call cc-option, -Wmissing-prototypes)
warning-1 += -Wold-style-definition
warning-1 += $(call cc-option, -Wmissing-include-dirs)
warning-1 += $(call cc-option, -Wunused-but-set-variable)
+warning-1 += $(call cc-option, -Wunused-const-variable)
warning-1 += $(call cc-disable-warning, missing-field-initializers)
warning-1 += $(call cc-disable-warning, sign-compare)
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index ed1b7c4fb674..e7df0f5db7ec 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -96,10 +96,10 @@ obj-dirs := $(addprefix $(obj)/,$(obj-dirs))
# Note: Files that end up in two or more modules are compiled without the
# KBUILD_MODNAME definition. The reason is that any made-up name would
# differ in different configs.
-name-fix = $(subst $(comma),_,$(subst -,_,$1))
-basename_flags = -D"KBUILD_BASENAME=KBUILD_STR($(call name-fix,$(basetarget)))"
+name-fix = $(squote)$(quote)$(subst $(comma),_,$(subst -,_,$1))$(quote)$(squote)
+basename_flags = -DKBUILD_BASENAME=$(call name-fix,$(basetarget))
modname_flags = $(if $(filter 1,$(words $(modname))),\
- -D"KBUILD_MODNAME=KBUILD_STR($(call name-fix,$(modname)))")
+ -DKBUILD_MODNAME=$(call name-fix,$(modname)))
orig_c_flags = $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(KBUILD_SUBDIR_CCFLAGS) \
$(ccflags-y) $(CFLAGS_$(basetarget).o)
@@ -162,7 +162,7 @@ endif
c_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
$(__c_flags) $(modkern_cflags) \
- -D"KBUILD_STR(s)=\#s" $(basename_flags) $(modname_flags)
+ $(basename_flags) $(modname_flags)
a_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
$(__a_flags) $(modkern_aflags)
diff --git a/scripts/adjust_autoksyms.sh b/scripts/adjust_autoksyms.sh
new file mode 100755
index 000000000000..8dc1918b6783
--- /dev/null
+++ b/scripts/adjust_autoksyms.sh
@@ -0,0 +1,101 @@
+#!/bin/sh
+
+# Script to create/update include/generated/autoksyms.h and dependency files
+#
+# Copyright: (C) 2016 Linaro Limited
+# Created by: Nicolas Pitre, January 2016
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+
+# Create/update the include/generated/autoksyms.h file from the list
+# of all modules' needed symbols as recorded on the third line of
+# .tmp_versions/*.mod files.
+#
+# For each symbol being added or removed, the corresponding dependency
+# file's timestamp is updated to force a rebuild of the affected source
+# file. All arguments passed to this script are assumed to be a command
+# to be exec'd to trigger a rebuild of those files.
+
+set -e
+
+cur_ksyms_file="include/generated/autoksyms.h"
+new_ksyms_file="include/generated/autoksyms.h.tmpnew"
+
+info() {
+ if [ "$quiet" != "silent_" ]; then
+ printf " %-7s %s\n" "$1" "$2"
+ fi
+}
+
+info "CHK" "$cur_ksyms_file"
+
+# Use "make V=1" to debug this script.
+case "$KBUILD_VERBOSE" in
+*1*)
+ set -x
+ ;;
+esac
+
+# We need access to CONFIG_ symbols
+case "${KCONFIG_CONFIG}" in
+*/*)
+ . "${KCONFIG_CONFIG}"
+ ;;
+*)
+ # Force using a file from the current directory
+ . "./${KCONFIG_CONFIG}"
+esac
+
+# In case it doesn't exist yet...
+if [ ! -e "$cur_ksyms_file" ]; then touch "$cur_ksyms_file"; fi
+
+# Generate a new ksym list file with symbols needed by the current
+# set of modules.
+cat > "$new_ksyms_file" << EOT
+/*
+ * Automatically generated file; DO NOT EDIT.
+ */
+
+EOT
+sed -ns -e '3{s/ /\n/g;/^$/!p;}' "$MODVERDIR"/*.mod | sort -u |
+while read sym; do
+ if [ -n "$CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX" ]; then
+ sym="${sym#_}"
+ fi
+ echo "#define __KSYM_${sym} 1"
+done >> "$new_ksyms_file"
+
+# Special case for modversions (see modpost.c)
+if [ -n "$CONFIG_MODVERSIONS" ]; then
+ echo "#define __KSYM_module_layout 1" >> "$new_ksyms_file"
+fi
+
+# Extract changes between old and new list and touch corresponding
+# dependency files.
+changed=$(
+count=0
+sort "$cur_ksyms_file" "$new_ksyms_file" | uniq -u |
+sed -n 's/^#define __KSYM_\(.*\) 1/\1/p' | tr "A-Z_" "a-z/" |
+while read sympath; do
+ if [ -z "$sympath" ]; then continue; fi
+ depfile="include/config/ksym/${sympath}.h"
+ mkdir -p "$(dirname "$depfile")"
+ touch "$depfile"
+ echo $((count += 1))
+done | tail -1 )
+changed=${changed:-0}
+
+if [ $changed -gt 0 ]; then
+ # Replace the old list with the new one
+ old=$(grep -c "^#define __KSYM_" "$cur_ksyms_file" || true)
+ new=$(grep -c "^#define __KSYM_" "$new_ksyms_file" || true)
+ info "KSYMS" "symbols: before=$old, after=$new, changed=$changed"
+ info "UPD" "$cur_ksyms_file"
+ mv -f "$new_ksyms_file" "$cur_ksyms_file"
+ # Then trigger a rebuild of affected source files
+ exec "$@"
+else
+ rm -f "$new_ksyms_file"
+fi
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index caef815d1743..746ec1ece614 100644
--- a/scripts/basic/fixdep.c
+++ b/scripts/basic/fixdep.c
@@ -120,13 +120,15 @@
#define INT_NFIG ntohl(0x4e464947)
#define INT_FIG_ ntohl(0x4649475f)
+int insert_extra_deps;
char *target;
char *depfile;
char *cmdline;
static void usage(void)
{
- fprintf(stderr, "Usage: fixdep <depfile> <target> <cmdline>\n");
+ fprintf(stderr, "Usage: fixdep [-e] <depfile> <target> <cmdline>\n");
+ fprintf(stderr, " -e insert extra dependencies given on stdin\n");
exit(1);
}
@@ -138,6 +140,40 @@ static void print_cmdline(void)
printf("cmd_%s := %s\n\n", target, cmdline);
}
+/*
+ * Print out a dependency path from a symbol name
+ */
+static void print_config(const char *m, int slen)
+{
+ int c, i;
+
+ printf(" $(wildcard include/config/");
+ for (i = 0; i < slen; i++) {
+ c = m[i];
+ if (c == '_')
+ c = '/';
+ else
+ c = tolower(c);
+ putchar(c);
+ }
+ printf(".h) \\\n");
+}
+
+static void do_extra_deps(void)
+{
+ if (insert_extra_deps) {
+ char buf[80];
+ while (fgets(buf, sizeof(buf), stdin)) {
+ int len = strlen(buf);
+ if (len < 2 || buf[len-1] != '\n') {
+ fprintf(stderr, "fixdep: bad data on stdin\n");
+ exit(1);
+ }
+ print_config(buf, len-1);
+ }
+ }
+}
+
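
Seen end to end, the -e mode above closes the loop with adjust_autoksyms.sh: fixdep records a dependency on include/config/ksym/<sym>.h for each KSYM_* name arriving on stdin, and the script touches the same path whenever a symbol enters or leaves the ksym list, invalidating exactly the translation units that export it. A stand-alone sketch of the name-to-path transform (the symbol is a made-up example):

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    /* Same transform as print_config(): '_' -> '/', everything lowercased */
    static void print_config(const char *m, int slen)
    {
            int c, i;

            printf(" $(wildcard include/config/");
            for (i = 0; i < slen; i++) {
                    c = m[i];
                    if (c == '_')
                            c = '/';
                    else
                            c = tolower(c);
                    putchar(c);
            }
            printf(".h) \\\n");
    }

    int main(void)
    {
            const char *sym = "KSYM_mutex_lock"; /* as ksym_dep_filter emits */

            /* prints: $(wildcard include/config/ksym/mutex/lock.h) \ */
            print_config(sym, (int)strlen(sym));
            return 0;
    }
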
struct item {
struct item *next;
unsigned int len;
@@ -197,23 +233,12 @@ static void define_config(const char *name, int len, unsigned int hash)
static void use_config(const char *m, int slen)
{
unsigned int hash = strhash(m, slen);
- int c, i;
if (is_defined_config(m, slen, hash))
return;
define_config(m, slen, hash);
-
- printf(" $(wildcard include/config/");
- for (i = 0; i < slen; i++) {
- c = m[i];
- if (c == '_')
- c = '/';
- else
- c = tolower(c);
- putchar(c);
- }
- printf(".h) \\\n");
+ print_config(m, slen);
}
static void parse_config_file(const char *map, size_t len)
@@ -250,7 +275,7 @@ static void parse_config_file(const char *map, size_t len)
}
}
-/* test is s ends in sub */
+/* test if s ends in sub */
static int strrcmp(const char *s, const char *sub)
{
int slen = strlen(s);
@@ -333,6 +358,7 @@ static void parse_dep_file(void *map, size_t len)
/* Ignore certain dependencies */
if (strrcmp(s, "include/generated/autoconf.h") &&
+ strrcmp(s, "include/generated/autoksyms.h") &&
strrcmp(s, "arch/um/include/uml-config.h") &&
strrcmp(s, "include/linux/kconfig.h") &&
strrcmp(s, ".ver")) {
@@ -378,6 +404,8 @@ static void parse_dep_file(void *map, size_t len)
exit(1);
}
+ do_extra_deps();
+
printf("\n%s: $(deps_%s)\n\n", target, target);
printf("$(deps_%s):\n", target);
}
@@ -434,7 +462,10 @@ int main(int argc, char *argv[])
{
traps();
- if (argc != 4)
+ if (argc == 5 && !strcmp(argv[1], "-e")) {
+ insert_extra_deps = 1;
+ argv++;
+ } else if (argc != 4)
usage();
depfile = argv[1];
diff --git a/scripts/coccicheck b/scripts/coccicheck
index b2d758188f2f..dd85a455b2ba 100755
--- a/scripts/coccicheck
+++ b/scripts/coccicheck
@@ -98,7 +98,7 @@ run_cmd() {
}
kill_running() {
- for i in $(seq $(( NPROC - 1 )) ); do
+ for i in $(seq 0 $(( NPROC - 1 )) ); do
if [ $VERBOSE -eq 2 ] ; then
echo "Killing ${SPATCH_PID[$i]}"
fi
diff --git a/scripts/coccinelle/api/setup_timer.cocci b/scripts/coccinelle/api/setup_timer.cocci
index 8ee0ac30e547..eb6bd9e4ab1a 100644
--- a/scripts/coccinelle/api/setup_timer.cocci
+++ b/scripts/coccinelle/api/setup_timer.cocci
@@ -106,7 +106,7 @@ position j0, j1, j2;
@match_function_and_data_after_init_timer_context
depends on !patch &&
!match_immediate_function_data_after_init_timer_context &&
-(context || org || report)@
+ (context || org || report)@
expression a, b, e1, e2, e3, e4, e5;
position j0, j1, j2;
@@
@@ -127,7 +127,7 @@ position j0, j1, j2;
@r3_context depends on !patch &&
!match_immediate_function_data_after_init_timer_context &&
!match_function_and_data_after_init_timer_context &&
-(context || org || report)@
+ (context || org || report)@
expression c, e6, e7;
position r1.p;
position j0, j1;
diff --git a/scripts/coccinelle/misc/compare_const_fl.cocci b/scripts/coccinelle/misc/compare_const_fl.cocci
deleted file mode 100644
index b5d4bab60263..000000000000
--- a/scripts/coccinelle/misc/compare_const_fl.cocci
+++ /dev/null
@@ -1,171 +0,0 @@
-/// Move constants to the right of binary operators.
-//# Depends on personal taste in some cases.
-///
-// Confidence: Moderate
-// Copyright: (C) 2015 Copyright: (C) 2015 Julia Lawall, Inria. GPLv2.
-// URL: http://coccinelle.lip6.fr/
-// Options: --no-includes --include-headers
-
-virtual patch
-virtual context
-virtual org
-virtual report
-
-@r1 depends on patch && !context && !org && !report
- disable bitor_comm, neg_if_exp@
-constant c,c1;
-local idexpression i;
-expression e,e1,e2;
-binary operator b = {==,!=,&,|};
-type t;
-@@
-
-(
-c b (c1)
-|
-sizeof(t) b e1
-|
-sizeof e b e1
-|
-i b e1
-|
-c | e1 | e2 | ...
-|
-c | (e ? e1 : e2)
-|
-- c
-+ e
-b
-- e
-+ c
-)
-
-@r2 depends on patch && !context && !org && !report
- disable gtr_lss, gtr_lss_eq, not_int2@
-constant c,c1;
-expression e,e1,e2;
-binary operator b;
-binary operator b1 = {<,<=},b2 = {<,<=};
-binary operator b3 = {>,>=},b4 = {>,>=};
-local idexpression i;
-type t;
-@@
-
-(
-c b c1
-|
-sizeof(t) b e1
-|
-sizeof e b e1
-|
- (e1 b1 e) && (e b2 e2)
-|
- (e1 b3 e) && (e b4 e2)
-|
-i b e
-|
-- c < e
-+ e > c
-|
-- c <= e
-+ e >= c
-|
-- c > e
-+ e < c
-|
-- c >= e
-+ e <= c
-)
-
-// ----------------------------------------------------------------------------
-
-@r1_context depends on !patch && (context || org || report)
- disable bitor_comm, neg_if_exp exists@
-type t;
-binary operator b = {==,!=,&,|};
-constant c, c1;
-expression e, e1, e2;
-local idexpression i;
-position j0;
-@@
-
-(
-c b (c1)
-|
-sizeof(t) b e1
-|
-sizeof e b e1
-|
-i b e1
-|
-c | e1 | e2 | ...
-|
-c | (e ? e1 : e2)
-|
-* c@j0 b e
-)
-
-@r2_context depends on !patch && (context || org || report)
- disable gtr_lss, gtr_lss_eq, not_int2 exists@
-type t;
-binary operator b, b1 = {<,<=}, b2 = {<,<=}, b3 = {>,>=}, b4 = {>,>=};
-constant c, c1;
-expression e, e1, e2;
-local idexpression i;
-position j0;
-@@
-
-(
-c b c1
-|
-sizeof(t) b e1
-|
-sizeof e b e1
-|
- (e1 b1 e) && (e b2 e2)
-|
- (e1 b3 e) && (e b4 e2)
-|
-i b e
-|
-* c@j0 < e
-|
-* c@j0 <= e
-|
-* c@j0 > e
-|
-* c@j0 >= e
-)
-
-// ----------------------------------------------------------------------------
-
-@script:python r1_org depends on org@
-j0 << r1_context.j0;
-@@
-
-msg = "Move constant to right."
-coccilib.org.print_todo(j0[0], msg)
-
-@script:python r2_org depends on org@
-j0 << r2_context.j0;
-@@
-
-msg = "Move constant to right."
-coccilib.org.print_todo(j0[0], msg)
-
-// ----------------------------------------------------------------------------
-
-@script:python r1_report depends on report@
-j0 << r1_context.j0;
-@@
-
-msg = "Move constant to right."
-coccilib.report.print_report(j0[0], msg)
-
-@script:python r2_report depends on report@
-j0 << r2_context.j0;
-@@
-
-msg = "Move constant to right."
-coccilib.report.print_report(j0[0], msg)
-
diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c
index dafaf96e0a34..06121ce524a7 100644
--- a/scripts/genksyms/genksyms.c
+++ b/scripts/genksyms/genksyms.c
@@ -873,5 +873,8 @@ int main(int argc, char **argv)
(double)nsyms / (double)HASH_BUCKETS);
}
+ if (dumpfile)
+ fclose(dumpfile);
+
return errors != 0;
}
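The genksyms hunk above closes the optional dump file instead of exiting with it still open. A minimal userspace sketch of what the explicit fclose() buys — deferred write errors get reported, and the handle is not leaked on longer-lived paths (the file name here is hypothetical):

#include <stdio.h>

int main(void)
{
	FILE *dumpfile = fopen("genksyms.dump", "w");	/* hypothetical name */

	if (!dumpfile)
		return 1;
	fprintf(dumpfile, "symbol data\n");
	/* fclose() flushes and reports any deferred write error */
	return fclose(dumpfile) ? 1 : 0;
}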
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index dd243d2abd87..297b079ae4d9 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -375,7 +375,9 @@ load:
continue;
} else {
if (line[0] != '\r' && line[0] != '\n')
- conf_warning("unexpected data");
+ conf_warning("unexpected data: %.*s",
+ (int)strcspn(line, "\r\n"), line);
+
continue;
}
setsym:
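The reworked conf_warning() uses the "%.*s" precision idiom with strcspn() so the offending input is echoed without its trailing CR/LF. A standalone sketch of the same idiom (the input string is made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char line[] = "CONFIG_F00=y\r\n";	/* hypothetical bad line */

	/* print everything before the first CR or LF */
	printf("unexpected data: %.*s\n", (int)strcspn(line, "\r\n"), line);
	return 0;
}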
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index 25cf0c2c0c79..2432298487fb 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -209,12 +209,26 @@ static void sym_set_all_changed(void)
static void sym_calc_visibility(struct symbol *sym)
{
struct property *prop;
+ struct symbol *choice_sym = NULL;
tristate tri;
/* any prompt visible? */
tri = no;
+
+ if (sym_is_choice_value(sym))
+ choice_sym = prop_get_symbol(sym_get_choice_prop(sym));
+
for_all_prompts(sym, prop) {
prop->visible.tri = expr_calc_value(prop->visible.expr);
+ /*
+ * Tristate choice_values with visibility 'mod' are
+ * not visible if the corresponding choice's value is
+ * 'yes'.
+ */
+ if (choice_sym && sym->type == S_TRISTATE &&
+ prop->visible.tri == mod && choice_sym->curr.tri == yes)
+ prop->visible.tri = no;
+
tri = EXPR_OR(tri, prop->visible.tri);
}
if (tri == mod && (sym->type != S_TRISTATE || modules_val == no))
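Kconfig folds prompt visibilities together with EXPR_OR(), which on tristate values acts as a max, so the new check must squash a 'mod' prompt to 'no' before the fold. A self-contained sketch of that logic, under the assumption that EXPR_OR on concrete values is a tristate max:

#include <stdio.h>

typedef enum { no, mod, yes } tristate;

static tristate expr_or(tristate a, tristate b)
{
	return a > b ? a : b;	/* assumption: EXPR_OR on values is a max */
}

int main(void)
{
	tristate choice_val = yes;	/* enclosing choice pinned to y */
	tristate prompt_vis = mod;	/* prompt would otherwise offer m */
	tristate tri = no;

	if (prompt_vis == mod && choice_val == yes)
		prompt_vis = no;	/* 'm' is meaningless under a y choice */
	tri = expr_or(tri, prompt_vis);
	printf("visibility = %d\n", tri);	/* prints 0, i.e. no */
	return 0;
}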
diff --git a/scripts/package/Makefile b/scripts/package/Makefile
index c2c7389bfbab..71b4a8af9d4d 100644
--- a/scripts/package/Makefile
+++ b/scripts/package/Makefile
@@ -52,7 +52,7 @@ rpm-pkg rpm: FORCE
$(call cmd,src_tar,$(KERNELPATH),kernel.spec)
$(CONFIG_SHELL) $(srctree)/scripts/mkversion > $(objtree)/.tmp_version
mv -f $(objtree)/.tmp_version $(objtree)/.version
- rpmbuild --target $(UTS_MACHINE) -ta $(KERNELPATH).tar.gz
+ rpmbuild $(RPMOPTS) --target $(UTS_MACHINE) -ta $(KERNELPATH).tar.gz
rm $(KERNELPATH).tar.gz kernel.spec
# binrpm-pkg
@@ -63,7 +63,7 @@ binrpm-pkg: FORCE
$(CONFIG_SHELL) $(srctree)/scripts/mkversion > $(objtree)/.tmp_version
mv -f $(objtree)/.tmp_version $(objtree)/.version
- rpmbuild --define "_builddir $(objtree)" --target \
+ rpmbuild $(RPMOPTS) --define "_builddir $(objtree)" --target \
$(UTS_MACHINE) -bb $(objtree)/binkernel.spec
rm binkernel.spec
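The new RPMOPTS hook simply forwards extra flags to both rpmbuild invocations. For example (the --define value below is purely illustrative):

	make RPMOPTS='--define "_topdir /tmp/rpmbuild"' rpm-pkg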
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index 6c3b038ef40d..86e56fef7473 100755
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -322,7 +322,10 @@ fi
# Build kernel header package
(cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl) > "$objtree/debian/hdrsrcfiles"
-(cd $srctree; find arch/$SRCARCH/include include scripts -type f) >> "$objtree/debian/hdrsrcfiles"
+if grep -q '^CONFIG_STACK_VALIDATION=y' $KCONFIG_CONFIG ; then
+ (cd $srctree; find tools/objtool -type f -executable) >> "$objtree/debian/hdrsrcfiles"
+fi
+(cd $srctree; find arch/*/include include scripts -type f) >> "$objtree/debian/hdrsrcfiles"
(cd $srctree; find arch/$SRCARCH -name module.lds -o -name Kbuild.platforms -o -name Platform) >> "$objtree/debian/hdrsrcfiles"
(cd $srctree; find $(find arch/$SRCARCH -name include -o -name scripts -type d) -type f) >> "$objtree/debian/hdrsrcfiles"
(cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f) >> "$objtree/debian/hdrobjfiles"
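The builddeb hunk ships the objtool binaries in the headers package only when stack validation is configured, since external module builds then need them; the guard is the usual grep-on-$KCONFIG_CONFIG idiom. To check a tree by hand (path illustrative):

	grep -q '^CONFIG_STACK_VALIDATION=y' .config && echo 'headers package will include tools/objtool'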
diff --git a/scripts/package/mkspec b/scripts/package/mkspec
index b6de63cb3f23..57673bae5597 100755
--- a/scripts/package/mkspec
+++ b/scripts/package/mkspec
@@ -143,6 +143,11 @@ echo "if [ -x /sbin/new-kernel-pkg ]; then"
echo "new-kernel-pkg --remove $KERNELRELEASE --rminitrd --initrdfile=/boot/initramfs-$KERNELRELEASE.img"
echo "fi"
echo ""
+echo "%postun"
+echo "if [ -x /sbin/update-bootloader ]; then"
+echo "/sbin/update-bootloader --remove $KERNELRELEASE"
+echo "fi"
+echo ""
echo "%files"
echo '%defattr (-, root, root)'
echo "/lib/modules/$KERNELRELEASE"
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index ff2b8c3cf7a9..6777295f4b2b 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -3514,7 +3514,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
*/
if (isp->smk_flags & SMK_INODE_CHANGED) {
isp->smk_flags &= ~SMK_INODE_CHANGED;
- rc = inode->i_op->setxattr(dp,
+ rc = inode->i_op->setxattr(dp, inode,
XATTR_NAME_SMACKTRANSMUTE,
TRANS_TRUE, TRANS_TRUE_SIZE,
0);
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 9b756b1f3dc5..0309f2111c70 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -19,6 +19,9 @@
#include <linux/ratelimit.h>
#include <linux/workqueue.h>
#include <linux/string_helpers.h>
+#include <linux/task_work.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
#define YAMA_SCOPE_DISABLED 0
#define YAMA_SCOPE_RELATIONAL 1
@@ -42,20 +45,71 @@ static DEFINE_SPINLOCK(ptracer_relations_lock);
static void yama_relation_cleanup(struct work_struct *work);
static DECLARE_WORK(yama_relation_work, yama_relation_cleanup);
-static void report_access(const char *access, struct task_struct *target,
- struct task_struct *agent)
+struct access_report_info {
+ struct callback_head work;
+ const char *access;
+ struct task_struct *target;
+ struct task_struct *agent;
+};
+
+static void __report_access(struct callback_head *work)
{
+ struct access_report_info *info =
+ container_of(work, struct access_report_info, work);
char *target_cmd, *agent_cmd;
- target_cmd = kstrdup_quotable_cmdline(target, GFP_ATOMIC);
- agent_cmd = kstrdup_quotable_cmdline(agent, GFP_ATOMIC);
+ target_cmd = kstrdup_quotable_cmdline(info->target, GFP_KERNEL);
+ agent_cmd = kstrdup_quotable_cmdline(info->agent, GFP_KERNEL);
pr_notice_ratelimited(
"ptrace %s of \"%s\"[%d] was attempted by \"%s\"[%d]\n",
- access, target_cmd, target->pid, agent_cmd, agent->pid);
+ info->access, target_cmd, info->target->pid, agent_cmd,
+ info->agent->pid);
kfree(agent_cmd);
kfree(target_cmd);
+
+ put_task_struct(info->agent);
+ put_task_struct(info->target);
+ kfree(info);
+}
+
+/* defers execution because cmdline access can sleep */
+static void report_access(const char *access, struct task_struct *target,
+ struct task_struct *agent)
+{
+ struct access_report_info *info;
+ char agent_comm[sizeof(agent->comm)];
+
+ assert_spin_locked(&target->alloc_lock); /* for target->comm */
+
+ if (current->flags & PF_KTHREAD) {
+ /* I don't think kthreads call task_work_run() before exiting.
+ * Imagine angry ranting about procfs here.
+ */
+ pr_notice_ratelimited(
+ "ptrace %s of \"%s\"[%d] was attempted by \"%s\"[%d]\n",
+ access, target->comm, target->pid,
+ get_task_comm(agent_comm, agent), agent->pid);
+ return;
+ }
+
+ info = kmalloc(sizeof(*info), GFP_ATOMIC);
+ if (!info)
+ return;
+ init_task_work(&info->work, __report_access);
+ get_task_struct(target);
+ get_task_struct(agent);
+ info->access = access;
+ info->target = target;
+ info->agent = agent;
+ if (task_work_add(current, &info->work, true) == 0)
+ return; /* success */
+
+ WARN(1, "report_access called from exiting task");
+ put_task_struct(target);
+ put_task_struct(agent);
+ kfree(info);
}
/**
@@ -351,8 +405,11 @@ int yama_ptrace_traceme(struct task_struct *parent)
break;
}
- if (rc)
+ if (rc) {
+ task_lock(current);
report_access("traceme", current, parent);
+ task_unlock(current);
+ }
return rc;
}
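The Yama rework is an instance of a general kernel pattern: grab references while still in atomic context, then let task_work run the part that can sleep as the task returns to userspace. A stripped-down sketch with hypothetical names (my_state, my_defer); a non-zero return from task_work_add() means the task is already exiting:

#include <linux/task_work.h>
#include <linux/slab.h>
#include <linux/sched.h>

struct my_state {
	struct callback_head work;
	int data;
};

static void my_report(struct callback_head *work)
{
	struct my_state *s = container_of(work, struct my_state, work);

	/* sleeping calls (GFP_KERNEL, cmdline access, ...) are safe here */
	kfree(s);
}

static void my_defer(int data)
{
	struct my_state *s = kmalloc(sizeof(*s), GFP_ATOMIC);

	if (!s)
		return;
	init_task_work(&s->work, my_report);
	s->data = data;
	if (task_work_add(current, &s->work, true))
		kfree(s);	/* task is exiting; callback will never run */
}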
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 002f153bc659..d53c25e7a1c1 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -335,6 +335,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
case 0x10ec0283:
case 0x10ec0286:
case 0x10ec0288:
+ case 0x10ec0295:
case 0x10ec0298:
alc_update_coef_idx(codec, 0x10, 1<<9, 0);
break;
@@ -907,6 +908,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
{ 0x10ec0298, 0x1028, 0, "ALC3266" },
{ 0x10ec0256, 0x1028, 0, "ALC3246" },
{ 0x10ec0225, 0x1028, 0, "ALC3253" },
+ { 0x10ec0295, 0x1028, 0, "ALC3254" },
{ 0x10ec0670, 0x1025, 0, "ALC669X" },
{ 0x10ec0676, 0x1025, 0, "ALC679X" },
{ 0x10ec0282, 0x1043, 0, "ALC3229" },
@@ -3697,6 +3699,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
alc_process_coef_fw(codec, coef0668);
break;
case 0x10ec0225:
+ case 0x10ec0295:
alc_process_coef_fw(codec, coef0225);
break;
}
@@ -3797,6 +3800,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
break;
case 0x10ec0225:
+ case 0x10ec0295:
alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10);
snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
alc_process_coef_fw(codec, coef0225);
@@ -3854,6 +3858,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
switch (codec->core.vendor_id) {
case 0x10ec0225:
+ case 0x10ec0295:
alc_process_coef_fw(codec, coef0225);
break;
case 0x10ec0255:
@@ -3957,6 +3962,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
alc_process_coef_fw(codec, coef0688);
break;
case 0x10ec0225:
+ case 0x10ec0295:
alc_process_coef_fw(codec, coef0225);
break;
}
@@ -4038,6 +4044,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
alc_process_coef_fw(codec, coef0688);
break;
case 0x10ec0225:
+ case 0x10ec0295:
alc_process_coef_fw(codec, coef0225);
break;
}
@@ -4121,6 +4128,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
is_ctia = (val & 0x1c02) == 0x1c02;
break;
case 0x10ec0225:
+ case 0x10ec0295:
alc_process_coef_fw(codec, coef0225);
msleep(800);
val = alc_read_coef_idx(codec, 0x46);
@@ -5466,8 +5474,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
- SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5711,6 +5720,9 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x14, 0x90170110},
{0x21, 0x02211020}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x14, 0x90170130},
+ {0x21, 0x02211040}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x12, 0x90a60140},
{0x14, 0x90170110},
{0x21, 0x02211020}),
@@ -6033,6 +6045,7 @@ static int patch_alc269(struct hda_codec *codec)
alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
break;
case 0x10ec0225:
+ case 0x10ec0295:
spec->codec_variant = ALC269_TYPE_ALC225;
break;
case 0x10ec0234:
@@ -6979,6 +6992,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
HDA_CODEC_ENTRY(0x10ec0292, "ALC292", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0293, "ALC293", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0294, "ALC294", patch_alc269),
+ HDA_CODEC_ENTRY(0x10ec0295, "ALC295", patch_alc269),
HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269),
HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861),
HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd),
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index b3afae990e39..4d82a58ff6b0 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -43,6 +43,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_AK5386
select SND_SOC_ALC5623 if I2C
select SND_SOC_ALC5632 if I2C
+ select SND_SOC_BT_SCO
select SND_SOC_CQ0093VC if MFD_DAVINCI_VOICECODEC
select SND_SOC_CS35L32 if I2C
select SND_SOC_CS42L51_I2C if I2C
@@ -64,7 +65,6 @@ config SND_SOC_ALL_CODECS
select SND_SOC_DA732X if I2C
select SND_SOC_DA9055 if I2C
select SND_SOC_DMIC
- select SND_SOC_BT_SCO
select SND_SOC_ES8328_SPI if SPI_MASTER
select SND_SOC_ES8328_I2C if I2C
select SND_SOC_GTM601
@@ -79,6 +79,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_MAX98090 if I2C
select SND_SOC_MAX98095 if I2C
select SND_SOC_MAX98357A if GPIOLIB
+ select SND_SOC_MAX98371 if I2C
select SND_SOC_MAX9867 if I2C
select SND_SOC_MAX98925 if I2C
select SND_SOC_MAX98926 if I2C
@@ -126,12 +127,14 @@ config SND_SOC_ALL_CODECS
select SND_SOC_TAS2552 if I2C
select SND_SOC_TAS5086 if I2C
select SND_SOC_TAS571X if I2C
+ select SND_SOC_TAS5720 if I2C
select SND_SOC_TFA9879 if I2C
select SND_SOC_TLV320AIC23_I2C if I2C
select SND_SOC_TLV320AIC23_SPI if SPI_MASTER
select SND_SOC_TLV320AIC26 if SPI_MASTER
select SND_SOC_TLV320AIC31XX if I2C
- select SND_SOC_TLV320AIC32X4 if I2C
+ select SND_SOC_TLV320AIC32X4_I2C if I2C
+ select SND_SOC_TLV320AIC32X4_SPI if SPI_MASTER
select SND_SOC_TLV320AIC3X if I2C
select SND_SOC_TPA6130A2 if I2C
select SND_SOC_TLV320DAC33 if I2C
@@ -367,6 +370,9 @@ config SND_SOC_ALC5623
config SND_SOC_ALC5632
tristate
+config SND_SOC_BT_SCO
+ tristate
+
config SND_SOC_CQ0093VC
tristate
@@ -473,9 +479,6 @@ config SND_SOC_DA732X
config SND_SOC_DA9055
tristate
-config SND_SOC_BT_SCO
- tristate
-
config SND_SOC_DMIC
tristate
@@ -529,6 +532,9 @@ config SND_SOC_MAX98095
config SND_SOC_MAX98357A
tristate
+config SND_SOC_MAX98371
+ tristate
+
config SND_SOC_MAX9867
tristate
@@ -748,9 +754,16 @@ config SND_SOC_TAS5086
depends on I2C
config SND_SOC_TAS571X
- tristate "Texas Instruments TAS5711/TAS5717/TAS5719 power amplifiers"
+ tristate "Texas Instruments TAS5711/TAS5717/TAS5719/TAS5721 power amplifiers"
depends on I2C
+config SND_SOC_TAS5720
+ tristate "Texas Instruments TAS5720 Mono Audio amplifier"
+ depends on I2C
+ help
+ Enable support for Texas Instruments TAS5720L/M high-efficiency mono
+ Class-D audio power amplifiers.
+
config SND_SOC_TFA9879
tristate "NXP Semiconductors TFA9879 amplifier"
depends on I2C
@@ -780,6 +793,16 @@ config SND_SOC_TLV320AIC31XX
config SND_SOC_TLV320AIC32X4
tristate
+config SND_SOC_TLV320AIC32X4_I2C
+ tristate
+ depends on I2C
+ select SND_SOC_TLV320AIC32X4
+
+config SND_SOC_TLV320AIC32X4_SPI
+ tristate
+ depends on SPI_MASTER
+ select SND_SOC_TLV320AIC32X4
+
config SND_SOC_TLV320AIC3X
tristate "Texas Instruments TLV320AIC3x CODECs"
depends on I2C
@@ -920,7 +943,8 @@ config SND_SOC_WM8955
tristate
config SND_SOC_WM8960
- tristate
+ tristate "Wolfson Microelectronics WM8960 CODEC"
+ depends on I2C
config SND_SOC_WM8961
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index b7b99416537f..0f548fd34ca3 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -32,6 +32,7 @@ snd-soc-ak4642-objs := ak4642.o
snd-soc-ak4671-objs := ak4671.o
snd-soc-ak5386-objs := ak5386.o
snd-soc-arizona-objs := arizona.o
+snd-soc-bt-sco-objs := bt-sco.o
snd-soc-cq93vc-objs := cq93vc.o
snd-soc-cs35l32-objs := cs35l32.o
snd-soc-cs42l51-objs := cs42l51.o
@@ -55,7 +56,6 @@ snd-soc-da7218-objs := da7218.o
snd-soc-da7219-objs := da7219.o da7219-aad.o
snd-soc-da732x-objs := da732x.o
snd-soc-da9055-objs := da9055.o
-snd-soc-bt-sco-objs := bt-sco.o
snd-soc-dmic-objs := dmic.o
snd-soc-es8328-objs := es8328.o
snd-soc-es8328-i2c-objs := es8328-i2c.o
@@ -74,6 +74,7 @@ snd-soc-max98088-objs := max98088.o
snd-soc-max98090-objs := max98090.o
snd-soc-max98095-objs := max98095.o
snd-soc-max98357a-objs := max98357a.o
+snd-soc-max98371-objs := max98371.o
snd-soc-max9867-objs := max9867.o
snd-soc-max98925-objs := max98925.o
snd-soc-max98926-objs := max98926.o
@@ -131,6 +132,7 @@ snd-soc-stac9766-objs := stac9766.o
snd-soc-sti-sas-objs := sti-sas.o
snd-soc-tas5086-objs := tas5086.o
snd-soc-tas571x-objs := tas571x.o
+snd-soc-tas5720-objs := tas5720.o
snd-soc-tfa9879-objs := tfa9879.o
snd-soc-tlv320aic23-objs := tlv320aic23.o
snd-soc-tlv320aic23-i2c-objs := tlv320aic23-i2c.o
@@ -138,6 +140,8 @@ snd-soc-tlv320aic23-spi-objs := tlv320aic23-spi.o
snd-soc-tlv320aic26-objs := tlv320aic26.o
snd-soc-tlv320aic31xx-objs := tlv320aic31xx.o
snd-soc-tlv320aic32x4-objs := tlv320aic32x4.o
+snd-soc-tlv320aic32x4-i2c-objs := tlv320aic32x4-i2c.o
+snd-soc-tlv320aic32x4-spi-objs := tlv320aic32x4-spi.o
snd-soc-tlv320aic3x-objs := tlv320aic3x.o
snd-soc-tlv320dac33-objs := tlv320dac33.o
snd-soc-ts3a227e-objs := ts3a227e.o
@@ -243,6 +247,7 @@ obj-$(CONFIG_SND_SOC_AK5386) += snd-soc-ak5386.o
obj-$(CONFIG_SND_SOC_ALC5623) += snd-soc-alc5623.o
obj-$(CONFIG_SND_SOC_ALC5632) += snd-soc-alc5632.o
obj-$(CONFIG_SND_SOC_ARIZONA) += snd-soc-arizona.o
+obj-$(CONFIG_SND_SOC_BT_SCO) += snd-soc-bt-sco.o
obj-$(CONFIG_SND_SOC_CQ0093VC) += snd-soc-cq93vc.o
obj-$(CONFIG_SND_SOC_CS35L32) += snd-soc-cs35l32.o
obj-$(CONFIG_SND_SOC_CS42L51) += snd-soc-cs42l51.o
@@ -266,7 +271,6 @@ obj-$(CONFIG_SND_SOC_DA7218) += snd-soc-da7218.o
obj-$(CONFIG_SND_SOC_DA7219) += snd-soc-da7219.o
obj-$(CONFIG_SND_SOC_DA732X) += snd-soc-da732x.o
obj-$(CONFIG_SND_SOC_DA9055) += snd-soc-da9055.o
-obj-$(CONFIG_SND_SOC_BT_SCO) += snd-soc-bt-sco.o
obj-$(CONFIG_SND_SOC_DMIC) += snd-soc-dmic.o
obj-$(CONFIG_SND_SOC_ES8328) += snd-soc-es8328.o
obj-$(CONFIG_SND_SOC_ES8328_I2C)+= snd-soc-es8328-i2c.o
@@ -339,6 +343,7 @@ obj-$(CONFIG_SND_SOC_STI_SAS) += snd-soc-sti-sas.o
obj-$(CONFIG_SND_SOC_TAS2552) += snd-soc-tas2552.o
obj-$(CONFIG_SND_SOC_TAS5086) += snd-soc-tas5086.o
obj-$(CONFIG_SND_SOC_TAS571X) += snd-soc-tas571x.o
+obj-$(CONFIG_SND_SOC_TAS5720) += snd-soc-tas5720.o
obj-$(CONFIG_SND_SOC_TFA9879) += snd-soc-tfa9879.o
obj-$(CONFIG_SND_SOC_TLV320AIC23) += snd-soc-tlv320aic23.o
obj-$(CONFIG_SND_SOC_TLV320AIC23_I2C) += snd-soc-tlv320aic23-i2c.o
@@ -346,6 +351,8 @@ obj-$(CONFIG_SND_SOC_TLV320AIC23_SPI) += snd-soc-tlv320aic23-spi.o
obj-$(CONFIG_SND_SOC_TLV320AIC26) += snd-soc-tlv320aic26.o
obj-$(CONFIG_SND_SOC_TLV320AIC31XX) += snd-soc-tlv320aic31xx.o
obj-$(CONFIG_SND_SOC_TLV320AIC32X4) += snd-soc-tlv320aic32x4.o
+obj-$(CONFIG_SND_SOC_TLV320AIC32X4_I2C) += snd-soc-tlv320aic32x4-i2c.o
+obj-$(CONFIG_SND_SOC_TLV320AIC32X4_SPI) += snd-soc-tlv320aic32x4-spi.o
obj-$(CONFIG_SND_SOC_TLV320AIC3X) += snd-soc-tlv320aic3x.o
obj-$(CONFIG_SND_SOC_TLV320DAC33) += snd-soc-tlv320dac33.o
obj-$(CONFIG_SND_SOC_TS3A227E) += snd-soc-ts3a227e.o
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index 1ee8506c06c7..4d8b9e49e8d6 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -560,6 +560,7 @@ static const struct regmap_config ak4642_regmap = {
.max_register = FIL1_3,
.reg_defaults = ak4642_reg,
.num_reg_defaults = NUM_AK4642_REG_DEFAULTS,
+ .cache_type = REGCACHE_RBTREE,
};
static const struct regmap_config ak4643_regmap = {
@@ -568,6 +569,7 @@ static const struct regmap_config ak4643_regmap = {
.max_register = SPK_MS,
.reg_defaults = ak4643_reg,
.num_reg_defaults = ARRAY_SIZE(ak4643_reg),
+ .cache_type = REGCACHE_RBTREE,
};
static const struct regmap_config ak4648_regmap = {
@@ -576,6 +578,7 @@ static const struct regmap_config ak4648_regmap = {
.max_register = EQ_FBEQE,
.reg_defaults = ak4648_reg,
.num_reg_defaults = ARRAY_SIZE(ak4648_reg),
+ .cache_type = REGCACHE_RBTREE,
};
static const struct ak4642_drvdata ak4642_drvdata = {
diff --git a/sound/soc/codecs/max98371.c b/sound/soc/codecs/max98371.c
new file mode 100644
index 000000000000..cf0a39bb631a
--- /dev/null
+++ b/sound/soc/codecs/max98371.c
@@ -0,0 +1,441 @@
+/*
+ * max98371.c -- ALSA SoC Stereo MAX98371 driver
+ *
+ * Copyright 2015-16 Maxim Integrated Products
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+#include "max98371.h"
+
+static const char *const monomix_text[] = {
+ "Left", "Right", "LeftRightDiv2",
+};
+
+static const char *const hpf_cutoff_txt[] = {
+ "Disable", "DC Block", "50Hz",
+ "100Hz", "200Hz", "400Hz", "800Hz",
+};
+
+static SOC_ENUM_SINGLE_DECL(max98371_monomix, MAX98371_MONOMIX_CFG, 0,
+ monomix_text);
+
+static SOC_ENUM_SINGLE_DECL(max98371_hpf_cutoff, MAX98371_HPF, 0,
+ hpf_cutoff_txt);
+
+static const DECLARE_TLV_DB_RANGE(max98371_dht_min_gain,
+ 0, 1, TLV_DB_SCALE_ITEM(537, 66, 0),
+ 2, 3, TLV_DB_SCALE_ITEM(677, 82, 0),
+ 4, 5, TLV_DB_SCALE_ITEM(852, 104, 0),
+ 6, 7, TLV_DB_SCALE_ITEM(1072, 131, 0),
+ 8, 9, TLV_DB_SCALE_ITEM(1350, 165, 0),
+ 10, 11, TLV_DB_SCALE_ITEM(1699, 101, 0),
+);
+
+static const DECLARE_TLV_DB_RANGE(max98371_dht_max_gain,
+ 0, 1, TLV_DB_SCALE_ITEM(537, 66, 0),
+ 2, 3, TLV_DB_SCALE_ITEM(677, 82, 0),
+ 4, 5, TLV_DB_SCALE_ITEM(852, 104, 0),
+ 6, 7, TLV_DB_SCALE_ITEM(1072, 131, 0),
+ 8, 9, TLV_DB_SCALE_ITEM(1350, 165, 0),
+ 10, 11, TLV_DB_SCALE_ITEM(1699, 208, 0),
+);
+
+static const DECLARE_TLV_DB_RANGE(max98371_dht_rot_gain,
+ 0, 1, TLV_DB_SCALE_ITEM(-50, -50, 0),
+ 2, 6, TLV_DB_SCALE_ITEM(-100, -100, 0),
+ 7, 8, TLV_DB_SCALE_ITEM(-800, -200, 0),
+ 9, 11, TLV_DB_SCALE_ITEM(-1200, -300, 0),
+ 12, 13, TLV_DB_SCALE_ITEM(-2000, -200, 0),
+ 14, 15, TLV_DB_SCALE_ITEM(-2500, -500, 0),
+);
+
+static const struct reg_default max98371_reg[] = {
+ { 0x01, 0x00 },
+ { 0x02, 0x00 },
+ { 0x03, 0x00 },
+ { 0x04, 0x00 },
+ { 0x05, 0x00 },
+ { 0x06, 0x00 },
+ { 0x07, 0x00 },
+ { 0x08, 0x00 },
+ { 0x09, 0x00 },
+ { 0x0A, 0x00 },
+ { 0x10, 0x06 },
+ { 0x11, 0x08 },
+ { 0x14, 0x80 },
+ { 0x15, 0x00 },
+ { 0x16, 0x00 },
+ { 0x18, 0x00 },
+ { 0x19, 0x00 },
+ { 0x1C, 0x00 },
+ { 0x1D, 0x00 },
+ { 0x1E, 0x00 },
+ { 0x1F, 0x00 },
+ { 0x20, 0x00 },
+ { 0x21, 0x00 },
+ { 0x22, 0x00 },
+ { 0x23, 0x00 },
+ { 0x24, 0x00 },
+ { 0x25, 0x00 },
+ { 0x26, 0x00 },
+ { 0x27, 0x00 },
+ { 0x28, 0x00 },
+ { 0x29, 0x00 },
+ { 0x2A, 0x00 },
+ { 0x2B, 0x00 },
+ { 0x2C, 0x00 },
+ { 0x2D, 0x00 },
+ { 0x2E, 0x0B },
+ { 0x31, 0x00 },
+ { 0x32, 0x18 },
+ { 0x33, 0x00 },
+ { 0x34, 0x00 },
+ { 0x36, 0x00 },
+ { 0x37, 0x00 },
+ { 0x38, 0x00 },
+ { 0x39, 0x00 },
+ { 0x3A, 0x00 },
+ { 0x3B, 0x00 },
+ { 0x3C, 0x00 },
+ { 0x3D, 0x00 },
+ { 0x3E, 0x00 },
+ { 0x3F, 0x00 },
+ { 0x40, 0x00 },
+ { 0x41, 0x00 },
+ { 0x42, 0x00 },
+ { 0x43, 0x00 },
+ { 0x4A, 0x00 },
+ { 0x4B, 0x00 },
+ { 0x4C, 0x00 },
+ { 0x4D, 0x00 },
+ { 0x4E, 0x00 },
+ { 0x50, 0x00 },
+ { 0x51, 0x00 },
+ { 0x55, 0x00 },
+ { 0x58, 0x00 },
+ { 0x59, 0x00 },
+ { 0x5C, 0x00 },
+ { 0xFF, 0x43 },
+};
+
+static bool max98371_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX98371_IRQ_CLEAR1:
+ case MAX98371_IRQ_CLEAR2:
+ case MAX98371_IRQ_CLEAR3:
+ case MAX98371_VERSION:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool max98371_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX98371_SOFT_RESET:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static const DECLARE_TLV_DB_RANGE(max98371_gain_tlv,
+ 0, 7, TLV_DB_SCALE_ITEM(0, 50, 0),
+ 8, 10, TLV_DB_SCALE_ITEM(400, 100, 0)
+);
+
+static const DECLARE_TLV_DB_RANGE(max98371_noload_gain_tlv,
+ 0, 11, TLV_DB_SCALE_ITEM(950, 100, 0),
+);
+
+static const DECLARE_TLV_DB_SCALE(digital_tlv, -6300, 50, 1);
+
+static const struct snd_kcontrol_new max98371_snd_controls[] = {
+ SOC_SINGLE_TLV("Speaker Volume", MAX98371_GAIN,
+ MAX98371_GAIN_SHIFT, (1<<MAX98371_GAIN_WIDTH)-1, 0,
+ max98371_gain_tlv),
+ SOC_SINGLE_TLV("Digital Volume", MAX98371_DIGITAL_GAIN, 0,
+ (1<<MAX98371_DIGITAL_GAIN_WIDTH)-1, 1, digital_tlv),
+ SOC_SINGLE_TLV("Speaker DHT Max Volume", MAX98371_GAIN,
+ 0, (1<<MAX98371_DHT_MAX_WIDTH)-1, 0,
+ max98371_dht_max_gain),
+ SOC_SINGLE_TLV("Speaker DHT Min Volume", MAX98371_DHT_GAIN,
+ 0, (1<<MAX98371_DHT_GAIN_WIDTH)-1, 0,
+ max98371_dht_min_gain),
+ SOC_SINGLE_TLV("Speaker DHT Rotation Volume", MAX98371_DHT_GAIN,
+ 0, (1<<MAX98371_DHT_ROT_WIDTH)-1, 0,
+ max98371_dht_rot_gain),
+ SOC_SINGLE("DHT Attack Step", MAX98371_DHT, MAX98371_DHT_STEP, 3, 0),
+ SOC_SINGLE("DHT Attack Rate", MAX98371_DHT, 0, 7, 0),
+ SOC_ENUM("Monomix Select", max98371_monomix),
+ SOC_ENUM("HPF Cutoff", max98371_hpf_cutoff),
+};
+
+static int max98371_dai_set_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct max98371_priv *max98371 = snd_soc_codec_get_drvdata(codec);
+ unsigned int val = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ dev_err(codec->dev, "DAI clock mode unsupported");
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ val |= 0;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ val |= MAX98371_DAI_RIGHT;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ val |= MAX98371_DAI_LEFT;
+ break;
+ default:
+ dev_err(codec->dev, "DAI wrong mode unsupported");
+ return -EINVAL;
+ }
+ regmap_update_bits(max98371->regmap, MAX98371_FMT,
+ MAX98371_FMT_MODE_MASK, val);
+ return 0;
+}
+
+static int max98371_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct max98371_priv *max98371 = snd_soc_codec_get_drvdata(codec);
+ int blr_clk_ratio, ch_size, channels = params_channels(params);
+ int rate = params_rate(params);
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S8:
+ regmap_update_bits(max98371->regmap, MAX98371_FMT,
+ MAX98371_FMT_MASK, MAX98371_DAI_CHANSZ_16);
+ ch_size = 8;
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ regmap_update_bits(max98371->regmap, MAX98371_FMT,
+ MAX98371_FMT_MASK, MAX98371_DAI_CHANSZ_16);
+ ch_size = 16;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ regmap_update_bits(max98371->regmap, MAX98371_FMT,
+ MAX98371_FMT_MASK, MAX98371_DAI_CHANSZ_32);
+ ch_size = 24;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ regmap_update_bits(max98371->regmap, MAX98371_FMT,
+ MAX98371_FMT_MASK, MAX98371_DAI_CHANSZ_32);
+ ch_size = 32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* BCLK/LRCLK ratio calculation */
+ blr_clk_ratio = channels * ch_size;
+ switch (blr_clk_ratio) {
+ case 32:
+ regmap_update_bits(max98371->regmap,
+ MAX98371_DAI_CLK,
+ MAX98371_DAI_BSEL_MASK, MAX98371_DAI_BSEL_32);
+ break;
+ case 48:
+ regmap_update_bits(max98371->regmap,
+ MAX98371_DAI_CLK,
+ MAX98371_DAI_BSEL_MASK, MAX98371_DAI_BSEL_48);
+ break;
+ case 64:
+ regmap_update_bits(max98371->regmap,
+ MAX98371_DAI_CLK,
+ MAX98371_DAI_BSEL_MASK, MAX98371_DAI_BSEL_64);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (rate) {
+ case 32000:
+ regmap_update_bits(max98371->regmap,
+ MAX98371_SPK_SR,
+ MAX98371_SPK_SR_MASK, MAX98371_SPK_SR_32);
+ break;
+ case 44100:
+ regmap_update_bits(max98371->regmap,
+ MAX98371_SPK_SR,
+ MAX98371_SPK_SR_MASK, MAX98371_SPK_SR_44);
+ break;
+ case 48000:
+ regmap_update_bits(max98371->regmap,
+ MAX98371_SPK_SR,
+ MAX98371_SPK_SR_MASK, MAX98371_SPK_SR_48);
+ break;
+ case 88200:
+ regmap_update_bits(max98371->regmap,
+ MAX98371_SPK_SR,
+ MAX98371_SPK_SR_MASK, MAX98371_SPK_SR_88);
+ break;
+ case 96000:
+ regmap_update_bits(max98371->regmap,
+ MAX98371_SPK_SR,
+ MAX98371_SPK_SR_MASK, MAX98371_SPK_SR_96);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* enable both RX channels */
+ regmap_update_bits(max98371->regmap, MAX98371_MONOMIX_SRC,
+ MAX98371_MONOMIX_SRC_MASK, MONOMIX_RX_0_1);
+ regmap_update_bits(max98371->regmap, MAX98371_DAI_CHANNEL,
+ MAX98371_CHANNEL_MASK, MAX98371_CHANNEL_MASK);
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget max98371_dapm_widgets[] = {
+ SND_SOC_DAPM_DAC("DAC", NULL, MAX98371_SPK_ENABLE, 0, 0),
+ SND_SOC_DAPM_SUPPLY("Global Enable", MAX98371_GLOBAL_ENABLE,
+ 0, 0, NULL, 0),
+ SND_SOC_DAPM_OUTPUT("SPK_OUT"),
+};
+
+static const struct snd_soc_dapm_route max98371_audio_map[] = {
+ {"DAC", NULL, "HiFi Playback"},
+ {"SPK_OUT", NULL, "DAC"},
+ {"SPK_OUT", NULL, "Global Enable"},
+};
+
+#define MAX98371_RATES SNDRV_PCM_RATE_8000_48000
+#define MAX98371_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static const struct snd_soc_dai_ops max98371_dai_ops = {
+ .set_fmt = max98371_dai_set_fmt,
+ .hw_params = max98371_dai_hw_params,
+};
+
+static struct snd_soc_dai_driver max98371_dai[] = {
+ {
+ .name = "max98371-aif1",
+ .playback = {
+ .stream_name = "HiFi Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MAX98371_RATES,
+ .formats = MAX98371_FORMATS,
+ },
+ .ops = &max98371_dai_ops,
+ }
+};
+
+static const struct snd_soc_codec_driver max98371_codec = {
+ .controls = max98371_snd_controls,
+ .num_controls = ARRAY_SIZE(max98371_snd_controls),
+ .dapm_routes = max98371_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(max98371_audio_map),
+ .dapm_widgets = max98371_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max98371_dapm_widgets),
+};
+
+static const struct regmap_config max98371_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX98371_VERSION,
+ .reg_defaults = max98371_reg,
+ .num_reg_defaults = ARRAY_SIZE(max98371_reg),
+ .volatile_reg = max98371_volatile_register,
+ .readable_reg = max98371_readable_register,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int max98371_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct max98371_priv *max98371;
+ int ret, reg;
+
+ max98371 = devm_kzalloc(&i2c->dev,
+ sizeof(*max98371), GFP_KERNEL);
+ if (!max98371)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, max98371);
+ max98371->regmap = devm_regmap_init_i2c(i2c, &max98371_regmap);
+ if (IS_ERR(max98371->regmap)) {
+ ret = PTR_ERR(max98371->regmap);
+ dev_err(&i2c->dev,
+ "Failed to allocate regmap: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(max98371->regmap, MAX98371_VERSION, &reg);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "failed to read device version: %d\n", ret);
+ return ret;
+ }
+ dev_info(&i2c->dev, "device version %x\n", reg);
+
+ ret = snd_soc_register_codec(&i2c->dev, &max98371_codec,
+ max98371_dai, ARRAY_SIZE(max98371_dai));
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Failed to register codec: %d\n", ret);
+ return ret;
+ }
+ return ret;
+}
+
+static int max98371_i2c_remove(struct i2c_client *client)
+{
+ snd_soc_unregister_codec(&client->dev);
+ return 0;
+}
+
+static const struct i2c_device_id max98371_i2c_id[] = {
+ { "max98371", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, max98371_i2c_id);
+
+static const struct of_device_id max98371_of_match[] = {
+ { .compatible = "maxim,max98371", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max98371_of_match);
+
+static struct i2c_driver max98371_i2c_driver = {
+ .driver = {
+ .name = "max98371",
+ .owner = THIS_MODULE,
+ .pm = NULL,
+ .of_match_table = of_match_ptr(max98371_of_match),
+ },
+ .probe = max98371_i2c_probe,
+ .remove = max98371_i2c_remove,
+ .id_table = max98371_i2c_id,
+};
+
+module_i2c_driver(max98371_i2c_driver);
+
+MODULE_AUTHOR("anish kumar <yesanishhere@gmail.com>");
+MODULE_DESCRIPTION("ALSA SoC MAX98371 driver");
+MODULE_LICENSE("GPL");
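The ratio handling in max98371_dai_hw_params() above is worth a worked example: BCLK/LRCLK is channels times the per-channel slot width, and only 32, 48 and 64 have a BSEL encoding. A standalone check (values illustrative):

#include <stdio.h>

int main(void)
{
	int channels = 2, ch_size = 16;		/* stereo, 16-bit slots */
	int ratio = channels * ch_size;		/* 32 -> MAX98371_DAI_BSEL_32 */

	printf("BCLK/LRCLK ratio = %d\n", ratio);

	ch_size = 24;				/* stereo, 24-bit slots */
	printf("BCLK/LRCLK ratio = %d\n", channels * ch_size);	/* 48 -> BSEL_48 */
	return 0;
}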
diff --git a/sound/soc/codecs/max98371.h b/sound/soc/codecs/max98371.h
new file mode 100644
index 000000000000..9f6330964d98
--- /dev/null
+++ b/sound/soc/codecs/max98371.h
@@ -0,0 +1,67 @@
+/*
+ * max98371.h -- MAX98371 ALSA SoC Audio driver
+ *
+ * Copyright 2011-2012 Maxim Integrated Products
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _MAX98371_H
+#define _MAX98371_H
+
+#define MAX98371_IRQ_CLEAR1 0x01
+#define MAX98371_IRQ_CLEAR2 0x02
+#define MAX98371_IRQ_CLEAR3 0x03
+#define MAX98371_DAI_CLK 0x10
+#define MAX98371_DAI_BSEL_MASK 0xF
+#define MAX98371_DAI_BSEL_32 2
+#define MAX98371_DAI_BSEL_48 3
+#define MAX98371_DAI_BSEL_64 4
+#define MAX98371_SPK_SR 0x11
+#define MAX98371_SPK_SR_MASK 0xF
+#define MAX98371_SPK_SR_32 6
+#define MAX98371_SPK_SR_44 7
+#define MAX98371_SPK_SR_48 8
+#define MAX98371_SPK_SR_88 10
+#define MAX98371_SPK_SR_96 11
+#define MAX98371_DAI_CHANNEL 0x15
+#define MAX98371_CHANNEL_MASK 0x3
+#define MAX98371_MONOMIX_SRC 0x18
+#define MAX98371_MONOMIX_CFG 0x19
+#define MAX98371_HPF 0x1C
+#define MAX98371_MONOMIX_SRC_MASK 0xFF
+#define MONOMIX_RX_0_1 ((0x1)<<(4))
+#define M98371_DAI_CHANNEL_I2S 0x3
+#define MAX98371_DIGITAL_GAIN 0x2D
+#define MAX98371_DIGITAL_GAIN_WIDTH 0x7
+#define MAX98371_GAIN 0x2E
+#define MAX98371_GAIN_SHIFT 0x4
+#define MAX98371_GAIN_WIDTH 0x4
+#define MAX98371_DHT_MAX_WIDTH 4
+#define MAX98371_FMT 0x14
+#define MAX98371_CHANSZ_WIDTH 6
+#define MAX98371_FMT_MASK ((0x3)<<(MAX98371_CHANSZ_WIDTH))
+#define MAX98371_FMT_MODE_MASK ((0x7)<<(3))
+#define MAX98371_DAI_LEFT ((0x1)<<(3))
+#define MAX98371_DAI_RIGHT ((0x2)<<(3))
+#define MAX98371_DAI_CHANSZ_16 ((1)<<(MAX98371_CHANSZ_WIDTH))
+#define MAX98371_DAI_CHANSZ_24 ((2)<<(MAX98371_CHANSZ_WIDTH))
+#define MAX98371_DAI_CHANSZ_32 ((3)<<(MAX98371_CHANSZ_WIDTH))
+#define MAX98371_DHT 0x32
+#define MAX98371_DHT_STEP 0x3
+#define MAX98371_DHT_GAIN 0x31
+#define MAX98371_DHT_GAIN_WIDTH 0x4
+#define MAX98371_DHT_ROT_WIDTH 0x4
+#define MAX98371_SPK_ENABLE 0x4A
+#define MAX98371_GLOBAL_ENABLE 0x50
+#define MAX98371_SOFT_RESET 0x51
+#define MAX98371_VERSION 0xFF
+
+
+struct max98371_priv {
+ struct regmap *regmap;
+ struct snd_soc_codec *codec;
+};
+#endif
diff --git a/sound/soc/codecs/rt298.c b/sound/soc/codecs/rt298.c
index a1aaffc20862..f80cfe4d2ef2 100644
--- a/sound/soc/codecs/rt298.c
+++ b/sound/soc/codecs/rt298.c
@@ -276,6 +276,8 @@ static int rt298_jack_detect(struct rt298_priv *rt298, bool *hp, bool *mic)
} else {
*mic = false;
regmap_write(rt298->regmap, RT298_SET_MIC1, 0x20);
+ regmap_update_bits(rt298->regmap,
+ RT298_CBJ_CTRL1, 0x0400, 0x0000);
}
} else {
regmap_read(rt298->regmap, RT298_GET_HP_SENSE, &buf);
@@ -482,6 +484,26 @@ static int rt298_adc_event(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec,
VERB_CMD(AC_VERB_SET_AMP_GAIN_MUTE, nid, 0),
0x7080, 0x7000);
+ /* If MCLK doesn't exist, reset AD filter */
+ if (!(snd_soc_read(codec, RT298_VAD_CTRL) & 0x200)) {
+ pr_info("NO MCLK\n");
+ switch (nid) {
+ case RT298_ADC_IN1:
+ snd_soc_update_bits(codec,
+ RT298_D_FILTER_CTRL, 0x2, 0x2);
+ mdelay(10);
+ snd_soc_update_bits(codec,
+ RT298_D_FILTER_CTRL, 0x2, 0x0);
+ break;
+ case RT298_ADC_IN2:
+ snd_soc_update_bits(codec,
+ RT298_D_FILTER_CTRL, 0x4, 0x4);
+ mdelay(10);
+ snd_soc_update_bits(codec,
+ RT298_D_FILTER_CTRL, 0x4, 0x0);
+ break;
+ }
+ }
break;
case SND_SOC_DAPM_PRE_PMD:
snd_soc_update_bits(codec,
@@ -520,30 +542,12 @@ static int rt298_mic1_event(struct snd_soc_dapm_widget *w,
return 0;
}
-static int rt298_vref_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- snd_soc_update_bits(codec,
- RT298_CBJ_CTRL1, 0x0400, 0x0000);
- mdelay(50);
- break;
- default:
- return 0;
- }
-
- return 0;
-}
-
static const struct snd_soc_dapm_widget rt298_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY_S("HV", 1, RT298_POWER_CTRL1,
12, 1, NULL, 0),
SND_SOC_DAPM_SUPPLY("VREF", RT298_POWER_CTRL1,
- 0, 1, rt298_vref_event, SND_SOC_DAPM_PRE_PMU),
+ 0, 1, NULL, 0),
SND_SOC_DAPM_SUPPLY_S("BG_MBIAS", 1, RT298_POWER_CTRL2,
1, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY_S("LDO1", 1, RT298_POWER_CTRL2,
@@ -934,18 +938,9 @@ static int rt298_set_bias_level(struct snd_soc_codec *codec,
}
break;
- case SND_SOC_BIAS_ON:
- mdelay(30);
- snd_soc_update_bits(codec,
- RT298_CBJ_CTRL1, 0x0400, 0x0400);
-
- break;
-
case SND_SOC_BIAS_STANDBY:
snd_soc_write(codec,
RT298_SET_AUDIO_POWER, AC_PWRST_D3);
- snd_soc_update_bits(codec,
- RT298_CBJ_CTRL1, 0x0400, 0x0000);
break;
default:
diff --git a/sound/soc/codecs/rt298.h b/sound/soc/codecs/rt298.h
index d66f8847b676..3638f3d61209 100644
--- a/sound/soc/codecs/rt298.h
+++ b/sound/soc/codecs/rt298.h
@@ -137,6 +137,7 @@
#define RT298_A_BIAS_CTRL2 0x02
#define RT298_POWER_CTRL1 0x03
#define RT298_A_BIAS_CTRL3 0x04
+#define RT298_D_FILTER_CTRL 0x05
#define RT298_POWER_CTRL2 0x08
#define RT298_I2S_CTRL1 0x09
#define RT298_I2S_CTRL2 0x0a
@@ -148,6 +149,7 @@
#define RT298_IRQ_CTRL 0x33
#define RT298_WIND_FILTER_CTRL 0x46
#define RT298_PLL_CTRL1 0x49
+#define RT298_VAD_CTRL 0x4e
#define RT298_CBJ_CTRL1 0x4f
#define RT298_CBJ_CTRL2 0x50
#define RT298_PLL_CTRL 0x63
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 60212266d5d1..da9483c1c6fb 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -1241,60 +1241,46 @@ static int rt5677_dmic_use_asrc(struct snd_soc_dapm_widget *source,
regmap_read(rt5677->regmap, RT5677_ASRC_5, &asrc_setting);
asrc_setting = (asrc_setting & RT5677_AD_STO1_CLK_SEL_MASK) >>
RT5677_AD_STO1_CLK_SEL_SFT;
- if (asrc_setting >= RT5677_CLK_SEL_I2S1_ASRC &&
- asrc_setting <= RT5677_CLK_SEL_I2S6_ASRC)
- return 1;
break;
case 10:
regmap_read(rt5677->regmap, RT5677_ASRC_5, &asrc_setting);
asrc_setting = (asrc_setting & RT5677_AD_STO2_CLK_SEL_MASK) >>
RT5677_AD_STO2_CLK_SEL_SFT;
- if (asrc_setting >= RT5677_CLK_SEL_I2S1_ASRC &&
- asrc_setting <= RT5677_CLK_SEL_I2S6_ASRC)
- return 1;
break;
case 9:
regmap_read(rt5677->regmap, RT5677_ASRC_5, &asrc_setting);
asrc_setting = (asrc_setting & RT5677_AD_STO3_CLK_SEL_MASK) >>
RT5677_AD_STO3_CLK_SEL_SFT;
- if (asrc_setting >= RT5677_CLK_SEL_I2S1_ASRC &&
- asrc_setting <= RT5677_CLK_SEL_I2S6_ASRC)
- return 1;
break;
case 8:
regmap_read(rt5677->regmap, RT5677_ASRC_5, &asrc_setting);
asrc_setting = (asrc_setting & RT5677_AD_STO4_CLK_SEL_MASK) >>
RT5677_AD_STO4_CLK_SEL_SFT;
- if (asrc_setting >= RT5677_CLK_SEL_I2S1_ASRC &&
- asrc_setting <= RT5677_CLK_SEL_I2S6_ASRC)
- return 1;
break;
case 7:
regmap_read(rt5677->regmap, RT5677_ASRC_6, &asrc_setting);
asrc_setting = (asrc_setting & RT5677_AD_MONOL_CLK_SEL_MASK) >>
RT5677_AD_MONOL_CLK_SEL_SFT;
- if (asrc_setting >= RT5677_CLK_SEL_I2S1_ASRC &&
- asrc_setting <= RT5677_CLK_SEL_I2S6_ASRC)
- return 1;
break;
case 6:
regmap_read(rt5677->regmap, RT5677_ASRC_6, &asrc_setting);
asrc_setting = (asrc_setting & RT5677_AD_MONOR_CLK_SEL_MASK) >>
RT5677_AD_MONOR_CLK_SEL_SFT;
- if (asrc_setting >= RT5677_CLK_SEL_I2S1_ASRC &&
- asrc_setting <= RT5677_CLK_SEL_I2S6_ASRC)
- return 1;
break;
default:
- break;
+ return 0;
}
+ if (asrc_setting >= RT5677_CLK_SEL_I2S1_ASRC &&
+ asrc_setting <= RT5677_CLK_SEL_I2S6_ASRC)
+ return 1;
+
return 0;
}
diff --git a/sound/soc/codecs/tas571x.c b/sound/soc/codecs/tas571x.c
index 39307ad41a34..b8d19b77bde9 100644
--- a/sound/soc/codecs/tas571x.c
+++ b/sound/soc/codecs/tas571x.c
@@ -4,6 +4,9 @@
* Copyright (C) 2015 Google, Inc.
* Copyright (c) 2013 Daniel Mack <zonque@gmail.com>
*
+ * TAS5721 support:
+ * Copyright (C) 2016 Petr Kulhavy, Barix AG <petr@barix.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -57,6 +60,10 @@ static int tas571x_register_size(struct tas571x_private *priv, unsigned int reg)
case TAS571X_CH1_VOL_REG:
case TAS571X_CH2_VOL_REG:
return priv->chip->vol_reg_size;
+ case TAS571X_INPUT_MUX_REG:
+ case TAS571X_CH4_SRC_SELECT_REG:
+ case TAS571X_PWM_MUX_REG:
+ return 4;
default:
return 1;
}
@@ -167,6 +174,23 @@ static int tas571x_hw_params(struct snd_pcm_substream *substream,
TAS571X_SDI_FMT_MASK, val);
}
+static int tas571x_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u8 sysctl2;
+ int ret;
+
+ sysctl2 = mute ? TAS571X_SYS_CTRL_2_SDN_MASK : 0;
+
+ ret = snd_soc_update_bits(codec,
+ TAS571X_SYS_CTRL_2_REG,
+ TAS571X_SYS_CTRL_2_SDN_MASK,
+ sysctl2);
+ usleep_range(1000, 2000);
+
+ return ret;
+}
+
static int tas571x_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
@@ -214,6 +238,7 @@ static int tas571x_set_bias_level(struct snd_soc_codec *codec,
static const struct snd_soc_dai_ops tas571x_dai_ops = {
.set_fmt = tas571x_set_dai_fmt,
.hw_params = tas571x_hw_params,
+ .digital_mute = tas571x_mute,
};
static const char *const tas5711_supply_names[] = {
@@ -241,6 +266,26 @@ static const struct snd_kcontrol_new tas5711_controls[] = {
1, 1),
};
+static const struct regmap_range tas571x_readonly_regs_range[] = {
+ regmap_reg_range(TAS571X_CLK_CTRL_REG, TAS571X_DEV_ID_REG),
+};
+
+static const struct regmap_range tas571x_volatile_regs_range[] = {
+ regmap_reg_range(TAS571X_CLK_CTRL_REG, TAS571X_ERR_STATUS_REG),
+ regmap_reg_range(TAS571X_OSC_TRIM_REG, TAS571X_OSC_TRIM_REG),
+};
+
+static const struct regmap_access_table tas571x_write_regs = {
+ .no_ranges = tas571x_readonly_regs_range,
+ .n_no_ranges = ARRAY_SIZE(tas571x_readonly_regs_range),
+};
+
+static const struct regmap_access_table tas571x_volatile_regs = {
+ .yes_ranges = tas571x_volatile_regs_range,
+ .n_yes_ranges = ARRAY_SIZE(tas571x_volatile_regs_range),
+};
+
static const struct reg_default tas5711_reg_defaults[] = {
{ 0x04, 0x05 },
{ 0x05, 0x40 },
@@ -260,6 +305,8 @@ static const struct regmap_config tas5711_regmap_config = {
.reg_defaults = tas5711_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(tas5711_reg_defaults),
.cache_type = REGCACHE_RBTREE,
+ .wr_table = &tas571x_write_regs,
+ .volatile_table = &tas571x_volatile_regs,
};
static const struct tas571x_chip tas5711_chip = {
@@ -314,6 +361,8 @@ static const struct regmap_config tas5717_regmap_config = {
.reg_defaults = tas5717_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(tas5717_reg_defaults),
.cache_type = REGCACHE_RBTREE,
+ .wr_table = &tas571x_write_regs,
+ .volatile_table = &tas571x_volatile_regs,
};
/* This entry is reused for tas5719 as the software interface is identical. */
@@ -326,6 +375,77 @@ static const struct tas571x_chip tas5717_chip = {
.vol_reg_size = 2,
};
+static const char *const tas5721_supply_names[] = {
+ "AVDD",
+ "DVDD",
+ "DRVDD",
+ "PVDD",
+};
+
+static const struct snd_kcontrol_new tas5721_controls[] = {
+ SOC_SINGLE_TLV("Master Volume",
+ TAS571X_MVOL_REG,
+ 0, 0xff, 1, tas5711_volume_tlv),
+ SOC_DOUBLE_R_TLV("Speaker Volume",
+ TAS571X_CH1_VOL_REG,
+ TAS571X_CH2_VOL_REG,
+ 0, 0xff, 1, tas5711_volume_tlv),
+ SOC_DOUBLE("Speaker Switch",
+ TAS571X_SOFT_MUTE_REG,
+ TAS571X_SOFT_MUTE_CH1_SHIFT, TAS571X_SOFT_MUTE_CH2_SHIFT,
+ 1, 1),
+};
+
+static const struct reg_default tas5721_reg_defaults[] = {
+ {TAS571X_CLK_CTRL_REG, 0x6c},
+ {TAS571X_DEV_ID_REG, 0x00},
+ {TAS571X_ERR_STATUS_REG, 0x00},
+ {TAS571X_SYS_CTRL_1_REG, 0xa0},
+ {TAS571X_SDI_REG, 0x05},
+ {TAS571X_SYS_CTRL_2_REG, 0x40},
+ {TAS571X_SOFT_MUTE_REG, 0x00},
+ {TAS571X_MVOL_REG, 0xff},
+ {TAS571X_CH1_VOL_REG, 0x30},
+ {TAS571X_CH2_VOL_REG, 0x30},
+ {TAS571X_CH3_VOL_REG, 0x30},
+ {TAS571X_VOL_CFG_REG, 0x91},
+ {TAS571X_MODULATION_LIMIT_REG, 0x02},
+ {TAS571X_IC_DELAY_CH1_REG, 0xac},
+ {TAS571X_IC_DELAY_CH2_REG, 0x54},
+ {TAS571X_IC_DELAY_CH3_REG, 0xac},
+ {TAS571X_IC_DELAY_CH4_REG, 0x54},
+ {TAS571X_PWM_CH_SDN_GROUP_REG, 0x30},
+ {TAS571X_START_STOP_PERIOD_REG, 0x0f},
+ {TAS571X_OSC_TRIM_REG, 0x82},
+ {TAS571X_BKND_ERR_REG, 0x02},
+ {TAS571X_INPUT_MUX_REG, 0x17772},
+ {TAS571X_CH4_SRC_SELECT_REG, 0x4303},
+ {TAS571X_PWM_MUX_REG, 0x1021345},
+};
+
+static const struct regmap_config tas5721_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 32,
+ .max_register = 0xff,
+ .reg_read = tas571x_reg_read,
+ .reg_write = tas571x_reg_write,
+ .reg_defaults = tas5721_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(tas5721_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+ .wr_table = &tas571x_write_regs,
+ .volatile_table = &tas571x_volatile_regs,
+};
+
+
+static const struct tas571x_chip tas5721_chip = {
+ .supply_names = tas5721_supply_names,
+ .num_supply_names = ARRAY_SIZE(tas5721_supply_names),
+ .controls = tas5711_controls,
+ .num_controls = ARRAY_SIZE(tas5711_controls),
+ .regmap_config = &tas5721_regmap_config,
+ .vol_reg_size = 1,
+};
+
static const struct snd_soc_dapm_widget tas571x_dapm_widgets[] = {
SND_SOC_DAPM_DAC("DACL", NULL, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_DAC("DACR", NULL, SND_SOC_NOPM, 0, 0),
@@ -386,11 +506,10 @@ static int tas571x_i2c_probe(struct i2c_client *client,
i2c_set_clientdata(client, priv);
of_id = of_match_device(tas571x_of_match, dev);
- if (!of_id) {
- dev_err(dev, "Unknown device type\n");
- return -EINVAL;
- }
- priv->chip = of_id->data;
+ if (of_id)
+ priv->chip = of_id->data;
+ else
+ priv->chip = (void *) id->driver_data;
priv->mclk = devm_clk_get(dev, "mclk");
if (IS_ERR(priv->mclk) && PTR_ERR(priv->mclk) != -ENOENT) {
@@ -445,10 +564,6 @@ static int tas571x_i2c_probe(struct i2c_client *client,
if (ret)
return ret;
- ret = regmap_update_bits(priv->regmap, TAS571X_SYS_CTRL_2_REG,
- TAS571X_SYS_CTRL_2_SDN_MASK, 0);
- if (ret)
- return ret;
memcpy(&priv->codec_driver, &tas571x_codec, sizeof(priv->codec_driver));
priv->codec_driver.controls = priv->chip->controls;
@@ -486,14 +601,16 @@ static const struct of_device_id tas571x_of_match[] = {
{ .compatible = "ti,tas5711", .data = &tas5711_chip, },
{ .compatible = "ti,tas5717", .data = &tas5717_chip, },
{ .compatible = "ti,tas5719", .data = &tas5717_chip, },
+ { .compatible = "ti,tas5721", .data = &tas5721_chip, },
{ }
};
MODULE_DEVICE_TABLE(of, tas571x_of_match);
static const struct i2c_device_id tas571x_i2c_id[] = {
- { "tas5711", 0 },
- { "tas5717", 0 },
- { "tas5719", 0 },
+ { "tas5711", (kernel_ulong_t) &tas5711_chip },
+ { "tas5717", (kernel_ulong_t) &tas5717_chip },
+ { "tas5719", (kernel_ulong_t) &tas5717_chip },
+ { "tas5721", (kernel_ulong_t) &tas5721_chip },
{ }
};
MODULE_DEVICE_TABLE(i2c, tas571x_i2c_id);
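The probe rework above uses the standard dual-source lookup: chip data comes from the OF match when the device was instantiated from DT, otherwise from the i2c_device_id's driver_data, which the ID table now carries. A condensed sketch (probe body elided):

#include <linux/i2c.h>
#include <linux/of_device.h>

static int sketch_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	const struct of_device_id *of_id =
		of_match_device(tas571x_of_match, &client->dev);
	const struct tas571x_chip *chip;

	chip = of_id ? of_id->data : (const void *)id->driver_data;
	if (!chip)
		return -EINVAL;	/* defensive; not in the driver itself */
	/* ... continue with chip->regmap_config and friends ... */
	return 0;
}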
diff --git a/sound/soc/codecs/tas571x.h b/sound/soc/codecs/tas571x.h
index 0aee471232cd..cf800c364f0f 100644
--- a/sound/soc/codecs/tas571x.h
+++ b/sound/soc/codecs/tas571x.h
@@ -13,6 +13,10 @@
#define _TAS571X_H
/* device registers */
+#define TAS571X_CLK_CTRL_REG 0x00
+#define TAS571X_DEV_ID_REG 0x01
+#define TAS571X_ERR_STATUS_REG 0x02
+#define TAS571X_SYS_CTRL_1_REG 0x03
#define TAS571X_SDI_REG 0x04
#define TAS571X_SDI_FMT_MASK 0x0f
@@ -27,7 +31,25 @@
#define TAS571X_MVOL_REG 0x07
#define TAS571X_CH1_VOL_REG 0x08
#define TAS571X_CH2_VOL_REG 0x09
+#define TAS571X_CH3_VOL_REG 0x0a
+#define TAS571X_VOL_CFG_REG 0x0e
+#define TAS571X_MODULATION_LIMIT_REG 0x10
+#define TAS571X_IC_DELAY_CH1_REG 0x11
+#define TAS571X_IC_DELAY_CH2_REG 0x12
+#define TAS571X_IC_DELAY_CH3_REG 0x13
+#define TAS571X_IC_DELAY_CH4_REG 0x14
+#define TAS571X_PWM_CH_SDN_GROUP_REG 0x19 /* N/A on TAS5717, TAS5719 */
+#define TAS571X_PWM_CH1_SDN_MASK (1<<0)
+#define TAS571X_PWM_CH2_SDN_SHIFT (1<<1)
+#define TAS571X_PWM_CH3_SDN_SHIFT (1<<2)
+#define TAS571X_PWM_CH4_SDN_SHIFT (1<<3)
+
+#define TAS571X_START_STOP_PERIOD_REG 0x1a
#define TAS571X_OSC_TRIM_REG 0x1b
+#define TAS571X_BKND_ERR_REG 0x1c
+#define TAS571X_INPUT_MUX_REG 0x20
+#define TAS571X_CH4_SRC_SELECT_REG 0x21
+#define TAS571X_PWM_MUX_REG 0x25
#endif /* _TAS571X_H */
diff --git a/sound/soc/codecs/tas5720.c b/sound/soc/codecs/tas5720.c
new file mode 100644
index 000000000000..f54fb46b77c2
--- /dev/null
+++ b/sound/soc/codecs/tas5720.c
@@ -0,0 +1,620 @@
+/*
+ * tas5720.c - ALSA SoC Texas Instruments TAS5720 Mono Audio Amplifier
+ *
+ * Copyright (C)2015-2016 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Andreas Dannenberg <dannenberg@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+
+#include "tas5720.h"
+
+/* Define how often to check (and clear) the fault status register (in ms) */
+#define TAS5720_FAULT_CHECK_INTERVAL 200
+
+static const char * const tas5720_supply_names[] = {
+ "dvdd", /* Digital power supply. Connect to 3.3-V supply. */
+ "pvdd", /* Class-D amp and analog power supply (connected). */
+};
+
+#define TAS5720_NUM_SUPPLIES ARRAY_SIZE(tas5720_supply_names)
+
+struct tas5720_data {
+ struct snd_soc_codec *codec;
+ struct regmap *regmap;
+ struct i2c_client *tas5720_client;
+ struct regulator_bulk_data supplies[TAS5720_NUM_SUPPLIES];
+ struct delayed_work fault_check_work;
+ unsigned int last_fault;
+};
+
+static int tas5720_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ unsigned int rate = params_rate(params);
+ bool ssz_ds;
+ int ret;
+
+ switch (rate) {
+ case 44100:
+ case 48000:
+ ssz_ds = false;
+ break;
+ case 88200:
+ case 96000:
+ ssz_ds = true;
+ break;
+ default:
+ dev_err(codec->dev, "unsupported sample rate: %u\n", rate);
+ return -EINVAL;
+ }
+
+ ret = snd_soc_update_bits(codec, TAS5720_DIGITAL_CTRL1_REG,
+ TAS5720_SSZ_DS, ssz_ds);
+ if (ret < 0) {
+ dev_err(codec->dev, "error setting sample rate: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tas5720_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u8 serial_format;
+ int ret;
+
+ if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) {
+ dev_vdbg(codec->dev, "DAI Format master is not found\n");
+ return -EINVAL;
+ }
+
+ switch (fmt & (SND_SOC_DAIFMT_FORMAT_MASK |
+ SND_SOC_DAIFMT_INV_MASK)) {
+ case (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF):
+ /* 1st data bit occurs one BCLK cycle after the frame sync */
+ serial_format = TAS5720_SAIF_I2S;
+ break;
+ case (SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF):
+ /*
+ * Note that although the TAS5720 does not have a dedicated DSP
+ * mode, it doesn't care about the LRCLK duty cycle during TDM
+ * operation. Therefore we can use the device's I2S mode with
+ * its delaying of the 1st data bit to receive DSP_A formatted
+ * data. See device datasheet for additional details.
+ */
+ serial_format = TAS5720_SAIF_I2S;
+ break;
+ case (SND_SOC_DAIFMT_DSP_B | SND_SOC_DAIFMT_NB_NF):
+ /*
+ * Similar to DSP_A, we can use the fact that the TAS5720 does
+ * not care about the LRCLK duty cycle during TDM to receive
+ * DSP_B formatted data in LEFTJ mode (no delaying of the 1st
+ * data bit).
+ */
+ serial_format = TAS5720_SAIF_LEFTJ;
+ break;
+ case (SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF):
+ /* No delay after the frame sync */
+ serial_format = TAS5720_SAIF_LEFTJ;
+ break;
+ default:
+ dev_vdbg(codec->dev, "DAI Format is not found\n");
+ return -EINVAL;
+ }
+
+ ret = snd_soc_update_bits(codec, TAS5720_DIGITAL_CTRL1_REG,
+ TAS5720_SAIF_FORMAT_MASK,
+ serial_format);
+ if (ret < 0) {
+ dev_err(codec->dev, "error setting SAIF format: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tas5720_set_dai_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ unsigned int first_slot;
+ int ret;
+
+ if (!tx_mask) {
+ dev_err(codec->dev, "tx masks must not be 0\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Determine the first slot that is being requested. We will only
+ * use the first slot that is found since the TAS5720 is a mono
+ * amplifier.
+ */
+ first_slot = __ffs(tx_mask);
+
+ if (first_slot > 7) {
+ dev_err(codec->dev, "slot selection out of bounds (%u)\n",
+ first_slot);
+ return -EINVAL;
+ }
+
+ /* Enable manual TDM slot selection (instead of I2C ID based) */
+ ret = snd_soc_update_bits(codec, TAS5720_DIGITAL_CTRL1_REG,
+ TAS5720_TDM_CFG_SRC, TAS5720_TDM_CFG_SRC);
+ if (ret < 0)
+ goto error_snd_soc_update_bits;
+
+ /* Configure the TDM slot to process audio from */
+ ret = snd_soc_update_bits(codec, TAS5720_DIGITAL_CTRL2_REG,
+ TAS5720_TDM_SLOT_SEL_MASK, first_slot);
+ if (ret < 0)
+ goto error_snd_soc_update_bits;
+
+ return 0;
+
+error_snd_soc_update_bits:
+ dev_err(codec->dev, "error configuring TDM mode: %d\n", ret);
+ return ret;
+}
+
+static int tas5720_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ int ret;
+
+ ret = snd_soc_update_bits(codec, TAS5720_DIGITAL_CTRL2_REG,
+ TAS5720_MUTE, mute ? TAS5720_MUTE : 0);
+ if (ret < 0) {
+ dev_err(codec->dev, "error (un-)muting device: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void tas5720_fault_check_work(struct work_struct *work)
+{
+ struct tas5720_data *tas5720 = container_of(work, struct tas5720_data,
+ fault_check_work.work);
+ struct device *dev = tas5720->codec->dev;
+ unsigned int curr_fault;
+ int ret;
+
+ ret = regmap_read(tas5720->regmap, TAS5720_FAULT_REG, &curr_fault);
+ if (ret < 0) {
+ dev_err(dev, "failed to read FAULT register: %d\n", ret);
+ goto out;
+ }
+
+ /* Check/handle all errors except SAIF clock errors */
+ curr_fault &= TAS5720_OCE | TAS5720_DCE | TAS5720_OTE;
+
+ /*
+ * Only flag errors once for a given occurrence. This is needed as
+ * the TAS5720 will take time clearing the fault condition internally
+ * during which we don't want to bombard the system with the same
+ * error message over and over.
+ */
+ if ((curr_fault & TAS5720_OCE) && !(tas5720->last_fault & TAS5720_OCE))
+ dev_crit(dev, "experienced an over current hardware fault\n");
+
+ if ((curr_fault & TAS5720_DCE) && !(tas5720->last_fault & TAS5720_DCE))
+ dev_crit(dev, "experienced a DC detection fault\n");
+
+ if ((curr_fault & TAS5720_OTE) && !(tas5720->last_fault & TAS5720_OTE))
+ dev_crit(dev, "experienced an over temperature fault\n");
+
+ /* Store current fault value so we can detect any changes next time */
+ tas5720->last_fault = curr_fault;
+
+ if (!curr_fault)
+ goto out;
+
+ /*
+ * Periodically toggle SDZ (shutdown bit) H->L->H to clear any latching
+ * faults as long as a fault condition persists. Always go through
+ * the full sequence regardless of the first write's return value, to
+ * minimize the chance of the device ending up in shutdown mode.
+ */
+ ret = regmap_write_bits(tas5720->regmap, TAS5720_POWER_CTRL_REG,
+ TAS5720_SDZ, 0);
+ if (ret < 0)
+ dev_err(dev, "failed to write POWER_CTRL register: %d\n", ret);
+
+ ret = regmap_write_bits(tas5720->regmap, TAS5720_POWER_CTRL_REG,
+ TAS5720_SDZ, TAS5720_SDZ);
+ if (ret < 0)
+ dev_err(dev, "failed to write POWER_CTRL register: %d\n", ret);
+
+out:
+ /* Schedule the next fault check at the specified interval */
+ schedule_delayed_work(&tas5720->fault_check_work,
+ msecs_to_jiffies(TAS5720_FAULT_CHECK_INTERVAL));
+}
+
+static int tas5720_codec_probe(struct snd_soc_codec *codec)
+{
+ struct tas5720_data *tas5720 = snd_soc_codec_get_drvdata(codec);
+ unsigned int device_id;
+ int ret;
+
+ tas5720->codec = codec;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(tas5720->supplies),
+ tas5720->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev, "failed to enable supplies: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(tas5720->regmap, TAS5720_DEVICE_ID_REG, &device_id);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to read device ID register: %d\n",
+ ret);
+ goto probe_fail;
+ }
+
+ if (device_id != TAS5720_DEVICE_ID) {
+ dev_err(codec->dev, "wrong device ID. expected: %u read: %u\n",
+ TAS5720_DEVICE_ID, device_id);
+ ret = -ENODEV;
+ goto probe_fail;
+ }
+
+ /* Set device to mute */
+ ret = snd_soc_update_bits(codec, TAS5720_DIGITAL_CTRL2_REG,
+ TAS5720_MUTE, TAS5720_MUTE);
+ if (ret < 0)
+ goto error_snd_soc_update_bits;
+
+ /*
+ * Enter shutdown mode - our default when not playing audio - to
+ * minimize current consumption. On the TAS5720 there is no real
+ * downside to doing so, as all device registers are preserved and the
+ * codec wakes up quickly; the wakeup is driven from a DAPM widget.
+ */
+ ret = snd_soc_update_bits(codec, TAS5720_POWER_CTRL_REG,
+ TAS5720_SDZ, 0);
+ if (ret < 0)
+ goto error_snd_soc_update_bits;
+
+ INIT_DELAYED_WORK(&tas5720->fault_check_work, tas5720_fault_check_work);
+
+ return 0;
+
+error_snd_soc_update_bits:
+ dev_err(codec->dev, "error configuring device registers: %d\n", ret);
+
+probe_fail:
+ regulator_bulk_disable(ARRAY_SIZE(tas5720->supplies),
+ tas5720->supplies);
+ return ret;
+}
+
+static int tas5720_codec_remove(struct snd_soc_codec *codec)
+{
+ struct tas5720_data *tas5720 = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ cancel_delayed_work_sync(&tas5720->fault_check_work);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(tas5720->supplies),
+ tas5720->supplies);
+ if (ret < 0)
+ dev_err(codec->dev, "failed to disable supplies: %d\n", ret);
+
+ return ret;
+}
+
+static int tas5720_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct tas5720_data *tas5720 = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ if (event & SND_SOC_DAPM_POST_PMU) {
+ /* Take TAS5720 out of shutdown mode */
+ ret = snd_soc_update_bits(codec, TAS5720_POWER_CTRL_REG,
+ TAS5720_SDZ, TAS5720_SDZ);
+ if (ret < 0) {
+ dev_err(codec->dev, "error waking codec: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Observe codec shutdown-to-active time. The datasheet only
+ * lists a nominal value, however we use it as-is without
+ * additional padding to minimize the delay introduced in
+ * starting to play audio (in practice there is other setup
+ * done by the ASoC framework that provides additional
+ * delays, so we should always be safe).
+ */
+ msleep(25);
+
+ /* Turn on TAS5720 periodic fault checking/handling */
+ tas5720->last_fault = 0;
+ schedule_delayed_work(&tas5720->fault_check_work,
+ msecs_to_jiffies(TAS5720_FAULT_CHECK_INTERVAL));
+ } else if (event & SND_SOC_DAPM_PRE_PMD) {
+ /* Disable TAS5720 periodic fault checking/handling */
+ cancel_delayed_work_sync(&tas5720->fault_check_work);
+
+ /* Place TAS5720 in shutdown mode to minimize current draw */
+ ret = snd_soc_update_bits(codec, TAS5720_POWER_CTRL_REG,
+ TAS5720_SDZ, 0);
+ if (ret < 0) {
+ dev_err(codec->dev, "error shutting down codec: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int tas5720_suspend(struct snd_soc_codec *codec)
+{
+ struct tas5720_data *tas5720 = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
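+ /*
+ * Divert register accesses to the regcache and mark it dirty so that
+ * regcache_sync() restores the hardware state on resume.
+ */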
+ regcache_cache_only(tas5720->regmap, true);
+ regcache_mark_dirty(tas5720->regmap);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(tas5720->supplies),
+ tas5720->supplies);
+ if (ret < 0)
+ dev_err(codec->dev, "failed to disable supplies: %d\n", ret);
+
+ return ret;
+}
+
+static int tas5720_resume(struct snd_soc_codec *codec)
+{
+ struct tas5720_data *tas5720 = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(tas5720->supplies),
+ tas5720->supplies);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to enable supplies: %d\n", ret);
+ return ret;
+ }
+
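+ /* Hardware access is possible again, replay the cached registers */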
+ regcache_cache_only(tas5720->regmap, false);
+
+ ret = regcache_sync(tas5720->regmap);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to sync regcache: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+#else
+#define tas5720_suspend NULL
+#define tas5720_resume NULL
+#endif
+
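+/*
+ * The device ID and fault status registers are updated by the hardware
+ * itself, so they must always be read from the device, never the cache.
+ */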
+static bool tas5720_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case TAS5720_DEVICE_ID_REG:
+ case TAS5720_FAULT_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config tas5720_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = TAS5720_MAX_REG,
+ .cache_type = REGCACHE_RBTREE,
+ .volatile_reg = tas5720_is_volatile_reg,
+};
+
+/*
+ * DAC analog gain. There are four discrete values to select from, ranging
+ * from 19.2 dB to 26.3 dB.
+ */
+static const DECLARE_TLV_DB_RANGE(dac_analog_tlv,
+ 0x0, 0x0, TLV_DB_SCALE_ITEM(1920, 0, 0),
+ 0x1, 0x1, TLV_DB_SCALE_ITEM(2070, 0, 0),
+ 0x2, 0x2, TLV_DB_SCALE_ITEM(2350, 0, 0),
+ 0x3, 0x3, TLV_DB_SCALE_ITEM(2630, 0, 0),
+);
+
+/*
+ * DAC digital volumes. From -103.5 to 24 dB in 0.5 dB steps. Note that
+ * setting the gain below -100 dB (register value <0x7) is effectively a MUTE
+ * as per the device datasheet.
+ */
+static DECLARE_TLV_DB_SCALE(dac_tlv, -10350, 50, 0);
+
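+/* With the mapping above, register value 0x00 is -103.5 dB and 0xff is +24 dB */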
+static const struct snd_kcontrol_new tas5720_snd_controls[] = {
+ SOC_SINGLE_TLV("Speaker Driver Playback Volume",
+ TAS5720_VOLUME_CTRL_REG, 0, 0xff, 0, dac_tlv),
+ SOC_SINGLE_TLV("Speaker Driver Analog Gain", TAS5720_ANALOG_CTRL_REG,
+ TAS5720_ANALOG_GAIN_SHIFT, 3, 0, dac_analog_tlv),
+};
+
+static const struct snd_soc_dapm_widget tas5720_dapm_widgets[] = {
+ SND_SOC_DAPM_AIF_IN("DAC IN", "Playback", 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0, tas5720_dac_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+ SND_SOC_DAPM_OUTPUT("OUT")
+};
+
+static const struct snd_soc_dapm_route tas5720_audio_map[] = {
+ { "DAC", NULL, "DAC IN" },
+ { "OUT", NULL, "DAC" },
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_tas5720 = {
+ .probe = tas5720_codec_probe,
+ .remove = tas5720_codec_remove,
+ .suspend = tas5720_suspend,
+ .resume = tas5720_resume,
+
+ .controls = tas5720_snd_controls,
+ .num_controls = ARRAY_SIZE(tas5720_snd_controls),
+ .dapm_widgets = tas5720_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tas5720_dapm_widgets),
+ .dapm_routes = tas5720_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(tas5720_audio_map),
+};
+
+/* PCM rates supported by the TAS5720 driver */
+#define TAS5720_RATES (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
+ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
+
+/* Formats supported by TAS5720 driver */
+#define TAS5720_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE |\
+ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE)
+
+static struct snd_soc_dai_ops tas5720_speaker_dai_ops = {
+ .hw_params = tas5720_hw_params,
+ .set_fmt = tas5720_set_dai_fmt,
+ .set_tdm_slot = tas5720_set_dai_tdm_slot,
+ .digital_mute = tas5720_mute,
+};
+
+/*
+ * TAS5720 DAI structure
+ *
+ * Note that we are advertising .playback.channels_max = 2 despite this being
+ * a mono amplifier. The reason is that some serial ports such as TI's McASP
+ * module have a minimum number of channels (2) that they can output.
+ * Advertising more channels than we have allows us to interface with such a
+ * serial port without any real negative side effects, as the TAS5720 will
+ * simply ignore any extra channel(s) aside from the one channel that is
+ * configured to be played back.
+ */
+static struct snd_soc_dai_driver tas5720_dai[] = {
+ {
+ .name = "tas5720-amplifier",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = TAS5720_RATES,
+ .formats = TAS5720_FORMATS,
+ },
+ .ops = &tas5720_speaker_dai_ops,
+ },
+};
+
+static int tas5720_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct tas5720_data *data;
+ int ret;
+ int i;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->tas5720_client = client;
+ data->regmap = devm_regmap_init_i2c(client, &tas5720_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ ret = PTR_ERR(data->regmap);
+ dev_err(dev, "failed to allocate register map: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(data->supplies); i++)
+ data->supplies[i].supply = tas5720_supply_names[i];
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(data->supplies),
+ data->supplies);
+ if (ret != 0) {
+ dev_err(dev, "failed to request supplies: %d\n", ret);
+ return ret;
+ }
+
+ dev_set_drvdata(dev, data);
+
+ ret = snd_soc_register_codec(&client->dev,
+ &soc_codec_dev_tas5720,
+ tas5720_dai, ARRAY_SIZE(tas5720_dai));
+ if (ret < 0) {
+ dev_err(dev, "failed to register codec: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tas5720_remove(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+
+ snd_soc_unregister_codec(dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id tas5720_id[] = {
+ { "tas5720", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tas5720_id);
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id tas5720_of_match[] = {
+ { .compatible = "ti,tas5720", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tas5720_of_match);
+#endif
+
+static struct i2c_driver tas5720_i2c_driver = {
+ .driver = {
+ .name = "tas5720",
+ .of_match_table = of_match_ptr(tas5720_of_match),
+ },
+ .probe = tas5720_probe,
+ .remove = tas5720_remove,
+ .id_table = tas5720_id,
+};
+
+module_i2c_driver(tas5720_i2c_driver);
+
+MODULE_AUTHOR("Andreas Dannenberg <dannenberg@ti.com>");
+MODULE_DESCRIPTION("TAS5720 Audio amplifier driver");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tas5720.h b/sound/soc/codecs/tas5720.h
new file mode 100644
index 000000000000..3d077c779b12
--- /dev/null
+++ b/sound/soc/codecs/tas5720.h
@@ -0,0 +1,90 @@
+/*
+ * tas5720.h - ALSA SoC Texas Instruments TAS5720 Mono Audio Amplifier
+ *
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Andreas Dannenberg <dannenberg@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __TAS5720_H__
+#define __TAS5720_H__
+
+/* Register Address Map */
+#define TAS5720_DEVICE_ID_REG 0x00
+#define TAS5720_POWER_CTRL_REG 0x01
+#define TAS5720_DIGITAL_CTRL1_REG 0x02
+#define TAS5720_DIGITAL_CTRL2_REG 0x03
+#define TAS5720_VOLUME_CTRL_REG 0x04
+#define TAS5720_ANALOG_CTRL_REG 0x06
+#define TAS5720_FAULT_REG 0x08
+#define TAS5720_DIGITAL_CLIP2_REG 0x10
+#define TAS5720_DIGITAL_CLIP1_REG 0x11
+#define TAS5720_MAX_REG TAS5720_DIGITAL_CLIP1_REG
+
+/* TAS5720_DEVICE_ID_REG */
+#define TAS5720_DEVICE_ID 0x01
+
+/* TAS5720_POWER_CTRL_REG */
+#define TAS5720_DIG_CLIP_MASK GENMASK(7, 2)
+#define TAS5720_SLEEP BIT(1)
+#define TAS5720_SDZ BIT(0)
+
+/* TAS5720_DIGITAL_CTRL1_REG */
+#define TAS5720_HPF_BYPASS BIT(7)
+#define TAS5720_TDM_CFG_SRC BIT(6)
+#define TAS5720_SSZ_DS BIT(3)
+#define TAS5720_SAIF_RIGHTJ_24BIT (0x0)
+#define TAS5720_SAIF_RIGHTJ_20BIT (0x1)
+#define TAS5720_SAIF_RIGHTJ_18BIT (0x2)
+#define TAS5720_SAIF_RIGHTJ_16BIT (0x3)
+#define TAS5720_SAIF_I2S (0x4)
+#define TAS5720_SAIF_LEFTJ (0x5)
+#define TAS5720_SAIF_FORMAT_MASK GENMASK(2, 0)
+
+/* TAS5720_DIGITAL_CTRL2_REG */
+#define TAS5720_MUTE BIT(4)
+#define TAS5720_TDM_SLOT_SEL_MASK GENMASK(2, 0)
+
+/* TAS5720_ANALOG_CTRL_REG */
+#define TAS5720_PWM_RATE_6_3_FSYNC (0x0 << 4)
+#define TAS5720_PWM_RATE_8_4_FSYNC (0x1 << 4)
+#define TAS5720_PWM_RATE_10_5_FSYNC (0x2 << 4)
+#define TAS5720_PWM_RATE_12_6_FSYNC (0x3 << 4)
+#define TAS5720_PWM_RATE_14_7_FSYNC (0x4 << 4)
+#define TAS5720_PWM_RATE_16_8_FSYNC (0x5 << 4)
+#define TAS5720_PWM_RATE_20_10_FSYNC (0x6 << 4)
+#define TAS5720_PWM_RATE_24_12_FSYNC (0x7 << 4)
+#define TAS5720_PWM_RATE_MASK GENMASK(6, 4)
+#define TAS5720_ANALOG_GAIN_19_2DBV (0x0 << 2)
+#define TAS5720_ANALOG_GAIN_20_7DBV (0x1 << 2)
+#define TAS5720_ANALOG_GAIN_23_5DBV (0x2 << 2)
+#define TAS5720_ANALOG_GAIN_26_3DBV (0x3 << 2)
+#define TAS5720_ANALOG_GAIN_MASK GENMASK(3, 2)
+#define TAS5720_ANALOG_GAIN_SHIFT (0x2)
+
+/* TAS5720_FAULT_REG */
+#define TAS5720_OC_THRESH_100PCT (0x0 << 4)
+#define TAS5720_OC_THRESH_75PCT (0x1 << 4)
+#define TAS5720_OC_THRESH_50PCT (0x2 << 4)
+#define TAS5720_OC_THRESH_25PCT (0x3 << 4)
+#define TAS5720_OC_THRESH_MASK GENMASK(5, 4)
+#define TAS5720_CLKE BIT(3)
+#define TAS5720_OCE BIT(2)
+#define TAS5720_DCE BIT(1)
+#define TAS5720_OTE BIT(0)
+#define TAS5720_FAULT_MASK GENMASK(3, 0)
+
+/* TAS5720_DIGITAL_CLIP1_REG */
+#define TAS5720_CLIP1_MASK GENMASK(7, 2)
+#define TAS5720_CLIP1_SHIFT (0x2)
+
+#endif /* __TAS5720_H__ */
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index ee4def4f819f..3c5e1df01c19 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -28,6 +28,7 @@
#include <linux/i2c.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
+#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
@@ -1280,10 +1281,19 @@ static const struct i2c_device_id aic31xx_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, aic31xx_i2c_id);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id aic31xx_acpi_match[] = {
+ { "10TI3100", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, aic31xx_acpi_match);
+#endif
+
static struct i2c_driver aic31xx_i2c_driver = {
.driver = {
.name = "tlv320aic31xx-codec",
.of_match_table = of_match_ptr(tlv320aic31xx_of_match),
+ .acpi_match_table = ACPI_PTR(aic31xx_acpi_match),
},
.probe = aic31xx_i2c_probe,
.remove = aic31xx_i2c_remove,
diff --git a/sound/soc/codecs/tlv320aic32x4-i2c.c b/sound/soc/codecs/tlv320aic32x4-i2c.c
new file mode 100644
index 000000000000..59606cf3008f
--- /dev/null
+++ b/sound/soc/codecs/tlv320aic32x4-i2c.c
@@ -0,0 +1,74 @@
+/*
+ * linux/sound/soc/codecs/tlv320aic32x4-i2c.c
+ *
+ * Copyright 2011 NW Digital Radio
+ *
+ * Author: Jeremy McDermond <nh6z@nh6z.net>
+ *
+ * Based on sound/soc/codecs/wm8974 and TI driver for kernel 2.6.27.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+
+#include "tlv320aic32x4.h"
+
+static int aic32x4_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+ struct regmap_config config;
+
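+ /*
+ * The shared aic32x4_regmap_config leaves the bus format unset; on I2C
+ * the device uses plain 8-bit register addresses and 8-bit values.
+ */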
+ config = aic32x4_regmap_config;
+ config.reg_bits = 8;
+ config.val_bits = 8;
+
+ regmap = devm_regmap_init_i2c(i2c, &config);
+ return aic32x4_probe(&i2c->dev, regmap);
+}
+
+static int aic32x4_i2c_remove(struct i2c_client *i2c)
+{
+ return aic32x4_remove(&i2c->dev);
+}
+
+static const struct i2c_device_id aic32x4_i2c_id[] = {
+ { "tlv320aic32x4", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, aic32x4_i2c_id);
+
+static const struct of_device_id aic32x4_of_id[] = {
+ { .compatible = "ti,tlv320aic32x4", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, aic32x4_of_id);
+
+static struct i2c_driver aic32x4_i2c_driver = {
+ .driver = {
+ .name = "tlv320aic32x4",
+ .of_match_table = aic32x4_of_id,
+ },
+ .probe = aic32x4_i2c_probe,
+ .remove = aic32x4_i2c_remove,
+ .id_table = aic32x4_i2c_id,
+};
+
+module_i2c_driver(aic32x4_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC TLV320AIC32x4 codec driver I2C");
+MODULE_AUTHOR("Jeremy McDermond <nh6z@nh6z.net>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tlv320aic32x4-spi.c b/sound/soc/codecs/tlv320aic32x4-spi.c
new file mode 100644
index 000000000000..724fcdd491b2
--- /dev/null
+++ b/sound/soc/codecs/tlv320aic32x4-spi.c
@@ -0,0 +1,76 @@
+/*
+ * linux/sound/soc/codecs/tlv320aic32x4-spi.c
+ *
+ * Copyright 2011 NW Digital Radio
+ *
+ * Author: Jeremy McDermond <nh6z@nh6z.net>
+ *
+ * Based on sound/soc/codecs/wm8974 and TI driver for kernel 2.6.27.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+
+#include "tlv320aic32x4.h"
+
+static int aic32x4_spi_probe(struct spi_device *spi)
+{
+ struct regmap *regmap;
+ struct regmap_config config;
+
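+ /*
+ * On SPI the register address occupies the upper 7 bits of the address
+ * byte and the LSB (the pad bit) doubles as the read flag, hence the
+ * read_flag_mask of 0x01.
+ */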
+ config = aic32x4_regmap_config;
+ config.reg_bits = 7;
+ config.pad_bits = 1;
+ config.val_bits = 8;
+ config.read_flag_mask = 0x01;
+
+ regmap = devm_regmap_init_spi(spi, &config);
+ return aic32x4_probe(&spi->dev, regmap);
+}
+
+static int aic32x4_spi_remove(struct spi_device *spi)
+{
+ return aic32x4_remove(&spi->dev);
+}
+
+static const struct spi_device_id aic32x4_spi_id[] = {
+ { "tlv320aic32x4", 0 },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, aic32x4_spi_id);
+
+static const struct of_device_id aic32x4_of_id[] = {
+ { .compatible = "ti,tlv320aic32x4", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, aic32x4_of_id);
+
+static struct spi_driver aic32x4_spi_driver = {
+ .driver = {
+ .name = "tlv320aic32x4",
+ .owner = THIS_MODULE,
+ .of_match_table = aic32x4_of_id,
+ },
+ .probe = aic32x4_spi_probe,
+ .remove = aic32x4_spi_remove,
+ .id_table = aic32x4_spi_id,
+};
+
+module_spi_driver(aic32x4_spi_driver);
+
+MODULE_DESCRIPTION("ASoC TLV320AIC32x4 codec driver SPI");
+MODULE_AUTHOR("Jeremy McDermond <nh6z@nh6z.net>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index f2d3191961e1..85d4978d0384 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -30,7 +30,6 @@
#include <linux/pm.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
-#include <linux/i2c.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/clk.h>
@@ -160,7 +159,10 @@ static const struct aic32x4_rate_divs aic32x4_divs[] = {
/* 48k rate */
{AIC32X4_FREQ_12000000, 48000, 1, 8, 1920, 128, 2, 8, 128, 2, 8, 4},
{AIC32X4_FREQ_24000000, 48000, 2, 8, 1920, 128, 8, 2, 64, 8, 4, 4},
- {AIC32X4_FREQ_25000000, 48000, 2, 7, 8643, 128, 8, 2, 64, 8, 4, 4}
+ {AIC32X4_FREQ_25000000, 48000, 2, 7, 8643, 128, 8, 2, 64, 8, 4, 4},
+
+ /* 96k rate */
+ {AIC32X4_FREQ_25000000, 96000, 2, 7, 8643, 64, 4, 4, 64, 4, 4, 1},
};
static const struct snd_kcontrol_new hpl_output_mixer_controls[] = {
@@ -181,16 +183,71 @@ static const struct snd_kcontrol_new lor_output_mixer_controls[] = {
SOC_DAPM_SINGLE("R_DAC Switch", AIC32X4_LORROUTE, 3, 1, 0),
};
-static const struct snd_kcontrol_new left_input_mixer_controls[] = {
- SOC_DAPM_SINGLE("IN1_L P Switch", AIC32X4_LMICPGAPIN, 6, 1, 0),
- SOC_DAPM_SINGLE("IN2_L P Switch", AIC32X4_LMICPGAPIN, 4, 1, 0),
- SOC_DAPM_SINGLE("IN3_L P Switch", AIC32X4_LMICPGAPIN, 2, 1, 0),
+static const char * const resistor_text[] = {
+ "Off", "10 kOhm", "20 kOhm", "40 kOhm",
};
-static const struct snd_kcontrol_new right_input_mixer_controls[] = {
- SOC_DAPM_SINGLE("IN1_R P Switch", AIC32X4_RMICPGAPIN, 6, 1, 0),
- SOC_DAPM_SINGLE("IN2_R P Switch", AIC32X4_RMICPGAPIN, 4, 1, 0),
- SOC_DAPM_SINGLE("IN3_R P Switch", AIC32X4_RMICPGAPIN, 2, 1, 0),
+/* Left mixer pins */
+static SOC_ENUM_SINGLE_DECL(in1l_lpga_p_enum, AIC32X4_LMICPGAPIN, 6, resistor_text);
+static SOC_ENUM_SINGLE_DECL(in2l_lpga_p_enum, AIC32X4_LMICPGAPIN, 4, resistor_text);
+static SOC_ENUM_SINGLE_DECL(in3l_lpga_p_enum, AIC32X4_LMICPGAPIN, 2, resistor_text);
+static SOC_ENUM_SINGLE_DECL(in1r_lpga_p_enum, AIC32X4_LMICPGAPIN, 0, resistor_text);
+
+static SOC_ENUM_SINGLE_DECL(cml_lpga_n_enum, AIC32X4_LMICPGANIN, 6, resistor_text);
+static SOC_ENUM_SINGLE_DECL(in2r_lpga_n_enum, AIC32X4_LMICPGANIN, 4, resistor_text);
+static SOC_ENUM_SINGLE_DECL(in3r_lpga_n_enum, AIC32X4_LMICPGANIN, 2, resistor_text);
+
+static const struct snd_kcontrol_new in1l_to_lmixer_controls[] = {
+ SOC_DAPM_ENUM("IN1_L L+ Switch", in1l_lpga_p_enum),
+};
+static const struct snd_kcontrol_new in2l_to_lmixer_controls[] = {
+ SOC_DAPM_ENUM("IN2_L L+ Switch", in2l_lpga_p_enum),
+};
+static const struct snd_kcontrol_new in3l_to_lmixer_controls[] = {
+ SOC_DAPM_ENUM("IN3_L L+ Switch", in3l_lpga_p_enum),
+};
+static const struct snd_kcontrol_new in1r_to_lmixer_controls[] = {
+ SOC_DAPM_ENUM("IN1_R L+ Switch", in1r_lpga_p_enum),
+};
+static const struct snd_kcontrol_new cml_to_lmixer_controls[] = {
+ SOC_DAPM_ENUM("CM_L L- Switch", cml_lpga_n_enum),
+};
+static const struct snd_kcontrol_new in2r_to_lmixer_controls[] = {
+ SOC_DAPM_ENUM("IN2_R L- Switch", in2r_lpga_n_enum),
+};
+static const struct snd_kcontrol_new in3r_to_lmixer_controls[] = {
+ SOC_DAPM_ENUM("IN3_R L- Switch", in3r_lpga_n_enum),
+};
+
+/* Right mixer pins */
+static SOC_ENUM_SINGLE_DECL(in1r_rpga_p_enum, AIC32X4_RMICPGAPIN, 6, resistor_text);
+static SOC_ENUM_SINGLE_DECL(in2r_rpga_p_enum, AIC32X4_RMICPGAPIN, 4, resistor_text);
+static SOC_ENUM_SINGLE_DECL(in3r_rpga_p_enum, AIC32X4_RMICPGAPIN, 2, resistor_text);
+static SOC_ENUM_SINGLE_DECL(in2l_rpga_p_enum, AIC32X4_RMICPGAPIN, 0, resistor_text);
+static SOC_ENUM_SINGLE_DECL(cmr_rpga_n_enum, AIC32X4_RMICPGANIN, 6, resistor_text);
+static SOC_ENUM_SINGLE_DECL(in1l_rpga_n_enum, AIC32X4_RMICPGANIN, 4, resistor_text);
+static SOC_ENUM_SINGLE_DECL(in3l_rpga_n_enum, AIC32X4_RMICPGANIN, 2, resistor_text);
+
+static const struct snd_kcontrol_new in1r_to_rmixer_controls[] = {
+ SOC_DAPM_ENUM("IN1_R R+ Switch", in1r_rpga_p_enum),
+};
+static const struct snd_kcontrol_new in2r_to_rmixer_controls[] = {
+ SOC_DAPM_ENUM("IN2_R R+ Switch", in2r_rpga_p_enum),
+};
+static const struct snd_kcontrol_new in3r_to_rmixer_controls[] = {
+ SOC_DAPM_ENUM("IN3_R R+ Switch", in3r_rpga_p_enum),
+};
+static const struct snd_kcontrol_new in2l_to_rmixer_controls[] = {
+ SOC_DAPM_ENUM("IN2_L R+ Switch", in2l_rpga_p_enum),
+};
+static const struct snd_kcontrol_new cmr_to_rmixer_controls[] = {
+ SOC_DAPM_ENUM("CM_R R- Switch", cmr_rpga_n_enum),
+};
+static const struct snd_kcontrol_new in1l_to_rmixer_controls[] = {
+ SOC_DAPM_ENUM("IN1_L R- Switch", in1l_rpga_n_enum),
+};
+static const struct snd_kcontrol_new in3l_to_rmixer_controls[] = {
+ SOC_DAPM_ENUM("IN3_L R- Switch", in3l_rpga_n_enum),
};
static const struct snd_soc_dapm_widget aic32x4_dapm_widgets[] = {
@@ -214,14 +271,39 @@ static const struct snd_soc_dapm_widget aic32x4_dapm_widgets[] = {
&lor_output_mixer_controls[0],
ARRAY_SIZE(lor_output_mixer_controls)),
SND_SOC_DAPM_PGA("LOR Power", AIC32X4_OUTPWRCTL, 2, 0, NULL, 0),
- SND_SOC_DAPM_MIXER("Left Input Mixer", SND_SOC_NOPM, 0, 0,
- &left_input_mixer_controls[0],
- ARRAY_SIZE(left_input_mixer_controls)),
- SND_SOC_DAPM_MIXER("Right Input Mixer", SND_SOC_NOPM, 0, 0,
- &right_input_mixer_controls[0],
- ARRAY_SIZE(right_input_mixer_controls)),
- SND_SOC_DAPM_ADC("Left ADC", "Left Capture", AIC32X4_ADCSETUP, 7, 0),
+
SND_SOC_DAPM_ADC("Right ADC", "Right Capture", AIC32X4_ADCSETUP, 6, 0),
+ SND_SOC_DAPM_MUX("IN1_R to Right Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
+ in1r_to_rmixer_controls),
+ SND_SOC_DAPM_MUX("IN2_R to Right Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
+ in2r_to_rmixer_controls),
+ SND_SOC_DAPM_MUX("IN3_R to Right Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
+ in3r_to_rmixer_controls),
+ SND_SOC_DAPM_MUX("IN2_L to Right Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
+ in2l_to_rmixer_controls),
+ SND_SOC_DAPM_MUX("CM_R to Right Mixer Negative Resistor", SND_SOC_NOPM, 0, 0,
+ cmr_to_rmixer_controls),
+ SND_SOC_DAPM_MUX("IN1_L to Right Mixer Negative Resistor", SND_SOC_NOPM, 0, 0,
+ in1l_to_rmixer_controls),
+ SND_SOC_DAPM_MUX("IN3_L to Right Mixer Negative Resistor", SND_SOC_NOPM, 0, 0,
+ in3l_to_rmixer_controls),
+
+ SND_SOC_DAPM_ADC("Left ADC", "Left Capture", AIC32X4_ADCSETUP, 7, 0),
+ SND_SOC_DAPM_MUX("IN1_L to Left Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
+ in1l_to_lmixer_controls),
+ SND_SOC_DAPM_MUX("IN2_L to Left Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
+ in2l_to_lmixer_controls),
+ SND_SOC_DAPM_MUX("IN3_L to Left Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
+ in3l_to_lmixer_controls),
+ SND_SOC_DAPM_MUX("IN1_R to Left Mixer Positive Resistor", SND_SOC_NOPM, 0, 0,
+ in1r_to_lmixer_controls),
+ SND_SOC_DAPM_MUX("CM_L to Left Mixer Negative Resistor", SND_SOC_NOPM, 0, 0,
+ cml_to_lmixer_controls),
+ SND_SOC_DAPM_MUX("IN2_R to Left Mixer Negative Resistor", SND_SOC_NOPM, 0, 0,
+ in2r_to_lmixer_controls),
+ SND_SOC_DAPM_MUX("IN3_R to Left Mixer Negative Resistor", SND_SOC_NOPM, 0, 0,
+ in3r_to_lmixer_controls),
+
SND_SOC_DAPM_MICBIAS("Mic Bias", AIC32X4_MICBIAS, 6, 0),
SND_SOC_DAPM_OUTPUT("HPL"),
@@ -261,19 +343,77 @@ static const struct snd_soc_dapm_route aic32x4_dapm_routes[] = {
{"LOR Power", NULL, "LOR Output Mixer"},
{"LOR", NULL, "LOR Power"},
- /* Left input */
- {"Left Input Mixer", "IN1_L P Switch", "IN1_L"},
- {"Left Input Mixer", "IN2_L P Switch", "IN2_L"},
- {"Left Input Mixer", "IN3_L P Switch", "IN3_L"},
-
- {"Left ADC", NULL, "Left Input Mixer"},
-
/* Right Input */
- {"Right Input Mixer", "IN1_R P Switch", "IN1_R"},
- {"Right Input Mixer", "IN2_R P Switch", "IN2_R"},
- {"Right Input Mixer", "IN3_R P Switch", "IN3_R"},
-
- {"Right ADC", NULL, "Right Input Mixer"},
+ {"Right ADC", NULL, "IN1_R to Right Mixer Positive Resistor"},
+ {"IN1_R to Right Mixer Positive Resistor", "10 kOhm", "IN1_R"},
+ {"IN1_R to Right Mixer Positive Resistor", "20 kOhm", "IN1_R"},
+ {"IN1_R to Right Mixer Positive Resistor", "40 kOhm", "IN1_R"},
+
+ {"Right ADC", NULL, "IN2_R to Right Mixer Positive Resistor"},
+ {"IN2_R to Right Mixer Positive Resistor", "10 kOhm", "IN2_R"},
+ {"IN2_R to Right Mixer Positive Resistor", "20 kOhm", "IN2_R"},
+ {"IN2_R to Right Mixer Positive Resistor", "40 kOhm", "IN2_R"},
+
+ {"Right ADC", NULL, "IN3_R to Right Mixer Positive Resistor"},
+ {"IN3_R to Right Mixer Positive Resistor", "10 kOhm", "IN3_R"},
+ {"IN3_R to Right Mixer Positive Resistor", "20 kOhm", "IN3_R"},
+ {"IN3_R to Right Mixer Positive Resistor", "40 kOhm", "IN3_R"},
+
+ {"Right ADC", NULL, "IN2_L to Right Mixer Positive Resistor"},
+ {"IN2_L to Right Mixer Positive Resistor", "10 kOhm", "IN2_L"},
+ {"IN2_L to Right Mixer Positive Resistor", "20 kOhm", "IN2_L"},
+ {"IN2_L to Right Mixer Positive Resistor", "40 kOhm", "IN2_L"},
+
+ {"Right ADC", NULL, "CM_R to Right Mixer Negative Resistor"},
+ {"CM_R to Right Mixer Negative Resistor", "10 kOhm", "CM_R"},
+ {"CM_R to Right Mixer Negative Resistor", "20 kOhm", "CM_R"},
+ {"CM_R to Right Mixer Negative Resistor", "40 kOhm", "CM_R"},
+
+ {"Right ADC", NULL, "IN1_L to Right Mixer Negative Resistor"},
+ {"IN1_L to Right Mixer Negative Resistor", "10 kOhm", "IN1_L"},
+ {"IN1_L to Right Mixer Negative Resistor", "20 kOhm", "IN1_L"},
+ {"IN1_L to Right Mixer Negative Resistor", "40 kOhm", "IN1_L"},
+
+ {"Right ADC", NULL, "IN3_L to Right Mixer Negative Resistor"},
+ {"IN3_L to Right Mixer Negative Resistor", "10 kOhm", "IN3_L"},
+ {"IN3_L to Right Mixer Negative Resistor", "20 kOhm", "IN3_L"},
+ {"IN3_L to Right Mixer Negative Resistor", "40 kOhm", "IN3_L"},
+
+ /* Left Input */
+ {"Left ADC", NULL, "IN1_L to Left Mixer Positive Resistor"},
+ {"IN1_L to Left Mixer Positive Resistor", "10 kOhm", "IN1_L"},
+ {"IN1_L to Left Mixer Positive Resistor", "20 kOhm", "IN1_L"},
+ {"IN1_L to Left Mixer Positive Resistor", "40 kOhm", "IN1_L"},
+
+ {"Left ADC", NULL, "IN2_L to Left Mixer Positive Resistor"},
+ {"IN2_L to Left Mixer Positive Resistor", "10 kOhm", "IN2_L"},
+ {"IN2_L to Left Mixer Positive Resistor", "20 kOhm", "IN2_L"},
+ {"IN2_L to Left Mixer Positive Resistor", "40 kOhm", "IN2_L"},
+
+ {"Left ADC", NULL, "IN3_L to Left Mixer Positive Resistor"},
+ {"IN3_L to Left Mixer Positive Resistor", "10 kOhm", "IN3_L"},
+ {"IN3_L to Left Mixer Positive Resistor", "20 kOhm", "IN3_L"},
+ {"IN3_L to Left Mixer Positive Resistor", "40 kOhm", "IN3_L"},
+
+ {"Left ADC", NULL, "IN1_R to Left Mixer Positive Resistor"},
+ {"IN1_R to Left Mixer Positive Resistor", "10 kOhm", "IN1_R"},
+ {"IN1_R to Left Mixer Positive Resistor", "20 kOhm", "IN1_R"},
+ {"IN1_R to Left Mixer Positive Resistor", "40 kOhm", "IN1_R"},
+
+ {"Left ADC", NULL, "CM_L to Left Mixer Negative Resistor"},
+ {"CM_L to Left Mixer Negative Resistor", "10 kOhm", "CM_L"},
+ {"CM_L to Left Mixer Negative Resistor", "20 kOhm", "CM_L"},
+ {"CM_L to Left Mixer Negative Resistor", "40 kOhm", "CM_L"},
+
+ {"Left ADC", NULL, "IN2_R to Left Mixer Negative Resistor"},
+ {"IN2_R to Left Mixer Negative Resistor", "10 kOhm", "IN2_R"},
+ {"IN2_R to Left Mixer Negative Resistor", "20 kOhm", "IN2_R"},
+ {"IN2_R to Left Mixer Negative Resistor", "40 kOhm", "IN2_R"},
+
+ {"Left ADC", NULL, "IN3_R to Left Mixer Negative Resistor"},
+ {"IN3_R to Left Mixer Negative Resistor", "10 kOhm", "IN3_R"},
+ {"IN3_R to Left Mixer Negative Resistor", "20 kOhm", "IN3_R"},
+ {"IN3_R to Left Mixer Negative Resistor", "40 kOhm", "IN3_R"},
};
static const struct regmap_range_cfg aic32x4_regmap_pages[] = {
@@ -287,14 +427,12 @@ static const struct regmap_range_cfg aic32x4_regmap_pages[] = {
},
};
-static const struct regmap_config aic32x4_regmap = {
- .reg_bits = 8,
- .val_bits = 8,
-
+const struct regmap_config aic32x4_regmap_config = {
.max_register = AIC32X4_RMICPGAVOL,
.ranges = aic32x4_regmap_pages,
.num_ranges = ARRAY_SIZE(aic32x4_regmap_pages),
};
+EXPORT_SYMBOL(aic32x4_regmap_config);
static inline int aic32x4_get_divs(int mclk, int rate)
{
@@ -567,7 +705,7 @@ static int aic32x4_set_bias_level(struct snd_soc_codec *codec,
return 0;
}
-#define AIC32X4_RATES SNDRV_PCM_RATE_8000_48000
+#define AIC32X4_RATES SNDRV_PCM_RATE_8000_96000
#define AIC32X4_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE \
| SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S32_LE)
@@ -596,7 +734,7 @@ static struct snd_soc_dai_driver aic32x4_dai = {
.symmetric_rates = 1,
};
-static int aic32x4_probe(struct snd_soc_codec *codec)
+static int aic32x4_codec_probe(struct snd_soc_codec *codec)
{
struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
u32 tmp_reg;
@@ -655,7 +793,7 @@ static int aic32x4_probe(struct snd_soc_codec *codec)
}
static struct snd_soc_codec_driver soc_codec_dev_aic32x4 = {
- .probe = aic32x4_probe,
+ .probe = aic32x4_codec_probe,
.set_bias_level = aic32x4_set_bias_level,
.suspend_bias_off = true,
@@ -777,24 +915,22 @@ error_ldo:
return ret;
}
-static int aic32x4_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+int aic32x4_probe(struct device *dev, struct regmap *regmap)
{
- struct aic32x4_pdata *pdata = i2c->dev.platform_data;
struct aic32x4_priv *aic32x4;
- struct device_node *np = i2c->dev.of_node;
+ struct aic32x4_pdata *pdata = dev->platform_data;
+ struct device_node *np = dev->of_node;
int ret;
- aic32x4 = devm_kzalloc(&i2c->dev, sizeof(struct aic32x4_priv),
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ aic32x4 = devm_kzalloc(dev, sizeof(struct aic32x4_priv),
GFP_KERNEL);
if (aic32x4 == NULL)
return -ENOMEM;
- aic32x4->regmap = devm_regmap_init_i2c(i2c, &aic32x4_regmap);
- if (IS_ERR(aic32x4->regmap))
- return PTR_ERR(aic32x4->regmap);
-
- i2c_set_clientdata(i2c, aic32x4);
+ dev_set_drvdata(dev, aic32x4);
if (pdata) {
aic32x4->power_cfg = pdata->power_cfg;
@@ -804,7 +940,7 @@ static int aic32x4_i2c_probe(struct i2c_client *i2c,
} else if (np) {
ret = aic32x4_parse_dt(aic32x4, np);
if (ret) {
- dev_err(&i2c->dev, "Failed to parse DT node\n");
+ dev_err(dev, "Failed to parse DT node\n");
return ret;
}
} else {
@@ -814,71 +950,48 @@ static int aic32x4_i2c_probe(struct i2c_client *i2c,
aic32x4->rstn_gpio = -1;
}
- aic32x4->mclk = devm_clk_get(&i2c->dev, "mclk");
+ aic32x4->mclk = devm_clk_get(dev, "mclk");
if (IS_ERR(aic32x4->mclk)) {
- dev_err(&i2c->dev, "Failed getting the mclk. The current implementation does not support the usage of this codec without mclk\n");
+ dev_err(dev, "Failed getting the mclk. The current implementation does not support the usage of this codec without mclk\n");
return PTR_ERR(aic32x4->mclk);
}
if (gpio_is_valid(aic32x4->rstn_gpio)) {
- ret = devm_gpio_request_one(&i2c->dev, aic32x4->rstn_gpio,
+ ret = devm_gpio_request_one(dev, aic32x4->rstn_gpio,
GPIOF_OUT_INIT_LOW, "tlv320aic32x4 rstn");
if (ret != 0)
return ret;
}
- ret = aic32x4_setup_regulators(&i2c->dev, aic32x4);
+ ret = aic32x4_setup_regulators(dev, aic32x4);
if (ret) {
- dev_err(&i2c->dev, "Failed to setup regulators\n");
+ dev_err(dev, "Failed to setup regulators\n");
return ret;
}
- ret = snd_soc_register_codec(&i2c->dev,
+ ret = snd_soc_register_codec(dev,
&soc_codec_dev_aic32x4, &aic32x4_dai, 1);
if (ret) {
- dev_err(&i2c->dev, "Failed to register codec\n");
+ dev_err(dev, "Failed to register codec\n");
aic32x4_disable_regulators(aic32x4);
return ret;
}
- i2c_set_clientdata(i2c, aic32x4);
-
return 0;
}
+EXPORT_SYMBOL(aic32x4_probe);
-static int aic32x4_i2c_remove(struct i2c_client *client)
+int aic32x4_remove(struct device *dev)
{
- struct aic32x4_priv *aic32x4 = i2c_get_clientdata(client);
+ struct aic32x4_priv *aic32x4 = dev_get_drvdata(dev);
aic32x4_disable_regulators(aic32x4);
- snd_soc_unregister_codec(&client->dev);
+ snd_soc_unregister_codec(dev);
+
return 0;
}
-
-static const struct i2c_device_id aic32x4_i2c_id[] = {
- { "tlv320aic32x4", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, aic32x4_i2c_id);
-
-static const struct of_device_id aic32x4_of_id[] = {
- { .compatible = "ti,tlv320aic32x4", },
- { /* senitel */ }
-};
-MODULE_DEVICE_TABLE(of, aic32x4_of_id);
-
-static struct i2c_driver aic32x4_i2c_driver = {
- .driver = {
- .name = "tlv320aic32x4",
- .of_match_table = aic32x4_of_id,
- },
- .probe = aic32x4_i2c_probe,
- .remove = aic32x4_i2c_remove,
- .id_table = aic32x4_i2c_id,
-};
-
-module_i2c_driver(aic32x4_i2c_driver);
+EXPORT_SYMBOL(aic32x4_remove);
MODULE_DESCRIPTION("ASoC tlv320aic32x4 codec driver");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
diff --git a/sound/soc/codecs/tlv320aic32x4.h b/sound/soc/codecs/tlv320aic32x4.h
index 995f033a855d..a197dd51addc 100644
--- a/sound/soc/codecs/tlv320aic32x4.h
+++ b/sound/soc/codecs/tlv320aic32x4.h
@@ -10,6 +10,13 @@
#ifndef _TLV320AIC32X4_H
#define _TLV320AIC32X4_H
+struct device;
+struct regmap_config;
+
+extern const struct regmap_config aic32x4_regmap_config;
+int aic32x4_probe(struct device *dev, struct regmap *regmap);
+int aic32x4_remove(struct device *dev);
+
/* tlv320aic32x4 register space (in decimal to match datasheet) */
#define AIC32X4_PAGE1 128
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index bc3de2e844e6..1f7081043566 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -824,7 +824,7 @@ static int twl6040_set_bias_level(struct snd_soc_codec *codec,
{
struct twl6040 *twl6040 = codec->control_data;
struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
- int ret;
+ int ret = 0;
switch (level) {
case SND_SOC_BIAS_ON:
@@ -832,12 +832,16 @@ static int twl6040_set_bias_level(struct snd_soc_codec *codec,
case SND_SOC_BIAS_PREPARE:
break;
case SND_SOC_BIAS_STANDBY:
- if (priv->codec_powered)
+ if (priv->codec_powered) {
+ /* Select low power PLL in standby */
+ ret = twl6040_set_pll(twl6040, TWL6040_SYSCLK_SEL_LPPLL,
+ 32768, 19200000);
break;
+ }
ret = twl6040_power(twl6040, 1);
if (ret)
- return ret;
+ break;
priv->codec_powered = 1;
@@ -853,7 +857,7 @@ static int twl6040_set_bias_level(struct snd_soc_codec *codec,
break;
}
- return 0;
+ return ret;
}
static int twl6040_startup(struct snd_pcm_substream *substream,
@@ -983,9 +987,9 @@ static void twl6040_mute_path(struct snd_soc_codec *codec, enum twl6040_dai_id i
if (mute) {
/* Power down drivers and DACs */
hflctl &= ~(TWL6040_HFDACENA | TWL6040_HFPGAENA |
- TWL6040_HFDRVENA);
+ TWL6040_HFDRVENA | TWL6040_HFSWENA);
hfrctl &= ~(TWL6040_HFDACENA | TWL6040_HFPGAENA |
- TWL6040_HFDRVENA);
+ TWL6040_HFDRVENA | TWL6040_HFSWENA);
}
twl6040_reg_write(twl6040, TWL6040_REG_HFLCTL, hflctl);
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index fc164d69a557..f3109da24769 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -3793,9 +3793,8 @@ static int wm8962_runtime_resume(struct device *dev)
ret = regulator_bulk_enable(ARRAY_SIZE(wm8962->supplies),
wm8962->supplies);
if (ret != 0) {
- dev_err(dev,
- "Failed to enable supplies: %d\n", ret);
- return ret;
+ dev_err(dev, "Failed to enable supplies: %d\n", ret);
+ goto disable_clock;
}
regcache_cache_only(wm8962->regmap, false);
@@ -3833,6 +3832,10 @@ static int wm8962_runtime_resume(struct device *dev)
msleep(5);
return 0;
+
+disable_clock:
+ clk_disable_unprepare(wm8962->pdata.mclk);
+ return ret;
}
static int wm8962_runtime_suspend(struct device *dev)
diff --git a/sound/soc/codecs/wm8962.h b/sound/soc/codecs/wm8962.h
index 910aafd09d21..e63a318a3015 100644
--- a/sound/soc/codecs/wm8962.h
+++ b/sound/soc/codecs/wm8962.h
@@ -16,9 +16,9 @@
#include <asm/types.h>
#include <sound/soc.h>
-#define WM8962_SYSCLK_MCLK 1
-#define WM8962_SYSCLK_FLL 2
-#define WM8962_SYSCLK_PLL3 3
+#define WM8962_SYSCLK_MCLK 0
+#define WM8962_SYSCLK_FLL 1
+#define WM8962_SYSCLK_PLL3 2
#define WM8962_FLL 1
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 2389ab47e25f..466492b7d4f5 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -643,6 +643,7 @@ MODULE_DEVICE_TABLE(of, asoc_simple_of_match);
static struct platform_driver asoc_simple_card = {
.driver = {
.name = "asoc-simple-card",
+ .pm = &snd_soc_pm_ops,
.of_match_table = asoc_simple_of_match,
},
.probe = asoc_simple_card_probe,
diff --git a/sound/soc/kirkwood/Kconfig b/sound/soc/kirkwood/Kconfig
index 132bb83f8e99..bc3c7b5ac752 100644
--- a/sound/soc/kirkwood/Kconfig
+++ b/sound/soc/kirkwood/Kconfig
@@ -1,6 +1,7 @@
config SND_KIRKWOOD_SOC
tristate "SoC Audio for the Marvell Kirkwood and Dove chips"
depends on ARCH_DOVE || ARCH_MVEBU || COMPILE_TEST
+ depends on HAS_DMA
help
Say Y or M if you want to add support for codecs attached to
the Kirkwood I2S interface. You will also need to select the
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index f7e789e97fbc..3abf51c07851 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -43,6 +43,7 @@ config SND_SOC_MT8173_RT5650_RT5676
depends on SND_SOC_MEDIATEK && I2C
select SND_SOC_RT5645
select SND_SOC_RT5677
+ select SND_SOC_HDMI_CODEC
help
This adds ASoC driver for Mediatek MT8173 boards
with the RT5650 and RT5676 codecs.
diff --git a/sound/soc/mediatek/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
index 5c4c58c69c51..bb593926c62d 100644
--- a/sound/soc/mediatek/mt8173-rt5650-rt5676.c
+++ b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
@@ -134,7 +134,9 @@ static struct snd_soc_dai_link_component mt8173_rt5650_rt5676_codecs[] = {
enum {
DAI_LINK_PLAYBACK,
DAI_LINK_CAPTURE,
+ DAI_LINK_HDMI,
DAI_LINK_CODEC_I2S,
+ DAI_LINK_HDMI_I2S,
DAI_LINK_INTERCODEC
};
@@ -161,6 +163,16 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
.dynamic = 1,
.dpcm_capture = 1,
},
+ [DAI_LINK_HDMI] = {
+ .name = "HDMI",
+ .stream_name = "HDMI PCM",
+ .cpu_dai_name = "HDMI",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dynamic = 1,
+ .dpcm_playback = 1,
+ },
/* Back End DAI links */
[DAI_LINK_CODEC_I2S] = {
@@ -177,6 +189,13 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
.dpcm_playback = 1,
.dpcm_capture = 1,
},
+ [DAI_LINK_HDMI_I2S] = {
+ .name = "HDMI BE",
+ .cpu_dai_name = "HDMIO",
+ .no_pcm = 1,
+ .codec_dai_name = "i2s-hifi",
+ .dpcm_playback = 1,
+ },
/* rt5676 <-> rt5650 intercodec link: Sets rt5676 I2S2 as master */
[DAI_LINK_INTERCODEC] = {
.name = "rt5650_rt5676 intercodec",
@@ -251,6 +270,14 @@ static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev)
mt8173_rt5650_rt5676_dais[DAI_LINK_INTERCODEC].codec_of_node =
mt8173_rt5650_rt5676_codecs[1].of_node;
+ mt8173_rt5650_rt5676_dais[DAI_LINK_HDMI_I2S].codec_of_node =
+ of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 2);
+ if (!mt8173_rt5650_rt5676_dais[DAI_LINK_HDMI_I2S].codec_of_node) {
+ dev_err(&pdev->dev,
+ "Property 'audio-codec' missing or invalid\n");
+ return -EINVAL;
+ }
+
card->dev = &pdev->dev;
platform_set_drvdata(pdev, card);
diff --git a/sound/soc/mediatek/mt8173-rt5650.c b/sound/soc/mediatek/mt8173-rt5650.c
index bb09bb1b7f1c..a27a6673dbe3 100644
--- a/sound/soc/mediatek/mt8173-rt5650.c
+++ b/sound/soc/mediatek/mt8173-rt5650.c
@@ -85,12 +85,29 @@ static int mt8173_rt5650_init(struct snd_soc_pcm_runtime *runtime)
{
struct snd_soc_card *card = runtime->card;
struct snd_soc_codec *codec = runtime->codec_dais[0]->codec;
+ const char *codec_capture_dai = runtime->codec_dais[1]->name;
int ret;
rt5645_sel_asrc_clk_src(codec,
- RT5645_DA_STEREO_FILTER |
- RT5645_AD_STEREO_FILTER,
+ RT5645_DA_STEREO_FILTER,
RT5645_CLK_SEL_I2S1_ASRC);
+
+ if (!strcmp(codec_capture_dai, "rt5645-aif1")) {
+ rt5645_sel_asrc_clk_src(codec,
+ RT5645_AD_STEREO_FILTER,
+ RT5645_CLK_SEL_I2S1_ASRC);
+ } else if (!strcmp(codec_capture_dai, "rt5645-aif2")) {
+ rt5645_sel_asrc_clk_src(codec,
+ RT5645_AD_STEREO_FILTER,
+ RT5645_CLK_SEL_I2S2_ASRC);
+ } else {
+ dev_warn(card->dev,
+ "Only one dai codec found in DTS, enabled rt5645 AD filter\n");
+ rt5645_sel_asrc_clk_src(codec,
+ RT5645_AD_STEREO_FILTER,
+ RT5645_CLK_SEL_I2S1_ASRC);
+ }
+
/* enable jack detection */
ret = snd_soc_card_jack_new(card, "Headset Jack",
SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
@@ -110,6 +127,11 @@ static int mt8173_rt5650_init(struct snd_soc_pcm_runtime *runtime)
static struct snd_soc_dai_link_component mt8173_rt5650_codecs[] = {
{
+ /* Playback */
+ .dai_name = "rt5645-aif1",
+ },
+ {
+ /* Capture */
.dai_name = "rt5645-aif1",
},
};
@@ -149,7 +171,7 @@ static struct snd_soc_dai_link mt8173_rt5650_dais[] = {
.cpu_dai_name = "I2S",
.no_pcm = 1,
.codecs = mt8173_rt5650_codecs,
- .num_codecs = 1,
+ .num_codecs = 2,
.init = mt8173_rt5650_init,
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS,
@@ -177,6 +199,8 @@ static int mt8173_rt5650_dev_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &mt8173_rt5650_card;
struct device_node *platform_node;
+ struct device_node *np;
+ const char *codec_capture_dai;
int i, ret;
platform_node = of_parse_phandle(pdev->dev.of_node,
@@ -199,6 +223,26 @@ static int mt8173_rt5650_dev_probe(struct platform_device *pdev)
"Property 'audio-codec' missing or invalid\n");
return -EINVAL;
}
+ mt8173_rt5650_codecs[1].of_node = mt8173_rt5650_codecs[0].of_node;
+
+ if (of_find_node_by_name(platform_node, "codec-capture")) {
+ np = of_get_child_by_name(pdev->dev.of_node, "codec-capture");
+ if (!np) {
+ dev_err(&pdev->dev,
+ "%s: Can't find codec-capture DT node\n",
+ __func__);
+ return -EINVAL;
+ }
+ ret = snd_soc_of_get_dai_name(np, &codec_capture_dai);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "%s codec_capture_dai name fail %d\n",
+ __func__, ret);
+ return ret;
+ }
+ mt8173_rt5650_codecs[1].dai_name = codec_capture_dai;
+ }
+
card->dev = &pdev->dev;
platform_set_drvdata(pdev, card);
diff --git a/sound/soc/mediatek/mtk-afe-pcm.c b/sound/soc/mediatek/mtk-afe-pcm.c
index f1c58a2c12fb..2b5df2ef51a3 100644
--- a/sound/soc/mediatek/mtk-afe-pcm.c
+++ b/sound/soc/mediatek/mtk-afe-pcm.c
@@ -123,6 +123,7 @@
#define AFE_TDM_CON1_WLEN_32BIT (0x2 << 8)
#define AFE_TDM_CON1_MSB_ALIGNED (0x1 << 4)
#define AFE_TDM_CON1_1_BCK_DELAY (0x1 << 3)
+#define AFE_TDM_CON1_LRCK_INV (0x1 << 2)
#define AFE_TDM_CON1_BCK_INV (0x1 << 1)
#define AFE_TDM_CON1_EN (0x1 << 0)
@@ -449,6 +450,7 @@ static int mtk_afe_hdmi_prepare(struct snd_pcm_substream *substream,
runtime->rate * runtime->channels * 32);
val = AFE_TDM_CON1_BCK_INV |
+ AFE_TDM_CON1_LRCK_INV |
AFE_TDM_CON1_1_BCK_DELAY |
AFE_TDM_CON1_MSB_ALIGNED | /* I2S mode */
AFE_TDM_CON1_WLEN_32BIT |
diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c
index c7563e230c7d..4a16e778966b 100644
--- a/sound/soc/omap/mcbsp.c
+++ b/sound/soc/omap/mcbsp.c
@@ -260,6 +260,10 @@ static void omap_st_on(struct omap_mcbsp *mcbsp)
if (mcbsp->pdata->enable_st_clock)
mcbsp->pdata->enable_st_clock(mcbsp->id, 1);
+ /* Disable Sidetone clock auto-gating for normal operation */
+ w = MCBSP_ST_READ(mcbsp, SYSCONFIG);
+ MCBSP_ST_WRITE(mcbsp, SYSCONFIG, w & ~(ST_AUTOIDLE));
+
/* Enable McBSP Sidetone */
w = MCBSP_READ(mcbsp, SSELCR);
MCBSP_WRITE(mcbsp, SSELCR, w | SIDETONEEN);
@@ -279,6 +283,10 @@ static void omap_st_off(struct omap_mcbsp *mcbsp)
w = MCBSP_READ(mcbsp, SSELCR);
MCBSP_WRITE(mcbsp, SSELCR, w & ~(SIDETONEEN));
+ /* Enable Sidetone clock auto-gating to reduce power consumption */
+ w = MCBSP_ST_READ(mcbsp, SYSCONFIG);
+ MCBSP_ST_WRITE(mcbsp, SYSCONFIG, w | ST_AUTOIDLE);
+
if (mcbsp->pdata->enable_st_clock)
mcbsp->pdata->enable_st_clock(mcbsp->id, 0);
}
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
index 99381a27295b..a84f677234f0 100644
--- a/sound/soc/omap/omap-pcm.c
+++ b/sound/soc/omap/omap-pcm.c
@@ -82,6 +82,8 @@ static int omap_pcm_hw_params(struct snd_pcm_substream *substream,
struct dma_chan *chan;
int err = 0;
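+ /* Start from a zeroed config so no stale fields reach the dmaengine driver */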
+ memset(&config, 0x00, sizeof(config));
+
dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
/* return if this is a bufferless transfer e.g.
diff --git a/sound/soc/pxa/brownstone.c b/sound/soc/pxa/brownstone.c
index ec522e94b0e2..b6cb9950f05d 100644
--- a/sound/soc/pxa/brownstone.c
+++ b/sound/soc/pxa/brownstone.c
@@ -133,3 +133,4 @@ module_platform_driver(mmp_driver);
MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
MODULE_DESCRIPTION("ALSA SoC Brownstone");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:brownstone-audio");
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index 5c8f9db50a47..d1661fa6ee08 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -207,3 +207,4 @@ module_platform_driver(mioa701_wm9713_driver);
MODULE_AUTHOR("Robert Jarzmik (rjarzmik@free.fr)");
MODULE_DESCRIPTION("ALSA SoC WM9713 MIO A701");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mioa701-wm9713");
diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
index 51e790d006f5..96df9b2d8fc4 100644
--- a/sound/soc/pxa/mmp-pcm.c
+++ b/sound/soc/pxa/mmp-pcm.c
@@ -248,3 +248,4 @@ module_platform_driver(mmp_pcm_driver);
MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
MODULE_DESCRIPTION("MMP Soc Audio DMA module");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mmp-pcm-audio");
diff --git a/sound/soc/pxa/mmp-sspa.c b/sound/soc/pxa/mmp-sspa.c
index eca60c29791a..ca8b23f8c525 100644
--- a/sound/soc/pxa/mmp-sspa.c
+++ b/sound/soc/pxa/mmp-sspa.c
@@ -482,3 +482,4 @@ module_platform_driver(asoc_mmp_sspa_driver);
MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
MODULE_DESCRIPTION("MMP SSPA SoC Interface");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mmp-sspa-dai");
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index 4e74d9573f03..bcc81e920a67 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -161,3 +161,4 @@ module_platform_driver(palm27x_wm9712_driver);
MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("ALSA SoC Palm T|X, T5 and LifeDrive");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:palm27x-asoc");
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index da03fad1b9cd..3cad990dad2c 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -833,3 +833,4 @@ module_platform_driver(asoc_ssp_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("PXA SSP/PCM SoC Interface");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa-ssp-dai");
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index f3de615aacd7..9615e6de1306 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -287,3 +287,4 @@ module_platform_driver(pxa2xx_ac97_driver);
MODULE_AUTHOR("Nicolas Pitre");
MODULE_DESCRIPTION("AC97 driver for the Intel PXA2xx chip");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa2xx-ac97");
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index 9f390398d518..410d48b93031 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -117,3 +117,4 @@ module_platform_driver(pxa_pcm_driver);
MODULE_AUTHOR("Nicolas Pitre");
MODULE_DESCRIPTION("Intel PXA2xx PCM DMA module");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa-pcm-audio");
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index 6e8665430bd5..db000c6987a1 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -474,7 +474,7 @@ static int lpass_platform_pcm_new(struct snd_soc_pcm_runtime *soc_runtime)
struct lpass_data *drvdata =
snd_soc_platform_get_drvdata(soc_runtime->platform);
struct lpass_variant *v = drvdata->variant;
- int ret;
+ int ret = -EINVAL;
struct lpass_pcm_data *data;
size_t size = lpass_platform_pcm_hardware.buffer_bytes_max;
@@ -491,7 +491,7 @@ static int lpass_platform_pcm_new(struct snd_soc_pcm_runtime *soc_runtime)
data->rdma_ch = v->alloc_dma_channel(drvdata,
SNDRV_PCM_STREAM_PLAYBACK);
- if (IS_ERR_VALUE(data->rdma_ch))
+ if (data->rdma_ch < 0)
return data->rdma_ch;
drvdata->substream[data->rdma_ch] = psubstream;
@@ -518,8 +518,10 @@ static int lpass_platform_pcm_new(struct snd_soc_pcm_runtime *soc_runtime)
data->wrdma_ch = v->alloc_dma_channel(drvdata,
SNDRV_PCM_STREAM_CAPTURE);
- if (IS_ERR_VALUE(data->wrdma_ch))
+ if (data->wrdma_ch < 0) {
+ ret = data->wrdma_ch;
goto capture_alloc_err;
+ }
drvdata->substream[data->wrdma_ch] = csubstream;
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 606399de684d..49354d17ea55 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -492,9 +492,7 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
*/
if (!count) {
clk = clk_register_fixed_rate(dev, clkout_name[CLKOUT],
- parent_clk_name,
- (parent_clk_name) ?
- 0 : CLK_IS_ROOT, req_rate);
+ parent_clk_name, 0, req_rate);
if (!IS_ERR(clk)) {
adg->clkout[CLKOUT] = clk;
of_clk_add_provider(np, of_clk_src_simple_get, clk);
@@ -506,9 +504,7 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
else {
for (i = 0; i < CLKOUTMAX; i++) {
clk = clk_register_fixed_rate(dev, clkout_name[i],
- parent_clk_name,
- (parent_clk_name) ?
- 0 : CLK_IS_ROOT,
+ parent_clk_name, 0,
req_rate);
if (!IS_ERR(clk)) {
adg->onecell.clks = adg->clkout;
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index 7658e8fd7bdc..6bc93cbb3049 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -316,11 +316,15 @@ static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
size = ARRAY_SIZE(gen2_id_table_cmd);
}
- if (!entry)
- return 0xFF;
+ if ((!entry) || (size <= id)) {
+ struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io));
- if (size <= id)
- return 0xFF;
+ dev_err(dev, "unknown connection (%s[%d])\n",
+ rsnd_mod_name(mod), rsnd_mod_id(mod));
+
+ /* use non-prohibited SRS number as error */
+ return 0x00; /* SSI00 */
+ }
return entry[id];
}
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index fc89a67258ca..a8f61d79333b 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -276,8 +276,9 @@ struct rsnd_mod {
/*
* status
*
- * 0xH0000CB0
+ * 0xH0000CBA
*
+ * A 0: probe 1: remove
* B 0: init 1: quit
* C 0: start 1: stop
*
@@ -287,19 +288,19 @@ struct rsnd_mod {
* H 0: fallback
* H 0: hw_params
*/
+#define __rsnd_mod_shift_probe 0
+#define __rsnd_mod_shift_remove 0
#define __rsnd_mod_shift_init 4
#define __rsnd_mod_shift_quit 4
#define __rsnd_mod_shift_start 8
#define __rsnd_mod_shift_stop 8
-#define __rsnd_mod_shift_probe 28 /* always called */
-#define __rsnd_mod_shift_remove 28 /* always called */
#define __rsnd_mod_shift_irq 28 /* always called */
#define __rsnd_mod_shift_pcm_new 28 /* always called */
#define __rsnd_mod_shift_fallback 28 /* always called */
#define __rsnd_mod_shift_hw_params 28 /* always called */
-#define __rsnd_mod_add_probe 0
-#define __rsnd_mod_add_remove 0
+#define __rsnd_mod_add_probe 1
+#define __rsnd_mod_add_remove -1
#define __rsnd_mod_add_init 1
#define __rsnd_mod_add_quit -1
#define __rsnd_mod_add_start 1
@@ -310,7 +311,7 @@ struct rsnd_mod {
#define __rsnd_mod_add_hw_params 0
#define __rsnd_mod_call_probe 0
-#define __rsnd_mod_call_remove 0
+#define __rsnd_mod_call_remove 1
#define __rsnd_mod_call_init 0
#define __rsnd_mod_call_quit 1
#define __rsnd_mod_call_start 0
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 15d6ffe8be74..e39f916d0f2f 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -572,6 +572,9 @@ int rsnd_src_probe(struct rsnd_priv *priv)
i = 0;
for_each_child_of_node(node, np) {
+ if (!of_device_is_available(np))
+ goto skip;
+
src = rsnd_src_get(priv, i);
snprintf(name, RSND_SRC_NAME_SIZE, "%s.%d",
@@ -595,6 +598,7 @@ int rsnd_src_probe(struct rsnd_priv *priv)
if (ret)
goto rsnd_src_probe_done;
+skip:
i++;
}
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 1cf94d7fb9f4..ee7f15aa46fc 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -1023,6 +1023,11 @@ static int soc_tplg_kcontrol_elems_load(struct soc_tplg *tplg,
control_hdr = (struct snd_soc_tplg_ctl_hdr *)tplg->pos;
+ if (control_hdr->size != sizeof(*control_hdr)) {
+ dev_err(tplg->dev, "ASoC: invalid control size\n");
+ return -EINVAL;
+ }
+
switch (control_hdr->ops.info) {
case SND_SOC_TPLG_CTL_VOLSW:
case SND_SOC_TPLG_CTL_STROBE:
@@ -1476,6 +1481,8 @@ widget:
widget->dobj.type = SND_SOC_DOBJ_WIDGET;
widget->dobj.ops = tplg->ops;
widget->dobj.index = tplg->index;
+ kfree(template.sname);
+ kfree(template.name);
list_add(&widget->dobj.list, &tplg->comp->dobj_list);
return 0;
@@ -1499,10 +1506,17 @@ static int soc_tplg_dapm_widget_elems_load(struct soc_tplg *tplg,
for (i = 0; i < count; i++) {
widget = (struct snd_soc_tplg_dapm_widget *) tplg->pos;
+ if (widget->size != sizeof(*widget)) {
+ dev_err(tplg->dev, "ASoC: invalid widget size\n");
+ return -EINVAL;
+ }
+
ret = soc_tplg_dapm_widget_create(tplg, widget);
- if (ret < 0)
+ if (ret < 0) {
dev_err(tplg->dev, "ASoC: failed to load widget %s\n",
widget->name);
+ return ret;
+ }
}
return 0;
@@ -1586,6 +1600,7 @@ static int soc_tplg_dai_create(struct soc_tplg *tplg,
return snd_soc_register_dai(tplg->comp, dai_drv);
}
+/* create the FE DAI link */
static int soc_tplg_link_create(struct soc_tplg *tplg,
struct snd_soc_tplg_pcm *pcm)
{
@@ -1598,6 +1613,16 @@ static int soc_tplg_link_create(struct soc_tplg *tplg,
link->name = pcm->pcm_name;
link->stream_name = pcm->pcm_name;
+ link->id = pcm->pcm_id;
+
+ link->cpu_dai_name = pcm->dai_name;
+ link->codec_name = "snd-soc-dummy";
+ link->codec_dai_name = "snd-soc-dummy-dai";
+
+ /* enable DPCM */
+ link->dynamic = 1;
+ link->dpcm_playback = pcm->playback;
+ link->dpcm_capture = pcm->capture;
/* pass control to component driver for optional further init */
ret = soc_tplg_dai_link_load(tplg, link);
@@ -1639,8 +1664,6 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg,
if (tplg->pass != SOC_TPLG_PASS_PCM_DAI)
return 0;
- pcm = (struct snd_soc_tplg_pcm *)tplg->pos;
-
if (soc_tplg_check_elem_count(tplg,
sizeof(struct snd_soc_tplg_pcm), count,
hdr->payload_size, "PCM DAI")) {
@@ -1650,7 +1673,13 @@ static int soc_tplg_pcm_elems_load(struct soc_tplg *tplg,
}
/* create the FE DAIs and DAI links */
+ pcm = (struct snd_soc_tplg_pcm *)tplg->pos;
for (i = 0; i < count; i++) {
+ if (pcm->size != sizeof(*pcm)) {
+ dev_err(tplg->dev, "ASoC: invalid pcm size\n");
+ return -EINVAL;
+ }
+
soc_tplg_pcm_create(tplg, pcm);
pcm++;
}
@@ -1670,6 +1699,11 @@ static int soc_tplg_manifest_load(struct soc_tplg *tplg,
return 0;
manifest = (struct snd_soc_tplg_manifest *)tplg->pos;
+ if (manifest->size != sizeof(*manifest)) {
+ dev_err(tplg->dev, "ASoC: invalid manifest size\n");
+ return -EINVAL;
+ }
+
tplg->pos += sizeof(struct snd_soc_tplg_manifest);
if (tplg->comp && tplg->ops && tplg->ops->manifest)
@@ -1686,6 +1720,14 @@ static int soc_valid_header(struct soc_tplg *tplg,
if (soc_tplg_get_hdr_offset(tplg) >= tplg->fw->size)
return 0;
+ if (hdr->size != sizeof(*hdr)) {
+ dev_err(tplg->dev,
+ "ASoC: invalid header size for type %d at offset 0x%lx size 0x%zx.\n",
+ hdr->type, soc_tplg_get_hdr_offset(tplg),
+ tplg->fw->size);
+ return -EINVAL;
+ }
+
/* big endian firmware objects not supported atm */
if (hdr->magic == cpu_to_be32(SND_SOC_TPLG_MAGIC)) {
dev_err(tplg->dev,
diff --git a/sound/soc/sti/sti_uniperif.c b/sound/soc/sti/sti_uniperif.c
index 39bcefe5eea0..488ef4ed8fba 100644
--- a/sound/soc/sti/sti_uniperif.c
+++ b/sound/soc/sti/sti_uniperif.c
@@ -11,6 +11,142 @@
#include "uniperif.h"
/*
+ * The user frame size shall be 2, 4, 6 or 8 32-bit words
+ * (i.e. 8, 16, 24 or 32 bytes).
+ * This constraint comes from the allowed values of the
+ * UNIPERIF_I2S_FMT_NUM_CH register.
+ */
+#define UNIPERIF_MAX_FRAME_SZ 0x20
+#define UNIPERIF_ALLOWED_FRAME_SZ (0x08 | 0x10 | 0x18 | UNIPERIF_MAX_FRAME_SZ)
+
+int sti_uniperiph_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+ unsigned int rx_mask, int slots,
+ int slot_width)
+{
+ struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+ struct uniperif *uni = priv->dai_data.uni;
+ int i, frame_size, avail_slots;
+
+ if (!UNIPERIF_TYPE_IS_TDM(uni)) {
+ dev_err(uni->dev, "cpu dai not in tdm mode\n");
+ return -EINVAL;
+ }
+
+ /* store info in unip context */
+ uni->tdm_slot.slots = slots;
+ uni->tdm_slot.slot_width = slot_width;
+ /* unip is unidirectional */
+ uni->tdm_slot.mask = (tx_mask != 0) ? tx_mask : rx_mask;
+
+ /* number of available timeslots */
+ for (i = 0, avail_slots = 0; i < uni->tdm_slot.slots; i++) {
+ if ((uni->tdm_slot.mask >> i) & 0x01)
+ avail_slots++;
+ }
+ uni->tdm_slot.avail_slots = avail_slots;
+
+ /* frame size in bytes */
+ frame_size = uni->tdm_slot.avail_slots * uni->tdm_slot.slot_width / 8;
+
+ /* check frame size is allowed */
+ if ((frame_size > UNIPERIF_MAX_FRAME_SZ) ||
+ (frame_size & ~(int)UNIPERIF_ALLOWED_FRAME_SZ)) {
+ dev_err(uni->dev, "frame size not allowed: %d bytes\n",
+ frame_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
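For reference, a minimal stand-alone sketch (local names, not part of the driver) of the frame-size check above; it shows why both the mask test and the comparison against the maximum are needed, since 40- and 48-byte frames also fit inside the allowed mask:

#include <stdio.h>

#define MAX_SZ		0x20
#define ALLOWED_SZ	(0x08 | 0x10 | 0x18 | MAX_SZ)	/* == 0x38 */

static int frame_size_ok(int sz)
{
	/* same condition as the driver, inverted to mean "accepted" */
	return !((sz > MAX_SZ) || (sz & ~(int)ALLOWED_SZ));
}

int main(void)
{
	const int sizes[] = { 8, 12, 16, 24, 32, 40, 48 };
	int i;

	for (i = 0; i < 7; i++)
		printf("%2d bytes: %s\n", sizes[i],
		       frame_size_ok(sizes[i]) ? "ok" : "rejected");
	/* 8/16/24/32 pass; 12 fails the mask test; 40 and 48 fit the
	 * mask but exceed MAX_SZ, hence the explicit > comparison */
	return 0;
}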
+
+int sti_uniperiph_fix_tdm_chan(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct uniperif *uni = rule->private;
+ struct snd_interval t;
+
+ t.min = uni->tdm_slot.avail_slots;
+ t.max = uni->tdm_slot.avail_slots;
+ t.openmin = 0;
+ t.openmax = 0;
+ t.integer = 0;
+
+ return snd_interval_refine(hw_param_interval(params, rule->var), &t);
+}
+
+int sti_uniperiph_fix_tdm_format(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct uniperif *uni = rule->private;
+ struct snd_mask *maskp = hw_param_mask(params, rule->var);
+ u64 format;
+
+ switch (uni->tdm_slot.slot_width) {
+ case 16:
+ format = SNDRV_PCM_FMTBIT_S16_LE;
+ break;
+ case 32:
+ format = SNDRV_PCM_FMTBIT_S32_LE;
+ break;
+ default:
+ dev_err(uni->dev, "format not supported: %d bits\n",
+ uni->tdm_slot.slot_width);
+ return -EINVAL;
+ }
+
+ maskp->bits[0] &= (u_int32_t)format;
+ maskp->bits[1] &= (u_int32_t)(format >> 32);
+ /* clear remaining indexes */
+ memset(maskp->bits + 2, 0, (SNDRV_MASK_MAX - 64) / 8);
+
+ if (!maskp->bits[0] && !maskp->bits[1])
+ return -EINVAL;
+
+ return 0;
+}
+
+int sti_uniperiph_get_tdm_word_pos(struct uniperif *uni,
+ unsigned int *word_pos)
+{
+ int slot_width = uni->tdm_slot.slot_width / 8;
+ int slots_num = uni->tdm_slot.slots;
+ unsigned int slots_mask = uni->tdm_slot.mask;
+ int i, j, k;
+ unsigned int word16_pos[4];
+
+ /* word16_pos:
+ * word16_pos[0] = WORDX_LSB
+ * word16_pos[1] = WORDX_MSB
+ * word16_pos[2] = WORDX+1_LSB
+ * word16_pos[3] = WORDX+1_MSB
+ */
+
+ /* set unip word position */
+ for (i = 0, j = 0, k = 0; (i < slots_num) && (k < WORD_MAX); i++) {
+ if ((slots_mask >> i) & 0x01) {
+ word16_pos[j] = i * slot_width;
+
+ if (slot_width == 4) {
+ word16_pos[j + 1] = word16_pos[j] + 2;
+ j++;
+ }
+ j++;
+
+ if (j > 3) {
+ word_pos[k] = word16_pos[1] |
+ (word16_pos[0] << 8) |
+ (word16_pos[3] << 16) |
+ (word16_pos[2] << 24);
+ j = 0;
+ k++;
+ }
+ }
+ }
+
+ return 0;
+}
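A worked example of the packing, hand-derived from the loop above (assuming all eight 32-bit slots are active, mask 0xff); it reproduces the default TDM_WORD_POS_X_Y tables used by the prepare_tdm() callbacks later in this patch:

#include <stdio.h>

int main(void)
{
	unsigned int w16[4], word_pos[4];
	int i, j = 0, k = 0;

	for (i = 0; i < 8 && k < 4; i++) {
		w16[j] = i * 4;          /* slot byte offset */
		w16[j + 1] = w16[j] + 2; /* 32-bit slot: MSB half-word */
		j += 2;
		if (j > 3) {
			word_pos[k++] = w16[1] | (w16[0] << 8) |
					(w16[3] << 16) | (w16[2] << 24);
			j = 0;
		}
	}
	for (k = 0; k < 4; k++)
		printf("word_pos[%d] = 0x%08X\n", k, word_pos[k]);
	/* prints 0x04060002, 0x0C0E080A, 0x14161012, 0x1C1E181A */
	return 0;
}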
+
+/*
* sti_uniperiph_dai_create_ctrl
* This function creates the ctrl associated with the DAI and also with
* the pcm device. The request is done by the front end to associate a
* ctrl with a pcm device id
@@ -45,10 +181,16 @@ int sti_uniperiph_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
+ struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+ struct uniperif *uni = priv->dai_data.uni;
struct snd_dmaengine_dai_dma_data *dma_data;
int transfer_size;
- transfer_size = params_channels(params) * UNIPERIF_FIFO_FRAMES;
+ if (uni->info->type == SND_ST_UNIPERIF_TYPE_TDM)
+ /* transfer size = user frame size (in 32-bit FIFO cells) */
+ transfer_size = snd_soc_params_to_frame_size(params) / 32;
+ else
+ transfer_size = params_channels(params) * UNIPERIF_FIFO_FRAMES;
dma_data = snd_soc_dai_get_dma_data(dai, substream);
dma_data->maxburst = transfer_size;
diff --git a/sound/soc/sti/uniperif.h b/sound/soc/sti/uniperif.h
index f0fd5a9944e9..eb9933c62ad6 100644
--- a/sound/soc/sti/uniperif.h
+++ b/sound/soc/sti/uniperif.h
@@ -25,7 +25,7 @@
writel_relaxed((((value) & mask) << shift), ip->base + offset)
/*
- * AUD_UNIPERIF_SOFT_RST reg
+ * UNIPERIF_SOFT_RST reg
*/
#define UNIPERIF_SOFT_RST_OFFSET(ip) 0x0000
@@ -50,7 +50,7 @@
UNIPERIF_SOFT_RST_SOFT_RST_MASK(ip))
/*
- * AUD_UNIPERIF_FIFO_DATA reg
+ * UNIPERIF_FIFO_DATA reg
*/
#define UNIPERIF_FIFO_DATA_OFFSET(ip) 0x0004
@@ -58,7 +58,7 @@
writel_relaxed(value, ip->base + UNIPERIF_FIFO_DATA_OFFSET(ip))
/*
- * AUD_UNIPERIF_CHANNEL_STA_REGN reg
+ * UNIPERIF_CHANNEL_STA_REGN reg
*/
#define UNIPERIF_CHANNEL_STA_REGN(ip, n) (0x0060 + (4 * n))
@@ -105,7 +105,7 @@
writel_relaxed(value, ip->base + UNIPERIF_CHANNEL_STA_REG5_OFFSET(ip))
/*
- * AUD_UNIPERIF_ITS reg
+ * UNIPERIF_ITS reg
*/
#define UNIPERIF_ITS_OFFSET(ip) 0x000C
@@ -143,7 +143,7 @@
0 : (BIT(UNIPERIF_ITS_UNDERFLOW_REC_FAILED_SHIFT(ip))))
/*
- * AUD_UNIPERIF_ITS_BCLR reg
+ * UNIPERIF_ITS_BCLR reg
*/
/* FIFO_ERROR */
@@ -160,7 +160,7 @@
writel_relaxed(value, ip->base + UNIPERIF_ITS_BCLR_OFFSET(ip))
/*
- * AUD_UNIPERIF_ITM reg
+ * UNIPERIF_ITM reg
*/
#define UNIPERIF_ITM_OFFSET(ip) 0x0018
@@ -188,7 +188,7 @@
0 : (BIT(UNIPERIF_ITM_UNDERFLOW_REC_FAILED_SHIFT(ip))))
/*
- * AUD_UNIPERIF_ITM_BCLR reg
+ * UNIPERIF_ITM_BCLR reg
*/
#define UNIPERIF_ITM_BCLR_OFFSET(ip) 0x001c
@@ -213,7 +213,7 @@
UNIPERIF_ITM_BCLR_DMA_ERROR_MASK(ip))
/*
- * AUD_UNIPERIF_ITM_BSET reg
+ * UNIPERIF_ITM_BSET reg
*/
#define UNIPERIF_ITM_BSET_OFFSET(ip) 0x0020
@@ -767,7 +767,7 @@
SET_UNIPERIF_REG(ip, \
UNIPERIF_CTRL_OFFSET(ip), \
UNIPERIF_CTRL_READER_OUT_SEL_SHIFT(ip), \
- CORAUD_UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip), 1)
+ UNIPERIF_CTRL_READER_OUT_SEL_MASK(ip), 1)
/* UNDERFLOW_REC_WINDOW */
#define UNIPERIF_CTRL_UNDERFLOW_REC_WINDOW_SHIFT(ip) 20
@@ -1046,7 +1046,7 @@
UNIPERIF_STATUS_1_UNDERFLOW_DURATION_MASK(ip), value)
/*
- * AUD_UNIPERIF_CHANNEL_STA_REGN reg
+ * UNIPERIF_CHANNEL_STA_REGN reg
*/
#define UNIPERIF_CHANNEL_STA_REGN(ip, n) (0x0060 + (4 * n))
@@ -1057,7 +1057,7 @@
UNIPERIF_CHANNEL_STA_REGN(ip, n))
/*
- * AUD_UNIPERIF_USER_VALIDITY reg
+ * UNIPERIF_USER_VALIDITY reg
*/
#define UNIPERIF_USER_VALIDITY_OFFSET(ip) 0x0090
@@ -1101,12 +1101,136 @@
UNIPERIF_DBG_STANDBY_LEFT_SP_MASK(ip), value)
/*
+ * UNIPERIF_TDM_ENABLE
+ */
+#define UNIPERIF_TDM_ENABLE_OFFSET(ip) 0x0118
+#define GET_UNIPERIF_TDM_ENABLE(ip) \
+ readl_relaxed(ip->base + UNIPERIF_TDM_ENABLE_OFFSET(ip))
+#define SET_UNIPERIF_TDM_ENABLE(ip, value) \
+ writel_relaxed(value, ip->base + UNIPERIF_TDM_ENABLE_OFFSET(ip))
+
+/* TDM_ENABLE */
+#define UNIPERIF_TDM_ENABLE_EN_TDM_SHIFT(ip) 0x0
+#define UNIPERIF_TDM_ENABLE_EN_TDM_MASK(ip) 0x1
+#define GET_UNIPERIF_TDM_ENABLE_EN_TDM(ip) \
+ GET_UNIPERIF_REG(ip, \
+ UNIPERIF_TDM_ENABLE_OFFSET(ip), \
+ UNIPERIF_TDM_ENABLE_EN_TDM_SHIFT(ip), \
+ UNIPERIF_TDM_ENABLE_EN_TDM_MASK(ip))
+#define SET_UNIPERIF_TDM_ENABLE_TDM_ENABLE(ip) \
+ SET_UNIPERIF_REG(ip, \
+ UNIPERIF_TDM_ENABLE_OFFSET(ip), \
+ UNIPERIF_TDM_ENABLE_EN_TDM_SHIFT(ip), \
+ UNIPERIF_TDM_ENABLE_EN_TDM_MASK(ip), 1)
+#define SET_UNIPERIF_TDM_ENABLE_TDM_DISABLE(ip) \
+ SET_UNIPERIF_REG(ip, \
+ UNIPERIF_TDM_ENABLE_OFFSET(ip), \
+ UNIPERIF_TDM_ENABLE_EN_TDM_SHIFT(ip), \
+ UNIPERIF_TDM_ENABLE_EN_TDM_MASK(ip), 0)
+
+/*
+ * UNIPERIF_TDM_FS_REF_FREQ
+ */
+#define UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip) 0x011c
+#define GET_UNIPERIF_TDM_FS_REF_FREQ(ip) \
+ readl_relaxed(ip->base + UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip))
+#define SET_UNIPERIF_TDM_FS_REF_FREQ(ip, value) \
+ writel_relaxed(value, ip->base + \
+ UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip))
+
+/* REF_FREQ */
+#define UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_SHIFT(ip) 0x0
+#define VALUE_UNIPERIF_TDM_FS_REF_FREQ_8KHZ(ip) 0
+#define VALUE_UNIPERIF_TDM_FS_REF_FREQ_16KHZ(ip) 1
+#define VALUE_UNIPERIF_TDM_FS_REF_FREQ_32KHZ(ip) 2
+#define VALUE_UNIPERIF_TDM_FS_REF_FREQ_48KHZ(ip) 3
+#define UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_MASK(ip) 0x3
+#define GET_UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ(ip) \
+ GET_UNIPERIF_REG(ip, \
+ UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip), \
+ UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_SHIFT(ip), \
+ UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_MASK(ip))
+#define SET_UNIPERIF_TDM_FS_REF_FREQ_8KHZ(ip) \
+ SET_UNIPERIF_REG(ip, \
+ UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip), \
+ UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_SHIFT(ip), \
+ UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_MASK(ip), \
+ VALUE_UNIPERIF_TDM_FS_REF_FREQ_8KHZ(ip))
+#define SET_UNIPERIF_TDM_FS_REF_FREQ_16KHZ(ip) \
+ SET_UNIPERIF_REG(ip, \
+ UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip), \
+ UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_SHIFT(ip), \
+ UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_MASK(ip), \
+ VALUE_UNIPERIF_TDM_FS_REF_FREQ_16KHZ(ip))
+#define SET_UNIPERIF_TDM_FS_REF_FREQ_32KHZ(ip) \
+ SET_UNIPERIF_REG(ip, \
+ UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip), \
+ UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_SHIFT(ip), \
+ UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_MASK(ip), \
+ VALUE_UNIPERIF_TDM_FS_REF_FREQ_32KHZ(ip))
+#define SET_UNIPERIF_TDM_FS_REF_FREQ_48KHZ(ip) \
+ SET_UNIPERIF_REG(ip, \
+ UNIPERIF_TDM_FS_REF_FREQ_OFFSET(ip), \
+ UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_SHIFT(ip), \
+ UNIPERIF_TDM_FS_REF_FREQ_REF_FREQ_MASK(ip), \
+ VALUE_UNIPERIF_TDM_FS_REF_FREQ_48KHZ(ip))
+
+/*
+ * UNIPERIF_TDM_FS_REF_DIV
+ */
+#define UNIPERIF_TDM_FS_REF_DIV_OFFSET(ip) 0x0120
+#define GET_UNIPERIF_TDM_FS_REF_DIV(ip) \
+ readl_relaxed(ip->base + UNIPERIF_TDM_FS_REF_DIV_OFFSET(ip))
+#define SET_UNIPERIF_TDM_FS_REF_DIV(ip, value) \
+ writel_relaxed(value, ip->base + \
+ UNIPERIF_TDM_FS_REF_DIV_OFFSET(ip))
+
+/* NUM_TIMESLOT */
+#define UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT_SHIFT(ip) 0x0
+#define UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT_MASK(ip) 0xff
+#define GET_UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT(ip) \
+ GET_UNIPERIF_REG(ip, \
+ UNIPERIF_TDM_FS_REF_DIV_OFFSET(ip), \
+ UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT_SHIFT(ip), \
+ UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT_MASK(ip))
+#define SET_UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT(ip, value) \
+ SET_UNIPERIF_REG(ip, \
+ UNIPERIF_TDM_FS_REF_DIV_OFFSET(ip), \
+ UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT_SHIFT(ip), \
+ UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT_MASK(ip), value)
+
+/*
+ * UNIPERIF_TDM_WORD_POS_X_Y
+ * The 32 bits of a UNIPERIF_TDM_WORD_POS_X_Y register shall be written in one shot
+ */
+#define UNIPERIF_TDM_WORD_POS_1_2_OFFSET(ip) 0x013c
+#define UNIPERIF_TDM_WORD_POS_3_4_OFFSET(ip) 0x0140
+#define UNIPERIF_TDM_WORD_POS_5_6_OFFSET(ip) 0x0144
+#define UNIPERIF_TDM_WORD_POS_7_8_OFFSET(ip) 0x0148
+#define GET_UNIPERIF_TDM_WORD_POS(ip, words) \
+ readl_relaxed(ip->base + UNIPERIF_TDM_WORD_POS_##words##_OFFSET(ip))
+#define SET_UNIPERIF_TDM_WORD_POS(ip, words, value) \
+ writel_relaxed(value, ip->base + \
+ UNIPERIF_TDM_WORD_POS_##words##_OFFSET(ip))
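For clarity, the 'words' argument is token-pasted into the register name, so the macro selects one of the four WORD_POS registers above:

/* SET_UNIPERIF_TDM_WORD_POS(ip, 1_2, v)
 *   => writel_relaxed(v, ip->base + UNIPERIF_TDM_WORD_POS_1_2_OFFSET(ip))
 *   => writel_relaxed(v, ip->base + 0x013c)
 */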
+/*
* uniperipheral IP capabilities
*/
#define UNIPERIF_FIFO_SIZE 70 /* FIFO is 70 cells deep */
#define UNIPERIF_FIFO_FRAMES 4 /* FDMA trigger limit in frames */
+#define UNIPERIF_TYPE_IS_HDMI(p) \
+ ((p)->info->type == SND_ST_UNIPERIF_TYPE_HDMI)
+#define UNIPERIF_TYPE_IS_PCM(p) \
+ ((p)->info->type == SND_ST_UNIPERIF_TYPE_PCM)
+#define UNIPERIF_TYPE_IS_SPDIF(p) \
+ ((p)->info->type == SND_ST_UNIPERIF_TYPE_SPDIF)
+#define UNIPERIF_TYPE_IS_IEC958(p) \
+ (UNIPERIF_TYPE_IS_HDMI(p) || \
+ UNIPERIF_TYPE_IS_SPDIF(p))
+#define UNIPERIF_TYPE_IS_TDM(p) \
+ ((p)->info->type == SND_ST_UNIPERIF_TYPE_TDM)
+
/*
* Uniperipheral IP revisions
*/
@@ -1125,10 +1249,11 @@ enum uniperif_version {
};
enum uniperif_type {
- SND_ST_UNIPERIF_PLAYER_TYPE_NONE,
- SND_ST_UNIPERIF_PLAYER_TYPE_HDMI,
- SND_ST_UNIPERIF_PLAYER_TYPE_PCM,
- SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF
+ SND_ST_UNIPERIF_TYPE_NONE,
+ SND_ST_UNIPERIF_TYPE_HDMI,
+ SND_ST_UNIPERIF_TYPE_PCM,
+ SND_ST_UNIPERIF_TYPE_SPDIF,
+ SND_ST_UNIPERIF_TYPE_TDM
};
enum uniperif_state {
@@ -1145,9 +1270,17 @@ enum uniperif_iec958_encoding_mode {
UNIPERIF_IEC958_ENCODING_MODE_ENCODED
};
+enum uniperif_word_pos {
+ WORD_1_2,
+ WORD_3_4,
+ WORD_5_6,
+ WORD_7_8,
+ WORD_MAX
+};
+
struct uniperif_info {
int id; /* instance value of the uniperipheral IP */
- enum uniperif_type player_type;
+ enum uniperif_type type;
int underflow_enabled; /* Underflow recovery mode */
};
@@ -1156,12 +1289,20 @@ struct uniperif_iec958_settings {
struct snd_aes_iec958 iec958;
};
+struct dai_tdm_slot {
+ unsigned int mask;
+ int slots;
+ int slot_width;
+ unsigned int avail_slots;
+};
+
struct uniperif {
/* System information */
struct uniperif_info *info;
struct device *dev;
int ver; /* IP version, used by register access macros */
struct regmap_field *clk_sel;
+ struct regmap_field *valid_sel;
/* capabilities */
const struct snd_pcm_hardware *hw;
@@ -1192,6 +1333,7 @@ struct uniperif {
/* dai properties */
unsigned int daifmt;
+ struct dai_tdm_slot tdm_slot;
/* DAI callbacks */
const struct snd_soc_dai_ops *dai_ops;
@@ -1209,6 +1351,28 @@ struct sti_uniperiph_data {
struct sti_uniperiph_dai dai_data;
};
+static const struct snd_pcm_hardware uni_tdm_hw = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID,
+
+ .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE,
+
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 8000,
+ .rate_max = 48000,
+
+ .channels_min = 1,
+ .channels_max = 32,
+
+ .periods_min = 2,
+ .periods_max = 10,
+
+ .period_bytes_min = 128,
+ .period_bytes_max = 64 * PAGE_SIZE,
+ .buffer_bytes_max = 256 * PAGE_SIZE
+};
+
/* uniperiph player*/
int uni_player_init(struct platform_device *pdev,
struct uniperif *uni_player);
@@ -1226,4 +1390,28 @@ int sti_uniperiph_dai_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai);
+static inline int sti_uniperiph_get_user_frame_size(
+ struct snd_pcm_runtime *runtime)
+{
+ return (runtime->channels * snd_pcm_format_width(runtime->format) / 8);
+}
+
+static inline int sti_uniperiph_get_unip_tdm_frame_size(struct uniperif *uni)
+{
+ return (uni->tdm_slot.slots * uni->tdm_slot.slot_width / 8);
+}
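Two hand-computed sanity values for these helpers (example figures, not from the patch):

/* e.g. a stereo S16_LE stream: 2 ch * 16 bit / 8 = 4 bytes per frame;
 * a fully populated TDM bus: 8 slots * 32 bit / 8 = 32 bytes per
 * unip frame. */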
+
+int sti_uniperiph_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+ unsigned int rx_mask, int slots,
+ int slot_width);
+
+int sti_uniperiph_get_tdm_word_pos(struct uniperif *uni,
+ unsigned int *word_pos);
+
+int sti_uniperiph_fix_tdm_chan(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule);
+
+int sti_uniperiph_fix_tdm_format(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule);
+
#endif
diff --git a/sound/soc/sti/uniperif_player.c b/sound/soc/sti/uniperif_player.c
index 7aca6b92f718..ee1c7c245bc7 100644
--- a/sound/soc/sti/uniperif_player.c
+++ b/sound/soc/sti/uniperif_player.c
@@ -21,23 +21,14 @@
/* sys config registers definitions */
#define SYS_CFG_AUDIO_GLUE 0xA4
-#define SYS_CFG_AUDI0_GLUE_PCM_CLKX 8
/*
* Driver specific types.
*/
-#define UNIPERIF_PLAYER_TYPE_IS_HDMI(p) \
- ((p)->info->player_type == SND_ST_UNIPERIF_PLAYER_TYPE_HDMI)
-#define UNIPERIF_PLAYER_TYPE_IS_PCM(p) \
- ((p)->info->player_type == SND_ST_UNIPERIF_PLAYER_TYPE_PCM)
-#define UNIPERIF_PLAYER_TYPE_IS_SPDIF(p) \
- ((p)->info->player_type == SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF)
-#define UNIPERIF_PLAYER_TYPE_IS_IEC958(p) \
- (UNIPERIF_PLAYER_TYPE_IS_HDMI(p) || \
- UNIPERIF_PLAYER_TYPE_IS_SPDIF(p))
#define UNIPERIF_PLAYER_CLK_ADJ_MIN -999999
#define UNIPERIF_PLAYER_CLK_ADJ_MAX 1000000
+#define UNIPERIF_PLAYER_I2S_OUT 1 /* player id connected to I2S/TDM TX bus */
/*
* Note: snd_pcm_hardware is linked to DMA controller but is declared here to
@@ -444,18 +435,11 @@ static int uni_player_prepare_pcm(struct uniperif *player,
/* Force slot width to 32 in I2S mode (HW constraint) */
if ((player->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) ==
- SND_SOC_DAIFMT_I2S) {
+ SND_SOC_DAIFMT_I2S)
slot_width = 32;
- } else {
- switch (runtime->format) {
- case SNDRV_PCM_FORMAT_S16_LE:
- slot_width = 16;
- break;
- default:
- slot_width = 32;
- break;
- }
- }
+ else
+ slot_width = snd_pcm_format_width(runtime->format);
+
output_frame_size = slot_width * runtime->channels;
clk_div = player->mclk / runtime->rate;
@@ -530,7 +514,6 @@ static int uni_player_prepare_pcm(struct uniperif *player,
SET_UNIPERIF_CONFIG_ONE_BIT_AUD_DISABLE(player);
SET_UNIPERIF_I2S_FMT_ORDER_MSB(player);
- SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(player);
/* No iec958 formatting as outputting to DAC */
SET_UNIPERIF_CTRL_SPDIF_FMT_OFF(player);
@@ -538,6 +521,55 @@ static int uni_player_prepare_pcm(struct uniperif *player,
return 0;
}
+static int uni_player_prepare_tdm(struct uniperif *player,
+ struct snd_pcm_runtime *runtime)
+{
+ int tdm_frame_size; /* unip tdm frame size in bytes */
+ int user_frame_size; /* user tdm frame size in bytes */
+ /* default unip TDM_WORD_POS_X_Y */
+ unsigned int word_pos[4] = {
+ 0x04060002, 0x0C0E080A, 0x14161012, 0x1C1E181A};
+ int freq, ret;
+
+ tdm_frame_size =
+ sti_uniperiph_get_unip_tdm_frame_size(player);
+ user_frame_size =
+ sti_uniperiph_get_user_frame_size(runtime);
+
+ /* fix 16/0 format */
+ SET_UNIPERIF_CONFIG_MEM_FMT_16_0(player);
+ SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(player);
+
+ /* number of words inserted on the TDM line */
+ SET_UNIPERIF_I2S_FMT_NUM_CH(player, user_frame_size / 4 / 2);
+
+ SET_UNIPERIF_I2S_FMT_ORDER_MSB(player);
+ SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(player);
+
+ /* Enable the tdm functionality */
+ SET_UNIPERIF_TDM_ENABLE_TDM_ENABLE(player);
+
+ /* number of 8-bit timeslots available in the unip tdm frame */
+ SET_UNIPERIF_TDM_FS_REF_DIV_NUM_TIMESLOT(player, tdm_frame_size);
+
+ /* set the timeslot allocation for words in FIFO */
+ sti_uniperiph_get_tdm_word_pos(player, word_pos);
+ SET_UNIPERIF_TDM_WORD_POS(player, 1_2, word_pos[WORD_1_2]);
+ SET_UNIPERIF_TDM_WORD_POS(player, 3_4, word_pos[WORD_3_4]);
+ SET_UNIPERIF_TDM_WORD_POS(player, 5_6, word_pos[WORD_5_6]);
+ SET_UNIPERIF_TDM_WORD_POS(player, 7_8, word_pos[WORD_7_8]);
+
+ /* set unip clk rate (not done via the set_sysclk ops) */
+ freq = runtime->rate * tdm_frame_size * 8;
+ mutex_lock(&player->ctrl_lock);
+ ret = uni_player_clk_set_rate(player, freq);
+ if (!ret)
+ player->mclk = freq;
+ mutex_unlock(&player->ctrl_lock);
+
+ return 0;
+}
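As a hand-computed example of the clock setting above (illustrative numbers, not from the datasheet):

/* freq = rate * tdm_frame_size * 8, i.e. the TDM bit clock:
 * 8 slots * 32 bit -> tdm_frame_size = 32 bytes, so at 48 kHz
 * freq = 48000 * 32 * 8 = 12288000 Hz (12.288 MHz). */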
+
/*
* ALSA uniperipheral iec958 controls
*/
@@ -668,11 +700,29 @@ static int uni_player_startup(struct snd_pcm_substream *substream,
{
struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
struct uniperif *player = priv->dai_data.uni;
+ int ret;
+
player->substream = substream;
player->clk_adj = 0;
- return 0;
+ if (!UNIPERIF_TYPE_IS_TDM(player))
+ return 0;
+
+ /* refine hw constraint in tdm mode */
+ ret = snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ sti_uniperiph_fix_tdm_chan,
+ player, SNDRV_PCM_HW_PARAM_CHANNELS,
+ -1);
+ if (ret < 0)
+ return ret;
+
+ return snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_FORMAT,
+ sti_uniperiph_fix_tdm_format,
+ player, SNDRV_PCM_HW_PARAM_FORMAT,
+ -1);
}
static int uni_player_set_sysclk(struct snd_soc_dai *dai, int clk_id,
@@ -682,7 +732,7 @@ static int uni_player_set_sysclk(struct snd_soc_dai *dai, int clk_id,
struct uniperif *player = priv->dai_data.uni;
int ret;
- if (dir == SND_SOC_CLOCK_IN)
+ if (UNIPERIF_TYPE_IS_TDM(player) || (dir == SND_SOC_CLOCK_IN))
return 0;
if (clk_id != 0)
@@ -714,7 +764,13 @@ static int uni_player_prepare(struct snd_pcm_substream *substream,
}
/* Calculate transfer size (in fifo cells and bytes) for frame count */
- transfer_size = runtime->channels * UNIPERIF_FIFO_FRAMES;
+ if (player->info->type == SND_ST_UNIPERIF_TYPE_TDM) {
+ /* transfer size = user frame size (in 32-bit FIFO cells) */
+ transfer_size =
+ sti_uniperiph_get_user_frame_size(runtime) / 4;
+ } else {
+ transfer_size = runtime->channels * UNIPERIF_FIFO_FRAMES;
+ }
/* Calculate number of empty cells available before asserting DREQ */
if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0) {
@@ -738,16 +794,19 @@ static int uni_player_prepare(struct snd_pcm_substream *substream,
SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(player, trigger_limit);
/* Uniperipheral setup depends on player type */
- switch (player->info->player_type) {
- case SND_ST_UNIPERIF_PLAYER_TYPE_HDMI:
+ switch (player->info->type) {
+ case SND_ST_UNIPERIF_TYPE_HDMI:
ret = uni_player_prepare_iec958(player, runtime);
break;
- case SND_ST_UNIPERIF_PLAYER_TYPE_PCM:
+ case SND_ST_UNIPERIF_TYPE_PCM:
ret = uni_player_prepare_pcm(player, runtime);
break;
- case SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF:
+ case SND_ST_UNIPERIF_TYPE_SPDIF:
ret = uni_player_prepare_iec958(player, runtime);
break;
+ case SND_ST_UNIPERIF_TYPE_TDM:
+ ret = uni_player_prepare_tdm(player, runtime);
+ break;
default:
dev_err(player->dev, "invalid player type");
return -EINVAL;
@@ -852,8 +911,8 @@ static int uni_player_start(struct uniperif *player)
* will not take effect and will hang the player.
*/
if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
- if (UNIPERIF_PLAYER_TYPE_IS_IEC958(player))
- SET_UNIPERIF_CTRL_SPDIF_FMT_ON(player);
+ if (UNIPERIF_TYPE_IS_IEC958(player))
+ SET_UNIPERIF_CTRL_SPDIF_FMT_ON(player);
/* Force channel status update (no update if clk disable) */
if (player->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
@@ -954,27 +1013,30 @@ static void uni_player_shutdown(struct snd_pcm_substream *substream,
player->substream = NULL;
}
-static int uni_player_parse_dt_clk_glue(struct platform_device *pdev,
- struct uniperif *player)
+static int uni_player_parse_dt_audio_glue(struct platform_device *pdev,
+ struct uniperif *player)
{
- int bit_offset;
struct device_node *node = pdev->dev.of_node;
struct regmap *regmap;
-
- bit_offset = SYS_CFG_AUDI0_GLUE_PCM_CLKX + player->info->id;
+ struct reg_field regfield[2] = {
+ /* PCM_CLK_SEL */
+ REG_FIELD(SYS_CFG_AUDIO_GLUE,
+ 8 + player->info->id,
+ 8 + player->info->id),
+ /* PCMP_VALID_SEL */
+ REG_FIELD(SYS_CFG_AUDIO_GLUE, 0, 1)
+ };
regmap = syscon_regmap_lookup_by_phandle(node, "st,syscfg");
- if (regmap) {
- struct reg_field regfield =
- REG_FIELD(SYS_CFG_AUDIO_GLUE, bit_offset, bit_offset);
-
- player->clk_sel = regmap_field_alloc(regmap, regfield);
- } else {
+ if (!regmap) {
dev_err(&pdev->dev, "sti-audio-clk-glue syscf not found\n");
return -EINVAL;
}
+ player->clk_sel = regmap_field_alloc(regmap, regfield[0]);
+ player->valid_sel = regmap_field_alloc(regmap, regfield[1]);
+
return 0;
}
@@ -1012,19 +1074,21 @@ static int uni_player_parse_dt(struct platform_device *pdev,
}
if (strcasecmp(mode, "hdmi") == 0)
- info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_HDMI;
+ info->type = SND_ST_UNIPERIF_TYPE_HDMI;
else if (strcasecmp(mode, "pcm") == 0)
- info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_PCM;
+ info->type = SND_ST_UNIPERIF_TYPE_PCM;
else if (strcasecmp(mode, "spdif") == 0)
- info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_SPDIF;
+ info->type = SND_ST_UNIPERIF_TYPE_SPDIF;
+ else if (strcasecmp(mode, "tdm") == 0)
+ info->type = SND_ST_UNIPERIF_TYPE_TDM;
else
- info->player_type = SND_ST_UNIPERIF_PLAYER_TYPE_NONE;
+ info->type = SND_ST_UNIPERIF_TYPE_NONE;
/* Save the info structure */
player->info = info;
- /* Get the PCM_CLK_SEL bit from audio-glue-ctrl SoC register */
- if (uni_player_parse_dt_clk_glue(pdev, player))
+ /* Get PCM_CLK_SEL & PCMP_VALID_SEL from audio-glue-ctrl SoC reg */
+ if (uni_player_parse_dt_audio_glue(pdev, player))
return -EINVAL;
return 0;
@@ -1037,7 +1101,8 @@ static const struct snd_soc_dai_ops uni_player_dai_ops = {
.trigger = uni_player_trigger,
.hw_params = sti_uniperiph_dai_hw_params,
.set_fmt = sti_uniperiph_dai_set_fmt,
- .set_sysclk = uni_player_set_sysclk
+ .set_sysclk = uni_player_set_sysclk,
+ .set_tdm_slot = sti_uniperiph_set_tdm_slot
};
int uni_player_init(struct platform_device *pdev,
@@ -1047,7 +1112,6 @@ int uni_player_init(struct platform_device *pdev,
player->dev = &pdev->dev;
player->state = UNIPERIF_STATE_STOPPED;
- player->hw = &uni_player_pcm_hw;
player->dai_ops = &uni_player_dai_ops;
ret = uni_player_parse_dt(pdev, player);
@@ -1057,6 +1121,11 @@ int uni_player_init(struct platform_device *pdev,
return ret;
}
+ if (UNIPERIF_TYPE_IS_TDM(player))
+ player->hw = &uni_tdm_hw;
+ else
+ player->hw = &uni_player_pcm_hw;
+
/* Get uniperif resource */
player->clk = of_clk_get(pdev->dev.of_node, 0);
if (IS_ERR(player->clk))
@@ -1073,6 +1142,17 @@ int uni_player_init(struct platform_device *pdev,
}
}
+ /* connect to I2S/TDM TX bus */
+ if (player->valid_sel &&
+ (player->info->id == UNIPERIF_PLAYER_I2S_OUT)) {
+ ret = regmap_field_write(player->valid_sel, player->info->id);
+ if (ret) {
+ dev_err(player->dev,
+ "%s: unable to connect to tdm bus", __func__);
+ return ret;
+ }
+ }
+
ret = devm_request_irq(&pdev->dev, player->irq,
uni_player_irq_handler, IRQF_SHARED,
dev_name(&pdev->dev), player);
@@ -1087,7 +1167,7 @@ int uni_player_init(struct platform_device *pdev,
SET_UNIPERIF_CTRL_SPDIF_LAT_OFF(player);
SET_UNIPERIF_CONFIG_IDLE_MOD_DISABLE(player);
- if (UNIPERIF_PLAYER_TYPE_IS_IEC958(player)) {
+ if (UNIPERIF_TYPE_IS_IEC958(player)) {
/* Set default iec958 status bits */
/* Consumer, PCM, copyright, 2ch, mode 0 */
diff --git a/sound/soc/sti/uniperif_reader.c b/sound/soc/sti/uniperif_reader.c
index 8a0eb2050169..eb74a328c928 100644
--- a/sound/soc/sti/uniperif_reader.c
+++ b/sound/soc/sti/uniperif_reader.c
@@ -73,55 +73,10 @@ static irqreturn_t uni_reader_irq_handler(int irq, void *dev_id)
return ret;
}
-static int uni_reader_prepare(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
+static int uni_reader_prepare_pcm(struct snd_pcm_runtime *runtime,
+ struct uniperif *reader)
{
- struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
- struct uniperif *reader = priv->dai_data.uni;
- struct snd_pcm_runtime *runtime = substream->runtime;
- int transfer_size, trigger_limit;
int slot_width;
- int count = 10;
-
- /* The reader should be stopped */
- if (reader->state != UNIPERIF_STATE_STOPPED) {
- dev_err(reader->dev, "%s: invalid reader state %d", __func__,
- reader->state);
- return -EINVAL;
- }
-
- /* Calculate transfer size (in fifo cells and bytes) for frame count */
- transfer_size = runtime->channels * UNIPERIF_FIFO_FRAMES;
-
- /* Calculate number of empty cells available before asserting DREQ */
- if (reader->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
- trigger_limit = UNIPERIF_FIFO_SIZE - transfer_size;
- else
- /*
- * Since SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0
- * FDMA_TRIGGER_LIMIT also controls when the state switches
- * from OFF or STANDBY to AUDIO DATA.
- */
- trigger_limit = transfer_size;
-
- /* Trigger limit must be an even number */
- if ((!trigger_limit % 2) ||
- (trigger_limit != 1 && transfer_size % 2) ||
- (trigger_limit > UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(reader))) {
- dev_err(reader->dev, "invalid trigger limit %d", trigger_limit);
- return -EINVAL;
- }
-
- SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(reader, trigger_limit);
-
- switch (reader->daifmt & SND_SOC_DAIFMT_INV_MASK) {
- case SND_SOC_DAIFMT_IB_IF:
- case SND_SOC_DAIFMT_NB_IF:
- SET_UNIPERIF_I2S_FMT_LR_POL_HIG(reader);
- break;
- default:
- SET_UNIPERIF_I2S_FMT_LR_POL_LOW(reader);
- }
/* Force slot width to 32 in I2S mode */
if ((reader->daifmt & SND_SOC_DAIFMT_FORMAT_MASK)
@@ -173,6 +128,109 @@ static int uni_reader_prepare(struct snd_pcm_substream *substream,
return -EINVAL;
}
+ /* Number of channels must be even, in the range 2..10 */
+ if ((runtime->channels % 2) || (runtime->channels < 2) ||
+ (runtime->channels > 10)) {
+ dev_err(reader->dev, "%s: invalid nb of channels", __func__);
+ return -EINVAL;
+ }
+
+ SET_UNIPERIF_I2S_FMT_NUM_CH(reader, runtime->channels / 2);
+ SET_UNIPERIF_I2S_FMT_ORDER_MSB(reader);
+
+ return 0;
+}
+
+static int uni_reader_prepare_tdm(struct snd_pcm_runtime *runtime,
+ struct uniperif *reader)
+{
+ int frame_size; /* user tdm frame size in bytes */
+ /* default unip TDM_WORD_POS_X_Y */
+ unsigned int word_pos[4] = {
+ 0x04060002, 0x0C0E080A, 0x14161012, 0x1C1E181A};
+
+ frame_size = sti_uniperiph_get_user_frame_size(runtime);
+
+ /* fix 16/0 format */
+ SET_UNIPERIF_CONFIG_MEM_FMT_16_0(reader);
+ SET_UNIPERIF_I2S_FMT_DATA_SIZE_32(reader);
+
+ /* number of words inserted on the TDM line */
+ SET_UNIPERIF_I2S_FMT_NUM_CH(reader, frame_size / 4 / 2);
+
+ SET_UNIPERIF_I2S_FMT_ORDER_MSB(reader);
+ SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(reader);
+ SET_UNIPERIF_TDM_ENABLE_TDM_ENABLE(reader);
+
+ /*
+ * set the timeslot allocation for words in FIFO
+ *
+ * HW bug: a configuration with (LSB word < MSB word) is not possible;
+ * if (LSB word < MSB word) is wanted, it shall be handled by
+ * the user
+ */
+ sti_uniperiph_get_tdm_word_pos(reader, word_pos);
+ SET_UNIPERIF_TDM_WORD_POS(reader, 1_2, word_pos[WORD_1_2]);
+ SET_UNIPERIF_TDM_WORD_POS(reader, 3_4, word_pos[WORD_3_4]);
+ SET_UNIPERIF_TDM_WORD_POS(reader, 5_6, word_pos[WORD_5_6]);
+ SET_UNIPERIF_TDM_WORD_POS(reader, 7_8, word_pos[WORD_7_8]);
+
+ return 0;
+}
+
+static int uni_reader_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+ struct uniperif *reader = priv->dai_data.uni;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int transfer_size, trigger_limit, ret;
+ int count = 10;
+
+ /* The reader should be stopped */
+ if (reader->state != UNIPERIF_STATE_STOPPED) {
+ dev_err(reader->dev, "%s: invalid reader state %d", __func__,
+ reader->state);
+ return -EINVAL;
+ }
+
+ /* Calculate transfer size (in fifo cells and bytes) for frame count */
+ if (reader->info->type == SND_ST_UNIPERIF_TYPE_TDM) {
+ /* transfer size = user frame size (in 32-bit FIFO cells) */
+ transfer_size =
+ sti_uniperiph_get_user_frame_size(runtime) / 4;
+ } else {
+ transfer_size = runtime->channels * UNIPERIF_FIFO_FRAMES;
+ }
+
+ /* Calculate number of empty cells available before asserting DREQ */
+ if (reader->ver < SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0)
+ trigger_limit = UNIPERIF_FIFO_SIZE - transfer_size;
+ else
+ /*
+ * Since SND_ST_UNIPERIF_VERSION_UNI_PLR_TOP_1_0
+ * FDMA_TRIGGER_LIMIT also controls when the state switches
+ * from OFF or STANDBY to AUDIO DATA.
+ */
+ trigger_limit = transfer_size;
+
+ /* Trigger limit must be an even number */
+ if ((trigger_limit % 2) ||
+ (trigger_limit != 1 && transfer_size % 2) ||
+ (trigger_limit > UNIPERIF_CONFIG_DMA_TRIG_LIMIT_MASK(reader))) {
+ dev_err(reader->dev, "invalid trigger limit %d", trigger_limit);
+ return -EINVAL;
+ }
+
+ SET_UNIPERIF_CONFIG_DMA_TRIG_LIMIT(reader, trigger_limit);
+
+ if (UNIPERIF_TYPE_IS_TDM(reader))
+ ret = uni_reader_prepare_tdm(runtime, reader);
+ else
+ ret = uni_reader_prepare_pcm(runtime, reader);
+ if (ret)
+ return ret;
+
switch (reader->daifmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
SET_UNIPERIF_I2S_FMT_ALIGN_LEFT(reader);
@@ -191,21 +249,26 @@ static int uni_reader_prepare(struct snd_pcm_substream *substream,
return -EINVAL;
}
- SET_UNIPERIF_I2S_FMT_ORDER_MSB(reader);
-
- /* Data clocking (changing) on the rising edge */
- SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(reader);
-
- /* Number of channels must be even */
-
- if ((runtime->channels % 2) || (runtime->channels < 2) ||
- (runtime->channels > 10)) {
- dev_err(reader->dev, "%s: invalid nb of channels", __func__);
- return -EINVAL;
+ /* Data clocking (changing) on the rising/falling edge */
+ switch (reader->daifmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ SET_UNIPERIF_I2S_FMT_LR_POL_LOW(reader);
+ SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(reader);
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ SET_UNIPERIF_I2S_FMT_LR_POL_HIG(reader);
+ SET_UNIPERIF_I2S_FMT_SCLK_EDGE_RISING(reader);
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ SET_UNIPERIF_I2S_FMT_LR_POL_LOW(reader);
+ SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(reader);
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ SET_UNIPERIF_I2S_FMT_LR_POL_HIG(reader);
+ SET_UNIPERIF_I2S_FMT_SCLK_EDGE_FALLING(reader);
+ break;
}
- SET_UNIPERIF_I2S_FMT_NUM_CH(reader, runtime->channels / 2);
-
/* Clear any pending interrupts */
SET_UNIPERIF_ITS_BCLR(reader, GET_UNIPERIF_ITS(reader));
@@ -293,6 +356,32 @@ static int uni_reader_trigger(struct snd_pcm_substream *substream,
}
}
+static int uni_reader_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct sti_uniperiph_data *priv = snd_soc_dai_get_drvdata(dai);
+ struct uniperif *reader = priv->dai_data.uni;
+ int ret;
+
+ if (!UNIPERIF_TYPE_IS_TDM(reader))
+ return 0;
+
+ /* refine hw constraint in tdm mode */
+ ret = snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ sti_uniperiph_fix_tdm_chan,
+ reader, SNDRV_PCM_HW_PARAM_CHANNELS,
+ -1);
+ if (ret < 0)
+ return ret;
+
+ return snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_FORMAT,
+ sti_uniperiph_fix_tdm_format,
+ reader, SNDRV_PCM_HW_PARAM_FORMAT,
+ -1);
+}
+
static void uni_reader_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
@@ -310,6 +399,7 @@ static int uni_reader_parse_dt(struct platform_device *pdev,
{
struct uniperif_info *info;
struct device_node *node = pdev->dev.of_node;
+ const char *mode;
/* Allocate memory for the info structure */
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -322,6 +412,17 @@ static int uni_reader_parse_dt(struct platform_device *pdev,
return -EINVAL;
}
+ /* Read the device mode property */
+ if (of_property_read_string(node, "st,mode", &mode)) {
+ dev_err(&pdev->dev, "uniperipheral mode not defined");
+ return -EINVAL;
+ }
+
+ if (strcasecmp(mode, "tdm") == 0)
+ info->type = SND_ST_UNIPERIF_TYPE_TDM;
+ else
+ info->type = SND_ST_UNIPERIF_TYPE_PCM;
+
/* Save the info structure */
reader->info = info;
@@ -329,11 +430,13 @@ static int uni_reader_parse_dt(struct platform_device *pdev,
}
static const struct snd_soc_dai_ops uni_reader_dai_ops = {
+ .startup = uni_reader_startup,
.shutdown = uni_reader_shutdown,
.prepare = uni_reader_prepare,
.trigger = uni_reader_trigger,
.hw_params = sti_uniperiph_dai_hw_params,
.set_fmt = sti_uniperiph_dai_set_fmt,
+ .set_tdm_slot = sti_uniperiph_set_tdm_slot
};
int uni_reader_init(struct platform_device *pdev,
@@ -343,7 +446,6 @@ int uni_reader_init(struct platform_device *pdev,
reader->dev = &pdev->dev;
reader->state = UNIPERIF_STATE_STOPPED;
- reader->hw = &uni_reader_pcm_hw;
reader->dai_ops = &uni_reader_dai_ops;
ret = uni_reader_parse_dt(pdev, reader);
@@ -352,6 +454,11 @@ int uni_reader_init(struct platform_device *pdev,
return ret;
}
+ if (UNIPERIF_TYPE_IS_TDM(reader))
+ reader->hw = &uni_tdm_hw;
+ else
+ reader->hw = &uni_reader_pcm_hw;
+
ret = devm_request_irq(&pdev->dev, reader->irq,
uni_reader_irq_handler, IRQF_SHARED,
dev_name(&pdev->dev), reader);
diff --git a/tools/Makefile b/tools/Makefile
index 6bf68fe7dd29..f10b64d8c674 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -16,6 +16,7 @@ help:
@echo ' gpio - GPIO tools'
@echo ' hv - tools used when in Hyper-V clients'
@echo ' iio - IIO tools'
+ @echo ' kvm_stat - top-like utility for displaying kvm statistics'
@echo ' lguest - a minimal 32-bit x86 hypervisor'
@echo ' net - misc networking tools'
@echo ' perf - Linux performance measurement and analysis tool'
@@ -110,10 +111,13 @@ tmon_install:
freefall_install:
$(call descend,laptop/$(@:_install=),install)
+kvm_stat_install:
+ $(call descend,kvm/$(@:_install=),install)
+
install: acpi_install cgroup_install cpupower_install hv_install firewire_install lguest_install \
perf_install selftests_install turbostat_install usb_install \
virtio_install vm_install net_install x86_energy_perf_policy_install \
- tmon_install freefall_install objtool_install
+ tmon_install freefall_install objtool_install kvm_stat_install
acpi_clean:
$(call descend,power/acpi,clean)
diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build
index ee566e8bd1cf..27f3583193e6 100644
--- a/tools/build/Makefile.build
+++ b/tools/build/Makefile.build
@@ -58,8 +58,8 @@ quiet_cmd_mkdir = MKDIR $(dir $@)
quiet_cmd_cc_o_c = CC $@
cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $<
-quiet_cmd_cc_i_c = CPP $@
- cmd_cc_i_c = $(CC) $(c_flags) -E -o $@ $<
+quiet_cmd_cpp_i_c = CPP $@
+ cmd_cpp_i_c = $(CC) $(c_flags) -E -o $@ $<
quiet_cmd_cc_s_c = AS $@
cmd_cc_s_c = $(CC) $(c_flags) -S -o $@ $<
@@ -83,11 +83,11 @@ $(OUTPUT)%.o: %.S FORCE
$(OUTPUT)%.i: %.c FORCE
$(call rule_mkdir)
- $(call if_changed_dep,cc_i_c)
+ $(call if_changed_dep,cpp_i_c)
$(OUTPUT)%.s: %.S FORCE
$(call rule_mkdir)
- $(call if_changed_dep,cc_i_c)
+ $(call if_changed_dep,cpp_i_c)
$(OUTPUT)%.s: %.c FORCE
$(call rule_mkdir)
diff --git a/tools/kvm/kvm_stat/Makefile b/tools/kvm/kvm_stat/Makefile
new file mode 100644
index 000000000000..5b1cba57e3b3
--- /dev/null
+++ b/tools/kvm/kvm_stat/Makefile
@@ -0,0 +1,41 @@
+include ../../scripts/Makefile.include
+include ../../scripts/utilities.mak
+BINDIR=usr/bin
+MANDIR=usr/share/man
+MAN1DIR=$(MANDIR)/man1
+
+MAN1=kvm_stat.1
+
+A2X=a2x
+a2x_path := $(call get-executable,$(A2X))
+
+all: man
+
+ifneq ($(findstring $(MAKEFLAGS),s),s)
+ ifneq ($(V),1)
+ QUIET_A2X = @echo ' A2X '$@;
+ endif
+endif
+
+%.1: %.txt
+ifeq ($(a2x_path),)
+ $(error "You need to install asciidoc for man pages")
+else
+ $(QUIET_A2X)$(A2X) --doctype manpage --format manpage $<
+endif
+
+clean:
+ rm -f $(MAN1)
+
+man: $(MAN1)
+
+install-man: man
+ install -d -m 755 $(INSTALL_ROOT)/$(MAN1DIR)
+ install -m 644 kvm_stat.1 $(INSTALL_ROOT)/$(MAN1DIR)
+
+install-tools:
+ install -d -m 755 $(INSTALL_ROOT)/$(BINDIR)
+ install -m 755 -p "kvm_stat" "$(INSTALL_ROOT)/$(BINDIR)/$(TARGET)"
+
+install: install-tools install-man
+.PHONY: all clean man install-tools install-man install
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
new file mode 100755
index 000000000000..581278c58488
--- /dev/null
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -0,0 +1,1127 @@
+#!/usr/bin/python
+#
+# top-like utility for displaying kvm statistics
+#
+# Copyright 2006-2008 Qumranet Technologies
+# Copyright 2008-2011 Red Hat, Inc.
+#
+# Authors:
+# Avi Kivity <avi@redhat.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2. See
+# the COPYING file in the top-level directory.
+"""The kvm_stat module outputs statistics about running KVM VMs
+
+Three different ways of output formatting are available:
+- as a top-like text ui
+- in a key -> value format
+- in an all keys, all values format
+
+The data is sampled from the KVM's debugfs entries and its perf events.
+"""
+
+import curses
+import sys
+import os
+import time
+import optparse
+import ctypes
+import fcntl
+import resource
+import struct
+import re
+from collections import defaultdict
+from time import sleep
+
+VMX_EXIT_REASONS = {
+ 'EXCEPTION_NMI': 0,
+ 'EXTERNAL_INTERRUPT': 1,
+ 'TRIPLE_FAULT': 2,
+ 'PENDING_INTERRUPT': 7,
+ 'NMI_WINDOW': 8,
+ 'TASK_SWITCH': 9,
+ 'CPUID': 10,
+ 'HLT': 12,
+ 'INVLPG': 14,
+ 'RDPMC': 15,
+ 'RDTSC': 16,
+ 'VMCALL': 18,
+ 'VMCLEAR': 19,
+ 'VMLAUNCH': 20,
+ 'VMPTRLD': 21,
+ 'VMPTRST': 22,
+ 'VMREAD': 23,
+ 'VMRESUME': 24,
+ 'VMWRITE': 25,
+ 'VMOFF': 26,
+ 'VMON': 27,
+ 'CR_ACCESS': 28,
+ 'DR_ACCESS': 29,
+ 'IO_INSTRUCTION': 30,
+ 'MSR_READ': 31,
+ 'MSR_WRITE': 32,
+ 'INVALID_STATE': 33,
+ 'MWAIT_INSTRUCTION': 36,
+ 'MONITOR_INSTRUCTION': 39,
+ 'PAUSE_INSTRUCTION': 40,
+ 'MCE_DURING_VMENTRY': 41,
+ 'TPR_BELOW_THRESHOLD': 43,
+ 'APIC_ACCESS': 44,
+ 'EPT_VIOLATION': 48,
+ 'EPT_MISCONFIG': 49,
+ 'WBINVD': 54,
+ 'XSETBV': 55,
+ 'APIC_WRITE': 56,
+ 'INVPCID': 58,
+}
+
+SVM_EXIT_REASONS = {
+ 'READ_CR0': 0x000,
+ 'READ_CR3': 0x003,
+ 'READ_CR4': 0x004,
+ 'READ_CR8': 0x008,
+ 'WRITE_CR0': 0x010,
+ 'WRITE_CR3': 0x013,
+ 'WRITE_CR4': 0x014,
+ 'WRITE_CR8': 0x018,
+ 'READ_DR0': 0x020,
+ 'READ_DR1': 0x021,
+ 'READ_DR2': 0x022,
+ 'READ_DR3': 0x023,
+ 'READ_DR4': 0x024,
+ 'READ_DR5': 0x025,
+ 'READ_DR6': 0x026,
+ 'READ_DR7': 0x027,
+ 'WRITE_DR0': 0x030,
+ 'WRITE_DR1': 0x031,
+ 'WRITE_DR2': 0x032,
+ 'WRITE_DR3': 0x033,
+ 'WRITE_DR4': 0x034,
+ 'WRITE_DR5': 0x035,
+ 'WRITE_DR6': 0x036,
+ 'WRITE_DR7': 0x037,
+ 'EXCP_BASE': 0x040,
+ 'INTR': 0x060,
+ 'NMI': 0x061,
+ 'SMI': 0x062,
+ 'INIT': 0x063,
+ 'VINTR': 0x064,
+ 'CR0_SEL_WRITE': 0x065,
+ 'IDTR_READ': 0x066,
+ 'GDTR_READ': 0x067,
+ 'LDTR_READ': 0x068,
+ 'TR_READ': 0x069,
+ 'IDTR_WRITE': 0x06a,
+ 'GDTR_WRITE': 0x06b,
+ 'LDTR_WRITE': 0x06c,
+ 'TR_WRITE': 0x06d,
+ 'RDTSC': 0x06e,
+ 'RDPMC': 0x06f,
+ 'PUSHF': 0x070,
+ 'POPF': 0x071,
+ 'CPUID': 0x072,
+ 'RSM': 0x073,
+ 'IRET': 0x074,
+ 'SWINT': 0x075,
+ 'INVD': 0x076,
+ 'PAUSE': 0x077,
+ 'HLT': 0x078,
+ 'INVLPG': 0x079,
+ 'INVLPGA': 0x07a,
+ 'IOIO': 0x07b,
+ 'MSR': 0x07c,
+ 'TASK_SWITCH': 0x07d,
+ 'FERR_FREEZE': 0x07e,
+ 'SHUTDOWN': 0x07f,
+ 'VMRUN': 0x080,
+ 'VMMCALL': 0x081,
+ 'VMLOAD': 0x082,
+ 'VMSAVE': 0x083,
+ 'STGI': 0x084,
+ 'CLGI': 0x085,
+ 'SKINIT': 0x086,
+ 'RDTSCP': 0x087,
+ 'ICEBP': 0x088,
+ 'WBINVD': 0x089,
+ 'MONITOR': 0x08a,
+ 'MWAIT': 0x08b,
+ 'MWAIT_COND': 0x08c,
+ 'XSETBV': 0x08d,
+ 'NPF': 0x400,
+}
+
+# EC definition of HSR (from arch/arm64/include/asm/kvm_arm.h)
+AARCH64_EXIT_REASONS = {
+ 'UNKNOWN': 0x00,
+ 'WFI': 0x01,
+ 'CP15_32': 0x03,
+ 'CP15_64': 0x04,
+ 'CP14_MR': 0x05,
+ 'CP14_LS': 0x06,
+ 'FP_ASIMD': 0x07,
+ 'CP10_ID': 0x08,
+ 'CP14_64': 0x0C,
+ 'ILL_ISS': 0x0E,
+ 'SVC32': 0x11,
+ 'HVC32': 0x12,
+ 'SMC32': 0x13,
+ 'SVC64': 0x15,
+ 'HVC64': 0x16,
+ 'SMC64': 0x17,
+ 'SYS64': 0x18,
+ 'IABT': 0x20,
+ 'IABT_HYP': 0x21,
+ 'PC_ALIGN': 0x22,
+ 'DABT': 0x24,
+ 'DABT_HYP': 0x25,
+ 'SP_ALIGN': 0x26,
+ 'FP_EXC32': 0x28,
+ 'FP_EXC64': 0x2C,
+ 'SERROR': 0x2F,
+ 'BREAKPT': 0x30,
+ 'BREAKPT_HYP': 0x31,
+ 'SOFTSTP': 0x32,
+ 'SOFTSTP_HYP': 0x33,
+ 'WATCHPT': 0x34,
+ 'WATCHPT_HYP': 0x35,
+ 'BKPT32': 0x38,
+ 'VECTOR32': 0x3A,
+ 'BRK64': 0x3C,
+}
+
+# From include/uapi/linux/kvm.h, KVM_EXIT_xxx
+USERSPACE_EXIT_REASONS = {
+ 'UNKNOWN': 0,
+ 'EXCEPTION': 1,
+ 'IO': 2,
+ 'HYPERCALL': 3,
+ 'DEBUG': 4,
+ 'HLT': 5,
+ 'MMIO': 6,
+ 'IRQ_WINDOW_OPEN': 7,
+ 'SHUTDOWN': 8,
+ 'FAIL_ENTRY': 9,
+ 'INTR': 10,
+ 'SET_TPR': 11,
+ 'TPR_ACCESS': 12,
+ 'S390_SIEIC': 13,
+ 'S390_RESET': 14,
+ 'DCR': 15,
+ 'NMI': 16,
+ 'INTERNAL_ERROR': 17,
+ 'OSI': 18,
+ 'PAPR_HCALL': 19,
+ 'S390_UCONTROL': 20,
+ 'WATCHDOG': 21,
+ 'S390_TSCH': 22,
+ 'EPR': 23,
+ 'SYSTEM_EVENT': 24,
+}
+
+IOCTL_NUMBERS = {
+ 'SET_FILTER': 0x40082406,
+ 'ENABLE': 0x00002400,
+ 'DISABLE': 0x00002401,
+ 'RESET': 0x00002403,
+}
+
+class Arch(object):
+ """Encapsulates global architecture specific data.
+
+ Contains the performance event open syscall and ioctl numbers, as
+ well as the VM exit reasons for the architecture it runs on.
+
+ """
+ @staticmethod
+ def get_arch():
+ machine = os.uname()[4]
+
+ if machine.startswith('ppc'):
+ return ArchPPC()
+ elif machine.startswith('aarch64'):
+ return ArchA64()
+ elif machine.startswith('s390'):
+ return ArchS390()
+ else:
+ # X86_64
+ for line in open('/proc/cpuinfo'):
+ if not line.startswith('flags'):
+ continue
+
+ flags = line.split()
+ if 'vmx' in flags:
+ return ArchX86(VMX_EXIT_REASONS)
+ if 'svm' in flags:
+ return ArchX86(SVM_EXIT_REASONS)
+ return
+
+class ArchX86(Arch):
+ def __init__(self, exit_reasons):
+ self.sc_perf_evt_open = 298
+ self.ioctl_numbers = IOCTL_NUMBERS
+ self.exit_reasons = exit_reasons
+
+class ArchPPC(Arch):
+ def __init__(self):
+ self.sc_perf_evt_open = 319
+ self.ioctl_numbers = IOCTL_NUMBERS
+ self.ioctl_numbers['ENABLE'] = 0x20002400
+ self.ioctl_numbers['DISABLE'] = 0x20002401
+ self.ioctl_numbers['RESET'] = 0x20002403
+
+ # PPC comes in 32 and 64 bit and some generated ioctl
+ # numbers depend on the wordsize.
+ char_ptr_size = ctypes.sizeof(ctypes.c_char_p)
+ self.ioctl_numbers['SET_FILTER'] = 0x80002406 | char_ptr_size << 16
+ self.exit_reasons = {}
+
+class ArchA64(Arch):
+ def __init__(self):
+ self.sc_perf_evt_open = 241
+ self.ioctl_numbers = IOCTL_NUMBERS
+ self.exit_reasons = AARCH64_EXIT_REASONS
+
+class ArchS390(Arch):
+ def __init__(self):
+ self.sc_perf_evt_open = 331
+ self.ioctl_numbers = IOCTL_NUMBERS
+ self.exit_reasons = None
+
+ARCH = Arch.get_arch()
+
+
+def walkdir(path):
+ """Returns os.walk() data for specified directory.
+
+ As it is only a wrapper it returns the same 3-tuple of (dirpath,
+ dirnames, filenames).
+ """
+ return next(os.walk(path))
+
+
+def parse_int_list(list_string):
+ """Returns an int list from a string of comma separated integers and
+ integer ranges."""
+ integers = []
+ members = list_string.split(',')
+
+ for member in members:
+ if '-' not in member:
+ integers.append(int(member))
+ else:
+ int_range = member.split('-')
+ integers.extend(range(int(int_range[0]),
+ int(int_range[1]) + 1))
+
+ return integers
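For example (stand-alone; the function has no side effects), typical contents of the 'online' cpu file expand to explicit cpu ids:

print parse_int_list("0-3,6,8-9")   # -> [0, 1, 2, 3, 6, 8, 9]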
+
+
+def get_online_cpus():
+ """Returns a list of cpu id integers."""
+ with open('/sys/devices/system/cpu/online') as cpu_list:
+ cpu_string = cpu_list.readline()
+ return parse_int_list(cpu_string)
+
+
+def get_filters():
+ """Returns a dict of trace events, their filter ids and
+ the values that can be filtered.
+
+ Trace events can be filtered for special values by setting a
+ filter string via an ioctl. The string normally has the format
+ identifier==value. For each filter a new event is created, so
+ that the filtered events can be distinguished.
+
+ """
+ filters = {}
+ filters['kvm_userspace_exit'] = ('reason', USERSPACE_EXIT_REASONS)
+ if ARCH.exit_reasons:
+ filters['kvm_exit'] = ('exit_reason', ARCH.exit_reasons)
+ return filters
+
+libc = ctypes.CDLL('libc.so.6', use_errno=True)
+syscall = libc.syscall
+
+class perf_event_attr(ctypes.Structure):
+ """Struct that holds the necessary data to set up a trace event.
+
+ For an extensive explanation see perf_event_open(2) and
+ include/uapi/linux/perf_event.h, struct perf_event_attr
+
+ All fields that are not initialized in the constructor are 0.
+
+ """
+ _fields_ = [('type', ctypes.c_uint32),
+ ('size', ctypes.c_uint32),
+ ('config', ctypes.c_uint64),
+ ('sample_freq', ctypes.c_uint64),
+ ('sample_type', ctypes.c_uint64),
+ ('read_format', ctypes.c_uint64),
+ ('flags', ctypes.c_uint64),
+ ('wakeup_events', ctypes.c_uint32),
+ ('bp_type', ctypes.c_uint32),
+ ('bp_addr', ctypes.c_uint64),
+ ('bp_len', ctypes.c_uint64),
+ ]
+
+ def __init__(self):
+ super(self.__class__, self).__init__()
+ self.type = PERF_TYPE_TRACEPOINT
+ self.size = ctypes.sizeof(self)
+ self.read_format = PERF_FORMAT_GROUP
+
+def perf_event_open(attr, pid, cpu, group_fd, flags):
+ """Wrapper for the sys_perf_evt_open() syscall.
+
+ Used to set up performance events, returns a file descriptor or -1
+ on error.
+
+ Attributes are:
+ - syscall number
+ - struct perf_event_attr *
+ - pid or -1 to monitor all pids
+ - cpu number or -1 to monitor all cpus
+ - The file descriptor of the group leader or -1 to create a group.
+ - flags
+
+ """
+ return syscall(ARCH.sc_perf_evt_open, ctypes.pointer(attr),
+ ctypes.c_int(pid), ctypes.c_int(cpu),
+ ctypes.c_int(group_fd), ctypes.c_long(flags))
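A minimal usage sketch (the tracepoint id is hypothetical; a real call needs root and a debugfs-provided id, as done in Event.setup_event below):

attr = perf_event_attr()
attr.config = 317                          # hypothetical tracepoint id
fd = perf_event_open(attr, -1, 0, -1, 0)   # all pids, cpu 0, new group
if fd == -1:
    err = ctypes.get_errno()
    raise OSError(err, os.strerror(err))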
+
+PERF_TYPE_TRACEPOINT = 2
+PERF_FORMAT_GROUP = 1 << 3
+
+PATH_DEBUGFS_TRACING = '/sys/kernel/debug/tracing'
+PATH_DEBUGFS_KVM = '/sys/kernel/debug/kvm'
+
+class Group(object):
+ """Represents a perf event group."""
+
+ def __init__(self):
+ self.events = []
+
+ def add_event(self, event):
+ self.events.append(event)
+
+ def read(self):
+ """Returns a dict with 'event name: value' for all events in the
+ group.
+
+ Values are read by reading from the file descriptor of the
+ event that is the group leader. See perf_event_open(2) for
+ details.
+
+ Read format for the used event configuration is:
+ struct read_format {
+ u64 nr; /* The number of events */
+ struct {
+ u64 value; /* The value of the event */
+ } values[nr];
+ };
+
+ """
+ length = 8 * (1 + len(self.events))
+ read_format = 'xxxxxxxx' + 'Q' * len(self.events)
+ return dict(zip([event.name for event in self.events],
+ struct.unpack(read_format,
+ os.read(self.events[0].fd, length))))
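A worked example of the read layout (invented counter values): with two events the kernel returns 24 bytes, a u64 nr followed by one u64 per event; the 'xxxxxxxx' pad characters skip nr:

import struct
buf = struct.pack('QQQ', 2, 40000, 1500)        # nr=2, two counters
print struct.unpack('xxxxxxxx' + 'Q' * 2, buf)  # the two counter values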
+
+class Event(object):
+ """Represents a performance event and manages its life cycle."""
+ def __init__(self, name, group, trace_cpu, trace_pid, trace_point,
+ trace_filter, trace_set='kvm'):
+ self.name = name
+ self.fd = None
+ self.setup_event(group, trace_cpu, trace_pid, trace_point,
+ trace_filter, trace_set)
+
+ def __del__(self):
+ """Closes the event's file descriptor.
+
+ As no python file object was created for the file descriptor,
+ python will not reference count the descriptor and will not
+ close it automatically, so we do it ourselves.
+
+ """
+ if self.fd:
+ os.close(self.fd)
+
+ def setup_event_attribute(self, trace_set, trace_point):
+ """Returns an initialized ctype perf_event_attr struct."""
+
+ id_path = os.path.join(PATH_DEBUGFS_TRACING, 'events', trace_set,
+ trace_point, 'id')
+
+ event_attr = perf_event_attr()
+ event_attr.config = int(open(id_path).read())
+ return event_attr
+
+ def setup_event(self, group, trace_cpu, trace_pid, trace_point,
+ trace_filter, trace_set):
+ """Sets up the perf event in Linux.
+
+ Issues the syscall to register the event in the kernel and
+ then sets the optional filter.
+
+ """
+
+ event_attr = self.setup_event_attribute(trace_set, trace_point)
+
+ # First event will be group leader.
+ group_leader = -1
+
+ # All others have to pass the leader's descriptor instead.
+ if group.events:
+ group_leader = group.events[0].fd
+
+ fd = perf_event_open(event_attr, trace_pid,
+ trace_cpu, group_leader, 0)
+ if fd == -1:
+ err = ctypes.get_errno()
+ raise OSError(err, os.strerror(err),
+ 'while calling sys_perf_event_open().')
+
+ if trace_filter:
+ fcntl.ioctl(fd, ARCH.ioctl_numbers['SET_FILTER'],
+ trace_filter)
+
+ self.fd = fd
+
+ def enable(self):
+ """Enables the trace event in the kernel.
+
+ Enabling the group leader makes reading counters from it and the
+ events under it possible.
+
+ """
+ fcntl.ioctl(self.fd, ARCH.ioctl_numbers['ENABLE'], 0)
+
+ def disable(self):
+ """Disables the trace event in the kernel.
+
+ Disabling the group leader makes reading all counters under it
+ impossible.
+
+ """
+ fcntl.ioctl(self.fd, ARCH.ioctl_numbers['DISABLE'], 0)
+
+ def reset(self):
+ """Resets the count of the trace event in the kernel."""
+ fcntl.ioctl(self.fd, ARCH.ioctl_numbers['RESET'], 0)
+
+class TracepointProvider(object):
+ """Data provider for the stats class.
+
+ Manages the events/groups from which it acquires its data.
+
+ """
+ def __init__(self):
+ self.group_leaders = []
+ self.filters = get_filters()
+ self._fields = self.get_available_fields()
+ self._pid = 0
+
+ def get_available_fields(self):
+ """Returns a list of available event's of format 'event name(filter
+ name)'.
+
+ All available events have directories under
+ /sys/kernel/debug/tracing/events/ which export information
+ about the specific event. Therefore, listing the dirs gives us
+ a list of all available events.
+
+ Some events like the vm exit reasons can be filtered for
+ specific values. To account for that, the routine below
+ creates special fields with the following format:
+ event name(filter name)
+
+ """
+ path = os.path.join(PATH_DEBUGFS_TRACING, 'events', 'kvm')
+ fields = walkdir(path)[1]
+ extra = []
+ for field in fields:
+ if field in self.filters:
+ filter_name_, filter_dicts = self.filters[field]
+ for name in filter_dicts:
+ extra.append(field + '(' + name + ')')
+ fields += extra
+ return fields
+
+ def setup_traces(self):
+ """Creates all event and group objects needed to be able to retrieve
+ data."""
+ if self._pid > 0:
+ # Fetch list of all threads of the monitored pid, as qemu
+ # starts a thread for each vcpu.
+ path = os.path.join('/proc', str(self._pid), 'task')
+ groupids = walkdir(path)[1]
+ else:
+ groupids = get_online_cpus()
+
+ # The constant is needed as a buffer for python libs, std
+ # streams and other files that the script opens.
+ newlim = len(groupids) * len(self._fields) + 50
+ try:
+ softlim_, hardlim = resource.getrlimit(resource.RLIMIT_NOFILE)
+
+ if hardlim < newlim:
+ # Now we need CAP_SYS_RESOURCE, to increase the hard limit.
+ resource.setrlimit(resource.RLIMIT_NOFILE, (newlim, newlim))
+ else:
+ # Raising the soft limit is sufficient.
+ resource.setrlimit(resource.RLIMIT_NOFILE, (newlim, hardlim))
+
+ except ValueError:
+ sys.exit("NOFILE rlimit could not be raised to {0}".format(newlim))
+
+ for groupid in groupids:
+ group = Group()
+ for name in self._fields:
+ tracepoint = name
+ tracefilter = None
+ match = re.match(r'(.*)\((.*)\)', name)
+ if match:
+ tracepoint, sub = match.groups()
+ tracefilter = ('%s==%d\0' %
+ (self.filters[tracepoint][0],
+ self.filters[tracepoint][1][sub]))
+
+ # From perf_event_open(2):
+ # pid > 0 and cpu == -1
+ # This measures the specified process/thread on any CPU.
+ #
+ # pid == -1 and cpu >= 0
+ # This measures all processes/threads on the specified CPU.
+ trace_cpu = groupid if self._pid == 0 else -1
+ trace_pid = int(groupid) if self._pid != 0 else -1
+
+ group.add_event(Event(name=name,
+ group=group,
+ trace_cpu=trace_cpu,
+ trace_pid=trace_pid,
+ trace_point=tracepoint,
+ trace_filter=tracefilter))
+
+ self.group_leaders.append(group)
+
+ def available_fields(self):
+ return self.get_available_fields()
+
+ @property
+ def fields(self):
+ return self._fields
+
+ @fields.setter
+ def fields(self, fields):
+ """Enables/disables the (un)wanted events"""
+ self._fields = fields
+ for group in self.group_leaders:
+ for index, event in enumerate(group.events):
+ if event.name in fields:
+ event.reset()
+ event.enable()
+ else:
+ # Do not disable the group leader.
+ # It would disable all of its events.
+ if index != 0:
+ event.disable()
+
+ @property
+ def pid(self):
+ return self._pid
+
+ @pid.setter
+ def pid(self, pid):
+ """Changes the monitored pid by setting new traces."""
+ self._pid = pid
+ # The garbage collector will get rid of all Event/Group
+ # objects and open files after removing the references.
+ self.group_leaders = []
+ self.setup_traces()
+ self.fields = self._fields
+
+ def read(self):
+ """Returns 'event name: current value' for all enabled events."""
+ ret = defaultdict(int)
+ for group in self.group_leaders:
+ for name, val in group.read().iteritems():
+ if name in self._fields:
+ ret[name] += val
+ return ret
+
+class DebugfsProvider(object):
+ """Provides data from the files that KVM creates in the kvm debugfs
+ folder."""
+ def __init__(self):
+ self._fields = self.get_available_fields()
+ self._pid = 0
+ self.do_read = True
+
+ def get_available_fields(self):
+ """"Returns a list of available fields.
+
+ The fields are all available KVM debugfs files
+
+ """
+ return walkdir(PATH_DEBUGFS_KVM)[2]
+
+ @property
+ def fields(self):
+ return self._fields
+
+ @fields.setter
+ def fields(self, fields):
+ self._fields = fields
+
+ @property
+ def pid(self):
+ return self._pid
+
+ @pid.setter
+ def pid(self, pid):
+ if pid != 0:
+ self._pid = pid
+
+ vms = walkdir(PATH_DEBUGFS_KVM)[1]
+ if len(vms) == 0:
+ self.do_read = False
+
+ self.paths = filter(lambda x: "{}-".format(pid) in x, vms)
+
+ else:
+ self.paths = ['']
+ self.do_read = True
+
+ def read(self):
+ """Returns a dict with format:'file name / field -> current value'."""
+ results = {}
+
+ # If no debugfs filtering support is available, then don't read.
+ if not self.do_read:
+ return results
+
+ for path in self.paths:
+ for field in self._fields:
+ results[field] = results.get(field, 0) \
+ + self.read_field(field, path)
+
+ return results
+
+ def read_field(self, field, path):
+ """Returns the value of a single field from a specific VM."""
+ try:
+ return int(open(os.path.join(PATH_DEBUGFS_KVM,
+ path,
+ field))
+ .read())
+ except IOError:
+ return 0
+
+class Stats(object):
+ """Manages the data providers and the data they provide.
+
+ It is used to set filters on the provider's data and collect all
+ provider data.
+
+ """
+ def __init__(self, providers, pid, fields=None):
+ self.providers = providers
+ self._pid_filter = pid
+ self._fields_filter = fields
+ self.values = {}
+ self.update_provider_pid()
+ self.update_provider_filters()
+
+ def update_provider_filters(self):
+ """Propagates fields filters to providers."""
+ def wanted(key):
+ if not self._fields_filter:
+ return True
+ return re.match(self._fields_filter, key) is not None
+
+ # As we reset the counters when updating the fields we can
+ # also clear the cache of old values.
+ self.values = {}
+ for provider in self.providers:
+ provider_fields = [key for key in provider.get_available_fields()
+ if wanted(key)]
+ provider.fields = provider_fields
+
+ def update_provider_pid(self):
+ """Propagates pid filters to providers."""
+ for provider in self.providers:
+ provider.pid = self._pid_filter
+
+ @property
+ def fields_filter(self):
+ return self._fields_filter
+
+ @fields_filter.setter
+ def fields_filter(self, fields_filter):
+ self._fields_filter = fields_filter
+ self.update_provider_filters()
+
+ @property
+ def pid_filter(self):
+ return self._pid_filter
+
+ @pid_filter.setter
+ def pid_filter(self, pid):
+ self._pid_filter = pid
+ self.values = {}
+ self.update_provider_pid()
+
+ def get(self):
+ """Returns a dict with field -> (value, delta to last value) of all
+ provider data."""
+ for provider in self.providers:
+ new = provider.read()
+ for key in provider.fields:
+ oldval = self.values.get(key, (0, 0))
+ newval = new.get(key, 0)
+ newdelta = None
+ if oldval is not None:
+ newdelta = newval - oldval[0]
+ self.values[key] = (newval, newdelta)
+ return self.values
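A stand-alone sketch of the delta bookkeeping above (invented numbers; note that on the first sample the delta equals the total, since the cached old value defaults to 0):

values = {}
def update(key, newval):
    old = values.get(key, (0, 0))
    values[key] = (newval, newval - old[0])

update('kvm_exit', 41230)    # first sample: delta == total
update('kvm_exit', 41730)    # next sample:  delta == 500
print values['kvm_exit']     # -> (41730, 500)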
+
+LABEL_WIDTH = 40
+NUMBER_WIDTH = 10
+
+class Tui(object):
+ """Instruments curses to draw a nice text ui."""
+ def __init__(self, stats):
+ self.stats = stats
+ self.screen = None
+ self.drilldown = False
+ self.update_drilldown()
+
+ def __enter__(self):
+        """Initialises curses for later use. Based on the curses.wrapper
+        implementation from the Python standard library."""
+ self.screen = curses.initscr()
+ curses.noecho()
+ curses.cbreak()
+
+ # The try/catch works around a minor bit of
+ # over-conscientiousness in the curses module, the error
+ # return from C start_color() is ignorable.
+ try:
+ curses.start_color()
+ except:
+ pass
+
+ curses.use_default_colors()
+ return self
+
+ def __exit__(self, *exception):
+        """Resets the terminal to its normal state. Based on the curses.wrapper
+        implementation from the Python standard library."""
+ if self.screen:
+ self.screen.keypad(0)
+ curses.echo()
+ curses.nocbreak()
+ curses.endwin()
+
+ def update_drilldown(self):
+        """Sets or removes a filter that only allows fields without parentheses."""
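+        # Example: with the filter set, a plain field such as 'kvm_exit' is
+        # shown while a drilldown field such as 'kvm_exit(HLT)' is hidden
+        # (the field names here are illustrative).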
+ if not self.stats.fields_filter:
+ self.stats.fields_filter = r'^[^\(]*$'
+
+ elif self.stats.fields_filter == r'^[^\(]*$':
+ self.stats.fields_filter = None
+
+ def update_pid(self, pid):
+ """Propagates pid selection to stats object."""
+ self.stats.pid_filter = pid
+
+ def refresh(self, sleeptime):
+ """Refreshes on-screen data."""
+ self.screen.erase()
+ if self.stats.pid_filter > 0:
+ self.screen.addstr(0, 0, 'kvm statistics - pid {0}'
+ .format(self.stats.pid_filter),
+ curses.A_BOLD)
+ else:
+ self.screen.addstr(0, 0, 'kvm statistics - summary', curses.A_BOLD)
+ self.screen.addstr(2, 1, 'Event')
+ self.screen.addstr(2, 1 + LABEL_WIDTH + NUMBER_WIDTH -
+ len('Total'), 'Total')
+ self.screen.addstr(2, 1 + LABEL_WIDTH + NUMBER_WIDTH + 8 -
+ len('Current'), 'Current')
+ row = 3
+ stats = self.stats.get()
+ def sortkey(x):
+ if stats[x][1]:
+ return (-stats[x][1], -stats[x][0])
+ else:
+ return (0, -stats[x][0])
+ for key in sorted(stats.keys(), key=sortkey):
+
+ if row >= self.screen.getmaxyx()[0]:
+ break
+ values = stats[key]
+ if not values[0] and not values[1]:
+ break
+ col = 1
+ self.screen.addstr(row, col, key)
+ col += LABEL_WIDTH
+ self.screen.addstr(row, col, '%10d' % (values[0],))
+ col += NUMBER_WIDTH
+ if values[1] is not None:
+ self.screen.addstr(row, col, '%8d' % (values[1] / sleeptime,))
+ row += 1
+ self.screen.refresh()
+
+ def show_filter_selection(self):
+ """Draws filter selection mask.
+
+ Asks for a valid regex and sets the fields filter accordingly.
+
+ """
+ while True:
+ self.screen.erase()
+ self.screen.addstr(0, 0,
+ "Show statistics for events matching a regex.",
+ curses.A_BOLD)
+ self.screen.addstr(2, 0,
+ "Current regex: {0}"
+ .format(self.stats.fields_filter))
+ self.screen.addstr(3, 0, "New regex: ")
+ curses.echo()
+ regex = self.screen.getstr()
+ curses.noecho()
+ if len(regex) == 0:
+ return
+ try:
+ re.compile(regex)
+ self.stats.fields_filter = regex
+ return
+ except re.error:
+ continue
+
+ def show_vm_selection(self):
+ """Draws PID selection mask.
+
+ Asks for a pid until a valid pid or 0 has been entered.
+
+ """
+ while True:
+ self.screen.erase()
+ self.screen.addstr(0, 0,
+ 'Show statistics for specific pid.',
+ curses.A_BOLD)
+ self.screen.addstr(1, 0,
+ 'This might limit the shown data to the trace '
+ 'statistics.')
+
+ curses.echo()
+ self.screen.addstr(3, 0, "Pid [0 or pid]: ")
+ pid = self.screen.getstr()
+ curses.noecho()
+
+ try:
+ pid = int(pid)
+
+ if pid == 0:
+ self.update_pid(pid)
+ break
+ else:
+ if not os.path.isdir(os.path.join('/proc/', str(pid))):
+ continue
+ else:
+ self.update_pid(pid)
+ break
+
+ except ValueError:
+ continue
+
+ def show_stats(self):
+ """Refreshes the screen and processes user input."""
+ sleeptime = 0.25
+ while True:
+ self.refresh(sleeptime)
+ curses.halfdelay(int(sleeptime * 10))
+ sleeptime = 3
+ try:
+ char = self.screen.getkey()
+ if char == 'x':
+ self.drilldown = not self.drilldown
+ self.update_drilldown()
+ if char == 'q':
+ break
+ if char == 'f':
+ self.show_filter_selection()
+ if char == 'p':
+ self.show_vm_selection()
+ except KeyboardInterrupt:
+ break
+ except curses.error:
+ continue
+
+def batch(stats):
+ """Prints statistics in a key, value format."""
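+    # The first get() primes the delta baseline; after sleeping one second
+    # the second get() returns per-second deltas in values[1].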
+ s = stats.get()
+ time.sleep(1)
+ s = stats.get()
+ for key in sorted(s.keys()):
+ values = s[key]
+ print '%-42s%10d%10d' % (key, values[0], values[1])
+
+def log(stats):
+    """Prints statistics as a repeating key banner followed by value lines."""
+ keys = sorted(stats.get().iterkeys())
+ def banner():
+ for k in keys:
+ print '%s' % k,
+ print
+ def statline():
+ s = stats.get()
+ for k in keys:
+ print ' %9d' % s[k][1],
+ print
+ line = 0
+ banner_repeat = 20
+ while True:
+ time.sleep(1)
+ if line % banner_repeat == 0:
+ banner()
+ statline()
+ line += 1
+
+def get_options():
+ """Returns processed program arguments."""
+ description_text = """
+This script displays various statistics about VMs running under KVM.
+The statistics are gathered from the KVM debugfs entries and / or the
+currently available perf traces.
+
+The monitoring takes additional CPU cycles and might affect the VMs'
+performance.
+
+Requirements:
+- Access to:
+ /sys/kernel/debug/kvm
+ /sys/kernel/debug/trace/events/*
+ /proc/pid/task
+- /proc/sys/kernel/perf_event_paranoid < 1 if user has no
+ CAP_SYS_ADMIN and perf events are used.
+- CAP_SYS_RESOURCE if the hard limit is not high enough to allow
+ the large number of files that are possibly opened.
+"""
+
+ class PlainHelpFormatter(optparse.IndentedHelpFormatter):
+ def format_description(self, description):
+ if description:
+ return description + "\n"
+ else:
+ return ""
+
+ optparser = optparse.OptionParser(description=description_text,
+ formatter=PlainHelpFormatter())
+ optparser.add_option('-1', '--once', '--batch',
+ action='store_true',
+ default=False,
+ dest='once',
+ help='run in batch mode for one second',
+ )
+ optparser.add_option('-l', '--log',
+ action='store_true',
+ default=False,
+ dest='log',
+ help='run in logging mode (like vmstat)',
+ )
+ optparser.add_option('-t', '--tracepoints',
+ action='store_true',
+ default=False,
+ dest='tracepoints',
+ help='retrieve statistics from tracepoints',
+ )
+ optparser.add_option('-d', '--debugfs',
+ action='store_true',
+ default=False,
+ dest='debugfs',
+ help='retrieve statistics from debugfs',
+ )
+ optparser.add_option('-f', '--fields',
+ action='store',
+ default=None,
+ dest='fields',
+ help='fields to display (regex)',
+ )
+ optparser.add_option('-p', '--pid',
+ action='store',
+ default=0,
+ type=int,
+ dest='pid',
+ help='restrict statistics to pid',
+ )
+ (options, _) = optparser.parse_args(sys.argv)
+ return options
+
+def get_providers(options):
+ """Returns a list of data providers depending on the passed options."""
+ providers = []
+
+ if options.tracepoints:
+ providers.append(TracepointProvider())
+ if options.debugfs:
+ providers.append(DebugfsProvider())
+ if len(providers) == 0:
+ providers.append(TracepointProvider())
+
+ return providers
+
+def check_access(options):
+ """Exits if the current user can't access all needed directories."""
+ if not os.path.exists('/sys/kernel/debug'):
+        sys.stderr.write('Please enable CONFIG_DEBUG_FS in your kernel.\n')
+ sys.exit(1)
+
+ if not os.path.exists(PATH_DEBUGFS_KVM):
+        sys.stderr.write("Please make sure that debugfs is mounted and "
+                         "readable by the current user:\n"
+                         "('mount -t debugfs debugfs /sys/kernel/debug')\n"
+                         "Also ensure that the kvm modules are loaded.\n")
+ sys.exit(1)
+
+ if not os.path.exists(PATH_DEBUGFS_TRACING) and (options.tracepoints
+ or not options.debugfs):
+ sys.stderr.write("Please enable CONFIG_TRACING in your kernel "
+ "when using the option -t (default).\n"
+ "If it is enabled, make {0} readable by the "
+ "current user.\n"
+ .format(PATH_DEBUGFS_TRACING))
+ if options.tracepoints:
+ sys.exit(1)
+
+ sys.stderr.write("Falling back to debugfs statistics!\n")
+ options.debugfs = True
+ sleep(5)
+
+ return options
+
+def main():
+ options = get_options()
+ options = check_access(options)
+
+ if (options.pid > 0 and
+ not os.path.isdir(os.path.join('/proc/',
+ str(options.pid)))):
+        sys.stderr.write('Did you use an (unsupported) tid instead of a pid?\n')
+ sys.exit('Specified pid does not exist.')
+
+ providers = get_providers(options)
+ stats = Stats(providers, options.pid, fields=options.fields)
+
+ if options.log:
+ log(stats)
+ elif not options.once:
+ with Tui(stats) as tui:
+ tui.show_stats()
+ else:
+ batch(stats)
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/kvm/kvm_stat/kvm_stat.txt b/tools/kvm/kvm_stat/kvm_stat.txt
new file mode 100644
index 000000000000..b92a153d7115
--- /dev/null
+++ b/tools/kvm/kvm_stat/kvm_stat.txt
@@ -0,0 +1,63 @@
+kvm_stat(1)
+===========
+
+NAME
+----
+kvm_stat - Report KVM kernel module event counters
+
+SYNOPSIS
+--------
+[verse]
+'kvm_stat' [OPTION]...
+
+DESCRIPTION
+-----------
+kvm_stat prints counts of KVM kernel module trace events. These events signify
+state transitions such as guest mode entry and exit.
+
+This tool is useful for observing guest behavior from the host perspective.
+Conclusions about performance or buggy behavior can often be drawn from the
+output.
+
+The set of KVM kernel module trace events may be specific to the kernel version
+or architecture. It is best to check the KVM kernel module source code for the
+meaning of events.
+
+OPTIONS
+-------
+-1::
+--once::
+--batch::
+ run in batch mode for one second
+
+-l::
+--log::
+ run in logging mode (like vmstat)
+
+-t::
+--tracepoints::
+ retrieve statistics from tracepoints
+
+-d::
+--debugfs::
+ retrieve statistics from debugfs
+
+-p<pid>::
+--pid=<pid>::
+ limit statistics to one virtual machine (pid)
+
+-f<fields>::
+--fields=<fields>::
+ fields to display (regex)
+
+-h::
+--help::
+ show help message
+
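+EXAMPLES
+--------
+The invocations below are illustrative; 1234 stands in for a real VM pid,
+and a user with access to the kvm debugfs is assumed.
+
+Show a continuously updated summary of all VMs:
+
+ # kvm_stat
+
+Print the counters once for a single VM and exit:
+
+ # kvm_stat --once --pid 1234
+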
+SEE ALSO
+--------
+'perf'(1), 'trace-cmd'(1)
+
+AUTHOR
+------
+Stefan Hajnoczi <stefanha@redhat.com>
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 6765c7e949f3..f094f3c4ed84 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -30,6 +30,10 @@ INCLUDES := -I$(srctree)/tools/include
CFLAGS += -Wall -Werror $(EXTRA_WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
LDFLAGS += -lelf $(LIBSUBCMD)
+# Allow old libelf to be used:
+elfshdr := $(shell echo '\#include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)
+CFLAGS += $(if $(elfshdr),,-DLIBELF_USE_DEPRECATED)
+
AWK = awk
export srctree OUTPUT CFLAGS ARCH AWK
include $(srctree)/tools/build/Makefile.include
diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
index 7f3e00a2f907..aa1ff6596684 100644
--- a/tools/objtool/elf.h
+++ b/tools/objtool/elf.h
@@ -23,6 +23,11 @@
#include <linux/list.h>
#include <linux/hashtable.h>
+#ifdef LIBELF_USE_DEPRECATED
+# define elf_getshdrnum elf_getshnum
+# define elf_getshdrstrndx elf_getshstrndx
+#endif
+
struct section {
struct list_head list;
GElf_Shdr sh;
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index ebaf849e30ef..9cbddc290aff 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -103,12 +103,13 @@ OPTIONS
If --branch-stack option is used, following sort keys are also
available:
- dso_from, dso_to, symbol_from, symbol_to, mispredict.
- dso_from: name of library or module branched from
- dso_to: name of library or module branched to
- symbol_from: name of function branched from
- symbol_to: name of function branched to
+ - srcline_from: source file and line branched from
+ - srcline_to: source file and line branched to
- mispredict: "N" for predicted branch, "Y" for mispredicted branch
- in_tx: branch in TSX transaction
- abort: TSX transaction abort.
@@ -248,7 +249,7 @@ OPTIONS
Note that when using the --itrace option the synthesized callchain size
will override this value if the synthesized callchain size is bigger.
- Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise.
+ Default: 127
-G::
--inverted::
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index a856a1095893..4fc44c75263f 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -267,7 +267,7 @@ include::itrace.txt[]
Note that when using the --itrace option the synthesized callchain size
will override this value if the synthesized callchain size is bigger.
- Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise.
+ Default: 127
--ns::
Use 9 decimal places when displaying time (i.e. show the nanoseconds)
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 6afe20121bc0..1ab0782369b1 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -143,7 +143,8 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
Implies '--call-graph dwarf' when --call-graph not present on the
command line, on systems where DWARF unwinding was built in.
- Default: /proc/sys/kernel/perf_event_max_stack when present, 127 otherwise.
+ Default: /proc/sys/kernel/perf_event_max_stack when present for
+ live sessions (without --input/-i), 127 otherwise.
--min-stack::
Set the stack depth limit when parsing the callchain, anything
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 814158393656..25c81734a950 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -324,8 +324,9 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing,
"Skip symbols that cannot be annotated"),
OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"),
- OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
- "Look for files with symbols relative to this directory"),
+ OPT_CALLBACK(0, "symfs", NULL, "directory",
+ "Look for files with symbols relative to this directory",
+ symbol__config_symfs),
OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
"Interleave source code with assembly code (default)"),
OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 632efc6b79a0..d75bded21fe0 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -119,8 +119,8 @@ static int build_id_cache__add_kcore(const char *filename, bool force)
if (build_id_cache__kcore_buildid(from_dir, sbuildid) < 0)
return -1;
- scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s",
- buildid_dir, sbuildid);
+ scnprintf(to_dir, sizeof(to_dir), "%s/%s/%s",
+ buildid_dir, DSO__NAME_KCORE, sbuildid);
if (!force &&
!build_id_cache__kcore_existing(from_dir, to_dir, sizeof(to_dir))) {
@@ -131,8 +131,8 @@ static int build_id_cache__add_kcore(const char *filename, bool force)
if (build_id_cache__kcore_dir(dir, sizeof(dir)))
return -1;
- scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s/%s",
- buildid_dir, sbuildid, dir);
+ scnprintf(to_dir, sizeof(to_dir), "%s/%s/%s/%s",
+ buildid_dir, DSO__NAME_KCORE, sbuildid, dir);
if (mkdir_p(to_dir, 0755))
return -1;
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 9ce354f469dc..f7645a42708e 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -812,8 +812,9 @@ static const struct option options[] = {
OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator",
"separator for columns, no spaces will be added between "
"columns '.' is reserved."),
- OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
- "Look for files with symbols relative to this directory"),
+ OPT_CALLBACK(0, "symfs", NULL, "directory",
+ "Look for files with symbols relative to this directory",
+ symbol__config_symfs),
OPT_UINTEGER('o', "order", &sort_compute, "Specify compute sorting."),
OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
"How to display percentage of filtered entries", parse_filter_percentage),
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index f3679c44d3f3..dc3fcb597e4c 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -40,6 +40,7 @@
#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
+#include <asm/bug.h>
struct record {
@@ -82,27 +83,87 @@ static int process_synthesized_event(struct perf_tool *tool,
return record__write(rec, event, event->header.size);
}
+static int
+backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
+{
+ struct perf_event_header *pheader;
+ u64 evt_head = head;
+ int size = mask + 1;
+
+ pr_debug2("backward_rb_find_range: buf=%p, head=%"PRIx64"\n", buf, head);
+ pheader = (struct perf_event_header *)(buf + (head & mask));
+ *start = head;
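+	/*
+	 * 'head' of a backward ring buffer points at the newest record.
+	 * Walk the records until we either wrap around a full buffer size
+	 * or hit a zero-sized header, which marks the end of valid data.
+	 */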
+ while (true) {
+ if (evt_head - head >= (unsigned int)size) {
+			pr_debug("Finished reading backward ring buffer: rewind\n");
+ if (evt_head - head > (unsigned int)size)
+ evt_head -= pheader->size;
+ *end = evt_head;
+ return 0;
+ }
+
+ pheader = (struct perf_event_header *)(buf + (evt_head & mask));
+
+ if (pheader->size == 0) {
+			pr_debug("Finished reading backward ring buffer: get start\n");
+ *end = evt_head;
+ return 0;
+ }
+
+ evt_head += pheader->size;
+ pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
+ }
+ WARN_ONCE(1, "Shouldn't get here\n");
+ return -1;
+}
+
+static int
+rb_find_range(struct perf_evlist *evlist,
+ void *data, int mask, u64 head, u64 old,
+ u64 *start, u64 *end)
+{
+ if (!evlist->backward) {
+ *start = old;
+ *end = head;
+ return 0;
+ }
+
+ return backward_rb_find_range(data, mask, head, start, end);
+}
+
static int record__mmap_read(struct record *rec, int idx)
{
struct perf_mmap *md = &rec->evlist->mmap[idx];
u64 head = perf_mmap__read_head(md);
u64 old = md->prev;
+ u64 end = head, start = old;
unsigned char *data = md->base + page_size;
unsigned long size;
void *buf;
int rc = 0;
- if (old == head)
+ if (rb_find_range(rec->evlist, data, md->mask, head,
+ old, &start, &end))
+ return -1;
+
+ if (start == end)
return 0;
rec->samples++;
- size = head - old;
+ size = end - start;
+ if (size > (unsigned long)(md->mask) + 1) {
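+		/*
+		 * More bytes than the ring can hold means the kernel
+		 * overwrote records before they were read; skip this chunk
+		 * and resynchronize at the current head.
+		 */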
+ WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
+
+ md->prev = head;
+ perf_evlist__mmap_consume(rec->evlist, idx);
+ return 0;
+ }
- if ((old & md->mask) + size != (head & md->mask)) {
- buf = &data[old & md->mask];
- size = md->mask + 1 - (old & md->mask);
- old += size;
+ if ((start & md->mask) + size != (end & md->mask)) {
+ buf = &data[start & md->mask];
+ size = md->mask + 1 - (start & md->mask);
+ start += size;
if (record__write(rec, buf, size) < 0) {
rc = -1;
@@ -110,16 +171,16 @@ static int record__mmap_read(struct record *rec, int idx)
}
}
- buf = &data[old & md->mask];
- size = head - old;
- old += size;
+ buf = &data[start & md->mask];
+ size = end - start;
+ start += size;
if (record__write(rec, buf, size) < 0) {
rc = -1;
goto out;
}
- md->prev = old;
+ md->prev = head;
perf_evlist__mmap_consume(rec->evlist, idx);
out:
return rc;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 87d40e3c4078..a87cb338bdf1 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -691,7 +691,7 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
.ordered_events = true,
.ordering_requires_timestamps = true,
},
- .max_stack = sysctl_perf_event_max_stack,
+ .max_stack = PERF_MAX_STACK_DEPTH,
.pretty_printing_style = "normal",
.socket_filter = -1,
};
@@ -770,8 +770,9 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
"columns '.' is reserved."),
OPT_BOOLEAN('U', "hide-unresolved", &symbol_conf.hide_unresolved,
"Only display entries resolved to a symbol"),
- OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
- "Look for files with symbols relative to this directory"),
+ OPT_CALLBACK(0, "symfs", NULL, "directory",
+ "Look for files with symbols relative to this directory",
+ symbol__config_symfs),
OPT_STRING('C', "cpu", &report.cpu_list, "cpu",
"list of cpus to profile"),
OPT_BOOLEAN('I', "show-info", &report.show_full_info,
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index efca81679bb3..e3ce2f34d3ad 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -2010,8 +2010,9 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
"file", "kallsyms pathname"),
OPT_BOOLEAN('G', "hide-call-graph", &no_callchain,
"When printing symbols do not display call chain"),
- OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
- "Look for files with symbols relative to this directory"),
+ OPT_CALLBACK(0, "symfs", NULL, "directory",
+ "Look for files with symbols relative to this directory",
+ symbol__config_symfs),
OPT_CALLBACK('F', "fields", NULL, "str",
"comma separated output fields prepend with 'type:'. "
"Valid types: hw,sw,trace,raw. "
@@ -2067,8 +2068,6 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
NULL
};
- scripting_max_stack = sysctl_perf_event_max_stack;
-
setup_scripting();
argc = parse_options_subcommand(argc, argv, options, script_subcommands, script_usage,
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index e459b685a4e9..ee7ada78d86f 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -66,6 +66,7 @@
#include <stdlib.h>
#include <sys/prctl.h>
#include <locale.h>
+#include <math.h>
#define DEFAULT_SEPARATOR " "
#define CNTR_NOT_SUPPORTED "<not supported>"
@@ -991,12 +992,12 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
const char *fmt;
if (csv_output) {
- fmt = sc != 1.0 ? "%.2f%s" : "%.0f%s";
+ fmt = floor(sc) != sc ? "%.2f%s" : "%.0f%s";
} else {
if (big_num)
- fmt = sc != 1.0 ? "%'18.2f%s" : "%'18.0f%s";
+ fmt = floor(sc) != sc ? "%'18.2f%s" : "%'18.0f%s";
else
- fmt = sc != 1.0 ? "%18.2f%s" : "%18.0f%s";
+ fmt = floor(sc) != sc ? "%18.2f%s" : "%18.0f%s";
}
aggr_printout(evsel, id, nr);
@@ -1909,6 +1910,9 @@ static int add_default_attributes(void)
}
if (!evsel_list->nr_entries) {
+ if (target__has_cpu(&target))
+ default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
+
if (perf_evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
return -1;
if (pmu_have_event("cpu", "stalled-cycles-frontend")) {
@@ -2000,7 +2004,7 @@ static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_session *session)
{
- struct stat_round_event *round = &event->stat_round;
+ struct stat_round_event *stat_round = &event->stat_round;
struct perf_evsel *counter;
struct timespec tsh, *ts = NULL;
const char **argv = session->header.env.cmdline_argv;
@@ -2009,12 +2013,12 @@ static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
evlist__for_each(evsel_list, counter)
perf_stat_process_counter(&stat_config, counter);
- if (round->type == PERF_STAT_ROUND_TYPE__FINAL)
- update_stats(&walltime_nsecs_stats, round->time);
+ if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
+ update_stats(&walltime_nsecs_stats, stat_round->time);
- if (stat_config.interval && round->time) {
- tsh.tv_sec = round->time / NSECS_PER_SEC;
- tsh.tv_nsec = round->time % NSECS_PER_SEC;
+ if (stat_config.interval && stat_round->time) {
+ tsh.tv_sec = stat_round->time / NSECS_PER_SEC;
+ tsh.tv_nsec = stat_round->time % NSECS_PER_SEC;
ts = &tsh;
}
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 40cc9bb3506c..733a55422d03 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -1945,8 +1945,9 @@ int cmd_timechart(int argc, const char **argv,
OPT_CALLBACK('p', "process", NULL, "process",
"process selector. Pass a pid or process name.",
parse_process),
- OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
- "Look for files with symbols relative to this directory"),
+ OPT_CALLBACK(0, "symfs", NULL, "directory",
+ "Look for files with symbols relative to this directory",
+ symbol__config_symfs),
OPT_INTEGER('n', "proc-num", &tchart.proc_num,
"min. number of tasks to print"),
OPT_BOOLEAN('t', "topology", &tchart.topology,
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1793da585676..2a6cc254ad0c 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -732,7 +732,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
if (machine__resolve(machine, &al, sample) < 0)
return;
- if (!top->kptr_restrict_warned &&
+ if (!machine->kptr_restrict_warned &&
symbol_conf.kptr_restrict &&
al.cpumode == PERF_RECORD_MISC_KERNEL) {
ui__warning(
@@ -743,7 +743,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
" modules" : "");
if (use_browser <= 0)
sleep(5);
- top->kptr_restrict_warned = true;
+ machine->kptr_restrict_warned = true;
}
if (al.sym == NULL) {
@@ -759,7 +759,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
* --hide-kernel-symbols, even if the user specifies an
* invalid --vmlinux ;-)
*/
- if (!top->kptr_restrict_warned && !top->vmlinux_warned &&
+ if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
if (symbol_conf.vmlinux_name) {
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 6e5c325148e4..5c50fe70d6b3 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -576,84 +576,54 @@ static struct syscall_fmt {
bool hexret;
} syscall_fmts[] = {
{ .name = "access", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */
- [1] = SCA_ACCMODE, /* mode */ }, },
+ .arg_scnprintf = { [1] = SCA_ACCMODE, /* mode */ }, },
{ .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
{ .name = "bpf", .errmsg = true, STRARRAY(0, cmd, bpf_cmd), },
{ .name = "brk", .hexret = true,
.arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
- { .name = "chdir", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
- { .name = "chmod", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
- { .name = "chroot", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
+ { .name = "chdir", .errmsg = true, },
+ { .name = "chmod", .errmsg = true, },
+ { .name = "chroot", .errmsg = true, },
{ .name = "clock_gettime", .errmsg = true, STRARRAY(0, clk_id, clockid), },
{ .name = "clone", .errpid = true, },
{ .name = "close", .errmsg = true,
.arg_scnprintf = { [0] = SCA_CLOSE_FD, /* fd */ }, },
{ .name = "connect", .errmsg = true, },
- { .name = "creat", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
- { .name = "dup", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
- { .name = "dup2", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
- { .name = "dup3", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "creat", .errmsg = true, },
+ { .name = "dup", .errmsg = true, },
+ { .name = "dup2", .errmsg = true, },
+ { .name = "dup3", .errmsg = true, },
{ .name = "epoll_ctl", .errmsg = true, STRARRAY(1, op, epoll_ctl_ops), },
{ .name = "eventfd2", .errmsg = true,
.arg_scnprintf = { [1] = SCA_EFD_FLAGS, /* flags */ }, },
- { .name = "faccessat", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
- [1] = SCA_FILENAME, /* filename */ }, },
- { .name = "fadvise64", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
- { .name = "fallocate", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
- { .name = "fchdir", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
- { .name = "fchmod", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "faccessat", .errmsg = true, },
+ { .name = "fadvise64", .errmsg = true, },
+ { .name = "fallocate", .errmsg = true, },
+ { .name = "fchdir", .errmsg = true, },
+ { .name = "fchmod", .errmsg = true, },
{ .name = "fchmodat", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
- [1] = SCA_FILENAME, /* filename */ }, },
- { .name = "fchown", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+ { .name = "fchown", .errmsg = true, },
{ .name = "fchownat", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
- [1] = SCA_FILENAME, /* filename */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
{ .name = "fcntl", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */
- [1] = SCA_STRARRAY, /* cmd */ },
+ .arg_scnprintf = { [1] = SCA_STRARRAY, /* cmd */ },
.arg_parm = { [1] = &strarray__fcntl_cmds, /* cmd */ }, },
- { .name = "fdatasync", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "fdatasync", .errmsg = true, },
{ .name = "flock", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */
- [1] = SCA_FLOCK, /* cmd */ }, },
- { .name = "fsetxattr", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
- { .name = "fstat", .errmsg = true, .alias = "newfstat",
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
- { .name = "fstatat", .errmsg = true, .alias = "newfstatat",
- .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
- [1] = SCA_FILENAME, /* filename */ }, },
- { .name = "fstatfs", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
- { .name = "fsync", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
- { .name = "ftruncate", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [1] = SCA_FLOCK, /* cmd */ }, },
+ { .name = "fsetxattr", .errmsg = true, },
+ { .name = "fstat", .errmsg = true, .alias = "newfstat", },
+ { .name = "fstatat", .errmsg = true, .alias = "newfstatat", },
+ { .name = "fstatfs", .errmsg = true, },
+ { .name = "fsync", .errmsg = true, },
+ { .name = "ftruncate", .errmsg = true, },
{ .name = "futex", .errmsg = true,
.arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
{ .name = "futimesat", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
- [1] = SCA_FILENAME, /* filename */ }, },
- { .name = "getdents", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
- { .name = "getdents64", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+ { .name = "getdents", .errmsg = true, },
+ { .name = "getdents64", .errmsg = true, },
{ .name = "getitimer", .errmsg = true, STRARRAY(0, which, itimers), },
{ .name = "getpid", .errpid = true, },
{ .name = "getpgid", .errpid = true, },
@@ -661,12 +631,10 @@ static struct syscall_fmt {
{ .name = "getrandom", .errmsg = true,
.arg_scnprintf = { [2] = SCA_GETRANDOM_FLAGS, /* flags */ }, },
{ .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
- { .name = "getxattr", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
- { .name = "inotify_add_watch", .errmsg = true,
- .arg_scnprintf = { [1] = SCA_FILENAME, /* pathname */ }, },
+ { .name = "getxattr", .errmsg = true, },
+ { .name = "inotify_add_watch", .errmsg = true, },
{ .name = "ioctl", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */
+ .arg_scnprintf = {
#if defined(__i386__) || defined(__x86_64__)
/*
* FIXME: Make this available to all arches.
@@ -680,41 +648,28 @@ static struct syscall_fmt {
{ .name = "keyctl", .errmsg = true, STRARRAY(0, option, keyctl_options), },
{ .name = "kill", .errmsg = true,
.arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
- { .name = "lchown", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
- { .name = "lgetxattr", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+ { .name = "lchown", .errmsg = true, },
+ { .name = "lgetxattr", .errmsg = true, },
{ .name = "linkat", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
- { .name = "listxattr", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
- { .name = "llistxattr", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
- { .name = "lremovexattr", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+ { .name = "listxattr", .errmsg = true, },
+ { .name = "llistxattr", .errmsg = true, },
+ { .name = "lremovexattr", .errmsg = true, },
{ .name = "lseek", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */
- [2] = SCA_STRARRAY, /* whence */ },
+ .arg_scnprintf = { [2] = SCA_STRARRAY, /* whence */ },
.arg_parm = { [2] = &strarray__whences, /* whence */ }, },
- { .name = "lsetxattr", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
- { .name = "lstat", .errmsg = true, .alias = "newlstat",
- .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
- { .name = "lsxattr", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+ { .name = "lsetxattr", .errmsg = true, },
+ { .name = "lstat", .errmsg = true, .alias = "newlstat", },
+ { .name = "lsxattr", .errmsg = true, },
{ .name = "madvise", .errmsg = true,
.arg_scnprintf = { [0] = SCA_HEX, /* start */
[2] = SCA_MADV_BHV, /* behavior */ }, },
- { .name = "mkdir", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+ { .name = "mkdir", .errmsg = true, },
{ .name = "mkdirat", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
- [1] = SCA_FILENAME, /* pathname */ }, },
- { .name = "mknod", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
+ { .name = "mknod", .errmsg = true, },
{ .name = "mknodat", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FDAT, /* fd */
- [1] = SCA_FILENAME, /* filename */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* fd */ }, },
{ .name = "mlock", .errmsg = true,
.arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
{ .name = "mlockall", .errmsg = true,
@@ -722,8 +677,7 @@ static struct syscall_fmt {
{ .name = "mmap", .hexret = true,
.arg_scnprintf = { [0] = SCA_HEX, /* addr */
[2] = SCA_MMAP_PROT, /* prot */
- [3] = SCA_MMAP_FLAGS, /* flags */
- [4] = SCA_FD, /* fd */ }, },
+ [3] = SCA_MMAP_FLAGS, /* flags */ }, },
{ .name = "mprotect", .errmsg = true,
.arg_scnprintf = { [0] = SCA_HEX, /* start */
[2] = SCA_MMAP_PROT, /* prot */ }, },
@@ -740,17 +694,14 @@ static struct syscall_fmt {
{ .name = "name_to_handle_at", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
{ .name = "newfstatat", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
- [1] = SCA_FILENAME, /* filename */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
{ .name = "open", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */
- [1] = SCA_OPEN_FLAGS, /* flags */ }, },
+ .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, },
{ .name = "open_by_handle_at", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
[2] = SCA_OPEN_FLAGS, /* flags */ }, },
{ .name = "openat", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
- [1] = SCA_FILENAME, /* filename */
[2] = SCA_OPEN_FLAGS, /* flags */ }, },
{ .name = "perf_event_open", .errmsg = true,
.arg_scnprintf = { [2] = SCA_INT, /* cpu */
@@ -760,39 +711,26 @@ static struct syscall_fmt {
.arg_scnprintf = { [1] = SCA_PIPE_FLAGS, /* flags */ }, },
{ .name = "poll", .errmsg = true, .timeout = true, },
{ .name = "ppoll", .errmsg = true, .timeout = true, },
- { .name = "pread", .errmsg = true, .alias = "pread64",
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
- { .name = "preadv", .errmsg = true, .alias = "pread",
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "pread", .errmsg = true, .alias = "pread64", },
+ { .name = "preadv", .errmsg = true, .alias = "pread", },
{ .name = "prlimit64", .errmsg = true, STRARRAY(1, resource, rlimit_resources), },
- { .name = "pwrite", .errmsg = true, .alias = "pwrite64",
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
- { .name = "pwritev", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
- { .name = "read", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
- { .name = "readlink", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* path */ }, },
+ { .name = "pwrite", .errmsg = true, .alias = "pwrite64", },
+ { .name = "pwritev", .errmsg = true, },
+ { .name = "read", .errmsg = true, },
+ { .name = "readlink", .errmsg = true, },
{ .name = "readlinkat", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
- [1] = SCA_FILENAME, /* pathname */ }, },
- { .name = "readv", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+ { .name = "readv", .errmsg = true, },
{ .name = "recvfrom", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */
- [3] = SCA_MSG_FLAGS, /* flags */ }, },
+ .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
{ .name = "recvmmsg", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */
- [3] = SCA_MSG_FLAGS, /* flags */ }, },
+ .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
{ .name = "recvmsg", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */
- [2] = SCA_MSG_FLAGS, /* flags */ }, },
- { .name = "removexattr", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+ .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
+ { .name = "removexattr", .errmsg = true, },
{ .name = "renameat", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
- { .name = "rmdir", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+ { .name = "rmdir", .errmsg = true, },
{ .name = "rt_sigaction", .errmsg = true,
.arg_scnprintf = { [0] = SCA_SIGNUM, /* sig */ }, },
{ .name = "rt_sigprocmask", .errmsg = true, STRARRAY(0, how, sighow), },
@@ -807,22 +745,17 @@ static struct syscall_fmt {
[1] = SCA_SECCOMP_FLAGS, /* flags */ }, },
{ .name = "select", .errmsg = true, .timeout = true, },
{ .name = "sendmmsg", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */
- [3] = SCA_MSG_FLAGS, /* flags */ }, },
+ .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
{ .name = "sendmsg", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */
- [2] = SCA_MSG_FLAGS, /* flags */ }, },
+ .arg_scnprintf = { [2] = SCA_MSG_FLAGS, /* flags */ }, },
{ .name = "sendto", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */
- [3] = SCA_MSG_FLAGS, /* flags */ }, },
+ .arg_scnprintf = { [3] = SCA_MSG_FLAGS, /* flags */ }, },
{ .name = "set_tid_address", .errpid = true, },
{ .name = "setitimer", .errmsg = true, STRARRAY(0, which, itimers), },
{ .name = "setpgid", .errmsg = true, },
{ .name = "setrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
- { .name = "setxattr", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
- { .name = "shutdown", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "setxattr", .errmsg = true, },
+ { .name = "shutdown", .errmsg = true, },
{ .name = "socket", .errmsg = true,
.arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
[1] = SCA_SK_TYPE, /* type */ },
@@ -831,10 +764,8 @@ static struct syscall_fmt {
.arg_scnprintf = { [0] = SCA_STRARRAY, /* family */
[1] = SCA_SK_TYPE, /* type */ },
.arg_parm = { [0] = &strarray__socket_families, /* family */ }, },
- { .name = "stat", .errmsg = true, .alias = "newstat",
- .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
- { .name = "statfs", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* pathname */ }, },
+ { .name = "stat", .errmsg = true, .alias = "newstat", },
+ { .name = "statfs", .errmsg = true, },
{ .name = "swapoff", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FILENAME, /* specialfile */ }, },
{ .name = "swapon", .errmsg = true,
@@ -845,29 +776,21 @@ static struct syscall_fmt {
.arg_scnprintf = { [2] = SCA_SIGNUM, /* sig */ }, },
{ .name = "tkill", .errmsg = true,
.arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
- { .name = "truncate", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* path */ }, },
+ { .name = "truncate", .errmsg = true, },
{ .name = "uname", .errmsg = true, .alias = "newuname", },
{ .name = "unlinkat", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
- [1] = SCA_FILENAME, /* pathname */ }, },
- { .name = "utime", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */ }, },
+ { .name = "utime", .errmsg = true, },
{ .name = "utimensat", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */
- [1] = SCA_FILENAME, /* filename */ }, },
- { .name = "utimes", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FILENAME, /* filename */ }, },
- { .name = "vmsplice", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ .arg_scnprintf = { [0] = SCA_FDAT, /* dirfd */ }, },
+ { .name = "utimes", .errmsg = true, },
+ { .name = "vmsplice", .errmsg = true, },
{ .name = "wait4", .errpid = true,
.arg_scnprintf = { [2] = SCA_WAITID_OPTIONS, /* options */ }, },
{ .name = "waitid", .errpid = true,
.arg_scnprintf = { [3] = SCA_WAITID_OPTIONS, /* options */ }, },
- { .name = "write", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
- { .name = "writev", .errmsg = true,
- .arg_scnprintf = { [0] = SCA_FD, /* fd */ }, },
+ { .name = "write", .errmsg = true, },
+ { .name = "writev", .errmsg = true, },
};
static int syscall_fmt__cmp(const void *name, const void *fmtp)
@@ -1160,6 +1083,24 @@ static int trace__tool_process(struct perf_tool *tool,
return trace__process_event(trace, machine, event, sample);
}
+static char *trace__machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
+{
+ struct machine *machine = vmachine;
+
+ if (machine->kptr_restrict_warned)
+ return NULL;
+
+ if (symbol_conf.kptr_restrict) {
+ pr_warning("Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
+ "Check /proc/sys/kernel/kptr_restrict.\n\n"
+ "Kernel samples will not be resolved.\n");
+ machine->kptr_restrict_warned = true;
+ return NULL;
+ }
+
+ return machine__resolve_kernel_addr(vmachine, addrp, modp);
+}
+
static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
{
int err = symbol__init(NULL);
@@ -1171,7 +1112,7 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
if (trace->host == NULL)
return -ENOMEM;
- if (trace_event__register_resolver(trace->host, machine__resolve_kernel_addr) < 0)
+ if (trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr) < 0)
return -errno;
err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
@@ -1186,7 +1127,7 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
static int syscall__set_arg_fmts(struct syscall *sc)
{
struct format_field *field;
- int idx = 0;
+ int idx = 0, len;
sc->arg_scnprintf = calloc(sc->nr_args, sizeof(void *));
if (sc->arg_scnprintf == NULL)
@@ -1198,12 +1139,31 @@ static int syscall__set_arg_fmts(struct syscall *sc)
for (field = sc->args; field; field = field->next) {
if (sc->fmt && sc->fmt->arg_scnprintf[idx])
sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
+ else if (strcmp(field->type, "const char *") == 0 &&
+ (strcmp(field->name, "filename") == 0 ||
+ strcmp(field->name, "path") == 0 ||
+ strcmp(field->name, "pathname") == 0))
+ sc->arg_scnprintf[idx] = SCA_FILENAME;
else if (field->flags & FIELD_IS_POINTER)
sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex;
else if (strcmp(field->type, "pid_t") == 0)
sc->arg_scnprintf[idx] = SCA_PID;
else if (strcmp(field->type, "umode_t") == 0)
sc->arg_scnprintf[idx] = SCA_MODE_T;
+ else if ((strcmp(field->type, "int") == 0 ||
+ strcmp(field->type, "unsigned int") == 0 ||
+ strcmp(field->type, "long") == 0) &&
+ (len = strlen(field->name)) >= 2 &&
+ strcmp(field->name + len - 2, "fd") == 0) {
+ /*
+ * /sys/kernel/tracing/events/syscalls/sys_enter*
+ * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
+ * 65 int
+ * 23 unsigned int
+ * 7 unsigned long
+ */
+ sc->arg_scnprintf[idx] = SCA_FD;
+ }
++idx;
}
@@ -1534,7 +1494,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
if (sc->is_exit) {
if (!(trace->duration_filter || trace->summary_only || trace->min_stack)) {
trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
- fprintf(trace->output, "%-70s\n", ttrace->entry_str);
+ fprintf(trace->output, "%-70s)\n", ttrace->entry_str);
}
} else {
ttrace->entry_pending = true;
@@ -2887,12 +2847,12 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
mmap_pages_user_set = false;
if (trace.max_stack == UINT_MAX) {
- trace.max_stack = sysctl_perf_event_max_stack;
+ trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl_perf_event_max_stack;
max_stack_user_set = false;
}
#ifdef HAVE_DWARF_UNWIND_SUPPORT
- if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled)
+ if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled && trace.trace_syscalls)
record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
#endif
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 797000842d40..15982cee5ef3 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -549,6 +549,9 @@ int main(int argc, const char **argv)
if (sysctl__read_int("kernel/perf_event_max_stack", &value) == 0)
sysctl_perf_event_max_stack = value;
+ if (sysctl__read_int("kernel/perf_event_max_contexts_per_stack", &value) == 0)
+ sysctl_perf_event_max_contexts_per_stack = value;
+
cmd = extract_argv0_path(argv[0]);
if (!cmd)
cmd = "perf-help";
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 4db73d5a0dbc..7e5a1e8874ce 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -354,9 +354,6 @@ static struct ins_ops nop_ops = {
.scnprintf = nop__scnprintf,
};
-/*
- * Must be sorted by name!
- */
static struct ins instructions[] = {
{ .name = "add", .ops = &mov_ops, },
{ .name = "addl", .ops = &mov_ops, },
@@ -372,8 +369,8 @@ static struct ins instructions[] = {
{ .name = "bgt", .ops = &jump_ops, },
{ .name = "bhi", .ops = &jump_ops, },
{ .name = "bl", .ops = &call_ops, },
- { .name = "blt", .ops = &jump_ops, },
{ .name = "bls", .ops = &jump_ops, },
+ { .name = "blt", .ops = &jump_ops, },
{ .name = "blx", .ops = &call_ops, },
{ .name = "bne", .ops = &jump_ops, },
#endif
@@ -449,18 +446,39 @@ static struct ins instructions[] = {
{ .name = "xbeginq", .ops = &jump_ops, },
};
-static int ins__cmp(const void *name, const void *insp)
+static int ins__key_cmp(const void *name, const void *insp)
{
const struct ins *ins = insp;
return strcmp(name, ins->name);
}
+static int ins__cmp(const void *a, const void *b)
+{
+ const struct ins *ia = a;
+ const struct ins *ib = b;
+
+ return strcmp(ia->name, ib->name);
+}
+
+static void ins__sort(void)
+{
+ const int nmemb = ARRAY_SIZE(instructions);
+
+ qsort(instructions, nmemb, sizeof(struct ins), ins__cmp);
+}
+
static struct ins *ins__find(const char *name)
{
const int nmemb = ARRAY_SIZE(instructions);
+ static bool sorted;
+
+ if (!sorted) {
+ ins__sort();
+ sorted = true;
+ }
- return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__cmp);
+ return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__key_cmp);
}
int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym)
@@ -1122,7 +1140,7 @@ int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
} else if (dso__is_kcore(dso)) {
goto fallback;
} else if (readlink(symfs_filename, command, sizeof(command)) < 0 ||
- strstr(command, "[kernel.kallsyms]") ||
+ strstr(command, DSO__NAME_KALLSYMS) ||
access(symfs_filename, R_OK)) {
free(filename);
fallback:
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index bff425e1232c..67e5966503b2 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -256,7 +256,7 @@ static int machine__write_buildid_table(struct machine *machine, int fd)
size_t name_len;
bool in_kernel = false;
- if (!pos->hit)
+ if (!pos->hit && !dso__is_vdso(pos))
continue;
if (dso__is_vdso(pos)) {
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index 8d96c80cc67e..c9a6dc173e74 100644
--- a/tools/perf/util/db-export.c
+++ b/tools/perf/util/db-export.c
@@ -298,8 +298,7 @@ static struct call_path *call_path_from_sample(struct db_export *dbe,
*/
callchain_param.order = ORDER_CALLER;
err = thread__resolve_callchain(thread, &callchain_cursor, evsel,
- sample, NULL, NULL,
- sysctl_perf_event_max_stack);
+ sample, NULL, NULL, PERF_MAX_STACK_DEPTH);
if (err) {
callchain_param.order = saved_order;
return NULL;
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 3357479082ca..5d286f5d7906 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -7,6 +7,7 @@
#include "auxtrace.h"
#include "util.h"
#include "debug.h"
+#include "vdso.h"
char dso__symtab_origin(const struct dso *dso)
{
@@ -62,9 +63,7 @@ int dso__read_binary_type_filename(const struct dso *dso,
}
break;
case DSO_BINARY_TYPE__BUILD_ID_CACHE:
- /* skip the locally configured cache if a symfs is given */
- if (symbol_conf.symfs[0] ||
- (dso__build_id_filename(dso, filename, size) == NULL))
+ if (dso__build_id_filename(dso, filename, size) == NULL)
ret = -1;
break;
@@ -1169,7 +1168,7 @@ bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
struct dso *pos;
list_for_each_entry(pos, head, node) {
- if (with_hits && !pos->hit)
+ if (with_hits && !pos->hit && !dso__is_vdso(pos))
continue;
if (pos->has_build_id) {
have_build_id = true;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index c4bfe11479a0..e82ba90cc969 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -44,6 +44,7 @@ void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
perf_evlist__set_maps(evlist, cpus, threads);
fdarray__init(&evlist->pollfd, 64);
evlist->workload.pid = -1;
+ evlist->backward = false;
}
struct perf_evlist *perf_evlist__new(void)
@@ -679,6 +680,33 @@ static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
return NULL;
}
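+/*
+ * PERF_EVENT_IOC_PAUSE_OUTPUT asks the kernel to stop writing new records
+ * into a ring buffer, so a backward buffer can be read without racing
+ * against the producer.
+ */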
+static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
+{
+ int i;
+
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ int fd = evlist->mmap[i].fd;
+ int err;
+
+ if (fd < 0)
+ continue;
+ err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+int perf_evlist__pause(struct perf_evlist *evlist)
+{
+ return perf_evlist__set_paused(evlist, true);
+}
+
+int perf_evlist__resume(struct perf_evlist *evlist)
+{
+ return perf_evlist__set_paused(evlist, false);
+}
+
/* When check_messup is true, 'end' must point to a good entry */
static union perf_event *
perf_mmap__read(struct perf_mmap *md, bool check_messup, u64 start,
@@ -881,6 +909,7 @@ static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
if (evlist->mmap[idx].base != NULL) {
munmap(evlist->mmap[idx].base, evlist->mmap_len);
evlist->mmap[idx].base = NULL;
+ evlist->mmap[idx].fd = -1;
atomic_set(&evlist->mmap[idx].refcnt, 0);
}
auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap);
@@ -901,10 +930,14 @@ void perf_evlist__munmap(struct perf_evlist *evlist)
static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
+ int i;
+
evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
if (cpu_map__empty(evlist->cpus))
evlist->nr_mmaps = thread_map__nr(evlist->threads);
evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
+ for (i = 0; i < evlist->nr_mmaps; i++)
+ evlist->mmap[i].fd = -1;
return evlist->mmap != NULL ? 0 : -ENOMEM;
}
@@ -941,6 +974,7 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
evlist->mmap[idx].base = NULL;
return -1;
}
+ evlist->mmap[idx].fd = fd;
if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap,
&mp->auxtrace_mp, evlist->mmap[idx].base, fd))
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 85d1b59802e8..d740fb877ab6 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -28,6 +28,7 @@ struct record_opts;
struct perf_mmap {
void *base;
int mask;
+ int fd;
atomic_t refcnt;
u64 prev;
struct auxtrace_mmap auxtrace_mmap;
@@ -43,6 +44,7 @@ struct perf_evlist {
bool overwrite;
bool enabled;
bool has_user_cpus;
+ bool backward;
size_t mmap_len;
int id_pos;
int is_pos;
@@ -135,6 +137,8 @@ void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx);
void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
+int perf_evlist__pause(struct perf_evlist *evlist);
+int perf_evlist__resume(struct perf_evlist *evlist);
int perf_evlist__open(struct perf_evlist *evlist);
void perf_evlist__close(struct perf_evlist *evlist);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 52c7d8884741..5d7037ef7d3b 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -37,6 +37,7 @@ static struct {
bool clockid;
bool clockid_wrong;
bool lbr_flags;
+ bool write_backward;
} perf_missing_features;
static clockid_t clockid;
@@ -1376,6 +1377,8 @@ fallback_missing_features:
if (perf_missing_features.lbr_flags)
evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
PERF_SAMPLE_BRANCH_NO_CYCLES);
+ if (perf_missing_features.write_backward)
+ evsel->attr.write_backward = false;
retry_sample_id:
if (perf_missing_features.sample_id_all)
evsel->attr.sample_id_all = 0;
@@ -1438,6 +1441,12 @@ retry_open:
err = -EINVAL;
goto out_close;
}
+
+ if (evsel->overwrite &&
+ perf_missing_features.write_backward) {
+ err = -EINVAL;
+ goto out_close;
+ }
}
}
@@ -1500,6 +1509,10 @@ try_fallback:
PERF_SAMPLE_BRANCH_NO_FLAGS))) {
perf_missing_features.lbr_flags = true;
goto fallback_missing_features;
+ } else if (!perf_missing_features.write_backward &&
+ evsel->attr.write_backward) {
+ perf_missing_features.write_backward = true;
+ goto fallback_missing_features;
}
out_close:
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 8a644fef452c..c1f10159804c 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -112,6 +112,7 @@ struct perf_evsel {
bool tracking;
bool per_pkg;
bool precise_max;
+ bool overwrite;
/* parse modifier helper */
int exclude_GH;
int nr_members;
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index cfab531437c7..d1f19e0012d4 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -117,6 +117,13 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
}
+
+ if (h->branch_info->srcline_from)
+ hists__new_col_len(hists, HISTC_SRCLINE_FROM,
+ strlen(h->branch_info->srcline_from));
+ if (h->branch_info->srcline_to)
+ hists__new_col_len(hists, HISTC_SRCLINE_TO,
+ strlen(h->branch_info->srcline_to));
}
if (h->mem_info) {
@@ -1042,6 +1049,8 @@ void hist_entry__delete(struct hist_entry *he)
if (he->branch_info) {
map__zput(he->branch_info->from.map);
map__zput(he->branch_info->to.map);
+ free_srcline(he->branch_info->srcline_from);
+ free_srcline(he->branch_info->srcline_to);
zfree(&he->branch_info);
}
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 0f84bfb42bb1..7b54ccf1b737 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -52,6 +52,8 @@ enum hist_column {
HISTC_MEM_IADDR_SYMBOL,
HISTC_TRANSACTION,
HISTC_CYCLES,
+ HISTC_SRCLINE_FROM,
+ HISTC_SRCLINE_TO,
HISTC_TRACE,
HISTC_NR_COLS, /* Last entry */
};
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index f9644f79686c..b1772180c820 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -43,6 +43,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
machine->symbol_filter = NULL;
machine->id_hdr_size = 0;
+ machine->kptr_restrict_warned = false;
machine->comm_exec = false;
machine->kernel_start = 0;
@@ -709,7 +710,7 @@ static struct dso *machine__get_kernel(struct machine *machine)
if (machine__is_host(machine)) {
vmlinux_name = symbol_conf.vmlinux_name;
if (!vmlinux_name)
- vmlinux_name = "[kernel.kallsyms]";
+ vmlinux_name = DSO__NAME_KALLSYMS;
kernel = machine__findnew_kernel(machine, vmlinux_name,
"[kernel]", DSO_TYPE_KERNEL);
@@ -1135,10 +1136,10 @@ int machine__create_kernel_maps(struct machine *machine)
{
struct dso *kernel = machine__get_kernel(machine);
const char *name;
- u64 addr = machine__get_running_kernel_start(machine, &name);
+ u64 addr;
int ret;
- if (!addr || kernel == NULL)
+ if (kernel == NULL)
return -1;
ret = __machine__create_kernel_maps(machine, kernel);
@@ -1160,8 +1161,9 @@ int machine__create_kernel_maps(struct machine *machine)
*/
map_groups__fixup_end(&machine->kmaps);
- if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
- addr)) {
+ addr = machine__get_running_kernel_start(machine, &name);
+ if (!addr) {
+ } else if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) {
machine__destroy_kernel_maps(machine);
return -1;
}
@@ -1769,11 +1771,6 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
*/
int mix_chain_nr = i + 1 + lbr_nr + 1;
- if (mix_chain_nr > (int)sysctl_perf_event_max_stack + PERF_MAX_BRANCH_DEPTH) {
- pr_warning("corrupted callchain. skipping...\n");
- return 0;
- }
-
for (j = 0; j < mix_chain_nr; j++) {
if (callchain_param.order == ORDER_CALLEE) {
if (j < i + 1)
@@ -1811,9 +1808,9 @@ static int thread__resolve_callchain_sample(struct thread *thread,
{
struct branch_stack *branch = sample->branch_stack;
struct ip_callchain *chain = sample->callchain;
- int chain_nr = min(max_stack, (int)chain->nr);
+ int chain_nr = chain->nr;
u8 cpumode = PERF_RECORD_MISC_USER;
- int i, j, err;
+ int i, j, err, nr_entries;
int skip_idx = -1;
int first_call = 0;
@@ -1828,8 +1825,7 @@ static int thread__resolve_callchain_sample(struct thread *thread,
* Based on DWARF debug information, some architectures skip
* a callchain entry saved by the kernel.
*/
- if (chain->nr < sysctl_perf_event_max_stack)
- skip_idx = arch_skip_callchain_idx(thread, chain);
+ skip_idx = arch_skip_callchain_idx(thread, chain);
/*
* Add branches to call stack for easier browsing. This gives
@@ -1889,12 +1885,8 @@ static int thread__resolve_callchain_sample(struct thread *thread,
}
check_calls:
- if (chain->nr > sysctl_perf_event_max_stack && (int)chain->nr > max_stack) {
- pr_warning("corrupted callchain. skipping...\n");
- return 0;
- }
-
- for (i = first_call; i < chain_nr; i++) {
+ for (i = first_call, nr_entries = 0;
+ i < chain_nr && nr_entries < max_stack; i++) {
u64 ip;
if (callchain_param.order == ORDER_CALLEE)
@@ -1908,6 +1900,9 @@ check_calls:
#endif
ip = chain->ips[j];
+ if (ip < PERF_CONTEXT_MAX)
+ ++nr_entries;
+
err = add_callchain_ip(thread, cursor, parent, root_al, &cpumode, ip);
if (err)
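
The rewritten loop above bounds the number of histogram entries by max_stack while still walking every raw slot, because PERF_CONTEXT_* markers (PERF_CONTEXT_KERNEL, PERF_CONTEXT_USER, ...) are interleaved with real addresses and encoded as huge u64 values at or above PERF_CONTEXT_MAX. A standalone illustration of the counting rule:

    #include <linux/perf_event.h>   /* PERF_CONTEXT_MAX, PERF_CONTEXT_* */
    #include <linux/types.h>

    /* Count only real IPs: context markers must not eat into the
     * user-visible max_stack budget. */
    static int count_real_entries(const __u64 *ips, int nr, int max_stack)
    {
            int i, nr_entries = 0;

            for (i = 0; i < nr && nr_entries < max_stack; i++)
                    if (ips[i] < PERF_CONTEXT_MAX)
                            nr_entries++;

            return nr_entries;
    }
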
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 83f46790c52f..41ac9cfd416b 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -28,6 +28,7 @@ struct machine {
pid_t pid;
u16 id_hdr_size;
bool comm_exec;
+ bool kptr_restrict_warned;
char *root_dir;
struct rb_root threads;
pthread_rwlock_t threads_lock;
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 62c7f6988e0e..5d1eb1ccd96c 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -264,8 +264,7 @@ static SV *perl_process_callchain(struct perf_sample *sample,
goto exit;
if (thread__resolve_callchain(al->thread, &callchain_cursor, evsel,
- sample, NULL, NULL,
- sysctl_perf_event_max_stack) != 0) {
+ sample, NULL, NULL, scripting_max_stack) != 0) {
pr_err("Failed to resolve callchain. Skipping\n");
goto exit;
}
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 20e69edd5006..c4e9bd70723c 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -353,6 +353,88 @@ struct sort_entry sort_srcline = {
.se_width_idx = HISTC_SRCLINE,
};
+/* --sort srcline_from */
+
+static int64_t
+sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ if (!left->branch_info->srcline_from) {
+ struct map *map = left->branch_info->from.map;
+ if (!map)
+ left->branch_info->srcline_from = SRCLINE_UNKNOWN;
+ else
+ left->branch_info->srcline_from = get_srcline(map->dso,
+ map__rip_2objdump(map,
+ left->branch_info->from.al_addr),
+ left->branch_info->from.sym, true);
+ }
+ if (!right->branch_info->srcline_from) {
+ struct map *map = right->branch_info->from.map;
+ if (!map)
+ right->branch_info->srcline_from = SRCLINE_UNKNOWN;
+ else
+ right->branch_info->srcline_from = get_srcline(map->dso,
+ map__rip_2objdump(map,
+ right->branch_info->from.al_addr),
+ right->branch_info->from.sym, true);
+ }
+ return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
+}
+
+static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
+}
+
+struct sort_entry sort_srcline_from = {
+ .se_header = "From Source:Line",
+ .se_cmp = sort__srcline_from_cmp,
+ .se_snprintf = hist_entry__srcline_from_snprintf,
+ .se_width_idx = HISTC_SRCLINE_FROM,
+};
+
+/* --sort srcline_to */
+
+static int64_t
+sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ if (!left->branch_info->srcline_to) {
+ struct map *map = left->branch_info->to.map;
+ if (!map)
+ left->branch_info->srcline_to = SRCLINE_UNKNOWN;
+ else
+ left->branch_info->srcline_to = get_srcline(map->dso,
+ map__rip_2objdump(map,
+ left->branch_info->to.al_addr),
+					left->branch_info->to.sym, true);
+ }
+ if (!right->branch_info->srcline_to) {
+ struct map *map = right->branch_info->to.map;
+ if (!map)
+ right->branch_info->srcline_to = SRCLINE_UNKNOWN;
+ else
+ right->branch_info->srcline_to = get_srcline(map->dso,
+ map__rip_2objdump(map,
+ right->branch_info->to.al_addr),
+ right->branch_info->to.sym, true);
+ }
+ return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
+}
+
+static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
+}
+
+struct sort_entry sort_srcline_to = {
+ .se_header = "To Source:Line",
+ .se_cmp = sort__srcline_to_cmp,
+ .se_snprintf = hist_entry__srcline_to_snprintf,
+ .se_width_idx = HISTC_SRCLINE_TO,
+};
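
Both comparators above compute the srcline lazily and cache it in branch_info, so the expensive addr2line-style lookup runs at most once per entry; the matching free_srcline() calls appear in the hist.c hunk earlier. The generic shape of that compute-once pattern (names here are illustrative, not perf's):

    #include <string.h>

    struct entry {
            unsigned long addr;
            char *srcline;          /* NULL until first comparison */
    };

    /* hypothetical lookup standing in for get_srcline() */
    extern char *lookup_srcline(unsigned long addr);

    static const char *entry_srcline(struct entry *e)
    {
            if (!e->srcline)
                    e->srcline = lookup_srcline(e->addr); /* compute once, cache */
            return e->srcline;
    }

    static int entry_cmp(struct entry *a, struct entry *b)
    {
            return strcmp(entry_srcline(b), entry_srcline(a));
    }
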
+
/* --sort srcfile */
static char no_srcfile[1];
@@ -1347,6 +1429,8 @@ static struct sort_dimension bstack_sort_dimensions[] = {
DIM(SORT_IN_TX, "in_tx", sort_in_tx),
DIM(SORT_ABORT, "abort", sort_abort),
DIM(SORT_CYCLES, "cycles", sort_cycles),
+ DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
+ DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
};
#undef DIM
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 42927f448bcb..ebb59cacd092 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -215,6 +215,8 @@ enum sort_type {
SORT_ABORT,
SORT_IN_TX,
SORT_CYCLES,
+ SORT_SRCLINE_FROM,
+ SORT_SRCLINE_TO,
/* memory mode specific sort keys */
__SORT_MEMORY_MODE,
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index fdb71961143e..aa9efe08762b 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -94,7 +94,8 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
{
int ctx = evsel_context(counter);
- if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
+ if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) ||
+ perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK))
update_stats(&runtime_nsecs_stats[cpu], count[0]);
else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
@@ -188,7 +189,7 @@ static void print_stalled_cycles_backend(int cpu,
color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);
- out->print_metric(out->ctx, color, "%6.2f%%", "backend cycles idle", ratio);
+ out->print_metric(out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
}
static void print_branch_misses(int cpu,
@@ -444,7 +445,8 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
ratio = total / avg;
print_metric(ctxp, NULL, "%8.0f", "cycles / elision", ratio);
- } else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK)) {
+ } else if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK) ||
+ perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK)) {
if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
print_metric(ctxp, NULL, "%8.3f", "CPUs utilized",
avg / ratio);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 7fb33304fb4e..20f9cb32b703 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1662,8 +1662,8 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
- scnprintf(path, sizeof(path), "%s/[kernel.kcore]/%s", buildid_dir,
- sbuild_id);
+ scnprintf(path, sizeof(path), "%s/%s/%s", buildid_dir,
+ DSO__NAME_KCORE, sbuild_id);
/* Use /proc/kallsyms if possible */
if (is_host) {
@@ -1699,8 +1699,8 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
if (!find_matching_kcore(map, path, sizeof(path)))
return strdup(path);
- scnprintf(path, sizeof(path), "%s/[kernel.kallsyms]/%s",
- buildid_dir, sbuild_id);
+ scnprintf(path, sizeof(path), "%s/%s/%s",
+ buildid_dir, DSO__NAME_KALLSYMS, sbuild_id);
if (access(path, F_OK)) {
pr_err("No kallsyms or vmlinux with build-id %s was found\n",
@@ -1769,7 +1769,7 @@ do_kallsyms:
if (err > 0 && !dso__is_kcore(dso)) {
dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
- dso__set_long_name(dso, "[kernel.kallsyms]", false);
+ dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
map__fixup_start(map);
map__fixup_end(map);
}
@@ -2033,3 +2033,26 @@ void symbol__exit(void)
symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
symbol_conf.initialized = false;
}
+
+int symbol__config_symfs(const struct option *opt __maybe_unused,
+ const char *dir, int unset __maybe_unused)
+{
+ char *bf = NULL;
+ int ret;
+
+ symbol_conf.symfs = strdup(dir);
+ if (symbol_conf.symfs == NULL)
+ return -ENOMEM;
+
+	/*
+	 * Skip the locally configured cache if a symfs is given, and
+	 * configure the buildid dir to symfs/.debug.
+	 */
+ ret = asprintf(&bf, "%s/%s", dir, ".debug");
+ if (ret < 0)
+ return -ENOMEM;
+
+ set_buildid_dir(bf);
+
+ free(bf);
+ return 0;
+}
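
The effect of symbol__config_symfs() is easiest to see with a concrete value: passing --symfs /mnt/sysroot redirects the build-id cache to /mnt/sysroot/.debug instead of the user's ~/.debug. A tiny standalone demo of the same asprintf() composition (the directory is a made-up example):

    #define _GNU_SOURCE             /* for asprintf() */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            const char *symfs = "/mnt/sysroot";     /* hypothetical --symfs arg */
            char *buildid_dir = NULL;

            if (asprintf(&buildid_dir, "%s/%s", symfs, ".debug") < 0)
                    return 1;

            printf("build-id cache: %s\n", buildid_dir); /* /mnt/sysroot/.debug */
            free(buildid_dir);
            return 0;
    }
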
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 2b5e4ed76fcb..b10d558a8803 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -44,6 +44,9 @@ Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
#define DMGL_ANSI (1 << 1) /* Include const, volatile, etc */
#endif
+#define DSO__NAME_KALLSYMS "[kernel.kallsyms]"
+#define DSO__NAME_KCORE "[kernel.kcore]"
+
/** struct symbol - symtab entry
*
* @ignore - resolvable but tools ignore it (e.g. idle routines)
@@ -183,6 +186,8 @@ struct branch_info {
struct addr_map_symbol from;
struct addr_map_symbol to;
struct branch_flags flags;
+ char *srcline_from;
+ char *srcline_to;
};
struct mem_info {
@@ -287,6 +292,8 @@ bool symbol_type__is_a(char symbol_type, enum map_type map_type);
bool symbol__restricted_filename(const char *filename,
const char *restricted_filename);
bool symbol__is_idle(struct symbol *sym);
+int symbol__config_symfs(const struct option *opt __maybe_unused,
+ const char *dir, int unset __maybe_unused);
int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss,
struct symsrc *runtime_ss, symbol_filter_t filter,
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index f92c37abb0a8..b2940c88734a 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -27,7 +27,6 @@ struct perf_top {
int max_stack;
bool hide_kernel_symbols, hide_user_symbols, zero;
bool use_tui, use_stdio;
- bool kptr_restrict_warned;
bool vmlinux_warned;
bool dump_symtab;
struct hist_entry *sym_filter_entry;
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index eab077ad6ca9..23504ad5d6dd 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -33,7 +33,8 @@ struct callchain_param callchain_param = {
unsigned int page_size;
int cacheline_size;
-unsigned int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH;
+int sysctl_perf_event_max_stack = PERF_MAX_STACK_DEPTH;
+int sysctl_perf_event_max_contexts_per_stack = PERF_MAX_CONTEXTS_PER_STACK;
bool test_attr__enabled;
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 7651633a8dc7..1e8c3167b9fb 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -261,7 +261,8 @@ void sighandler_dump_stack(int sig);
extern unsigned int page_size;
extern int cacheline_size;
-extern unsigned int sysctl_perf_event_max_stack;
+extern int sysctl_perf_event_max_stack;
+extern int sysctl_perf_event_max_contexts_per_stack;
struct parse_tag {
char tag;
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 409db3304471..e2d5b6f988fb 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -20,6 +20,7 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
@@ -174,10 +175,10 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level)
timer->active_cleared_last = false;
timer->irq.level = new_level;
- trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->map->virt_irq,
+ trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->irq.irq,
timer->irq.level);
ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id,
- timer->map,
+ timer->irq.irq,
timer->irq.level);
WARN_ON(ret);
}
@@ -196,7 +197,7 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
* because the guest would never see the interrupt. Instead wait
* until we call this function from kvm_timer_flush_hwstate.
*/
- if (!vgic_initialized(vcpu->kvm))
+ if (!vgic_initialized(vcpu->kvm) || !timer->enabled)
return -ENODEV;
if (kvm_timer_should_fire(vcpu) != timer->irq.level)
@@ -274,10 +275,8 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
* to ensure that hardware interrupts from the timer triggers a guest
* exit.
*/
- if (timer->irq.level || kvm_vgic_map_is_active(vcpu, timer->map))
- phys_active = true;
- else
- phys_active = false;
+ phys_active = timer->irq.level ||
+ kvm_vgic_map_is_active(vcpu, timer->irq.irq);
/*
* We want to avoid hitting the (re)distributor as much as
@@ -302,7 +301,7 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
if (timer->active_cleared_last && !phys_active)
return;
- ret = irq_set_irqchip_state(timer->map->irq,
+ ret = irq_set_irqchip_state(host_vtimer_irq,
IRQCHIP_STATE_ACTIVE,
phys_active);
WARN_ON(ret);
@@ -334,7 +333,6 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
const struct kvm_irq_level *irq)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
- struct irq_phys_map *map;
/*
* The vcpu timer irq number cannot be determined in
@@ -353,15 +351,6 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
timer->cntv_ctl = 0;
kvm_timer_update_state(vcpu);
- /*
- * Tell the VGIC that the virtual interrupt is tied to a
- * physical interrupt. We do that once per VCPU.
- */
- map = kvm_vgic_map_phys_irq(vcpu, irq->irq, host_vtimer_irq);
- if (WARN_ON(IS_ERR(map)))
- return PTR_ERR(map);
-
- timer->map = map;
return 0;
}
@@ -487,14 +476,43 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
timer_disarm(timer);
- if (timer->map)
- kvm_vgic_unmap_phys_irq(vcpu, timer->map);
+ kvm_vgic_unmap_phys_irq(vcpu, timer->irq.irq);
}
-void kvm_timer_enable(struct kvm *kvm)
+int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
- if (kvm->arch.timer.enabled)
- return;
+ struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+ struct irq_desc *desc;
+ struct irq_data *data;
+ int phys_irq;
+ int ret;
+
+ if (timer->enabled)
+ return 0;
+
+ /*
+ * Find the physical IRQ number corresponding to the host_vtimer_irq
+ */
+ desc = irq_to_desc(host_vtimer_irq);
+ if (!desc) {
+ kvm_err("%s: no interrupt descriptor\n", __func__);
+ return -EINVAL;
+ }
+
+ data = irq_desc_get_irq_data(desc);
+ while (data->parent_data)
+ data = data->parent_data;
+
+ phys_irq = data->hwirq;
+
+ /*
+ * Tell the VGIC that the virtual interrupt is tied to a
+ * physical interrupt. We do that once per VCPU.
+ */
+ ret = kvm_vgic_map_phys_irq(vcpu, timer->irq.irq, phys_irq);
+ if (ret)
+ return ret;
+
/*
* There is a potential race here between VCPUs starting for the first
@@ -505,7 +523,9 @@ void kvm_timer_enable(struct kvm *kvm)
* the arch timers are enabled.
*/
if (timecounter && wqueue)
- kvm->arch.timer.enabled = 1;
+ timer->enabled = 1;
+
+ return 0;
}
void kvm_timer_init(struct kvm *kvm)
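
kvm_timer_enable() above resolves the hardware IRQ by walking the hierarchical irq_data chain down to the root domain, where data->hwirq is meaningful. Isolated as a helper, the walk looks roughly like this (kernel-context sketch, not a standalone program):

    #include <linux/errno.h>
    #include <linux/irq.h>
    #include <linux/irqdesc.h>

    /* Translate a Linux virq into the root irq domain's hwirq number. */
    static int virq_to_root_hwirq(unsigned int virq)
    {
            struct irq_desc *desc = irq_to_desc(virq);
            struct irq_data *data;

            if (!desc)
                    return -EINVAL;

            data = irq_desc_get_irq_data(desc);
            while (data->parent_data)       /* descend through stacked domains */
                    data = data->parent_data;

            return data->hwirq;
    }
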
diff --git a/virt/kvm/arm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c
index ea00d69e7078..798866a8d875 100644
--- a/virt/kvm/arm/hyp/timer-sr.c
+++ b/virt/kvm/arm/hyp/timer-sr.c
@@ -24,11 +24,10 @@
/* vcpu is already in the HYP VA space */
void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
{
- struct kvm *kvm = kern_hyp_va(vcpu->kvm);
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
u64 val;
- if (kvm->arch.timer.enabled) {
+ if (timer->enabled) {
timer->cntv_ctl = read_sysreg_el0(cntv_ctl);
timer->cntv_cval = read_sysreg_el0(cntv_cval);
}
@@ -60,7 +59,7 @@ void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
val |= CNTHCTL_EL1PCTEN;
write_sysreg(val, cnthctl_el2);
- if (kvm->arch.timer.enabled) {
+ if (timer->enabled) {
write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
write_sysreg_el0(timer->cntv_cval, cntv_cval);
isb();
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
index 674bdf8ecf4f..a3f12b3b277b 100644
--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
@@ -21,11 +21,18 @@
#include <asm/kvm_hyp.h>
+#ifdef CONFIG_KVM_NEW_VGIC
+extern struct vgic_global kvm_vgic_global_state;
+#define vgic_v2_params kvm_vgic_global_state
+#else
+extern struct vgic_params vgic_v2_params;
+#endif
+
static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu,
void __iomem *base)
{
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
- int nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+ int nr_lr = (kern_hyp_va(&vgic_v2_params))->nr_lr;
u32 eisr0, eisr1;
int i;
bool expect_mi;
@@ -67,7 +74,7 @@ static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu,
static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
{
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
- int nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+ int nr_lr = (kern_hyp_va(&vgic_v2_params))->nr_lr;
u32 elrsr0, elrsr1;
elrsr0 = readl_relaxed(base + GICH_ELRSR0);
@@ -86,7 +93,7 @@ static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
{
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
- int nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+ int nr_lr = (kern_hyp_va(&vgic_v2_params))->nr_lr;
int i;
for (i = 0; i < nr_lr; i++) {
@@ -141,13 +148,13 @@ void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
struct vgic_dist *vgic = &kvm->arch.vgic;
void __iomem *base = kern_hyp_va(vgic->vctrl_base);
- int i, nr_lr;
+ int nr_lr = (kern_hyp_va(&vgic_v2_params))->nr_lr;
+ int i;
u64 live_lrs = 0;
if (!base)
return;
- nr_lr = vcpu->arch.vgic_cpu.nr_lr;
for (i = 0; i < nr_lr; i++)
if (cpu_if->vgic_lr[i] & GICH_LR_STATE)
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 575c7aa30d7e..a027569facfa 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -436,7 +436,14 @@ static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
return 0;
}
-static bool irq_is_valid(struct kvm *kvm, int irq, bool is_ppi)
+#define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS)
+
+/*
+ * Within one VM the PMU overflow interrupt type must be the same for
+ * all vcpus: as a PPI, the interrupt number is the same for every vcpu,
+ * while as an SPI it must be a separate number per vcpu.
+ */
+static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
int i;
struct kvm_vcpu *vcpu;
@@ -445,7 +452,7 @@ static bool irq_is_valid(struct kvm *kvm, int irq, bool is_ppi)
if (!kvm_arm_pmu_irq_initialized(vcpu))
continue;
- if (is_ppi) {
+ if (irq_is_ppi(irq)) {
if (vcpu->arch.pmu.irq_num != irq)
return false;
} else {
@@ -457,7 +464,6 @@ static bool irq_is_valid(struct kvm *kvm, int irq, bool is_ppi)
return true;
}
-
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
switch (attr->attr) {
@@ -471,14 +477,11 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
if (get_user(irq, uaddr))
return -EFAULT;
- /*
- * The PMU overflow interrupt could be a PPI or SPI, but for one
- * VM the interrupt type must be same for each vcpu. As a PPI,
- * the interrupt number is the same for all vcpus, while as an
- * SPI it must be a separate number per vcpu.
- */
- if (irq < VGIC_NR_SGIS || irq >= vcpu->kvm->arch.vgic.nr_irqs ||
- !irq_is_valid(vcpu->kvm, irq, irq < VGIC_NR_PRIVATE_IRQS))
+ /* The PMU overflow interrupt can be a PPI or a valid SPI. */
+ if (!(irq_is_ppi(irq) || vgic_valid_spi(vcpu->kvm, irq)))
+ return -EINVAL;
+
+ if (!pmu_irq_is_valid(vcpu->kvm, irq))
return -EINVAL;
if (kvm_arm_pmu_irq_initialized(vcpu))
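
With the vgic's constants (VGIC_NR_SGIS == 16, VGIC_NR_PRIVATE_IRQS == 32), the new irq_is_ppi() macro accepts exactly INTIDs 16-31. A standalone check, with the constants copied out for illustration:

    #include <assert.h>

    #define VGIC_NR_SGIS            16
    #define VGIC_NR_PRIVATE_IRQS    32
    #define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS)

    int main(void)
    {
            assert(!irq_is_ppi(7));         /* SGI */
            assert(irq_is_ppi(23));         /* PPI, e.g. a PMU overflow line */
            assert(!irq_is_ppi(40));        /* SPI */
            return 0;
    }
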
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index 7e826c9b2b0a..334cd7a89106 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -171,7 +171,7 @@ static const struct vgic_ops vgic_v2_ops = {
.enable = vgic_v2_enable,
};
-static struct vgic_params vgic_v2_params;
+struct vgic_params __section(.hyp.text) vgic_v2_params;
static void vgic_cpu_init_lrs(void *params)
{
@@ -201,6 +201,8 @@ int vgic_v2_probe(const struct gic_kvm_info *gic_kvm_info,
const struct resource *vctrl_res = &gic_kvm_info->vctrl;
const struct resource *vcpu_res = &gic_kvm_info->vcpu;
+ memset(vgic, 0, sizeof(*vgic));
+
if (!gic_kvm_info->maint_irq) {
kvm_err("error getting vgic maintenance irq\n");
ret = -ENXIO;
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index c02a1b1cf855..75b02fa86436 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -29,12 +29,6 @@
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
-/* These are for GICv2 emulation only */
-#define GICH_LR_VIRTUALID (0x3ffUL << 0)
-#define GICH_LR_PHYSID_CPUID_SHIFT (10)
-#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
-#define ICH_LR_VIRTUALID_MASK (BIT_ULL(32) - 1)
-
static u32 ich_vtr_el2;
static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
@@ -43,7 +37,7 @@ static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr];
if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
- lr_desc.irq = val & ICH_LR_VIRTUALID_MASK;
+ lr_desc.irq = val & ICH_LR_VIRTUAL_ID_MASK;
else
lr_desc.irq = val & GICH_LR_VIRTUALID;
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 60668a7f319a..c3bfbb981e73 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -690,12 +690,11 @@ bool vgic_handle_cfg_reg(u32 *reg, struct kvm_exit_mmio *mmio,
*/
void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
- struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
u64 elrsr = vgic_get_elrsr(vcpu);
unsigned long *elrsr_ptr = u64_to_bitmask(&elrsr);
int i;
- for_each_clear_bit(i, elrsr_ptr, vgic_cpu->nr_lr) {
+ for_each_clear_bit(i, elrsr_ptr, vgic->nr_lr) {
struct vgic_lr lr = vgic_get_lr(vcpu, i);
/*
@@ -820,7 +819,6 @@ static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
struct vgic_io_device *iodev = container_of(this,
struct vgic_io_device, dev);
- struct kvm_run *run = vcpu->run;
const struct vgic_io_range *range;
struct kvm_exit_mmio mmio;
bool updated_state;
@@ -849,12 +847,6 @@ static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
updated_state = false;
}
spin_unlock(&dist->lock);
- run->mmio.is_write = is_write;
- run->mmio.len = len;
- run->mmio.phys_addr = addr;
- memcpy(run->mmio.data, val, len);
-
- kvm_handle_mmio_return(vcpu, run);
if (updated_state)
vgic_kick_vcpus(vcpu->kvm);
@@ -1102,18 +1094,18 @@ static bool dist_active_irq(struct kvm_vcpu *vcpu)
return test_bit(vcpu->vcpu_id, dist->irq_active_on_cpu);
}
-bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
{
int i;
- for (i = 0; i < vcpu->arch.vgic_cpu.nr_lr; i++) {
+ for (i = 0; i < vgic->nr_lr; i++) {
struct vgic_lr vlr = vgic_get_lr(vcpu, i);
- if (vlr.irq == map->virt_irq && vlr.state & LR_STATE_ACTIVE)
+ if (vlr.irq == virt_irq && vlr.state & LR_STATE_ACTIVE)
return true;
}
- return vgic_irq_is_active(vcpu, map->virt_irq);
+ return vgic_irq_is_active(vcpu, virt_irq);
}
/*
@@ -1521,7 +1513,6 @@ static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
}
static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
- struct irq_phys_map *map,
unsigned int irq_num, bool level)
{
struct vgic_dist *dist = &kvm->arch.vgic;
@@ -1660,14 +1651,14 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
if (map)
return -EINVAL;
- return vgic_update_irq_pending(kvm, cpuid, NULL, irq_num, level);
+ return vgic_update_irq_pending(kvm, cpuid, irq_num, level);
}
/**
* kvm_vgic_inject_mapped_irq - Inject a physically mapped IRQ to the vgic
* @kvm: The VM structure pointer
* @cpuid: The CPU for PPIs
- * @map: Pointer to a irq_phys_map structure describing the mapping
+ * @virt_irq: The virtual IRQ to be injected
* @level: Edge-triggered: true: to trigger the interrupt
* false: to ignore the call
* Level-sensitive true: raise the input signal
@@ -1678,7 +1669,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
* being HIGH and 0 being LOW and all devices being active-HIGH.
*/
int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
- struct irq_phys_map *map, bool level)
+ unsigned int virt_irq, bool level)
{
int ret;
@@ -1686,7 +1677,7 @@ int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
if (ret)
return ret;
- return vgic_update_irq_pending(kvm, cpuid, map, map->virt_irq, level);
+ return vgic_update_irq_pending(kvm, cpuid, virt_irq, level);
}
static irqreturn_t vgic_maintenance_handler(int irq, void *data)
@@ -1712,43 +1703,28 @@ static struct list_head *vgic_get_irq_phys_map_list(struct kvm_vcpu *vcpu,
/**
* kvm_vgic_map_phys_irq - map a virtual IRQ to a physical IRQ
* @vcpu: The VCPU pointer
- * @virt_irq: The virtual irq number
- * @irq: The Linux IRQ number
+ * @virt_irq: The virtual IRQ number for the guest
+ * @phys_irq: The hardware IRQ number of the host
*
* Establish a mapping between a guest visible irq (@virt_irq) and a
- * Linux irq (@irq). On injection, @virt_irq will be associated with
- * the physical interrupt represented by @irq. This mapping can be
+ * hardware irq (@phys_irq). On injection, @virt_irq will be associated with
+ * the physical interrupt represented by @phys_irq. This mapping can be
* established multiple times as long as the parameters are the same.
*
- * Returns a valid pointer on success, and an error pointer otherwise
+ * Returns 0 on success or an error value otherwise.
*/
-struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
- int virt_irq, int irq)
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, int virt_irq, int phys_irq)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
struct irq_phys_map *map;
struct irq_phys_map_entry *entry;
- struct irq_desc *desc;
- struct irq_data *data;
- int phys_irq;
-
- desc = irq_to_desc(irq);
- if (!desc) {
- kvm_err("%s: no interrupt descriptor\n", __func__);
- return ERR_PTR(-EINVAL);
- }
-
- data = irq_desc_get_irq_data(desc);
- while (data->parent_data)
- data = data->parent_data;
-
- phys_irq = data->hwirq;
+ int ret = 0;
/* Create a new mapping */
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
spin_lock(&dist->irq_phys_map_lock);
@@ -1756,9 +1732,8 @@ struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
map = vgic_irq_map_search(vcpu, virt_irq);
if (map) {
/* Make sure this mapping matches */
- if (map->phys_irq != phys_irq ||
- map->irq != irq)
- map = ERR_PTR(-EINVAL);
+ if (map->phys_irq != phys_irq)
+ ret = -EINVAL;
/* Found an existing, valid mapping */
goto out;
@@ -1767,7 +1742,6 @@ struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
map = &entry->map;
map->virt_irq = virt_irq;
map->phys_irq = phys_irq;
- map->irq = irq;
list_add_tail_rcu(&entry->entry, root);
@@ -1775,9 +1749,9 @@ out:
spin_unlock(&dist->irq_phys_map_lock);
/* If we've found a hit in the existing list, free the useless
* entry */
- if (IS_ERR(map) || map != &entry->map)
+ if (ret || map != &entry->map)
kfree(entry);
- return map;
+ return ret;
}
static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
@@ -1813,25 +1787,22 @@ static void vgic_free_phys_irq_map_rcu(struct rcu_head *rcu)
/**
* kvm_vgic_unmap_phys_irq - Remove a virtual to physical IRQ mapping
* @vcpu: The VCPU pointer
- * @map: The pointer to a mapping obtained through kvm_vgic_map_phys_irq
+ * @virt_irq: The virtual IRQ number to be unmapped
*
* Remove an existing mapping between virtual and physical interrupts.
*/
-int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
{
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
struct irq_phys_map_entry *entry;
struct list_head *root;
- if (!map)
- return -EINVAL;
-
- root = vgic_get_irq_phys_map_list(vcpu, map->virt_irq);
+ root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
spin_lock(&dist->irq_phys_map_lock);
list_for_each_entry(entry, root, entry) {
- if (&entry->map == map) {
+ if (entry->map.virt_irq == virt_irq) {
list_del_rcu(&entry->entry);
call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
break;
@@ -1887,13 +1858,6 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
return -ENOMEM;
}
- /*
- * Store the number of LRs per vcpu, so we don't have to go
- * all the way to the distributor structure to find out. Only
- * assembly code should use this one.
- */
- vgic_cpu->nr_lr = vgic->nr_lr;
-
return 0;
}
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
new file mode 100644
index 000000000000..a1442f7c9c4d
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -0,0 +1,452 @@
+/*
+ * Copyright (C) 2015, 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+#include <linux/kvm_host.h>
+#include <kvm/arm_vgic.h>
+#include <asm/kvm_mmu.h>
+#include "vgic.h"
+
+/*
+ * Initialization rules: there are multiple stages to the vgic
+ * initialization, both for the distributor and the CPU interfaces.
+ *
+ * Distributor:
+ *
+ * - kvm_vgic_early_init(): initialization of static data that doesn't
+ * depend on any sizing information or emulation type. No allocation
+ * is allowed there.
+ *
+ * - vgic_init(): allocation and initialization of the generic data
+ * structures that depend on sizing information (number of CPUs,
+ * number of interrupts). Also initializes the vcpu specific data
+ * structures. Can be executed lazily for GICv2.
+ *
+ * CPU Interface:
+ *
+ * - kvm_vgic_cpu_early_init(): initialization of static data that
+ * doesn't depend on any sizing information or emulation type. No
+ * allocation is allowed there.
+ */
+
+/* EARLY INIT */
+
+/*
+ * These two functions should no longer be needed, but they are
+ * still called from arm.c.
+ */
+void kvm_vgic_early_init(struct kvm *kvm)
+{
+}
+
+void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu)
+{
+}
+
+/* CREATION */
+
+/**
+ * kvm_vgic_create: triggered by the instantiation of the VGIC device by
+ * user space, either through the legacy KVM_CREATE_IRQCHIP ioctl (v2 only)
+ * or through the generic KVM_CREATE_DEVICE API ioctl.
+ * irqchip_in_kernel() tells you if this function succeeded or not.
+ * @kvm: kvm struct pointer
+ * @type: KVM_DEV_TYPE_ARM_VGIC_V[23]
+ */
+int kvm_vgic_create(struct kvm *kvm, u32 type)
+{
+ int i, vcpu_lock_idx = -1, ret;
+ struct kvm_vcpu *vcpu;
+
+ mutex_lock(&kvm->lock);
+
+ if (irqchip_in_kernel(kvm)) {
+ ret = -EEXIST;
+ goto out;
+ }
+
+ /*
+ * This function is also called by the KVM_CREATE_IRQCHIP handler,
+ * which had no chance yet to check the availability of the GICv2
+ * emulation. So check this here again. KVM_CREATE_DEVICE does
+ * the proper checks already.
+ */
+ if (type == KVM_DEV_TYPE_ARM_VGIC_V2 &&
+ !kvm_vgic_global_state.can_emulate_gicv2) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /*
+ * Any time a vcpu is run, vcpu_load is called which tries to grab the
+ * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
+ * that no other VCPUs are run while we create the vgic.
+ */
+ ret = -EBUSY;
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!mutex_trylock(&vcpu->mutex))
+ goto out_unlock;
+ vcpu_lock_idx = i;
+ }
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (vcpu->arch.has_run_once)
+ goto out_unlock;
+ }
+ ret = 0;
+
+ if (type == KVM_DEV_TYPE_ARM_VGIC_V2)
+ kvm->arch.max_vcpus = VGIC_V2_MAX_CPUS;
+ else
+ kvm->arch.max_vcpus = VGIC_V3_MAX_CPUS;
+
+ if (atomic_read(&kvm->online_vcpus) > kvm->arch.max_vcpus) {
+ ret = -E2BIG;
+ goto out_unlock;
+ }
+
+ kvm->arch.vgic.in_kernel = true;
+ kvm->arch.vgic.vgic_model = type;
+
+ /*
+ * kvm_vgic_global_state.vctrl_base is set on vgic probe (kvm_arch_init)
+ * it is stored in distributor struct for asm save/restore purpose
+ */
+ kvm->arch.vgic.vctrl_base = kvm_vgic_global_state.vctrl_base;
+
+ kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
+ kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
+ kvm->arch.vgic.vgic_redist_base = VGIC_ADDR_UNDEF;
+
+out_unlock:
+ for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
+ vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
+ mutex_unlock(&vcpu->mutex);
+ }
+
+out:
+ mutex_unlock(&kvm->lock);
+ return ret;
+}
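
The trylock loop in kvm_vgic_create() is a small but important pattern: holding every vcpu->mutex guarantees that no VCPU can pass vcpu_load() and run while the vgic is instantiated. Factored into a helper, it would look roughly like this (a sketch against the kvm_for_each_vcpu()/kvm_get_vcpu() API, not code from this patch):

    #include <linux/kvm_host.h>

    /* Grab every vcpu->mutex or fail; on failure, release what was taken. */
    static int lock_all_vcpus(struct kvm *kvm)
    {
            struct kvm_vcpu *tmp;
            int c;

            kvm_for_each_vcpu(c, tmp, kvm) {
                    if (!mutex_trylock(&tmp->mutex)) {
                            while (--c >= 0)
                                    mutex_unlock(&kvm_get_vcpu(kvm, c)->mutex);
                            return -EBUSY;
                    }
            }
            return 0;
    }
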
+
+/* INIT/DESTROY */
+
+/**
+ * kvm_vgic_dist_init: initialize the dist data structures
+ * @kvm: kvm struct pointer
+ * @nr_spis: number of spis, frozen by caller
+ */
+static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ struct kvm_vcpu *vcpu0 = kvm_get_vcpu(kvm, 0);
+ int i;
+
+ dist->spis = kcalloc(nr_spis, sizeof(struct vgic_irq), GFP_KERNEL);
+ if (!dist->spis)
+ return -ENOMEM;
+
+ /*
+ * In the following code we do not take the irq struct lock since
+ * no other action on irq structs can happen while the VGIC is
+ * not initialized yet:
+ * If someone wants to inject an interrupt or performs an MMIO access, we
+ * require prior initialization in case of a virtual GICv3 or trigger
+ * initialization when using a virtual GICv2.
+ */
+ for (i = 0; i < nr_spis; i++) {
+ struct vgic_irq *irq = &dist->spis[i];
+
+ irq->intid = i + VGIC_NR_PRIVATE_IRQS;
+ INIT_LIST_HEAD(&irq->ap_list);
+ spin_lock_init(&irq->irq_lock);
+ irq->vcpu = NULL;
+ irq->target_vcpu = vcpu0;
+ if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
+ irq->targets = 0;
+ else
+ irq->mpidr = 0;
+ }
+ return 0;
+}
+
+/**
+ * kvm_vgic_vcpu_init: initialize the vcpu data structures and
+ * enable the VCPU interface
+ * @vcpu: the VCPU whose VGIC should be initialized
+ */
+static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
+{
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ int i;
+
+ INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
+ spin_lock_init(&vgic_cpu->ap_list_lock);
+
+ /*
+ * Enable and configure all SGIs to be edge-triggered and
+ * configure all PPIs as level-triggered.
+ */
+ for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) {
+ struct vgic_irq *irq = &vgic_cpu->private_irqs[i];
+
+ INIT_LIST_HEAD(&irq->ap_list);
+ spin_lock_init(&irq->irq_lock);
+ irq->intid = i;
+ irq->vcpu = NULL;
+ irq->target_vcpu = vcpu;
+ irq->targets = 1U << vcpu->vcpu_id;
+ if (vgic_irq_is_sgi(i)) {
+ /* SGIs */
+ irq->enabled = 1;
+ irq->config = VGIC_CONFIG_EDGE;
+ } else {
+ /* PPIs */
+ irq->config = VGIC_CONFIG_LEVEL;
+ }
+ }
+ if (kvm_vgic_global_state.type == VGIC_V2)
+ vgic_v2_enable(vcpu);
+ else
+ vgic_v3_enable(vcpu);
+}
+
+/*
+ * vgic_init: allocates and initializes dist and vcpu data structures
+ * depending on two dimensioning parameters:
+ * - the number of spis
+ * - the number of vcpus
+ * The function is generally called when nr_spis has been explicitly set
+ * by userspace through the KVM device API; if not, nr_spis defaults to
+ * VGIC_NR_IRQS_LEGACY - VGIC_NR_PRIVATE_IRQS SPIs.
+ * vgic_initialized() returns true when this function has succeeded.
+ * Must be called with kvm->lock held!
+ */
+int vgic_init(struct kvm *kvm)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ struct kvm_vcpu *vcpu;
+ int ret = 0, i;
+
+ if (vgic_initialized(kvm))
+ return 0;
+
+ /* freeze the number of spis */
+ if (!dist->nr_spis)
+ dist->nr_spis = VGIC_NR_IRQS_LEGACY - VGIC_NR_PRIVATE_IRQS;
+
+ ret = kvm_vgic_dist_init(kvm, dist->nr_spis);
+ if (ret)
+ goto out;
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_vgic_vcpu_init(vcpu);
+
+ dist->initialized = true;
+out:
+ return ret;
+}
+
+static void kvm_vgic_dist_destroy(struct kvm *kvm)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+
+ mutex_lock(&kvm->lock);
+
+ dist->ready = false;
+ dist->initialized = false;
+
+ kfree(dist->spis);
+ kfree(dist->redist_iodevs);
+ dist->nr_spis = 0;
+
+ mutex_unlock(&kvm->lock);
+}
+
+void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+
+ INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
+}
+
+void kvm_vgic_destroy(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ kvm_vgic_dist_destroy(kvm);
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_vgic_vcpu_destroy(vcpu);
+}
+
+/**
+ * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
+ * is a GICv2. A GICv3 must be explicitly initialized by the guest using the
+ * KVM_DEV_ARM_VGIC_GRP_CTRL KVM_DEVICE group.
+ * @kvm: kvm struct pointer
+ */
+int vgic_lazy_init(struct kvm *kvm)
+{
+ int ret = 0;
+
+ if (unlikely(!vgic_initialized(kvm))) {
+ /*
+ * We only provide the automatic initialization of the VGIC
+ * for the legacy case of a GICv2. Any other type must
+ * be explicitly initialized once setup with the respective
+ * KVM device call.
+ */
+ if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
+ return -EBUSY;
+
+ mutex_lock(&kvm->lock);
+ ret = vgic_init(kvm);
+ mutex_unlock(&kvm->lock);
+ }
+
+ return ret;
+}
+
+/* RESOURCE MAPPING */
+
+/**
+ * Map the MMIO regions depending on the VGIC model exposed to the
+ * guest; called on the first VCPU run.
+ * Also map the virtual CPU interface into the VM.
+ * v2/v3 derivatives call vgic_init if not already done.
+ * vgic_ready() returns true if this function has succeeded.
+ * @kvm: kvm struct pointer
+ */
+int kvm_vgic_map_resources(struct kvm *kvm)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int ret = 0;
+
+ mutex_lock(&kvm->lock);
+ if (!irqchip_in_kernel(kvm))
+ goto out;
+
+ if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
+ ret = vgic_v2_map_resources(kvm);
+ else
+ ret = vgic_v3_map_resources(kvm);
+out:
+ mutex_unlock(&kvm->lock);
+ return ret;
+}
+
+/* GENERIC PROBE */
+
+static void vgic_init_maintenance_interrupt(void *info)
+{
+ enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0);
+}
+
+static int vgic_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *cpu)
+{
+ switch (action) {
+ case CPU_STARTING:
+ case CPU_STARTING_FROZEN:
+ vgic_init_maintenance_interrupt(NULL);
+ break;
+ case CPU_DYING:
+ case CPU_DYING_FROZEN:
+ disable_percpu_irq(kvm_vgic_global_state.maint_irq);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block vgic_cpu_nb = {
+ .notifier_call = vgic_cpu_notify,
+};
+
+static irqreturn_t vgic_maintenance_handler(int irq, void *data)
+{
+ /*
+ * We cannot rely on the vgic maintenance interrupt to be
+ * delivered synchronously. This means we can only use it to
+ * exit the VM, and we perform the handling of EOIed
+ * interrupts on the exit path (see vgic_process_maintenance).
+ */
+ return IRQ_HANDLED;
+}
+
+/**
+ * kvm_vgic_hyp_init: populates the kvm_vgic_global_state variable
+ * according to the host GIC model. Accordingly calls either
+ * vgic_v2/v3_probe which registers the KVM_DEVICE that can be
+ * instantiated by userspace later on.
+ */
+int kvm_vgic_hyp_init(void)
+{
+ const struct gic_kvm_info *gic_kvm_info;
+ int ret;
+
+ gic_kvm_info = gic_get_kvm_info();
+ if (!gic_kvm_info)
+ return -ENODEV;
+
+ if (!gic_kvm_info->maint_irq) {
+ kvm_err("No vgic maintenance irq\n");
+ return -ENXIO;
+ }
+
+ switch (gic_kvm_info->type) {
+ case GIC_V2:
+ ret = vgic_v2_probe(gic_kvm_info);
+ break;
+ case GIC_V3:
+ ret = vgic_v3_probe(gic_kvm_info);
+ break;
+ default:
+ ret = -ENODEV;
+	}
+
+ if (ret)
+ return ret;
+
+ kvm_vgic_global_state.maint_irq = gic_kvm_info->maint_irq;
+ ret = request_percpu_irq(kvm_vgic_global_state.maint_irq,
+ vgic_maintenance_handler,
+ "vgic", kvm_get_running_vcpus());
+ if (ret) {
+ kvm_err("Cannot register interrupt %d\n",
+ kvm_vgic_global_state.maint_irq);
+ return ret;
+ }
+
+ ret = __register_cpu_notifier(&vgic_cpu_nb);
+ if (ret) {
+ kvm_err("Cannot register vgic CPU notifier\n");
+ goto out_free_irq;
+ }
+
+ on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
+
+ kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq);
+ return 0;
+
+out_free_irq:
+ free_percpu_irq(kvm_vgic_global_state.maint_irq,
+ kvm_get_running_vcpus());
+ return ret;
+}
diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c
new file mode 100644
index 000000000000..c675513270bb
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic-irqfd.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015, 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <trace/events/kvm.h>
+
+int kvm_irq_map_gsi(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *entries,
+ int gsi)
+{
+ return 0;
+}
+
+int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned int irqchip,
+ unsigned int pin)
+{
+ return pin;
+}
+
+int kvm_set_irq(struct kvm *kvm, int irq_source_id,
+ u32 irq, int level, bool line_status)
+{
+ unsigned int spi = irq + VGIC_NR_PRIVATE_IRQS;
+
+ trace_kvm_set_irq(irq, level, irq_source_id);
+
+ BUG_ON(!vgic_initialized(kvm));
+
+ return kvm_vgic_inject_irq(kvm, 0, spi, level);
+}
+
+/* MSI not implemented yet */
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm, int irq_source_id,
+ int level, bool line_status)
+{
+ return 0;
+}
diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c
new file mode 100644
index 000000000000..0130c4b147b7
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
@@ -0,0 +1,431 @@
+/*
+ * VGIC: KVM DEVICE API
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kvm_host.h>
+#include <kvm/arm_vgic.h>
+#include <linux/uaccess.h>
+#include <asm/kvm_mmu.h>
+#include "vgic.h"
+
+/* common helpers */
+
+static int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
+ phys_addr_t addr, phys_addr_t alignment)
+{
+ if (addr & ~KVM_PHYS_MASK)
+ return -E2BIG;
+
+ if (!IS_ALIGNED(addr, alignment))
+ return -EINVAL;
+
+ if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
+ return -EEXIST;
+
+ return 0;
+}
+
+/**
+ * kvm_vgic_addr - set or get vgic VM base addresses
+ * @kvm: pointer to the vm struct
+ * @type: the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
+ * @addr: pointer to address value
+ * @write: if true set the address in the VM address space, if false read the
+ * address
+ *
+ * Set or get the vgic base addresses for the distributor and the virtual CPU
+ * interface in the VM physical address space. These addresses are properties
+ * of the emulated core/SoC and therefore user space initially knows this
+ * information.
+ * Check them for sanity (alignment, double assignment). We can't check for
+ * overlapping regions in case of a virtual GICv3 here, since we don't know
+ * the number of VCPUs yet, so we defer this check to map_resources().
+ */
+int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
+{
+ int r = 0;
+ struct vgic_dist *vgic = &kvm->arch.vgic;
+ int type_needed;
+ phys_addr_t *addr_ptr, alignment;
+
+ mutex_lock(&kvm->lock);
+ switch (type) {
+ case KVM_VGIC_V2_ADDR_TYPE_DIST:
+ type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
+ addr_ptr = &vgic->vgic_dist_base;
+ alignment = SZ_4K;
+ break;
+ case KVM_VGIC_V2_ADDR_TYPE_CPU:
+ type_needed = KVM_DEV_TYPE_ARM_VGIC_V2;
+ addr_ptr = &vgic->vgic_cpu_base;
+ alignment = SZ_4K;
+ break;
+#ifdef CONFIG_KVM_ARM_VGIC_V3
+ case KVM_VGIC_V3_ADDR_TYPE_DIST:
+ type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
+ addr_ptr = &vgic->vgic_dist_base;
+ alignment = SZ_64K;
+ break;
+ case KVM_VGIC_V3_ADDR_TYPE_REDIST:
+ type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
+ addr_ptr = &vgic->vgic_redist_base;
+ alignment = SZ_64K;
+ break;
+#endif
+ default:
+ r = -ENODEV;
+ goto out;
+ }
+
+ if (vgic->vgic_model != type_needed) {
+ r = -ENODEV;
+ goto out;
+ }
+
+ if (write) {
+ r = vgic_check_ioaddr(kvm, addr_ptr, *addr, alignment);
+ if (!r)
+ *addr_ptr = *addr;
+ } else {
+ *addr = *addr_ptr;
+ }
+
+out:
+ mutex_unlock(&kvm->lock);
+ return r;
+}
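
kvm_vgic_addr() is what userspace reaches through KVM_SET_DEVICE_ATTR / KVM_GET_DEVICE_ATTR on the vgic device fd. A minimal userspace sketch for setting the GICv2 distributor base on an arm/arm64 host (the fd and the base address are assumptions supplied by the VMM):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>
    #include <stdint.h>

    static int set_dist_base(int vgic_fd, uint64_t base)
    {
            struct kvm_device_attr attr = {
                    .group = KVM_DEV_ARM_VGIC_GRP_ADDR,
                    .attr  = KVM_VGIC_V2_ADDR_TYPE_DIST,
                    .addr  = (uint64_t)(unsigned long)&base, /* pointer to value */
            };

            /* base must be 4K-aligned and within KVM_PHYS_MASK,
             * otherwise the kernel returns -EINVAL / -E2BIG. */
            return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
    }
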
+
+static int vgic_set_common_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ int r;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_ADDR: {
+ u64 __user *uaddr = (u64 __user *)(long)attr->addr;
+ u64 addr;
+ unsigned long type = (unsigned long)attr->attr;
+
+ if (copy_from_user(&addr, uaddr, sizeof(addr)))
+ return -EFAULT;
+
+ r = kvm_vgic_addr(dev->kvm, type, &addr, true);
+ return (r == -ENODEV) ? -ENXIO : r;
+ }
+ case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
+ u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+ u32 val;
+ int ret = 0;
+
+ if (get_user(val, uaddr))
+ return -EFAULT;
+
+ /*
+ * We require:
+ * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
+ * - at most 1024 interrupts
+ * - a multiple of 32 interrupts
+ */
+ if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
+ val > VGIC_MAX_RESERVED ||
+ (val & 31))
+ return -EINVAL;
+
+ mutex_lock(&dev->kvm->lock);
+
+ if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
+ ret = -EBUSY;
+ else
+ dev->kvm->arch.vgic.nr_spis =
+ val - VGIC_NR_PRIVATE_IRQS;
+
+ mutex_unlock(&dev->kvm->lock);
+
+ return ret;
+ }
+ case KVM_DEV_ARM_VGIC_GRP_CTRL: {
+ switch (attr->attr) {
+ case KVM_DEV_ARM_VGIC_CTRL_INIT:
+ mutex_lock(&dev->kvm->lock);
+ r = vgic_init(dev->kvm);
+ mutex_unlock(&dev->kvm->lock);
+ return r;
+ }
+ break;
+ }
+ }
+
+ return -ENXIO;
+}
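
The three KVM_DEV_ARM_VGIC_GRP_NR_IRQS constraints are easy to check numerically: val = 160 passes (at least 64, a multiple of 32) and yields nr_spis = 128, while val = 100 fails the multiple-of-32 test. A standalone version of the predicate (VGIC_MAX_RESERVED's value is assumed here; see kvm/arm_vgic.h for the real one):

    #include <stdbool.h>

    #define VGIC_NR_PRIVATE_IRQS    32
    #define VGIC_MAX_RESERVED       1023    /* assumed; INTIDs 1020-1023 are
                                               architecturally reserved */

    static bool nr_irqs_valid(unsigned int val)
    {
            return val >= VGIC_NR_PRIVATE_IRQS + 32 &&  /* >= 32 SPIs */
                   val <= VGIC_MAX_RESERVED &&          /* fits the INTID space */
                   !(val & 31);                         /* multiple of 32 */
    }
    /* nr_irqs_valid(160) -> true,  nr_spis = 160 - 32 = 128
     * nr_irqs_valid(100) -> false, not a multiple of 32   */
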
+
+static int vgic_get_common_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ int r = -ENXIO;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_ADDR: {
+ u64 __user *uaddr = (u64 __user *)(long)attr->addr;
+ u64 addr;
+ unsigned long type = (unsigned long)attr->attr;
+
+ r = kvm_vgic_addr(dev->kvm, type, &addr, false);
+ if (r)
+ return (r == -ENODEV) ? -ENXIO : r;
+
+ if (copy_to_user(uaddr, &addr, sizeof(addr)))
+ return -EFAULT;
+ break;
+ }
+ case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
+ u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+
+ r = put_user(dev->kvm->arch.vgic.nr_spis +
+ VGIC_NR_PRIVATE_IRQS, uaddr);
+ break;
+ }
+ }
+
+ return r;
+}
+
+static int vgic_create(struct kvm_device *dev, u32 type)
+{
+ return kvm_vgic_create(dev->kvm, type);
+}
+
+static void vgic_destroy(struct kvm_device *dev)
+{
+ kfree(dev);
+}
+
+void kvm_register_vgic_device(unsigned long type)
+{
+ switch (type) {
+ case KVM_DEV_TYPE_ARM_VGIC_V2:
+ kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
+ KVM_DEV_TYPE_ARM_VGIC_V2);
+ break;
+#ifdef CONFIG_KVM_ARM_VGIC_V3
+ case KVM_DEV_TYPE_ARM_VGIC_V3:
+ kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
+ KVM_DEV_TYPE_ARM_VGIC_V3);
+ break;
+#endif
+ }
+}
+
+/** vgic_attr_regs_access: allows user space to read/write VGIC registers
+ *
+ * @dev: kvm device handle
+ * @attr: kvm device attribute
+ * @reg: pointer to the value to be read or written
+ * @is_write: write flag
+ */
+static int vgic_attr_regs_access(struct kvm_device *dev,
+ struct kvm_device_attr *attr,
+ u32 *reg, bool is_write)
+{
+ gpa_t addr;
+ int cpuid, ret, c;
+ struct kvm_vcpu *vcpu, *tmp_vcpu;
+ int vcpu_lock_idx = -1;
+
+ cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
+ KVM_DEV_ARM_VGIC_CPUID_SHIFT;
+ vcpu = kvm_get_vcpu(dev->kvm, cpuid);
+ addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+
+ mutex_lock(&dev->kvm->lock);
+
+ ret = vgic_init(dev->kvm);
+ if (ret)
+ goto out;
+
+ if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Any time a vcpu is run, vcpu_load is called which tries to grab the
+ * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
+ * that no other VCPUs are run and fiddle with the vgic state while we
+ * access it.
+ */
+ ret = -EBUSY;
+ kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
+ if (!mutex_trylock(&tmp_vcpu->mutex))
+ goto out;
+ vcpu_lock_idx = c;
+ }
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
+ break;
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+out:
+ for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
+ tmp_vcpu = kvm_get_vcpu(dev->kvm, vcpu_lock_idx);
+ mutex_unlock(&tmp_vcpu->mutex);
+ }
+
+ mutex_unlock(&dev->kvm->lock);
+ return ret;
+}
+
+/* V2 ops */
+
+static int vgic_v2_set_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ int ret;
+
+ ret = vgic_set_common_attr(dev, attr);
+ if (ret != -ENXIO)
+ return ret;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
+ u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+ u32 reg;
+
+ if (get_user(reg, uaddr))
+ return -EFAULT;
+
+ return vgic_attr_regs_access(dev, attr, &reg, true);
+ }
+ }
+
+ return -ENXIO;
+}
+
+static int vgic_v2_get_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ int ret;
+
+ ret = vgic_get_common_attr(dev, attr);
+ if (ret != -ENXIO)
+ return ret;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
+ u32 __user *uaddr = (u32 __user *)(long)attr->addr;
+ u32 reg = 0;
+
+ ret = vgic_attr_regs_access(dev, attr, &reg, false);
+ if (ret)
+ return ret;
+ return put_user(reg, uaddr);
+ }
+ }
+
+ return -ENXIO;
+}
+
+static int vgic_v2_has_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_ADDR:
+ switch (attr->attr) {
+ case KVM_VGIC_V2_ADDR_TYPE_DIST:
+ case KVM_VGIC_V2_ADDR_TYPE_CPU:
+ return 0;
+ }
+ break;
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ return vgic_v2_has_attr_regs(dev, attr);
+ case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
+ return 0;
+ case KVM_DEV_ARM_VGIC_GRP_CTRL:
+ switch (attr->attr) {
+ case KVM_DEV_ARM_VGIC_CTRL_INIT:
+ return 0;
+ }
+ }
+ return -ENXIO;
+}
+
+struct kvm_device_ops kvm_arm_vgic_v2_ops = {
+ .name = "kvm-arm-vgic-v2",
+ .create = vgic_create,
+ .destroy = vgic_destroy,
+ .set_attr = vgic_v2_set_attr,
+ .get_attr = vgic_v2_get_attr,
+ .has_attr = vgic_v2_has_attr,
+};
+
+/* V3 ops */
+
+#ifdef CONFIG_KVM_ARM_VGIC_V3
+
+static int vgic_v3_set_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ return vgic_set_common_attr(dev, attr);
+}
+
+static int vgic_v3_get_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ return vgic_get_common_attr(dev, attr);
+}
+
+static int vgic_v3_has_attr(struct kvm_device *dev,
+ struct kvm_device_attr *attr)
+{
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_ADDR:
+ switch (attr->attr) {
+ case KVM_VGIC_V3_ADDR_TYPE_DIST:
+ case KVM_VGIC_V3_ADDR_TYPE_REDIST:
+ return 0;
+ }
+ break;
+ case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
+ return 0;
+ case KVM_DEV_ARM_VGIC_GRP_CTRL:
+ switch (attr->attr) {
+ case KVM_DEV_ARM_VGIC_CTRL_INIT:
+ return 0;
+ }
+ }
+ return -ENXIO;
+}
+
+struct kvm_device_ops kvm_arm_vgic_v3_ops = {
+ .name = "kvm-arm-vgic-v3",
+ .create = vgic_create,
+ .destroy = vgic_destroy,
+ .set_attr = vgic_v3_set_attr,
+ .get_attr = vgic_v3_get_attr,
+ .has_attr = vgic_v3_has_attr,
+};
+
+#endif /* CONFIG_KVM_ARM_VGIC_V3 */
+
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c
new file mode 100644
index 000000000000..a21393637e4b
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c
@@ -0,0 +1,446 @@
+/*
+ * VGICv2 MMIO handling functions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/irqchip/arm-gic.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <kvm/iodev.h>
+#include <kvm/arm_vgic.h>
+
+#include "vgic.h"
+#include "vgic-mmio.h"
+
+static unsigned long vgic_mmio_read_v2_misc(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ u32 value;
+
+ switch (addr & 0x0c) {
+ case GIC_DIST_CTRL:
+ value = vcpu->kvm->arch.vgic.enabled ? GICD_ENABLE : 0;
+ break;
+ case GIC_DIST_CTR:
+ value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
+ value = (value >> 5) - 1;
+ value |= (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
+ break;
+ case GIC_DIST_IIDR:
+ value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
+ break;
+ default:
+ return 0;
+ }
+
+ return value;
+}
+
+static void vgic_mmio_write_v2_misc(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ bool was_enabled = dist->enabled;
+
+ switch (addr & 0x0c) {
+ case GIC_DIST_CTRL:
+ dist->enabled = val & GICD_ENABLE;
+ if (!was_enabled && dist->enabled)
+ vgic_kick_vcpus(vcpu->kvm);
+ break;
+ case GIC_DIST_CTR:
+ case GIC_DIST_IIDR:
+ /* Nothing to do */
+ return;
+ }
+}
+
+static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ int nr_vcpus = atomic_read(&source_vcpu->kvm->online_vcpus);
+ int intid = val & 0xf;
+ int targets = (val >> 16) & 0xff;
+ int mode = (val >> 24) & 0x03;
+ int c;
+ struct kvm_vcpu *vcpu;
+
+ switch (mode) {
+ case 0x0: /* as specified by targets */
+ break;
+ case 0x1:
+ targets = (1U << nr_vcpus) - 1; /* all, ... */
+ targets &= ~(1U << source_vcpu->vcpu_id); /* but self */
+ break;
+ case 0x2: /* this very vCPU only */
+ targets = (1U << source_vcpu->vcpu_id);
+ break;
+ case 0x3: /* reserved */
+ return;
+ }
+
+ kvm_for_each_vcpu(c, vcpu, source_vcpu->kvm) {
+ struct vgic_irq *irq;
+
+ if (!(targets & (1U << c)))
+ continue;
+
+ irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);
+
+ spin_lock(&irq->irq_lock);
+ irq->pending = true;
+ irq->source |= 1U << source_vcpu->vcpu_id;
+
+ vgic_queue_irq_unlock(source_vcpu->kvm, irq);
+ }
+}
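
A concrete GICD_SGIR value makes the decoding above easy to follow: writing 0x00020003 requests SGI 3 for target list 0x02 (vCPU1) with filter mode 0, "as specified by targets". A standalone decode using the same field layout:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t val = 0x00020003;              /* example GICD_SGIR write */

            unsigned intid   = val & 0xf;           /* 3 */
            unsigned targets = (val >> 16) & 0xff;  /* 0x02 -> vCPU1 */
            unsigned mode    = (val >> 24) & 0x03;  /* 0: use the target list */

            printf("SGI %u -> target mask %#x, mode %u\n", intid, targets, mode);
            return 0;
    }
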
+
+static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
+ int i;
+ u64 val = 0;
+
+ for (i = 0; i < len; i++) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+ val |= (u64)irq->targets << (i * 8);
+ }
+
+ return val;
+}
+
+static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
+ int i;
+
+ /* GICD_ITARGETSR[0-7] are read-only */
+ if (intid < VGIC_NR_PRIVATE_IRQS)
+ return;
+
+ for (i = 0; i < len; i++) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
+ int target;
+
+ spin_lock(&irq->irq_lock);
+
+ irq->targets = (val >> (i * 8)) & 0xff;
+ target = irq->targets ? __ffs(irq->targets) : 0;
+ irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);
+
+ spin_unlock(&irq->irq_lock);
+ }
+}
+
+static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ u32 intid = addr & 0x0f;
+ int i;
+ u64 val = 0;
+
+ for (i = 0; i < len; i++) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+ val |= (u64)irq->source << (i * 8);
+ }
+ return val;
+}
+
+static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ u32 intid = addr & 0x0f;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+ spin_lock(&irq->irq_lock);
+
+ irq->source &= ~((val >> (i * 8)) & 0xff);
+ if (!irq->source)
+ irq->pending = false;
+
+ spin_unlock(&irq->irq_lock);
+ }
+}
+
+static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ u32 intid = addr & 0x0f;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+ spin_lock(&irq->irq_lock);
+
+ irq->source |= (val >> (i * 8)) & 0xff;
+
+ if (irq->source) {
+ irq->pending = true;
+ vgic_queue_irq_unlock(vcpu->kvm, irq);
+ } else {
+ spin_unlock(&irq->irq_lock);
+ }
+ }
+}
+
+static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+{
+ if (kvm_vgic_global_state.type == VGIC_V2)
+ vgic_v2_set_vmcr(vcpu, vmcr);
+ else
+ vgic_v3_set_vmcr(vcpu, vmcr);
+}
+
+static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+{
+ if (kvm_vgic_global_state.type == VGIC_V2)
+ vgic_v2_get_vmcr(vcpu, vmcr);
+ else
+ vgic_v3_get_vmcr(vcpu, vmcr);
+}
+
+#define GICC_ARCH_VERSION_V2 0x2
+
+/* These are for userland accesses only, there is no guest-facing emulation. */
+static unsigned long vgic_mmio_read_vcpuif(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ struct vgic_vmcr vmcr;
+ u32 val;
+
+ vgic_get_vmcr(vcpu, &vmcr);
+
+ switch (addr & 0xff) {
+ case GIC_CPU_CTRL:
+ val = vmcr.ctlr;
+ break;
+ case GIC_CPU_PRIMASK:
+ val = vmcr.pmr;
+ break;
+ case GIC_CPU_BINPOINT:
+ val = vmcr.bpr;
+ break;
+ case GIC_CPU_ALIAS_BINPOINT:
+ val = vmcr.abpr;
+ break;
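+	/* GICC_IIDR: ProductID [31:20], ArchRev [19:16], Implementer [11:0]. */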
+ case GIC_CPU_IDENT:
+ val = ((PRODUCT_ID_KVM << 20) |
+ (GICC_ARCH_VERSION_V2 << 16) |
+ IMPLEMENTER_ARM);
+ break;
+ default:
+ return 0;
+ }
+
+ return val;
+}
+
+static void vgic_mmio_write_vcpuif(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ struct vgic_vmcr vmcr;
+
+ vgic_get_vmcr(vcpu, &vmcr);
+
+ switch (addr & 0xff) {
+ case GIC_CPU_CTRL:
+ vmcr.ctlr = val;
+ break;
+ case GIC_CPU_PRIMASK:
+ vmcr.pmr = val;
+ break;
+ case GIC_CPU_BINPOINT:
+ vmcr.bpr = val;
+ break;
+ case GIC_CPU_ALIAS_BINPOINT:
+ vmcr.abpr = val;
+ break;
+ }
+
+ vgic_set_vmcr(vcpu, &vmcr);
+}
+
+static const struct vgic_register_region vgic_v2_dist_registers[] = {
+ REGISTER_DESC_WITH_LENGTH(GIC_DIST_CTRL,
+ vgic_mmio_read_v2_misc, vgic_mmio_write_v2_misc, 12,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
+ vgic_mmio_read_rao, vgic_mmio_write_wi, 1,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_SET,
+ vgic_mmio_read_enable, vgic_mmio_write_senable, 1,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ENABLE_CLEAR,
+ vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_SET,
+ vgic_mmio_read_pending, vgic_mmio_write_spending, 1,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PENDING_CLEAR,
+ vgic_mmio_read_pending, vgic_mmio_write_cpending, 1,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_SET,
+ vgic_mmio_read_active, vgic_mmio_write_sactive, 1,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_ACTIVE_CLEAR,
+ vgic_mmio_read_active, vgic_mmio_write_cactive, 1,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_PRI,
+ vgic_mmio_read_priority, vgic_mmio_write_priority, 8,
+ VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_TARGET,
+ vgic_mmio_read_target, vgic_mmio_write_target, 8,
+ VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_CONFIG,
+ vgic_mmio_read_config, vgic_mmio_write_config, 2,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GIC_DIST_SOFTINT,
+ vgic_mmio_read_raz, vgic_mmio_write_sgir, 4,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_CLEAR,
+ vgic_mmio_read_sgipend, vgic_mmio_write_sgipendc, 16,
+ VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+ REGISTER_DESC_WITH_LENGTH(GIC_DIST_SGI_PENDING_SET,
+ vgic_mmio_read_sgipend, vgic_mmio_write_sgipends, 16,
+ VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+};
+
+static const struct vgic_register_region vgic_v2_cpu_registers[] = {
+ REGISTER_DESC_WITH_LENGTH(GIC_CPU_CTRL,
+ vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GIC_CPU_PRIMASK,
+ vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GIC_CPU_BINPOINT,
+ vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GIC_CPU_ALIAS_BINPOINT,
+ vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GIC_CPU_ACTIVEPRIO,
+ vgic_mmio_read_raz, vgic_mmio_write_wi, 16,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GIC_CPU_IDENT,
+ vgic_mmio_read_vcpuif, vgic_mmio_write_vcpuif, 4,
+ VGIC_ACCESS_32bit),
+};
+
+unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev)
+{
+ dev->regions = vgic_v2_dist_registers;
+ dev->nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
+
+ kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);
+
+ return SZ_4K;
+}
+
+int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
+{
+ int nr_irqs = dev->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
+ const struct vgic_register_region *regions;
+ gpa_t addr;
+ int nr_regions, i, len;
+
+ addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+
+ switch (attr->group) {
+ case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+ regions = vgic_v2_dist_registers;
+ nr_regions = ARRAY_SIZE(vgic_v2_dist_registers);
+ break;
+ case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
+ regions = vgic_v2_cpu_registers;
+ nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers);
+ break;
+ default:
+ return -ENXIO;
+ }
+
+ /* We only support aligned 32-bit accesses. */
+ if (addr & 3)
+ return -ENXIO;
+
+ for (i = 0; i < nr_regions; i++) {
+ if (regions[i].bits_per_irq)
+ len = (regions[i].bits_per_irq * nr_irqs) / 8;
+ else
+ len = regions[i].len;
+
+ if (regions[i].reg_offset <= addr &&
+ regions[i].reg_offset + len > addr)
+ return 0;
+ }
+
+ return -ENXIO;
+}
+
+/*
+ * When userland tries to access the VGIC register handlers, we need to
+ * create a usable struct vgic_io_device to be passed to the handlers and we
+ * have to set up a buffer similar to what would have happened if a guest MMIO
+ * access occurred, including doing endian conversions on BE systems.
+ */
+static int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
+ bool is_write, int offset, u32 *val)
+{
+ unsigned int len = 4;
+ u8 buf[4];
+ int ret;
+
+ if (is_write) {
+ vgic_data_host_to_mmio_bus(buf, len, *val);
+ ret = kvm_io_gic_ops.write(vcpu, &dev->dev, offset, len, buf);
+ } else {
+ ret = kvm_io_gic_ops.read(vcpu, &dev->dev, offset, len, buf);
+ if (!ret)
+ *val = vgic_data_mmio_bus_to_host(buf, len);
+ }
+
+ return ret;
+}
+
+int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
+ int offset, u32 *val)
+{
+ struct vgic_io_device dev = {
+ .regions = vgic_v2_cpu_registers,
+ .nr_regions = ARRAY_SIZE(vgic_v2_cpu_registers),
+ };
+
+ return vgic_uaccess(vcpu, &dev, is_write, offset, val);
+}
+
+int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
+ int offset, u32 *val)
+{
+ struct vgic_io_device dev = {
+ .regions = vgic_v2_dist_registers,
+ .nr_regions = ARRAY_SIZE(vgic_v2_dist_registers),
+ };
+
+ return vgic_uaccess(vcpu, &dev, is_write, offset, val);
+}
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
new file mode 100644
index 000000000000..a0c515a412a7
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -0,0 +1,455 @@
+/*
+ * VGICv3 MMIO handling functions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <kvm/iodev.h>
+#include <kvm/arm_vgic.h>
+
+#include <asm/kvm_emulate.h>
+
+#include "vgic.h"
+#include "vgic-mmio.h"
+
+/* extract @num bytes at byte offset @offset in @data */
+static unsigned long extract_bytes(unsigned long data, unsigned int offset,
+ unsigned int num)
+{
+ return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0);
+}
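+
+/* For example, extract_bytes(0x1122334455667788, 4, 2) == 0x3344. */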
+
+static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ u32 value = 0;
+
+ switch (addr & 0x0c) {
+ case GICD_CTLR:
+ if (vcpu->kvm->arch.vgic.enabled)
+ value |= GICD_CTLR_ENABLE_SS_G1;
+ value |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
+ break;
+ case GICD_TYPER:
+ value = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
+ value = (value >> 5) - 1;
+ value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
+ break;
+ case GICD_IIDR:
+ value = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
+ break;
+ default:
+ return 0;
+ }
+
+ return value;
+}
+
+static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+ bool was_enabled = dist->enabled;
+
+ switch (addr & 0x0c) {
+ case GICD_CTLR:
+ dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
+
+ if (!was_enabled && dist->enabled)
+ vgic_kick_vcpus(vcpu->kvm);
+ break;
+ case GICD_TYPER:
+ case GICD_IIDR:
+ return;
+ }
+}
+
+static unsigned long vgic_mmio_read_irouter(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ int intid = VGIC_ADDR_TO_INTID(addr, 64);
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
+
+ if (!irq)
+ return 0;
+
+ /* The upper word is RAZ for us. */
+ if (addr & 4)
+ return 0;
+
+ return extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);
+}
+
+static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ int intid = VGIC_ADDR_TO_INTID(addr, 64);
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
+
+ if (!irq)
+ return;
+
+ /* The upper word is WI for us since we don't implement Aff3. */
+ if (addr & 4)
+ return;
+
+ spin_lock(&irq->irq_lock);
+
+ /* We only care about and preserve Aff0, Aff1 and Aff2. */
+ irq->mpidr = val & GENMASK(23, 0);
+ irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);
+
+ spin_unlock(&irq->irq_lock);
+}
+
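+/*
+ * GICR_TYPER: bits [63:32] hold the affinity value, bits [23:8] the
+ * CPU number, and bit 4 (Last) marks the final redistributor.
+ */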
+static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+ int target_vcpu_id = vcpu->vcpu_id;
+ u64 value;
+
+ value = (mpidr & GENMASK(23, 0)) << 32;
+ value |= ((target_vcpu_id & 0xffff) << 8);
+ if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
+ value |= GICR_TYPER_LAST;
+
+ return extract_bytes(value, addr & 7, len);
+}
+
+static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
+}
+
+static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ switch (addr & 0xffff) {
+ case GICD_PIDR2:
+		/* report a GICv3 compliant implementation (ArchRev 0x3 in bits [7:4]) */
+ return 0x3b;
+ }
+
+ return 0;
+}
+
+/*
+ * The GICv3 per-IRQ registers are split: PPIs and SGIs are handled by the
+ * redistributors, while SPIs are covered by registers in the distributor
+ * block. Writes to private IRQs in this block are ignored.
+ * We take some special care here to fix up the calculation of the register
+ * offset.
+ */
+#define REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(off, rd, wr, bpi, acc) \
+ { \
+ .reg_offset = off, \
+ .bits_per_irq = bpi, \
+ .len = (bpi * VGIC_NR_PRIVATE_IRQS) / 8, \
+ .access_flags = acc, \
+ .read = vgic_mmio_read_raz, \
+ .write = vgic_mmio_write_wi, \
+ }, { \
+ .reg_offset = off + (bpi * VGIC_NR_PRIVATE_IRQS) / 8, \
+ .bits_per_irq = bpi, \
+ .len = (bpi * (1024 - VGIC_NR_PRIVATE_IRQS)) / 8, \
+ .access_flags = acc, \
+ .read = rd, \
+ .write = wr, \
+ }
+
+static const struct vgic_register_region vgic_v3_dist_registers[] = {
+ REGISTER_DESC_WITH_LENGTH(GICD_CTLR,
+ vgic_mmio_read_v3_misc, vgic_mmio_write_v3_misc, 16,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGROUPR,
+ vgic_mmio_read_rao, vgic_mmio_write_wi, 1,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER,
+ vgic_mmio_read_enable, vgic_mmio_write_senable, 1,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER,
+ vgic_mmio_read_enable, vgic_mmio_write_cenable, 1,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
+ vgic_mmio_read_pending, vgic_mmio_write_spending, 1,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR,
+ vgic_mmio_read_pending, vgic_mmio_write_cpending, 1,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
+ vgic_mmio_read_active, vgic_mmio_write_sactive, 1,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
+ vgic_mmio_read_active, vgic_mmio_write_cactive, 1,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
+ vgic_mmio_read_priority, vgic_mmio_write_priority, 8,
+ VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ITARGETSR,
+ vgic_mmio_read_raz, vgic_mmio_write_wi, 8,
+ VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICFGR,
+ vgic_mmio_read_config, vgic_mmio_write_config, 2,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGRPMODR,
+ vgic_mmio_read_raz, vgic_mmio_write_wi, 1,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IROUTER,
+ vgic_mmio_read_irouter, vgic_mmio_write_irouter, 64,
+ VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GICD_IDREGS,
+ vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
+ VGIC_ACCESS_32bit),
+};
+
+static const struct vgic_register_region vgic_v3_rdbase_registers[] = {
+ REGISTER_DESC_WITH_LENGTH(GICR_CTLR,
+ vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
+ vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
+ vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
+ VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GICR_PROPBASER,
+ vgic_mmio_read_raz, vgic_mmio_write_wi, 8,
+ VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GICR_PENDBASER,
+ vgic_mmio_read_raz, vgic_mmio_write_wi, 8,
+ VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
+ vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
+ VGIC_ACCESS_32bit),
+};
+
+static const struct vgic_register_region vgic_v3_sgibase_registers[] = {
+ REGISTER_DESC_WITH_LENGTH(GICR_IGROUPR0,
+ vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GICR_ISENABLER0,
+ vgic_mmio_read_enable, vgic_mmio_write_senable, 4,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GICR_ICENABLER0,
+ vgic_mmio_read_enable, vgic_mmio_write_cenable, 4,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GICR_ISPENDR0,
+ vgic_mmio_read_pending, vgic_mmio_write_spending, 4,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GICR_ICPENDR0,
+ vgic_mmio_read_pending, vgic_mmio_write_cpending, 4,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GICR_ISACTIVER0,
+ vgic_mmio_read_active, vgic_mmio_write_sactive, 4,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GICR_ICACTIVER0,
+ vgic_mmio_read_active, vgic_mmio_write_cactive, 4,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GICR_IPRIORITYR0,
+ vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
+ VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+ REGISTER_DESC_WITH_LENGTH(GICR_ICFGR0,
+ vgic_mmio_read_config, vgic_mmio_write_config, 8,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GICR_IGRPMODR0,
+ vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
+ VGIC_ACCESS_32bit),
+ REGISTER_DESC_WITH_LENGTH(GICR_NSACR,
+ vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
+ VGIC_ACCESS_32bit),
+};
+
+unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev)
+{
+ dev->regions = vgic_v3_dist_registers;
+ dev->nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);
+
+ kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);
+
+ return SZ_64K;
+}
+
+int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t redist_base_address)
+{
+ int nr_vcpus = atomic_read(&kvm->online_vcpus);
+ struct kvm_vcpu *vcpu;
+ struct vgic_io_device *devices;
+ int c, ret = 0;
+
+ devices = kmalloc(sizeof(struct vgic_io_device) * nr_vcpus * 2,
+ GFP_KERNEL);
+ if (!devices)
+ return -ENOMEM;
+
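+	/*
+	 * Each VCPU gets two contiguous 64K frames: the RD_base frame,
+	 * immediately followed by the SGI_base frame.
+	 */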
+ kvm_for_each_vcpu(c, vcpu, kvm) {
+ gpa_t rd_base = redist_base_address + c * SZ_64K * 2;
+ gpa_t sgi_base = rd_base + SZ_64K;
+ struct vgic_io_device *rd_dev = &devices[c * 2];
+ struct vgic_io_device *sgi_dev = &devices[c * 2 + 1];
+
+ kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
+ rd_dev->base_addr = rd_base;
+ rd_dev->regions = vgic_v3_rdbase_registers;
+ rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rdbase_registers);
+ rd_dev->redist_vcpu = vcpu;
+
+ mutex_lock(&kvm->slots_lock);
+ ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
+ SZ_64K, &rd_dev->dev);
+ mutex_unlock(&kvm->slots_lock);
+
+ if (ret)
+ break;
+
+ kvm_iodevice_init(&sgi_dev->dev, &kvm_io_gic_ops);
+ sgi_dev->base_addr = sgi_base;
+ sgi_dev->regions = vgic_v3_sgibase_registers;
+ sgi_dev->nr_regions = ARRAY_SIZE(vgic_v3_sgibase_registers);
+ sgi_dev->redist_vcpu = vcpu;
+
+ mutex_lock(&kvm->slots_lock);
+ ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, sgi_base,
+ SZ_64K, &sgi_dev->dev);
+ mutex_unlock(&kvm->slots_lock);
+ if (ret) {
+ kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
+ &rd_dev->dev);
+ break;
+ }
+ }
+
+ if (ret) {
+		/* Registration for vcpu c failed, so unwind from the previous one. */
+ for (c--; c >= 0; c--) {
+ kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
+ &devices[c * 2].dev);
+ kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS,
+ &devices[c * 2 + 1].dev);
+ }
+ kfree(devices);
+ } else {
+ kvm->arch.vgic.redist_iodevs = devices;
+ }
+
+ return ret;
+}
+
+/*
+ * Compare a given affinity (levels 1-3 and a level-0 mask, from the SGI
+ * generation register ICC_SGI1R_EL1) with a given VCPU.
+ * If the VCPU's MPIDR matches, return the level-0 affinity, otherwise
+ * return -1.
+ */
+static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
+{
+ unsigned long affinity;
+ int level0;
+
+ /*
+ * Split the current VCPU's MPIDR into affinity level 0 and the
+ * rest as this is what we have to compare against.
+ */
+ affinity = kvm_vcpu_get_mpidr_aff(vcpu);
+ level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
+ affinity &= ~MPIDR_LEVEL_MASK;
+
+ /* bail out if the upper three levels don't match */
+ if (sgi_aff != affinity)
+ return -1;
+
+	/* Is this VCPU's bit set in the mask? */
+ if (!(sgi_cpu_mask & BIT(level0)))
+ return -1;
+
+ return level0;
+}
+
+/*
+ * The ICC_SGI* registers encode the affinity differently from the MPIDR,
+ * so provide a wrapper to use the existing defines to isolate a certain
+ * affinity level.
+ */
+#define SGI_AFFINITY_LEVEL(reg, level) \
+ ((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
+ >> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
+
+/**
+ * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
+ * @vcpu: The VCPU requesting a SGI
+ * @reg: The value written into the ICC_SGI1R_EL1 register by that VCPU
+ *
+ * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
+ * This will trap in sys_regs.c and call this function.
+ * The ICC_SGI1R_EL1 register contains the upper three affinity levels of the
+ * target processors as well as a bitmask of 16 Aff0 CPUs.
+ * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
+ * check for matching ones. If this bit is set, we signal all VCPUs except
+ * the calling one.
+ */
+void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_vcpu *c_vcpu;
+ u16 target_cpus;
+ u64 mpidr;
+ int sgi, c;
+ int vcpu_id = vcpu->vcpu_id;
+ bool broadcast;
+
+ sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
+ broadcast = reg & BIT(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
+ target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
+ mpidr = SGI_AFFINITY_LEVEL(reg, 3);
+ mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
+ mpidr |= SGI_AFFINITY_LEVEL(reg, 1);
+
+ /*
+ * We iterate over all VCPUs to find the MPIDRs matching the request.
+ * If we have handled one CPU, we clear its bit to detect early
+ * if we are already finished. This avoids iterating through all
+	 * VCPUs when most of the time we just signal a single VCPU.
+ */
+ kvm_for_each_vcpu(c, c_vcpu, kvm) {
+ struct vgic_irq *irq;
+
+ /* Exit early if we have dealt with all requested CPUs */
+ if (!broadcast && target_cpus == 0)
+ break;
+
+ /* Don't signal the calling VCPU */
+ if (broadcast && c == vcpu_id)
+ continue;
+
+ if (!broadcast) {
+ int level0;
+
+ level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
+ if (level0 == -1)
+ continue;
+
+ /* remove this matching VCPU from the mask */
+ target_cpus &= ~BIT(level0);
+ }
+
+ irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
+
+ spin_lock(&irq->irq_lock);
+ irq->pending = true;
+
+ vgic_queue_irq_unlock(vcpu->kvm, irq);
+ }
+}
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
new file mode 100644
index 000000000000..059595ec3da0
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -0,0 +1,526 @@
+/*
+ * VGIC MMIO handling functions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/bsearch.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <kvm/iodev.h>
+#include <kvm/arm_vgic.h>
+
+#include "vgic.h"
+#include "vgic-mmio.h"
+
+unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ return 0;
+}
+
+unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ return -1UL;
+}
+
+void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
+ unsigned int len, unsigned long val)
+{
+ /* Ignore */
+}
+
+/*
+ * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
+ * of the enabled bit, so there is only one function for both here.
+ */
+unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ u32 value = 0;
+ int i;
+
+ /* Loop over all IRQs affected by this read */
+ for (i = 0; i < len * 8; i++) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+ if (irq->enabled)
+ value |= (1U << i);
+ }
+
+ return value;
+}
+
+void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ int i;
+
+ for_each_set_bit(i, &val, len * 8) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+ spin_lock(&irq->irq_lock);
+ irq->enabled = true;
+ vgic_queue_irq_unlock(vcpu->kvm, irq);
+ }
+}
+
+void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ int i;
+
+ for_each_set_bit(i, &val, len * 8) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+ spin_lock(&irq->irq_lock);
+
+ irq->enabled = false;
+
+ spin_unlock(&irq->irq_lock);
+ }
+}
+
+unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ u32 value = 0;
+ int i;
+
+ /* Loop over all IRQs affected by this read */
+ for (i = 0; i < len * 8; i++) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+ if (irq->pending)
+ value |= (1U << i);
+ }
+
+ return value;
+}
+
+void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ int i;
+
+ for_each_set_bit(i, &val, len * 8) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+ spin_lock(&irq->irq_lock);
+ irq->pending = true;
+ if (irq->config == VGIC_CONFIG_LEVEL)
+ irq->soft_pending = true;
+
+ vgic_queue_irq_unlock(vcpu->kvm, irq);
+ }
+}
+
+void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ int i;
+
+ for_each_set_bit(i, &val, len * 8) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+ spin_lock(&irq->irq_lock);
+
+ if (irq->config == VGIC_CONFIG_LEVEL) {
+ irq->soft_pending = false;
+ irq->pending = irq->line_level;
+ } else {
+ irq->pending = false;
+ }
+
+ spin_unlock(&irq->irq_lock);
+ }
+}
+
+unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ u32 value = 0;
+ int i;
+
+ /* Loop over all IRQs affected by this read */
+ for (i = 0; i < len * 8; i++) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+ if (irq->active)
+ value |= (1U << i);
+ }
+
+ return value;
+}
+
+static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
+ bool new_active_state)
+{
+ spin_lock(&irq->irq_lock);
+ /*
+ * If this virtual IRQ was written into a list register, we
+ * have to make sure the CPU that runs the VCPU thread has
+ * synced back LR state to the struct vgic_irq. We can only
+	 * know this for sure when either this IRQ is not assigned to
+ * anyone's AP list anymore, or the VCPU thread is not
+ * running on any CPUs.
+ *
+ * In the opposite case, we know the VCPU thread may be on its
+ * way back from the guest and still has to sync back this
+ * IRQ, so we release and re-acquire the spin_lock to let the
+ * other thread sync back the IRQ.
+ */
+ while (irq->vcpu && /* IRQ may have state in an LR somewhere */
+ irq->vcpu->cpu != -1) { /* VCPU thread is running */
+ BUG_ON(irq->intid < VGIC_NR_PRIVATE_IRQS);
+ cond_resched_lock(&irq->irq_lock);
+ }
+
+ irq->active = new_active_state;
+ if (new_active_state)
+ vgic_queue_irq_unlock(vcpu->kvm, irq);
+ else
+ spin_unlock(&irq->irq_lock);
+}
+
+/*
+ * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
+ * is not queued on some running VCPU's LRs, because then the change to the
+ * active state can be overwritten when the VCPU's state is synced coming back
+ * from the guest.
+ *
+ * For shared interrupts, we have to stop all the VCPUs because interrupts can
+ * be migrated while we don't hold the IRQ locks and we don't want to be
+ * chasing moving targets.
+ *
+ * For private interrupts, we only have to make sure the single and only VCPU
+ * that can potentially queue the IRQ is stopped.
+ */
+static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
+{
+ if (intid < VGIC_NR_PRIVATE_IRQS)
+ kvm_arm_halt_vcpu(vcpu);
+ else
+ kvm_arm_halt_guest(vcpu->kvm);
+}
+
+/* See vgic_change_active_prepare */
+static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
+{
+ if (intid < VGIC_NR_PRIVATE_IRQS)
+ kvm_arm_resume_vcpu(vcpu);
+ else
+ kvm_arm_resume_guest(vcpu->kvm);
+}
+
+void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ int i;
+
+ vgic_change_active_prepare(vcpu, intid);
+ for_each_set_bit(i, &val, len * 8) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ vgic_mmio_change_active(vcpu, irq, false);
+ }
+ vgic_change_active_finish(vcpu, intid);
+}
+
+void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+ int i;
+
+ vgic_change_active_prepare(vcpu, intid);
+ for_each_set_bit(i, &val, len * 8) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+ vgic_mmio_change_active(vcpu, irq, true);
+ }
+ vgic_change_active_finish(vcpu, intid);
+}
+
+unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
+ int i;
+ u64 val = 0;
+
+ for (i = 0; i < len; i++) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+ val |= (u64)irq->priority << (i * 8);
+ }
+
+ return val;
+}
+
+/*
+ * We currently don't handle changing the priority of an interrupt that
+ * is already pending on a VCPU. If there is a need for this, we would
+ * need to make this VCPU exit and re-evaluate the priorities, potentially
+ * leading to this interrupt getting presented now to the guest (if it has
+ * been masked by the priority mask before).
+ */
+void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
+ int i;
+
+ for (i = 0; i < len; i++) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+ spin_lock(&irq->irq_lock);
+ /* Narrow the priority range to what we actually support */
+ irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
+ spin_unlock(&irq->irq_lock);
+ }
+}
+
+unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len)
+{
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
+ u32 value = 0;
+ int i;
+
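+	/* Each IRQ uses two config bits; the upper bit of the pair set means edge. */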
+ for (i = 0; i < len * 4; i++) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+ if (irq->config == VGIC_CONFIG_EDGE)
+ value |= (2U << (i * 2));
+ }
+
+ return value;
+}
+
+void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val)
+{
+ u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
+ int i;
+
+ for (i = 0; i < len * 4; i++) {
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+
+ /*
+		 * The configuration cannot be changed for SGIs in general;
+		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
+ * code relies on PPIs being level triggered, so we also
+ * make them read-only here.
+ */
+ if (intid + i < VGIC_NR_PRIVATE_IRQS)
+ continue;
+
+ spin_lock(&irq->irq_lock);
+ if (test_bit(i * 2 + 1, &val)) {
+ irq->config = VGIC_CONFIG_EDGE;
+ } else {
+ irq->config = VGIC_CONFIG_LEVEL;
+ irq->pending = irq->line_level | irq->soft_pending;
+ }
+ spin_unlock(&irq->irq_lock);
+ }
+}
+
+static int match_region(const void *key, const void *elt)
+{
+ const unsigned int offset = (unsigned long)key;
+ const struct vgic_register_region *region = elt;
+
+ if (offset < region->reg_offset)
+ return -1;
+
+ if (offset >= region->reg_offset + region->len)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Find the proper register handler entry given a certain address offset.
+ * The regions array must be sorted by ascending reg_offset for the
+ * binary search below to work.
+ */
+static const struct vgic_register_region *
+vgic_find_mmio_region(const struct vgic_register_region *region, int nr_regions,
+ unsigned int offset)
+{
+ return bsearch((void *)(uintptr_t)offset, region, nr_regions,
+ sizeof(region[0]), match_region);
+}
+
+/*
+ * kvm_mmio_read_buf() returns a value in a format where it can be converted
+ * to a byte array and be directly observed as the guest wanted it to appear
+ * in memory if it had done the store itself, which is LE for the GIC, as the
+ * guest knows the GIC is always LE.
+ *
+ * We convert this value to the CPUs native format to deal with it as a data
+ * value.
+ */
+unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
+{
+ unsigned long data = kvm_mmio_read_buf(val, len);
+
+ switch (len) {
+ case 1:
+ return data;
+ case 2:
+ return le16_to_cpu(data);
+ case 4:
+ return le32_to_cpu(data);
+ default:
+ return le64_to_cpu(data);
+ }
+}
+
+/*
+ * kvm_mmio_write_buf() expects a value in a format such that if converted to
+ * a byte array it is observed as the guest would see it if it could perform
+ * the load directly. Since the GIC is LE, and the guest knows this, the
+ * guest expects a value in little endian format.
+ *
+ * We convert the data value from the CPUs native format to LE so that the
+ * value is returned in the proper format.
+ */
+void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
+ unsigned long data)
+{
+ switch (len) {
+ case 1:
+ break;
+ case 2:
+ data = cpu_to_le16(data);
+ break;
+ case 4:
+ data = cpu_to_le32(data);
+ break;
+ default:
+ data = cpu_to_le64(data);
+ }
+
+ kvm_mmio_write_buf(buf, len, data);
+}
+
+static
+struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
+{
+ return container_of(dev, struct vgic_io_device, dev);
+}
+
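+/*
+ * Only allow access widths and alignments that the region advertises:
+ * byte accesses, or naturally aligned 32-bit/64-bit accesses.
+ */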
+static bool check_region(const struct vgic_register_region *region,
+ gpa_t addr, int len)
+{
+ if ((region->access_flags & VGIC_ACCESS_8bit) && len == 1)
+ return true;
+ if ((region->access_flags & VGIC_ACCESS_32bit) &&
+ len == sizeof(u32) && !(addr & 3))
+ return true;
+ if ((region->access_flags & VGIC_ACCESS_64bit) &&
+ len == sizeof(u64) && !(addr & 7))
+ return true;
+
+ return false;
+}
+
+static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+ gpa_t addr, int len, void *val)
+{
+ struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
+ const struct vgic_register_region *region;
+ struct kvm_vcpu *r_vcpu;
+ unsigned long data;
+
+ region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
+ addr - iodev->base_addr);
+ if (!region || !check_region(region, addr, len)) {
+ memset(val, 0, len);
+ return 0;
+ }
+
+ r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
+ data = region->read(r_vcpu, addr, len);
+ vgic_data_host_to_mmio_bus(val, len, data);
+ return 0;
+}
+
+static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
+ gpa_t addr, int len, const void *val)
+{
+ struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
+ const struct vgic_register_region *region;
+ struct kvm_vcpu *r_vcpu;
+ unsigned long data = vgic_data_mmio_bus_to_host(val, len);
+
+ region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
+ addr - iodev->base_addr);
+ if (!region)
+ return 0;
+
+ if (!check_region(region, addr, len))
+ return 0;
+
+ r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
+ region->write(r_vcpu, addr, len, data);
+ return 0;
+}
+
+struct kvm_io_device_ops kvm_io_gic_ops = {
+ .read = dispatch_mmio_read,
+ .write = dispatch_mmio_write,
+};
+
+int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
+ enum vgic_type type)
+{
+ struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
+ int ret = 0;
+ unsigned int len;
+
+ switch (type) {
+ case VGIC_V2:
+ len = vgic_v2_init_dist_iodev(io_device);
+ break;
+#ifdef CONFIG_KVM_ARM_VGIC_V3
+ case VGIC_V3:
+ len = vgic_v3_init_dist_iodev(io_device);
+ break;
+#endif
+ default:
+ BUG_ON(1);
+ }
+
+ io_device->base_addr = dist_base_address;
+ io_device->redist_vcpu = NULL;
+
+ mutex_lock(&kvm->slots_lock);
+ ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
+ len, &io_device->dev);
+ mutex_unlock(&kvm->slots_lock);
+
+ return ret;
+}
diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
new file mode 100644
index 000000000000..850901482aec
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic-mmio.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2015, 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __KVM_ARM_VGIC_MMIO_H__
+#define __KVM_ARM_VGIC_MMIO_H__
+
+struct vgic_register_region {
+ unsigned int reg_offset;
+ unsigned int len;
+ unsigned int bits_per_irq;
+ unsigned int access_flags;
+ unsigned long (*read)(struct kvm_vcpu *vcpu, gpa_t addr,
+ unsigned int len);
+ void (*write)(struct kvm_vcpu *vcpu, gpa_t addr, unsigned int len,
+ unsigned long val);
+};
+
+extern struct kvm_io_device_ops kvm_io_gic_ops;
+
+#define VGIC_ACCESS_8bit 1
+#define VGIC_ACCESS_32bit 2
+#define VGIC_ACCESS_64bit 4
+
+/*
+ * Generate a mask that covers the number of bytes required to address
+ * up to 1024 interrupts, each represented by <bits> bits. This assumes
+ * that <bits> is a power of two.
+ */
+#define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1)
+
+/*
+ * (addr & mask) gives us the byte offset for the INT ID, so we want to
+ * divide this by 'bytes per irq' to get the INT ID, which is given
+ * by '(bits) / 8'. But we do this with fixed-point arithmetic and
+ * take advantage of the fact that division by a fraction equals
+ * multiplication by the inverted fraction, and scale up both the
+ * numerator and denominator with 8 to support at most 64 bits per IRQ:
+ */
+#define VGIC_ADDR_TO_INTID(addr, bits) (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
+ 64 / (bits) / 8)
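+
+/*
+ * For example, with 1 bit per IRQ a byte offset of 4 maps to INTID 32;
+ * with 8 bits per IRQ the byte offset equals the INTID.
+ */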
+
+/*
+ * Some VGIC registers store per-IRQ information, with a different number
+ * of bits per IRQ. For those registers this macro is used.
+ * The _WITH_LENGTH version instantiates registers with a fixed length
+ * and is mutually exclusive with the _PER_IRQ version.
+ */
+#define REGISTER_DESC_WITH_BITS_PER_IRQ(off, rd, wr, bpi, acc) \
+ { \
+ .reg_offset = off, \
+ .bits_per_irq = bpi, \
+ .len = bpi * 1024 / 8, \
+ .access_flags = acc, \
+ .read = rd, \
+ .write = wr, \
+ }
+
+#define REGISTER_DESC_WITH_LENGTH(off, rd, wr, length, acc) \
+ { \
+ .reg_offset = off, \
+ .bits_per_irq = 0, \
+ .len = length, \
+ .access_flags = acc, \
+ .read = rd, \
+ .write = wr, \
+ }
+
+int kvm_vgic_register_mmio_region(struct kvm *kvm, struct kvm_vcpu *vcpu,
+ struct vgic_register_region *reg_desc,
+ struct vgic_io_device *region,
+ int nr_irqs, bool offset_private);
+
+unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len);
+
+void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
+ unsigned long data);
+
+unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len);
+
+unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len);
+
+void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
+ unsigned int len, unsigned long val);
+
+unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len);
+
+void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val);
+
+void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val);
+
+unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len);
+
+void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val);
+
+void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val);
+
+unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len);
+
+void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val);
+
+void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val);
+
+unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len);
+
+void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val);
+
+unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len);
+
+void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
+ gpa_t addr, unsigned int len,
+ unsigned long val);
+
+unsigned int vgic_v2_init_dist_iodev(struct vgic_io_device *dev);
+
+unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev);
+
+#endif
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
new file mode 100644
index 000000000000..8ad42c217770
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright (C) 2015, 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/irqchip/arm-gic.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <kvm/arm_vgic.h>
+#include <asm/kvm_mmu.h>
+
+#include "vgic.h"
+
+/*
+ * Call this function to convert a u64 value to an unsigned long * bitmask
+ * in a way that works on both 32-bit and 64-bit LE and BE platforms.
+ *
+ * Warning: Calling this function may modify *val.
+ */
+static unsigned long *u64_to_bitmask(u64 *val)
+{
+#if defined(CONFIG_CPU_BIG_ENDIAN) && BITS_PER_LONG == 32
+ *val = (*val >> 32) | (*val << 32);
+#endif
+ return (unsigned long *)val;
+}
+
+void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu)
+{
+ struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
+
+ if (cpuif->vgic_misr & GICH_MISR_EOI) {
+ u64 eisr = cpuif->vgic_eisr;
+ unsigned long *eisr_bmap = u64_to_bitmask(&eisr);
+ int lr;
+
+ for_each_set_bit(lr, eisr_bmap, kvm_vgic_global_state.nr_lr) {
+ u32 intid = cpuif->vgic_lr[lr] & GICH_LR_VIRTUALID;
+
+ WARN_ON(cpuif->vgic_lr[lr] & GICH_LR_STATE);
+
+ kvm_notify_acked_irq(vcpu->kvm, 0,
+ intid - VGIC_NR_PRIVATE_IRQS);
+ }
+ }
+
+ /* check and disable underflow maintenance IRQ */
+ cpuif->vgic_hcr &= ~GICH_HCR_UIE;
+
+ /*
+ * In the next iterations of the vcpu loop, if we sync the
+ * vgic state after flushing it, but before entering the guest
+ * (this happens for pending signals and vmid rollovers), then
+ * make sure we don't pick up any old maintenance interrupts
+ * here.
+ */
+ cpuif->vgic_eisr = 0;
+}
+
+void vgic_v2_set_underflow(struct kvm_vcpu *vcpu)
+{
+ struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
+
+ cpuif->vgic_hcr |= GICH_HCR_UIE;
+}
+
+/*
+ * Transfer the content of the LRs back into the corresponding ap_list:
+ * - active bit is transferred as is
+ * - pending bit is
+ * - transferred as is in case of edge sensitive IRQs
+ * - set to the line-level (resample time) for level sensitive IRQs
+ */
+void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
+{
+ struct vgic_v2_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v2;
+ int lr;
+
+ for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) {
+ u32 val = cpuif->vgic_lr[lr];
+ u32 intid = val & GICH_LR_VIRTUALID;
+ struct vgic_irq *irq;
+
+ irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
+
+ spin_lock(&irq->irq_lock);
+
+ /* Always preserve the active bit */
+ irq->active = !!(val & GICH_LR_ACTIVE_BIT);
+
+ /* Edge is the only case where we preserve the pending bit */
+ if (irq->config == VGIC_CONFIG_EDGE &&
+ (val & GICH_LR_PENDING_BIT)) {
+ irq->pending = true;
+
+ if (vgic_irq_is_sgi(intid)) {
+ u32 cpuid = val & GICH_LR_PHYSID_CPUID;
+
+ cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+ irq->source |= (1 << cpuid);
+ }
+ }
+
+ /* Clear soft pending state when level IRQs have been acked */
+ if (irq->config == VGIC_CONFIG_LEVEL &&
+ !(val & GICH_LR_PENDING_BIT)) {
+ irq->soft_pending = false;
+ irq->pending = irq->line_level;
+ }
+
+ spin_unlock(&irq->irq_lock);
+ }
+}
+
+/*
+ * Populates the particular LR with the state of a given IRQ:
+ * - for an edge sensitive IRQ the pending state is cleared in struct vgic_irq
+ * - for a level sensitive IRQ the pending state value is unchanged;
+ * it is dictated directly by the input level
+ *
+ * If @irq describes an SGI with multiple sources, we choose the
+ * lowest-numbered source VCPU and clear that bit in the source bitmap.
+ *
+ * The irq_lock must be held by the caller.
+ */
+void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
+{
+ u32 val = irq->intid;
+
+ if (irq->pending) {
+ val |= GICH_LR_PENDING_BIT;
+
+ if (irq->config == VGIC_CONFIG_EDGE)
+ irq->pending = false;
+
+ if (vgic_irq_is_sgi(irq->intid)) {
+ u32 src = ffs(irq->source);
+
+ BUG_ON(!src);
+ val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
+ irq->source &= ~(1 << (src - 1));
+ if (irq->source)
+ irq->pending = true;
+ }
+ }
+
+ if (irq->active)
+ val |= GICH_LR_ACTIVE_BIT;
+
+ if (irq->hw) {
+ val |= GICH_LR_HW;
+ val |= irq->hwintid << GICH_LR_PHYSID_CPUID_SHIFT;
+ } else {
+ if (irq->config == VGIC_CONFIG_LEVEL)
+ val |= GICH_LR_EOI;
+ }
+
+ /* The GICv2 LR only holds five bits of priority. */
+ val |= (irq->priority >> 3) << GICH_LR_PRIORITY_SHIFT;
+
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = val;
+}
+
+void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr)
+{
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = 0;
+}
+
+void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+ u32 vmcr;
+
+ vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
+ vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) &
+ GICH_VMCR_ALIAS_BINPOINT_MASK;
+ vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) &
+ GICH_VMCR_BINPOINT_MASK;
+ vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) &
+ GICH_VMCR_PRIMASK_MASK;
+
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
+}
+
+void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+ u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;
+
+ vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >>
+ GICH_VMCR_CTRL_SHIFT;
+ vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >>
+ GICH_VMCR_ALIAS_BINPOINT_SHIFT;
+ vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >>
+ GICH_VMCR_BINPOINT_SHIFT;
+ vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >>
+ GICH_VMCR_PRIMASK_SHIFT;
+}
+
+void vgic_v2_enable(struct kvm_vcpu *vcpu)
+{
+ /*
+	 * Forcing VMCR to zero makes the GIC restore the binary
+ * points to their reset values. Anything else resets to zero
+ * anyway.
+ */
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr = ~0;
+
+ /* Get the show on the road... */
+ vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
+}
+
+/* check for overlapping regions and for regions crossing the end of memory */
+static bool vgic_v2_check_base(gpa_t dist_base, gpa_t cpu_base)
+{
+ if (dist_base + KVM_VGIC_V2_DIST_SIZE < dist_base)
+ return false;
+ if (cpu_base + KVM_VGIC_V2_CPU_SIZE < cpu_base)
+ return false;
+
+ if (dist_base + KVM_VGIC_V2_DIST_SIZE <= cpu_base)
+ return true;
+ if (cpu_base + KVM_VGIC_V2_CPU_SIZE <= dist_base)
+ return true;
+
+ return false;
+}
+
+int vgic_v2_map_resources(struct kvm *kvm)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ int ret = 0;
+
+ if (vgic_ready(kvm))
+ goto out;
+
+ if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
+ IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
+ kvm_err("Need to set vgic cpu and dist addresses first\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
+ kvm_err("VGIC CPU and dist frames overlap\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Initialize the vgic if this hasn't already been done on demand by
+ * accessing the vgic state from userspace.
+ */
+ ret = vgic_init(kvm);
+ if (ret) {
+ kvm_err("Unable to initialize VGIC dynamic data structures\n");
+ goto out;
+ }
+
+ ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
+ if (ret) {
+ kvm_err("Unable to register VGIC MMIO regions\n");
+ goto out;
+ }
+
+ ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
+ kvm_vgic_global_state.vcpu_base,
+ KVM_VGIC_V2_CPU_SIZE, true);
+ if (ret) {
+ kvm_err("Unable to remap VGIC CPU to VCPU\n");
+ goto out;
+ }
+
+ dist->ready = true;
+
+out:
+ if (ret)
+ kvm_vgic_destroy(kvm);
+ return ret;
+}
+
+/**
+ * vgic_v2_probe - probe for a GICv2 compatible interrupt controller
+ * @info: pointer to the GIC KVM info structure
+ *
+ * Returns 0 if a GICv2 has been found, returns an error code otherwise
+ */
+int vgic_v2_probe(const struct gic_kvm_info *info)
+{
+ int ret;
+ u32 vtr;
+
+ if (!info->vctrl.start) {
+ kvm_err("GICH not present in the firmware table\n");
+ return -ENXIO;
+ }
+
+ if (!PAGE_ALIGNED(info->vcpu.start)) {
+ kvm_err("GICV physical address 0x%llx not page aligned\n",
+ (unsigned long long)info->vcpu.start);
+ return -ENXIO;
+ }
+
+ if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
+ kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
+ (unsigned long long)resource_size(&info->vcpu),
+ PAGE_SIZE);
+ return -ENXIO;
+ }
+
+ kvm_vgic_global_state.vctrl_base = ioremap(info->vctrl.start,
+ resource_size(&info->vctrl));
+ if (!kvm_vgic_global_state.vctrl_base) {
+ kvm_err("Cannot ioremap GICH\n");
+ return -ENOMEM;
+ }
+
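+	/* GICH_VTR.ListRegs (bits [5:0]) encodes the number of LRs minus one. */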
+ vtr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VTR);
+ kvm_vgic_global_state.nr_lr = (vtr & 0x3f) + 1;
+
+ ret = create_hyp_io_mappings(kvm_vgic_global_state.vctrl_base,
+ kvm_vgic_global_state.vctrl_base +
+ resource_size(&info->vctrl),
+ info->vctrl.start);
+
+ if (ret) {
+ kvm_err("Cannot map VCTRL into hyp\n");
+ iounmap(kvm_vgic_global_state.vctrl_base);
+ return ret;
+ }
+
+ kvm_vgic_global_state.can_emulate_gicv2 = true;
+ kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
+
+ kvm_vgic_global_state.vcpu_base = info->vcpu.start;
+ kvm_vgic_global_state.type = VGIC_V2;
+ kvm_vgic_global_state.max_gic_vcpus = VGIC_V2_MAX_CPUS;
+
+ kvm_info("vgic-v2@%llx\n", info->vctrl.start);
+
+ return 0;
+}
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
new file mode 100644
index 000000000000..336a46115937
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -0,0 +1,330 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <kvm/arm_vgic.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_asm.h>
+
+#include "vgic.h"
+
+void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu)
+{
+ struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
+ u32 model = vcpu->kvm->arch.vgic.vgic_model;
+
+ if (cpuif->vgic_misr & ICH_MISR_EOI) {
+ unsigned long eisr_bmap = cpuif->vgic_eisr;
+ int lr;
+
+ for_each_set_bit(lr, &eisr_bmap, kvm_vgic_global_state.nr_lr) {
+ u32 intid;
+ u64 val = cpuif->vgic_lr[lr];
+
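+			/*
+			 * A GICv3 guest uses the full 32-bit virtual ID
+			 * field; a GICv2 guest on a GICv3 host uses the
+			 * 10-bit GICv2 encoding.
+			 */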
+ if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
+ intid = val & ICH_LR_VIRTUAL_ID_MASK;
+ else
+ intid = val & GICH_LR_VIRTUALID;
+
+ WARN_ON(cpuif->vgic_lr[lr] & ICH_LR_STATE);
+
+ kvm_notify_acked_irq(vcpu->kvm, 0,
+ intid - VGIC_NR_PRIVATE_IRQS);
+ }
+
+ /*
+ * In the next iterations of the vcpu loop, if we sync
+ * the vgic state after flushing it, but before
+ * entering the guest (this happens for pending
+ * signals and vmid rollovers), then make sure we
+ * don't pick up any old maintenance interrupts here.
+ */
+ cpuif->vgic_eisr = 0;
+ }
+
+ cpuif->vgic_hcr &= ~ICH_HCR_UIE;
+}
+
+void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
+{
+ struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
+
+ cpuif->vgic_hcr |= ICH_HCR_UIE;
+}
+
+void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
+{
+ struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
+ u32 model = vcpu->kvm->arch.vgic.vgic_model;
+ int lr;
+
+ for (lr = 0; lr < vcpu->arch.vgic_cpu.used_lrs; lr++) {
+ u64 val = cpuif->vgic_lr[lr];
+ u32 intid;
+ struct vgic_irq *irq;
+
+ if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
+ intid = val & ICH_LR_VIRTUAL_ID_MASK;
+ else
+ intid = val & GICH_LR_VIRTUALID;
+ irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
+
+ spin_lock(&irq->irq_lock);
+
+ /* Always preserve the active bit */
+ irq->active = !!(val & ICH_LR_ACTIVE_BIT);
+
+ /* Edge is the only case where we preserve the pending bit */
+ if (irq->config == VGIC_CONFIG_EDGE &&
+ (val & ICH_LR_PENDING_BIT)) {
+ irq->pending = true;
+
+ if (vgic_irq_is_sgi(intid) &&
+ model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+ u32 cpuid = val & GICH_LR_PHYSID_CPUID;
+
+ cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
+ irq->source |= (1 << cpuid);
+ }
+ }
+
+ /* Clear soft pending state when level irqs have been acked */
+ if (irq->config == VGIC_CONFIG_LEVEL &&
+ !(val & ICH_LR_PENDING_BIT)) {
+ irq->soft_pending = false;
+ irq->pending = irq->line_level;
+ }
+
+ spin_unlock(&irq->irq_lock);
+ }
+}
+
+/* Requires the irq to be locked already */
+void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
+{
+ u32 model = vcpu->kvm->arch.vgic.vgic_model;
+ u64 val = irq->intid;
+
+ if (irq->pending) {
+ val |= ICH_LR_PENDING_BIT;
+
+ if (irq->config == VGIC_CONFIG_EDGE)
+ irq->pending = false;
+
+ if (vgic_irq_is_sgi(irq->intid) &&
+ model == KVM_DEV_TYPE_ARM_VGIC_V2) {
+ u32 src = ffs(irq->source);
+
+ BUG_ON(!src);
+ val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
+ irq->source &= ~(1 << (src - 1));
+ if (irq->source)
+ irq->pending = true;
+ }
+ }
+
+ if (irq->active)
+ val |= ICH_LR_ACTIVE_BIT;
+
+ if (irq->hw) {
+ val |= ICH_LR_HW;
+ val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
+ } else {
+ if (irq->config == VGIC_CONFIG_LEVEL)
+ val |= ICH_LR_EOI;
+ }
+
+ /*
+ * We currently only support Group1 interrupts, which is a
+ * known defect. This needs to be addressed at some point.
+ */
+ if (model == KVM_DEV_TYPE_ARM_VGIC_V3)
+ val |= ICH_LR_GROUP;
+
+ val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;
+
+ vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
+}
+
+void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
+{
+ vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
+}
+
+void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+ u32 vmcr;
+
+ vmcr = (vmcrp->ctlr << ICH_VMCR_CTLR_SHIFT) & ICH_VMCR_CTLR_MASK;
+ vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
+ vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
+ vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
+
+ vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr = vmcr;
+}
+
+void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
+{
+ u32 vmcr = vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr;
+
+ vmcrp->ctlr = (vmcr & ICH_VMCR_CTLR_MASK) >> ICH_VMCR_CTLR_SHIFT;
+ vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
+ vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
+ vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
+}
+
+void vgic_v3_enable(struct kvm_vcpu *vcpu)
+{
+ struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
+
+ /*
+	 * Forcing VMCR to zero makes the GIC restore the binary
+ * points to their reset values. Anything else resets to zero
+ * anyway.
+ */
+ vgic_v3->vgic_vmcr = 0;
+ vgic_v3->vgic_elrsr = ~0;
+
+ /*
+	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
+ * way, so we force SRE to 1 to demonstrate this to the guest.
+ * This goes with the spec allowing the value to be RAO/WI.
+ */
+ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+ vgic_v3->vgic_sre = ICC_SRE_EL1_SRE;
+ else
+ vgic_v3->vgic_sre = 0;
+
+ /* Get the show on the road... */
+ vgic_v3->vgic_hcr = ICH_HCR_EN;
+}
+
+/*
+ * Check for overlapping regions and for regions crossing the end of the
+ * guest physical address space.
+ */
+static bool vgic_v3_check_base(struct kvm *kvm)
+{
+ struct vgic_dist *d = &kvm->arch.vgic;
+ gpa_t redist_size = KVM_VGIC_V3_REDIST_SIZE;
+
+ redist_size *= atomic_read(&kvm->online_vcpus);
+
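+	/*
+	 * The first two checks reject a region that wraps around the end of
+	 * the guest physical address space; the last two only accept layouts
+	 * where the distributor and redistributor ranges do not overlap.
+	 */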
+ if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
+ return false;
+ if (d->vgic_redist_base + redist_size < d->vgic_redist_base)
+ return false;
+
+ if (d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE <= d->vgic_redist_base)
+ return true;
+ if (d->vgic_redist_base + redist_size <= d->vgic_dist_base)
+ return true;
+
+ return false;
+}
+
+int vgic_v3_map_resources(struct kvm *kvm)
+{
+ int ret = 0;
+ struct vgic_dist *dist = &kvm->arch.vgic;
+
+ if (vgic_ready(kvm))
+ goto out;
+
+ if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
+ IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
+		kvm_err("Need to set vgic distributor and redistributor addresses first\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ if (!vgic_v3_check_base(kvm)) {
+ kvm_err("VGIC redist and dist frames overlap\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+	 * For a VGICv3 we require userspace to explicitly initialize
+	 * the VGIC before we can use it.
+ */
+ if (!vgic_initialized(kvm)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
+ if (ret) {
+ kvm_err("Unable to register VGICv3 dist MMIO regions\n");
+ goto out;
+ }
+
+ ret = vgic_register_redist_iodevs(kvm, dist->vgic_redist_base);
+ if (ret) {
+ kvm_err("Unable to register VGICv3 redist MMIO regions\n");
+ goto out;
+ }
+
+ dist->ready = true;
+
+out:
+ if (ret)
+ kvm_vgic_destroy(kvm);
+ return ret;
+}
+
+/**
+ * vgic_v3_probe - probe for a GICv3 compatible interrupt controller
+ * @info: pointer to the GIC description
+ *
+ * Returns 0 if a GICv3 has been found, returns an error code otherwise
+ */
+int vgic_v3_probe(const struct gic_kvm_info *info)
+{
+ u32 ich_vtr_el2 = kvm_call_hyp(__vgic_v3_get_ich_vtr_el2);
+
+ /*
+	 * The ListRegs field is 5 bits, but there is an architectural
+ * maximum of 16 list registers. Just ignore bit 4...
+ */
+ kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
+ kvm_vgic_global_state.can_emulate_gicv2 = false;
+
+ if (!info->vcpu.start) {
+ kvm_info("GICv3: no GICV resource entry\n");
+ kvm_vgic_global_state.vcpu_base = 0;
+ } else if (!PAGE_ALIGNED(info->vcpu.start)) {
+ pr_warn("GICV physical address 0x%llx not page aligned\n",
+ (unsigned long long)info->vcpu.start);
+ kvm_vgic_global_state.vcpu_base = 0;
+ } else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
+ pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
+ (unsigned long long)resource_size(&info->vcpu),
+ PAGE_SIZE);
+ kvm_vgic_global_state.vcpu_base = 0;
+ } else {
+ kvm_vgic_global_state.vcpu_base = info->vcpu.start;
+ kvm_vgic_global_state.can_emulate_gicv2 = true;
+ kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
+ kvm_info("vgic-v2@%llx\n", info->vcpu.start);
+ }
+ if (kvm_vgic_global_state.vcpu_base == 0)
+ kvm_info("disabling GICv2 emulation\n");
+ kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
+
+ kvm_vgic_global_state.vctrl_base = NULL;
+ kvm_vgic_global_state.type = VGIC_V3;
+ kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;
+
+ return 0;
+}
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
new file mode 100644
index 000000000000..69b61abefa19
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -0,0 +1,619 @@
+/*
+ * Copyright (C) 2015, 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/list_sort.h>
+
+#include "vgic.h"
+
+#define CREATE_TRACE_POINTS
+#include "../trace.h"
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
+#else
+#define DEBUG_SPINLOCK_BUG_ON(p)
+#endif
+
+struct vgic_global __section(.hyp.text) kvm_vgic_global_state;
+
+/*
+ * Locking order is always:
+ * vgic_cpu->ap_list_lock
+ * vgic_irq->irq_lock
+ *
+ * (that is, always take the ap_list_lock before the struct vgic_irq lock).
+ *
+ * When taking more than one ap_list_lock at the same time, always take the
+ * lowest numbered VCPU's ap_list_lock first, so:
+ * vcpuX->vcpu_id < vcpuY->vcpu_id:
+ * spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
+ * spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
+ */
+
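+/*
+ * INTID ranges per the GIC architecture: SGIs are 0-15, PPIs are 16-31
+ * (both private to a VCPU), SPIs start at 32 and LPIs start at 8192.
+ */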
+struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
+ u32 intid)
+{
+ /* SGIs and PPIs */
+ if (intid <= VGIC_MAX_PRIVATE)
+ return &vcpu->arch.vgic_cpu.private_irqs[intid];
+
+ /* SPIs */
+ if (intid <= VGIC_MAX_SPI)
+ return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
+
+ /* LPIs are not yet covered */
+ if (intid >= VGIC_MIN_LPI)
+ return NULL;
+
+ WARN(1, "Looking up struct vgic_irq for reserved INTID");
+ return NULL;
+}
+
+/**
+ * vgic_target_oracle - compute the target vcpu for an irq
+ *
+ * @irq: The irq to route. Must be already locked.
+ *
+ * Based on the current state of the interrupt (enabled, pending,
+ * active, vcpu and target_vcpu), compute the next vcpu this should be
+ * given to. Return NULL if this shouldn't be injected at all.
+ *
+ * Requires the IRQ lock to be held.
+ */
+static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
+{
+ DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+
+ /* If the interrupt is active, it must stay on the current vcpu */
+ if (irq->active)
+ return irq->vcpu ? : irq->target_vcpu;
+
+ /*
+ * If the IRQ is not active but enabled and pending, we should direct
+ * it to its configured target VCPU.
+ * If the distributor is disabled, pending interrupts shouldn't be
+ * forwarded.
+ */
+ if (irq->enabled && irq->pending) {
+ if (unlikely(irq->target_vcpu &&
+ !irq->target_vcpu->kvm->arch.vgic.enabled))
+ return NULL;
+
+ return irq->target_vcpu;
+ }
+
+	/*
+	 * If neither active nor pending and enabled, then this IRQ should
+	 * not be queued to any VCPU.
+	 */
+ return NULL;
+}
+
+/*
+ * The order of items in the ap_lists defines how we'll pack things in LRs as
+ * well, the first items in the list being the first things populated in the
+ * LRs.
+ *
+ * A hard rule is that active interrupts can never be pushed out of the LRs
+ * (and therefore take priority) since we cannot reliably trap on deactivation
+ * of IRQs and therefore they have to be present in the LRs.
+ *
+ * Otherwise things should be sorted by the priority field and the GIC
+ * hardware support will take care of preemption of priority groups etc.
+ *
+ * Return negative if "a" sorts before "b", 0 to preserve order, and positive
+ * to sort "b" before "a".
+ */
+static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
+ struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
+ bool penda, pendb;
+ int ret;
+
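+	/*
+	 * Take both IRQ locks; the nested annotation tells lockdep that
+	 * acquiring two locks of the same class here is intentional.
+	 */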
+ spin_lock(&irqa->irq_lock);
+ spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
+
+ if (irqa->active || irqb->active) {
+ ret = (int)irqb->active - (int)irqa->active;
+ goto out;
+ }
+
+ penda = irqa->enabled && irqa->pending;
+ pendb = irqb->enabled && irqb->pending;
+
+ if (!penda || !pendb) {
+ ret = (int)pendb - (int)penda;
+ goto out;
+ }
+
+ /* Both pending and enabled, sort by priority */
+ ret = irqa->priority - irqb->priority;
+out:
+ spin_unlock(&irqb->irq_lock);
+ spin_unlock(&irqa->irq_lock);
+ return ret;
+}
+
+/* Must be called with the ap_list_lock held */
+static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
+{
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+
+ DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+
+ list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
+}
+
+/*
+ * An injection is only valid if it changes the line level of a
+ * level-triggered IRQ, or if it is a rising edge on an edge-triggered IRQ.
+ */
+static bool vgic_validate_injection(struct vgic_irq *irq, bool level)
+{
+ switch (irq->config) {
+ case VGIC_CONFIG_LEVEL:
+ return irq->line_level != level;
+ case VGIC_CONFIG_EDGE:
+ return level;
+ }
+
+ return false;
+}
+
+/*
+ * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
+ * Do the queuing if necessary, taking the right locks in the right order.
+ * Returns true when the IRQ was queued, false otherwise.
+ *
+ * Needs to be entered with the IRQ lock already held, but will return
+ * with all locks dropped.
+ */
+bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq)
+{
+ struct kvm_vcpu *vcpu;
+
+ DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+
+retry:
+ vcpu = vgic_target_oracle(irq);
+ if (irq->vcpu || !vcpu) {
+ /*
+ * If this IRQ is already on a VCPU's ap_list, then it
+ * cannot be moved or modified and there is no more work for
+ * us to do.
+ *
+ * Otherwise, if the irq is not pending and enabled, it does
+ * not need to be inserted into an ap_list and there is also
+ * no more work for us to do.
+ */
+ spin_unlock(&irq->irq_lock);
+ return false;
+ }
+
+ /*
+ * We must unlock the irq lock to take the ap_list_lock where
+ * we are going to insert this new pending interrupt.
+ */
+ spin_unlock(&irq->irq_lock);
+
+	/* The IRQ state may change while we hold no locks; we re-check it below. */
+
+ spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+ spin_lock(&irq->irq_lock);
+
+ /*
+ * Did something change behind our backs?
+ *
+ * There are two cases:
+ * 1) The irq lost its pending state or was disabled behind our
+ * backs and/or it was queued to another VCPU's ap_list.
+ * 2) Someone changed the affinity on this irq behind our
+ * backs and we are now holding the wrong ap_list_lock.
+ *
+ * In both cases, drop the locks and retry.
+ */
+
+ if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
+ spin_unlock(&irq->irq_lock);
+ spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+
+ spin_lock(&irq->irq_lock);
+ goto retry;
+ }
+
+ list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
+ irq->vcpu = vcpu;
+
+ spin_unlock(&irq->irq_lock);
+ spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+
+ kvm_vcpu_kick(vcpu);
+
+ return true;
+}
+
+static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
+ unsigned int intid, bool level,
+ bool mapped_irq)
+{
+ struct kvm_vcpu *vcpu;
+ struct vgic_irq *irq;
+ int ret;
+
+ trace_vgic_update_irq_pending(cpuid, intid, level);
+
+ ret = vgic_lazy_init(kvm);
+ if (ret)
+ return ret;
+
+ vcpu = kvm_get_vcpu(kvm, cpuid);
+ if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
+ return -EINVAL;
+
+ irq = vgic_get_irq(kvm, vcpu, intid);
+ if (!irq)
+ return -EINVAL;
+
+ if (irq->hw != mapped_irq)
+ return -EINVAL;
+
+ spin_lock(&irq->irq_lock);
+
+ if (!vgic_validate_injection(irq, level)) {
+ /* Nothing to see here, move along... */
+ spin_unlock(&irq->irq_lock);
+ return 0;
+ }
+
+ if (irq->config == VGIC_CONFIG_LEVEL) {
+ irq->line_level = level;
+ irq->pending = level || irq->soft_pending;
+ } else {
+ irq->pending = true;
+ }
+
+ vgic_queue_irq_unlock(kvm, irq);
+
+ return 0;
+}
+
+/**
+ * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
+ * @kvm: The VM structure pointer
+ * @cpuid: The CPU for PPIs
+ * @intid: The INTID to inject a new state to.
+ * @level: Edge-triggered: true: to trigger the interrupt
+ * false: to ignore the call
+ * Level-sensitive true: raise the input signal
+ * false: lower the input signal
+ *
+ * The VGIC is not concerned with devices being active-LOW or active-HIGH for
+ * level-sensitive interrupts. You can think of the level parameter as 1
+ * being HIGH and 0 being LOW and all devices being active-HIGH.
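+ *
+ * Purely illustrative example (the kvm handle and INTID are made up):
+ * asserting and then deasserting a level-sensitive SPI with INTID 40
+ * on VCPU 0 would look like:
+ *
+ *	kvm_vgic_inject_irq(kvm, 0, 40, true);
+ *	kvm_vgic_inject_irq(kvm, 0, 40, false);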
+ */
+int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
+ bool level)
+{
+ return vgic_update_irq_pending(kvm, cpuid, intid, level, false);
+}
+
+int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid, unsigned int intid,
+ bool level)
+{
+ return vgic_update_irq_pending(kvm, cpuid, intid, level, true);
+}
+
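+/*
+ * Mark a virtual interrupt as backed by a physical interrupt so that its
+ * state can be linked to the hardware one (e.g. for the architected timer).
+ */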
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
+{
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+
+ BUG_ON(!irq);
+
+ spin_lock(&irq->irq_lock);
+
+ irq->hw = true;
+ irq->hwintid = phys_irq;
+
+ spin_unlock(&irq->irq_lock);
+
+ return 0;
+}
+
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
+{
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+
+ BUG_ON(!irq);
+
+ if (!vgic_initialized(vcpu->kvm))
+ return -EAGAIN;
+
+ spin_lock(&irq->irq_lock);
+
+ irq->hw = false;
+ irq->hwintid = 0;
+
+ spin_unlock(&irq->irq_lock);
+
+ return 0;
+}
+
+/**
+ * vgic_prune_ap_list - Remove non-relevant interrupts from the list
+ *
+ * @vcpu: The VCPU pointer
+ *
+ * Go over the list of "interesting" interrupts, and prune those that we
+ * won't have to consider in the near future.
+ */
+static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
+{
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ struct vgic_irq *irq, *tmp;
+
+retry:
+ spin_lock(&vgic_cpu->ap_list_lock);
+
+ list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
+ struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
+
+ spin_lock(&irq->irq_lock);
+
+ BUG_ON(vcpu != irq->vcpu);
+
+ target_vcpu = vgic_target_oracle(irq);
+
+ if (!target_vcpu) {
+ /*
+ * We don't need to process this interrupt any
+ * further, move it off the list.
+ */
+ list_del(&irq->ap_list);
+ irq->vcpu = NULL;
+ spin_unlock(&irq->irq_lock);
+ continue;
+ }
+
+ if (target_vcpu == vcpu) {
+ /* We're on the right CPU */
+ spin_unlock(&irq->irq_lock);
+ continue;
+ }
+
+ /* This interrupt looks like it has to be migrated. */
+
+ spin_unlock(&irq->irq_lock);
+ spin_unlock(&vgic_cpu->ap_list_lock);
+
+ /*
+ * Ensure locking order by always locking the smallest
+ * ID first.
+ */
+ if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
+ vcpuA = vcpu;
+ vcpuB = target_vcpu;
+ } else {
+ vcpuA = target_vcpu;
+ vcpuB = vcpu;
+ }
+
+ spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+ spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
+ SINGLE_DEPTH_NESTING);
+ spin_lock(&irq->irq_lock);
+
+ /*
+ * If the affinity has been preserved, move the
+ * interrupt around. Otherwise, it means things have
+ * changed while the interrupt was unlocked, and we
+ * need to replay this.
+ *
+ * In all cases, we cannot trust the list not to have
+ * changed, so we restart from the beginning.
+ */
+ if (target_vcpu == vgic_target_oracle(irq)) {
+ struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;
+
+ list_del(&irq->ap_list);
+ irq->vcpu = target_vcpu;
+ list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
+ }
+
+ spin_unlock(&irq->irq_lock);
+ spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
+ spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+ goto retry;
+ }
+
+ spin_unlock(&vgic_cpu->ap_list_lock);
+}
+
+static inline void vgic_process_maintenance_interrupt(struct kvm_vcpu *vcpu)
+{
+ if (kvm_vgic_global_state.type == VGIC_V2)
+ vgic_v2_process_maintenance(vcpu);
+ else
+ vgic_v3_process_maintenance(vcpu);
+}
+
+static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
+{
+ if (kvm_vgic_global_state.type == VGIC_V2)
+ vgic_v2_fold_lr_state(vcpu);
+ else
+ vgic_v3_fold_lr_state(vcpu);
+}
+
+/* Requires the irq_lock to be held. */
+static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
+ struct vgic_irq *irq, int lr)
+{
+ DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+
+ if (kvm_vgic_global_state.type == VGIC_V2)
+ vgic_v2_populate_lr(vcpu, irq, lr);
+ else
+ vgic_v3_populate_lr(vcpu, irq, lr);
+}
+
+static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
+{
+ if (kvm_vgic_global_state.type == VGIC_V2)
+ vgic_v2_clear_lr(vcpu, lr);
+ else
+ vgic_v3_clear_lr(vcpu, lr);
+}
+
+static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
+{
+ if (kvm_vgic_global_state.type == VGIC_V2)
+ vgic_v2_set_underflow(vcpu);
+ else
+ vgic_v3_set_underflow(vcpu);
+}
+
+/* Requires the ap_list_lock to be held. */
+static int compute_ap_list_depth(struct kvm_vcpu *vcpu)
+{
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ struct vgic_irq *irq;
+ int count = 0;
+
+ DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+
+ list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
+ spin_lock(&irq->irq_lock);
+ /* GICv2 SGIs can count for more than one... */
+ if (vgic_irq_is_sgi(irq->intid) && irq->source)
+ count += hweight8(irq->source);
+ else
+ count++;
+ spin_unlock(&irq->irq_lock);
+ }
+ return count;
+}
+
+/* Requires the VCPU's ap_list_lock to be held. */
+static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
+{
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ struct vgic_irq *irq;
+ int count = 0;
+
+ DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+
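+	/*
+	 * If not everything will fit in the LRs, sort the list so the
+	 * highest-priority interrupts are placed first, and request the
+	 * underflow maintenance interrupt so the LRs can be refilled later.
+	 */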
+ if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr) {
+ vgic_set_underflow(vcpu);
+ vgic_sort_ap_list(vcpu);
+ }
+
+ list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
+ spin_lock(&irq->irq_lock);
+
+ if (unlikely(vgic_target_oracle(irq) != vcpu))
+ goto next;
+
+ /*
+		 * If we get an SGI with multiple sources, try to get
+		 * them all in at once.
+ */
+ do {
+ vgic_populate_lr(vcpu, irq, count++);
+ } while (irq->source && count < kvm_vgic_global_state.nr_lr);
+
+next:
+ spin_unlock(&irq->irq_lock);
+
+ if (count == kvm_vgic_global_state.nr_lr)
+ break;
+ }
+
+ vcpu->arch.vgic_cpu.used_lrs = count;
+
+ /* Nuke remaining LRs */
+ for ( ; count < kvm_vgic_global_state.nr_lr; count++)
+ vgic_clear_lr(vcpu, count);
+}
+
+/* Sync back the hardware VGIC state into our emulation after a guest's run. */
+void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
+{
+ vgic_process_maintenance_interrupt(vcpu);
+ vgic_fold_lr_state(vcpu);
+ vgic_prune_ap_list(vcpu);
+}
+
+/* Flush our emulation state into the GIC hardware before entering the guest. */
+void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
+{
+ spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+ vgic_flush_lr_state(vcpu);
+ spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+}
+
+int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
+{
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ struct vgic_irq *irq;
+ bool pending = false;
+
+ if (!vcpu->kvm->arch.vgic.enabled)
+ return false;
+
+ spin_lock(&vgic_cpu->ap_list_lock);
+
+ list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
+ spin_lock(&irq->irq_lock);
+ pending = irq->pending && irq->enabled;
+ spin_unlock(&irq->irq_lock);
+
+ if (pending)
+ break;
+ }
+
+ spin_unlock(&vgic_cpu->ap_list_lock);
+
+ return pending;
+}
+
+void vgic_kick_vcpus(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ int c;
+
+ /*
+ * We've injected an interrupt, time to find out who deserves
+ * a good kick...
+ */
+ kvm_for_each_vcpu(c, vcpu, kvm) {
+ if (kvm_vgic_vcpu_pending_irq(vcpu))
+ kvm_vcpu_kick(vcpu);
+ }
+}
+
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
+{
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+ bool map_is_active;
+
+ spin_lock(&irq->irq_lock);
+ map_is_active = irq->hw && irq->active;
+ spin_unlock(&irq->irq_lock);
+
+ return map_is_active;
+}
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
new file mode 100644
index 000000000000..7b300ca370b7
--- /dev/null
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2015, 2016 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __KVM_ARM_VGIC_NEW_H__
+#define __KVM_ARM_VGIC_NEW_H__
+
+#include <linux/irqchip/arm-gic-common.h>
+
+#define PRODUCT_ID_KVM 0x4b /* ASCII code K */
+#define IMPLEMENTER_ARM 0x43b
+
+#define VGIC_ADDR_UNDEF (-1)
+#define IS_VGIC_ADDR_UNDEF(_x) ((_x) == VGIC_ADDR_UNDEF)
+
+#define INTERRUPT_ID_BITS_SPIS 10
+#define VGIC_PRI_BITS 5
+
+#define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS)
+
+struct vgic_vmcr {
+ u32 ctlr;
+ u32 abpr;
+ u32 bpr;
+ u32 pmr;
+};
+
+struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
+ u32 intid);
+bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq);
+void vgic_kick_vcpus(struct kvm *kvm);
+
+void vgic_v2_process_maintenance(struct kvm_vcpu *vcpu);
+void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
+void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
+void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
+void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
+int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
+int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
+ int offset, u32 *val);
+int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
+ int offset, u32 *val);
+void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+void vgic_v2_enable(struct kvm_vcpu *vcpu);
+int vgic_v2_probe(const struct gic_kvm_info *info);
+int vgic_v2_map_resources(struct kvm *kvm);
+int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
+ enum vgic_type);
+
+#ifdef CONFIG_KVM_ARM_VGIC_V3
+void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu);
+void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
+void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
+void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
+void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
+void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+void vgic_v3_enable(struct kvm_vcpu *vcpu);
+int vgic_v3_probe(const struct gic_kvm_info *info);
+int vgic_v3_map_resources(struct kvm *kvm);
+int vgic_register_redist_iodevs(struct kvm *kvm, gpa_t redist_base_address);
+#else
+static inline void vgic_v3_process_maintenance(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline void vgic_v3_populate_lr(struct kvm_vcpu *vcpu,
+ struct vgic_irq *irq, int lr)
+{
+}
+
+static inline void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
+{
+}
+
+static inline void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline
+void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+{
+}
+
+static inline
+void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
+{
+}
+
+static inline void vgic_v3_enable(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline int vgic_v3_probe(const struct gic_kvm_info *info)
+{
+ return -ENODEV;
+}
+
+static inline int vgic_v3_map_resources(struct kvm *kvm)
+{
+ return -ENODEV;
+}
+
+static inline int vgic_register_redist_iodevs(struct kvm *kvm,
+					      gpa_t redist_base_address)
+{
+ return -ENODEV;
+}
+#endif
+
+void kvm_register_vgic_device(unsigned long type);
+int vgic_lazy_init(struct kvm *kvm);
+int vgic_init(struct kvm *kvm);
+
+#endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index dd4ac9d9e8f5..37af23052470 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -63,6 +63,9 @@
#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>
+/*
+ * Worst case buffer size needed for holding an integer in decimal:
+ * "-2147483648" is 11 characters plus the terminating NUL, i.e. 12 bytes.
+ */
+#define ITOA_MAX_LEN 12
+
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
@@ -100,6 +103,9 @@ static __read_mostly struct preempt_ops kvm_preempt_ops;
struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);
+static int kvm_debugfs_num_entries;
+static const struct file_operations *stat_fops_per_vm[];
+
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
@@ -542,6 +548,58 @@ static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
kvfree(slots);
}
+static void kvm_destroy_vm_debugfs(struct kvm *kvm)
+{
+ int i;
+
+ if (!kvm->debugfs_dentry)
+ return;
+
+ debugfs_remove_recursive(kvm->debugfs_dentry);
+
+ for (i = 0; i < kvm_debugfs_num_entries; i++)
+ kfree(kvm->debugfs_stat_data[i]);
+ kfree(kvm->debugfs_stat_data);
+}
+
+static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
+{
+ char dir_name[ITOA_MAX_LEN * 2];
+ struct kvm_stat_data *stat_data;
+ struct kvm_stats_debugfs_item *p;
+
+ if (!debugfs_initialized())
+ return 0;
+
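+	/* Each VM gets a "<pid>-<fd>" directory under the kvm debugfs root. */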
+ snprintf(dir_name, sizeof(dir_name), "%d-%d", task_pid_nr(current), fd);
+ kvm->debugfs_dentry = debugfs_create_dir(dir_name,
+ kvm_debugfs_dir);
+ if (!kvm->debugfs_dentry)
+ return -ENOMEM;
+
+ kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
+ sizeof(*kvm->debugfs_stat_data),
+ GFP_KERNEL);
+ if (!kvm->debugfs_stat_data)
+ return -ENOMEM;
+
+ for (p = debugfs_entries; p->name; p++) {
+ stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL);
+ if (!stat_data)
+ return -ENOMEM;
+
+ stat_data->kvm = kvm;
+ stat_data->offset = p->offset;
+ kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
+ if (!debugfs_create_file(p->name, 0444,
+ kvm->debugfs_dentry,
+ stat_data,
+ stat_fops_per_vm[p->kind]))
+ return -ENOMEM;
+ }
+ return 0;
+}
+
static struct kvm *kvm_create_vm(unsigned long type)
{
int r, i;
@@ -647,6 +705,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
int i;
struct mm_struct *mm = kvm->mm;
+ kvm_destroy_vm_debugfs(kvm);
kvm_arch_sync_events(kvm);
spin_lock(&kvm_lock);
list_del(&kvm->vm_list);
@@ -2999,8 +3058,15 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
}
#endif
r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR | O_CLOEXEC);
- if (r < 0)
+ if (r < 0) {
kvm_put_kvm(kvm);
+ return r;
+ }
+
+ if (kvm_create_vm_debugfs(kvm, r) < 0) {
+ kvm_put_kvm(kvm);
+ return -ENOMEM;
+ }
return r;
}
@@ -3425,15 +3491,114 @@ static struct notifier_block kvm_cpu_notifier = {
.notifier_call = kvm_cpu_hotplug,
};
+static int kvm_debugfs_open(struct inode *inode, struct file *file,
+ int (*get)(void *, u64 *), int (*set)(void *, u64),
+ const char *fmt)
+{
+ struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
+ inode->i_private;
+
+	/*
+	 * The debugfs files are a reference to the kvm struct which
+	 * is still valid when kvm_destroy_vm is called.
+	 * To avoid the race between open and the removal of the debugfs
+	 * directory we test against the users count.
+	 */
+ if (!atomic_add_unless(&stat_data->kvm->users_count, 1, 0))
+ return -ENOENT;
+
+ if (simple_attr_open(inode, file, get, set, fmt)) {
+ kvm_put_kvm(stat_data->kvm);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int kvm_debugfs_release(struct inode *inode, struct file *file)
+{
+ struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
+ inode->i_private;
+
+ simple_attr_release(inode, file);
+ kvm_put_kvm(stat_data->kvm);
+
+ return 0;
+}
+
+static int vm_stat_get_per_vm(void *data, u64 *val)
+{
+ struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+
+ *val = *(u32 *)((void *)stat_data->kvm + stat_data->offset);
+
+ return 0;
+}
+
+static int vm_stat_get_per_vm_open(struct inode *inode, struct file *file)
+{
+ __simple_attr_check_format("%llu\n", 0ull);
+ return kvm_debugfs_open(inode, file, vm_stat_get_per_vm,
+ NULL, "%llu\n");
+}
+
+static const struct file_operations vm_stat_get_per_vm_fops = {
+ .owner = THIS_MODULE,
+ .open = vm_stat_get_per_vm_open,
+ .release = kvm_debugfs_release,
+ .read = simple_attr_read,
+ .write = simple_attr_write,
+ .llseek = generic_file_llseek,
+};
+
+static int vcpu_stat_get_per_vm(void *data, u64 *val)
+{
+ int i;
+ struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
+ struct kvm_vcpu *vcpu;
+
+ *val = 0;
+
+ kvm_for_each_vcpu(i, vcpu, stat_data->kvm)
+ *val += *(u32 *)((void *)vcpu + stat_data->offset);
+
+ return 0;
+}
+
+static int vcpu_stat_get_per_vm_open(struct inode *inode, struct file *file)
+{
+ __simple_attr_check_format("%llu\n", 0ull);
+ return kvm_debugfs_open(inode, file, vcpu_stat_get_per_vm,
+ NULL, "%llu\n");
+}
+
+static const struct file_operations vcpu_stat_get_per_vm_fops = {
+ .owner = THIS_MODULE,
+ .open = vcpu_stat_get_per_vm_open,
+ .release = kvm_debugfs_release,
+ .read = simple_attr_read,
+ .write = simple_attr_write,
+ .llseek = generic_file_llseek,
+};
+
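+/* fops for the per-VM debugfs files, indexed by the statistic kind. */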
+static const struct file_operations *stat_fops_per_vm[] = {
+ [KVM_STAT_VCPU] = &vcpu_stat_get_per_vm_fops,
+ [KVM_STAT_VM] = &vm_stat_get_per_vm_fops,
+};
+
static int vm_stat_get(void *_offset, u64 *val)
{
unsigned offset = (long)_offset;
struct kvm *kvm;
+ struct kvm_stat_data stat_tmp = {.offset = offset};
+ u64 tmp_val;
*val = 0;
spin_lock(&kvm_lock);
- list_for_each_entry(kvm, &vm_list, vm_list)
- *val += *(u32 *)((void *)kvm + offset);
+ list_for_each_entry(kvm, &vm_list, vm_list) {
+ stat_tmp.kvm = kvm;
+ vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
+ *val += tmp_val;
+ }
spin_unlock(&kvm_lock);
return 0;
}
@@ -3444,15 +3609,16 @@ static int vcpu_stat_get(void *_offset, u64 *val)
{
unsigned offset = (long)_offset;
struct kvm *kvm;
- struct kvm_vcpu *vcpu;
- int i;
+ struct kvm_stat_data stat_tmp = {.offset = offset};
+ u64 tmp_val;
*val = 0;
spin_lock(&kvm_lock);
- list_for_each_entry(kvm, &vm_list, vm_list)
- kvm_for_each_vcpu(i, vcpu, kvm)
- *val += *(u32 *)((void *)vcpu + offset);
-
+ list_for_each_entry(kvm, &vm_list, vm_list) {
+ stat_tmp.kvm = kvm;
+ vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
+ *val += tmp_val;
+ }
spin_unlock(&kvm_lock);
return 0;
}
@@ -3473,7 +3639,8 @@ static int kvm_init_debug(void)
if (kvm_debugfs_dir == NULL)
goto out;
- for (p = debugfs_entries; p->name; ++p) {
+ kvm_debugfs_num_entries = 0;
+ for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
if (!debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
(void *)(long)p->offset,
stat_fops[p->kind]))