author     Linus Walleij <linus.walleij@linaro.org>    2014-05-13 08:40:43 +0200
committer  Linus Walleij <linus.walleij@linaro.org>    2014-05-13 08:40:43 +0200
commit     09ffa131c773a041ecbc9ca4a8033493d69b89b9 (patch)
tree       7b7713e74608ad8aeea6832890d9cf2e3a320bd6
parent     29a1f2333e07bbbecb920cc78fd035fe8f53207a (diff)
parent     89ca3b881987f5a4be4c5dbaa7f0df12bbdde2fd (diff)
Merge tag 'v3.15-rc4' into devel
Linux 3.15-rc4
-rw-r--r--  Documentation/devicetree/bindings/arm/arch_timer.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/ata/apm-xgene.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/net/socfpga-dwmac.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/stmmac.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/sound/tlv320aic31xx.txt | 6
-rw-r--r--  MAINTAINERS | 13
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arc/kernel/entry.S | 8
-rw-r--r--  arch/arm/boot/dts/am33xx.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/am4372.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/stih415-pinctrl.dtsi | 10
-rw-r--r--  arch/arm/boot/dts/stih416-pinctrl.dtsi | 10
-rw-r--r--  arch/arm/kvm/Kconfig | 2
-rw-r--r--  arch/arm/kvm/mmu.c | 15
-rw-r--r--  arch/arm64/boot/dts/apm-storm.dtsi | 3
-rw-r--r--  arch/arm64/kernel/early_printk.c | 6
-rw-r--r--  arch/arm64/kernel/setup.c | 2
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 35
-rw-r--r--  arch/arm64/mm/mmu.c | 3
-rw-r--r--  arch/hexagon/include/asm/barrier.h | 37
-rw-r--r--  arch/parisc/include/uapi/asm/Kbuild | 3
-rw-r--r--  arch/parisc/include/uapi/asm/resource.h | 7
-rw-r--r--  arch/powerpc/boot/main.c | 8
-rw-r--r--  arch/powerpc/boot/ops.h | 2
-rw-r--r--  arch/powerpc/boot/ps3.c | 4
-rw-r--r--  arch/powerpc/include/asm/opal.h | 42
-rw-r--r--  arch/powerpc/include/uapi/asm/setup.h | 7
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c | 1
-rw-r--r--  arch/powerpc/kernel/rtas_flash.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 18
-rw-r--r--  arch/powerpc/mm/hash_native_64.c | 38
-rw-r--r--  arch/powerpc/perf/hv-24x7.c | 35
-rw-r--r--  arch/powerpc/perf/hv-gpci.c | 6
-rw-r--r--  arch/powerpc/platforms/powernv/opal-dump.c | 94
-rw-r--r--  arch/powerpc/platforms/powernv/opal-elog.c | 11
-rw-r--r--  arch/powerpc/platforms/powernv/opal-flash.c | 118
-rw-r--r--  arch/powerpc/platforms/powernv/opal-sysparam.c | 32
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c | 69
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 3
-rw-r--r--  arch/powerpc/platforms/powernv/setup.c | 48
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c | 3
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c | 5
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-memory.c | 10
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_pci.c | 2
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 1
-rw-r--r--  arch/x86/Makefile | 4
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 5
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_rapl.c | 3
-rw-r--r--  arch/x86/kernel/vsmp_64.c | 11
-rw-r--r--  arch/x86/kvm/vmx.c | 53
-rw-r--r--  drivers/acpi/acpi_processor.c | 7
-rw-r--r--  drivers/acpi/ec.c | 21
-rw-r--r--  drivers/base/dd.c | 17
-rw-r--r--  drivers/base/platform.c | 7
-rw-r--r--  drivers/clk/versatile/clk-vexpress-osc.c | 2
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 6
-rw-r--r--  drivers/clocksource/zevio-timer.c | 7
-rw-r--r--  drivers/cpufreq/longhaul.c | 36
-rw-r--r--  drivers/cpufreq/powernow-k6.c | 23
-rw-r--r--  drivers/cpufreq/powernow-k7.c | 4
-rw-r--r--  drivers/cpufreq/ppc-corenet-cpufreq.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_vidi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 23
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 54
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 1
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 9
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 21
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 5
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 4
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 22
-rw-r--r--  drivers/hwmon/coretemp.c | 4
-rw-r--r--  drivers/infiniband/hw/cxgb4/Kconfig | 6
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 39
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 1
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 13
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4fw_ri_api.h | 14
-rw-r--r--  drivers/irqchip/irq-armada-370-xp.c | 54
-rw-r--r--  drivers/irqchip/irq-crossbar.c | 2
-rw-r--r--  drivers/md/dm-cache-target.c | 1
-rw-r--r--  drivers/md/dm-thin.c | 77
-rw-r--r--  drivers/md/dm-verity.c | 15
-rw-r--r--  drivers/of/irq.c | 28
-rw-r--r--  drivers/of/platform.c | 4
-rw-r--r--  drivers/of/selftest.c | 32
-rw-r--r--  drivers/of/testcase-data/tests-interrupts.dtsi | 13
-rw-r--r--  drivers/pinctrl/pinctrl-as3722.c | 17
-rw-r--r--  drivers/pinctrl/pinctrl-single.c | 13
-rw-r--r--  drivers/pinctrl/pinctrl-tb10x.c | 3
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a7790.c | 3
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a7791.c | 2
-rw-r--r--  drivers/pnp/pnpacpi/core.c | 44
-rw-r--r--  drivers/pnp/quirks.c | 4
-rw-r--r--  drivers/s390/cio/chsc.c | 22
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 1
-rw-r--r--  drivers/scsi/virtio_scsi.c | 6
-rw-r--r--  fs/aio.c | 42
-rw-r--r--  include/asm-generic/fixmap.h | 3
-rw-r--r--  include/asm-generic/word-at-a-time.h | 2
-rw-r--r--  include/linux/ftrace.h | 2
-rw-r--r--  include/linux/interrupt.h | 4
-rw-r--r--  include/linux/irq.h | 2
-rw-r--r--  include/linux/of_irq.h | 5
-rw-r--r--  include/trace/events/module.h | 2
-rw-r--r--  kernel/hrtimer.c | 22
-rw-r--r--  kernel/irq/irqdesc.c | 7
-rw-r--r--  kernel/module.c | 6
-rw-r--r--  kernel/softirq.c | 5
-rw-r--r--  kernel/timer.c | 2
-rw-r--r--  kernel/trace/ftrace.c | 27
-rw-r--r--  kernel/trace/trace_events_trigger.c | 2
-rw-r--r--  mm/vmacache.c | 8
-rw-r--r--  sound/pci/hda/hda_controller.c | 34
-rw-r--r--  sound/pci/hda/hda_intel.c | 3
-rw-r--r--  sound/pci/hda/hda_priv.h | 1
-rw-r--r--  sound/pci/hda/patch_realtek.c | 1
-rw-r--r--  sound/soc/codecs/alc5623.c | 2
-rw-r--r--  sound/soc/codecs/cs42l52.c | 6
-rw-r--r--  sound/soc/codecs/cs42l73.c | 6
-rw-r--r--  sound/soc/codecs/tlv320aic3x.c | 9
-rw-r--r--  sound/soc/fsl/fsl_spdif.h | 4
-rw-r--r--  sound/soc/intel/sst-dsp-priv.h | 2
-rw-r--r--  sound/soc/intel/sst-haswell-ipc.c | 7
-rw-r--r--  sound/soc/jz4740/Makefile | 2
-rw-r--r--  sound/soc/sh/rcar/src.c | 4
-rw-r--r--  sound/soc/sh/rcar/ssi.c | 4
-rw-r--r--  sound/soc/soc-dapm.c | 1
-rw-r--r--  tools/lib/api/fs/debugfs.c | 4
-rw-r--r--  tools/lib/traceevent/event-parse.c | 1
-rw-r--r--  tools/lib/traceevent/event-parse.h | 4
-rw-r--r--  tools/perf/Makefile.perf | 2
-rw-r--r--  tools/perf/arch/x86/tests/dwarf-unwind.c | 3
-rw-r--r--  tools/perf/arch/x86/tests/regs_load.S | 8
-rw-r--r--  tools/perf/config/Makefile | 46
-rw-r--r--  tools/perf/tests/make | 2
-rw-r--r--  tools/perf/util/machine.c | 16
-rw-r--r--  virt/kvm/arm/vgic.c | 15
-rw-r--r--  virt/kvm/assigned-dev.c | 3
-rw-r--r--  virt/kvm/async_pf.c | 8
154 files changed, 1223 insertions, 706 deletions
diff --git a/Documentation/devicetree/bindings/arm/arch_timer.txt b/Documentation/devicetree/bindings/arm/arch_timer.txt
index 06fc7602593a..37b2cafa4e52 100644
--- a/Documentation/devicetree/bindings/arm/arch_timer.txt
+++ b/Documentation/devicetree/bindings/arm/arch_timer.txt
@@ -19,6 +19,9 @@ to deliver its interrupts via SPIs.
- clock-frequency : The frequency of the main counter, in Hz. Optional.
+- always-on : a boolean property. If present, the timer is powered through an
+ always-on power domain, therefore it never loses context.
+
Example:
timer {
diff --git a/Documentation/devicetree/bindings/ata/apm-xgene.txt b/Documentation/devicetree/bindings/ata/apm-xgene.txt
index 7bcfbf59810e..a668f0e7d001 100644
--- a/Documentation/devicetree/bindings/ata/apm-xgene.txt
+++ b/Documentation/devicetree/bindings/ata/apm-xgene.txt
@@ -24,6 +24,7 @@ Required properties:
* "sata-phy" for the SATA 6.0Gbps PHY
Optional properties:
+- dma-coherent : Present if dma operations are coherent
- status : Shall be "ok" if enabled or "disabled" if disabled.
Default is "ok".
@@ -55,6 +56,7 @@ Example:
<0x0 0x1f22e000 0x0 0x1000>,
<0x0 0x1f227000 0x0 0x1000>;
interrupts = <0x0 0x87 0x4>;
+ dma-coherent;
status = "ok";
clocks = <&sataclk 0>;
phys = <&phy2 0>;
@@ -69,6 +71,7 @@ Example:
<0x0 0x1f23e000 0x0 0x1000>,
<0x0 0x1f237000 0x0 0x1000>;
interrupts = <0x0 0x88 0x4>;
+ dma-coherent;
status = "ok";
clocks = <&sataclk 0>;
phys = <&phy3 0>;
diff --git a/Documentation/devicetree/bindings/net/socfpga-dwmac.txt b/Documentation/devicetree/bindings/net/socfpga-dwmac.txt
index 636f0ac4e223..2a60cd3e8d5d 100644
--- a/Documentation/devicetree/bindings/net/socfpga-dwmac.txt
+++ b/Documentation/devicetree/bindings/net/socfpga-dwmac.txt
@@ -23,5 +23,5 @@ gmac0: ethernet@ff700000 {
interrupt-names = "macirq";
mac-address = [00 00 00 00 00 00];/* Filled in by U-Boot */
clocks = <&emac_0_clk>;
- clocks-names = "stmmaceth";
+ clock-names = "stmmaceth";
};
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index 80c1fb8bfbb8..a2acd2b26baf 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
@@ -33,7 +33,7 @@ Optional properties:
- max-frame-size: See ethernet.txt file in the same directory
- clocks: If present, the first clock should be the GMAC main clock,
further clocks may be specified in derived bindings.
-- clocks-names: One name for each entry in the clocks property, the
+- clock-names: One name for each entry in the clocks property, the
first one should be "stmmaceth".
Examples:
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt
index 4bd5be0e5e7d..26bcb18f4e60 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt
@@ -83,7 +83,7 @@ Example:
reg = <0xfe61f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
- interrupts-names = "irqmux";
+ interrupt-names = "irqmux";
ranges = <0 0xfe610000 0x5000>;
PIO0: gpio@fe610000 {
@@ -165,7 +165,7 @@ sdhci0:sdhci@fe810000{
interrupt-parent = <&PIO3>;
#interrupt-cells = <2>;
interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; /* Interrupt line via PIO3-3 */
- interrupts-names = "card-detect";
+ interrupt-names = "card-detect";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_mmc>;
};
diff --git a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
index 569b26c4a81e..60ca07996458 100644
--- a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
+++ b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
@@ -47,7 +47,7 @@ mcasp0: mcasp0@1d00000 {
reg = <0x100000 0x3000>;
reg-names "mpu";
interrupts = <82>, <83>;
- interrupts-names = "tx", "rx";
+ interrupt-names = "tx", "rx";
op-mode = <0>; /* MCASP_IIS_MODE */
tdm-slots = <2>;
serial-dir = <
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
index 74c66dee3e14..eff12be5e789 100644
--- a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
+++ b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
@@ -13,6 +13,9 @@ Required properties:
"ti,tlv320aic3111" - TLV320AIC3111 (stereo speaker amp, MiniDSP)
- reg - <int> - I2C slave address
+- HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply,
+ DVDD-supply : power supplies for the device as covered in
+ Documentation/devicetree/bindings/regulator/regulator.txt
Optional properties:
@@ -24,9 +27,6 @@ Optional properties:
3 or MICBIAS_AVDD - MICBIAS output is connected to AVDD
If this node is not mentioned or if the value is unknown, then
micbias is set to 2.0V.
-- HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply,
- DVDD-supply : power supplies for the device as covered in
- Documentation/devicetree/bindings/regulator/regulator.txt
CODEC output pins:
* HPL
diff --git a/MAINTAINERS b/MAINTAINERS
index 491a39d28503..d8862392bd89 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3485,6 +3485,12 @@ S: Maintained
F: drivers/extcon/
F: Documentation/extcon/
+EXYNOS DP DRIVER
+M: Jingoo Han <jg1.han@samsung.com>
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+F: drivers/gpu/drm/exynos/exynos_dp*
+
EXYNOS MIPI DISPLAY DRIVERS
M: Inki Dae <inki.dae@samsung.com>
M: Donghwa Lee <dh09.lee@samsung.com>
@@ -3550,7 +3556,7 @@ F: include/scsi/libfcoe.h
F: include/uapi/scsi/fc/
FILE LOCKING (flock() and fcntl()/lockf())
-M: Jeff Layton <jlayton@redhat.com>
+M: Jeff Layton <jlayton@poochiereds.net>
M: J. Bruce Fields <bfields@fieldses.org>
L: linux-fsdevel@vger.kernel.org
S: Maintained
@@ -5108,14 +5114,19 @@ F: drivers/s390/kvm/
KERNEL VIRTUAL MACHINE (KVM) FOR ARM
M: Christoffer Dall <christoffer.dall@linaro.org>
+M: Marc Zyngier <marc.zyngier@arm.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu
W: http://systems.cs.columbia.edu/projects/kvm-arm
S: Supported
F: arch/arm/include/uapi/asm/kvm*
F: arch/arm/include/asm/kvm*
F: arch/arm/kvm/
+F: virt/kvm/arm/
+F: include/kvm/arm_*
KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
+M: Christoffer Dall <christoffer.dall@linaro.org>
M: Marc Zyngier <marc.zyngier@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu
diff --git a/Makefile b/Makefile
index 041c685e11ea..28a7259e0f3b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 15
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
NAME = Shuffling Zombie Juror
# *DOCUMENTATION*
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 819dd5f7eb05..29b82adbf0b4 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -614,11 +614,13 @@ resume_user_mode_begin:
resume_kernel_mode:
-#ifdef CONFIG_PREEMPT
-
- ; This is a must for preempt_schedule_irq()
+ ; Disable Interrupts from this point on
+ ; CONFIG_PREEMPT: This is a must for preempt_schedule_irq()
+ ; !CONFIG_PREEMPT: To ensure restore_regs is intr safe
IRQ_DISABLE r9
+#ifdef CONFIG_PREEMPT
+
; Can't preempt if preemption disabled
GET_CURR_THR_INFO_FROM_SP r10
ld r8, [r10, THREAD_INFO_PREEMPT_COUNT]
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 07f283c20eb1..cb6811e5ae5a 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -802,7 +802,7 @@
<0x46000000 0x400000>;
reg-names = "mpu", "dat";
interrupts = <80>, <81>;
- interrupts-names = "tx", "rx";
+ interrupt-names = "tx", "rx";
status = "disabled";
dmas = <&edma 8>,
<&edma 9>;
@@ -816,7 +816,7 @@
<0x46400000 0x400000>;
reg-names = "mpu", "dat";
interrupts = <82>, <83>;
- interrupts-names = "tx", "rx";
+ interrupt-names = "tx", "rx";
status = "disabled";
dmas = <&edma 10>,
<&edma 11>;
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 36d523a26831..d1f8707ff1df 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -691,7 +691,7 @@
<0x46000000 0x400000>;
reg-names = "mpu", "dat";
interrupts = <80>, <81>;
- interrupts-names = "tx", "rx";
+ interrupt-names = "tx", "rx";
status = "disabled";
dmas = <&edma 8>,
<&edma 9>;
@@ -705,7 +705,7 @@
<0x46400000 0x400000>;
reg-names = "mpu", "dat";
interrupts = <82>, <83>;
- interrupts-names = "tx", "rx";
+ interrupt-names = "tx", "rx";
status = "disabled";
dmas = <&edma 10>,
<&edma 11>;
diff --git a/arch/arm/boot/dts/stih415-pinctrl.dtsi b/arch/arm/boot/dts/stih415-pinctrl.dtsi
index f09fb10a3791..81df870e5ee6 100644
--- a/arch/arm/boot/dts/stih415-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stih415-pinctrl.dtsi
@@ -49,7 +49,7 @@
reg = <0xfe61f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
- interrupts-names = "irqmux";
+ interrupt-names = "irqmux";
ranges = <0 0xfe610000 0x5000>;
PIO0: gpio@fe610000 {
@@ -187,7 +187,7 @@
reg = <0xfee0f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>;
- interrupts-names = "irqmux";
+ interrupt-names = "irqmux";
ranges = <0 0xfee00000 0x8000>;
PIO5: gpio@fee00000 {
@@ -282,7 +282,7 @@
reg = <0xfe82f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
- interrupts-names = "irqmux";
+ interrupt-names = "irqmux";
ranges = <0 0xfe820000 0x8000>;
PIO13: gpio@fe820000 {
@@ -423,7 +423,7 @@
reg = <0xfd6bf080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
- interrupts-names = "irqmux";
+ interrupt-names = "irqmux";
ranges = <0 0xfd6b0000 0x3000>;
PIO100: gpio@fd6b0000 {
@@ -460,7 +460,7 @@
reg = <0xfd33f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
- interrupts-names = "irqmux";
+ interrupt-names = "irqmux";
ranges = <0 0xfd330000 0x5000>;
PIO103: gpio@fd330000 {
diff --git a/arch/arm/boot/dts/stih416-pinctrl.dtsi b/arch/arm/boot/dts/stih416-pinctrl.dtsi
index aeea304086eb..250d5ecc951e 100644
--- a/arch/arm/boot/dts/stih416-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stih416-pinctrl.dtsi
@@ -53,7 +53,7 @@
reg = <0xfe61f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
- interrupts-names = "irqmux";
+ interrupt-names = "irqmux";
ranges = <0 0xfe610000 0x6000>;
PIO0: gpio@fe610000 {
@@ -201,7 +201,7 @@
reg = <0xfee0f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
- interrupts-names = "irqmux";
+ interrupt-names = "irqmux";
ranges = <0 0xfee00000 0x10000>;
PIO5: gpio@fee00000 {
@@ -333,7 +333,7 @@
reg = <0xfe82f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
- interrupts-names = "irqmux";
+ interrupt-names = "irqmux";
ranges = <0 0xfe820000 0x6000>;
PIO13: gpio@fe820000 {
@@ -461,7 +461,7 @@
reg = <0xfd6bf080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
- interrupts-names = "irqmux";
+ interrupt-names = "irqmux";
ranges = <0 0xfd6b0000 0x3000>;
PIO100: gpio@fd6b0000 {
@@ -498,7 +498,7 @@
reg = <0xfd33f080 0x4>;
reg-names = "irqmux";
interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
- interrupts-names = "irqmux";
+ interrupt-names = "irqmux";
ranges = <0 0xfd330000 0x5000>;
PIO103: gpio@fd330000 {
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 466bd299b1a8..4be5bb150bdd 100644
--- a/arch/arm/kvm/Kconfig
+++ b/arch/arm/kvm/Kconfig
@@ -23,7 +23,7 @@ config KVM
select HAVE_KVM_CPU_RELAX_INTERCEPT
select KVM_MMIO
select KVM_ARM_HOST
- depends on ARM_VIRT_EXT && ARM_LPAE
+ depends on ARM_VIRT_EXT && ARM_LPAE && !CPU_BIG_ENDIAN
---help---
Support hosting virtualized guest machines. You will also
need to select one or more of the processor modules below.
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 80bb1e6c2c29..16f804938b8f 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -42,6 +42,8 @@ static unsigned long hyp_idmap_start;
static unsigned long hyp_idmap_end;
static phys_addr_t hyp_idmap_vector;
+#define pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
+
#define kvm_pmd_huge(_x) (pmd_huge(_x) || pmd_trans_huge(_x))
static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
@@ -293,14 +295,14 @@ void free_boot_hyp_pgd(void)
if (boot_hyp_pgd) {
unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
- kfree(boot_hyp_pgd);
+ free_pages((unsigned long)boot_hyp_pgd, pgd_order);
boot_hyp_pgd = NULL;
}
if (hyp_pgd)
unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
- kfree(init_bounce_page);
+ free_page((unsigned long)init_bounce_page);
init_bounce_page = NULL;
mutex_unlock(&kvm_hyp_pgd_mutex);
@@ -330,7 +332,7 @@ void free_hyp_pgds(void)
for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
- kfree(hyp_pgd);
+ free_pages((unsigned long)hyp_pgd, pgd_order);
hyp_pgd = NULL;
}
@@ -1024,7 +1026,7 @@ int kvm_mmu_init(void)
size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
phys_addr_t phys_base;
- init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
if (!init_bounce_page) {
kvm_err("Couldn't allocate HYP init bounce page\n");
err = -ENOMEM;
@@ -1050,8 +1052,9 @@ int kvm_mmu_init(void)
(unsigned long)phys_base);
}
- hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
- boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+ hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+ boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+
if (!hyp_pgd || !boot_hyp_pgd) {
kvm_err("Hyp mode PGD not allocated\n");
err = -ENOMEM;
diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi
index 93f4b2dd9248..f8c40a66e65d 100644
--- a/arch/arm64/boot/dts/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm-storm.dtsi
@@ -307,6 +307,7 @@
<0x0 0x1f21e000 0x0 0x1000>,
<0x0 0x1f217000 0x0 0x1000>;
interrupts = <0x0 0x86 0x4>;
+ dma-coherent;
status = "disabled";
clocks = <&sata01clk 0>;
phys = <&phy1 0>;
@@ -321,6 +322,7 @@
<0x0 0x1f22e000 0x0 0x1000>,
<0x0 0x1f227000 0x0 0x1000>;
interrupts = <0x0 0x87 0x4>;
+ dma-coherent;
status = "ok";
clocks = <&sata23clk 0>;
phys = <&phy2 0>;
@@ -334,6 +336,7 @@
<0x0 0x1f23d000 0x0 0x1000>,
<0x0 0x1f23e000 0x0 0x1000>;
interrupts = <0x0 0x88 0x4>;
+ dma-coherent;
status = "ok";
clocks = <&sata45clk 0>;
phys = <&phy3 0>;
diff --git a/arch/arm64/kernel/early_printk.c b/arch/arm64/kernel/early_printk.c
index ffbbdde7aba1..2dc36d00addf 100644
--- a/arch/arm64/kernel/early_printk.c
+++ b/arch/arm64/kernel/early_printk.c
@@ -143,10 +143,8 @@ static int __init setup_early_printk(char *buf)
}
/* no options parsing yet */
- if (paddr) {
- set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr);
- early_base = (void __iomem *)fix_to_virt(FIX_EARLYCON_MEM_BASE);
- }
+ if (paddr)
+ early_base = (void __iomem *)set_fixmap_offset_io(FIX_EARLYCON_MEM_BASE, paddr);
printch = match->printch;
early_console = &early_console_dev;
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 93e7df8968fe..7ec784653b29 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -396,7 +396,7 @@ static int __init arm64_device_init(void)
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
return 0;
}
-arch_initcall(arm64_device_init);
+arch_initcall_sync(arm64_device_init);
static DEFINE_PER_CPU(struct cpu, cpu_data);
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 0ba347e59f06..c851eb44dc50 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -22,8 +22,11 @@
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
+#include <linux/amba/bus.h>
#include <asm/cacheflush.h>
@@ -305,17 +308,45 @@ struct dma_map_ops coherent_swiotlb_dma_ops = {
};
EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
+static int dma_bus_notifier(struct notifier_block *nb,
+ unsigned long event, void *_dev)
+{
+ struct device *dev = _dev;
+
+ if (event != BUS_NOTIFY_ADD_DEVICE)
+ return NOTIFY_DONE;
+
+ if (of_property_read_bool(dev->of_node, "dma-coherent"))
+ set_dma_ops(dev, &coherent_swiotlb_dma_ops);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block platform_bus_nb = {
+ .notifier_call = dma_bus_notifier,
+};
+
+static struct notifier_block amba_bus_nb = {
+ .notifier_call = dma_bus_notifier,
+};
+
extern int swiotlb_late_init_with_default_size(size_t default_size);
static int __init swiotlb_late_init(void)
{
size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
- dma_ops = &coherent_swiotlb_dma_ops;
+ /*
+ * These must be registered before of_platform_populate().
+ */
+ bus_register_notifier(&platform_bus_type, &platform_bus_nb);
+ bus_register_notifier(&amba_bustype, &amba_bus_nb);
+
+ dma_ops = &noncoherent_swiotlb_dma_ops;
return swiotlb_late_init_with_default_size(swiotlb_size);
}
-subsys_initcall(swiotlb_late_init);
+arch_initcall(swiotlb_late_init);
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6b7e89569a3a..0a472c41a67f 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -374,6 +374,9 @@ int kern_addr_valid(unsigned long addr)
if (pmd_none(*pmd))
return 0;
+ if (pmd_sect(*pmd))
+ return pfn_valid(pmd_pfn(*pmd));
+
pte = pte_offset_kernel(pmd, addr);
if (pte_none(*pte))
return 0;
diff --git a/arch/hexagon/include/asm/barrier.h b/arch/hexagon/include/asm/barrier.h
deleted file mode 100644
index 4e863daea25b..000000000000
--- a/arch/hexagon/include/asm/barrier.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Memory barrier definitions for the Hexagon architecture
- *
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#ifndef _ASM_BARRIER_H
-#define _ASM_BARRIER_H
-
-#define rmb() barrier()
-#define read_barrier_depends() barrier()
-#define wmb() barrier()
-#define mb() barrier()
-#define smp_rmb() barrier()
-#define smp_read_barrier_depends() barrier()
-#define smp_wmb() barrier()
-#define smp_mb() barrier()
-
-/* Set a value and use a memory barrier. Used by the scheduler somewhere. */
-#define set_mb(var, value) \
- do { var = value; mb(); } while (0)
-
-#endif /* _ASM_BARRIER_H */
diff --git a/arch/parisc/include/uapi/asm/Kbuild b/arch/parisc/include/uapi/asm/Kbuild
index a580642555b6..348356c99514 100644
--- a/arch/parisc/include/uapi/asm/Kbuild
+++ b/arch/parisc/include/uapi/asm/Kbuild
@@ -1,6 +1,8 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
+generic-y += resource.h
+
header-y += bitsperlong.h
header-y += byteorder.h
header-y += errno.h
@@ -13,7 +15,6 @@ header-y += msgbuf.h
header-y += pdc.h
header-y += posix_types.h
header-y += ptrace.h
-header-y += resource.h
header-y += sembuf.h
header-y += setup.h
header-y += shmbuf.h
diff --git a/arch/parisc/include/uapi/asm/resource.h b/arch/parisc/include/uapi/asm/resource.h
deleted file mode 100644
index 8b06343b62ed..000000000000
--- a/arch/parisc/include/uapi/asm/resource.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _ASM_PARISC_RESOURCE_H
-#define _ASM_PARISC_RESOURCE_H
-
-#define _STK_LIM_MAX 10 * _STK_LIM
-#include <asm-generic/resource.h>
-
-#endif
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index a28f02165e97..d367a0aece2a 100644
--- a/arch/powerpc/boot/main.c
+++ b/arch/powerpc/boot/main.c
@@ -139,18 +139,18 @@ static struct addr_range prep_initrd(struct addr_range vmlinux, void *chosen,
* edit the command line passed to vmlinux (by setting /chosen/bootargs).
* The buffer is put in it's own section so that tools may locate it easier.
*/
-static char cmdline[COMMAND_LINE_SIZE]
+static char cmdline[BOOT_COMMAND_LINE_SIZE]
__attribute__((__section__("__builtin_cmdline")));
static void prep_cmdline(void *chosen)
{
if (cmdline[0] == '\0')
- getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1);
+ getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1);
printf("\n\rLinux/PowerPC load: %s", cmdline);
/* If possible, edit the command line */
if (console_ops.edit_cmdline)
- console_ops.edit_cmdline(cmdline, COMMAND_LINE_SIZE);
+ console_ops.edit_cmdline(cmdline, BOOT_COMMAND_LINE_SIZE);
printf("\n\r");
/* Put the command line back into the devtree for the kernel */
@@ -174,7 +174,7 @@ void start(void)
* built-in command line wasn't set by an external tool */
if ((loader_info.cmdline_len > 0) && (cmdline[0] == '\0'))
memmove(cmdline, loader_info.cmdline,
- min(loader_info.cmdline_len, COMMAND_LINE_SIZE-1));
+ min(loader_info.cmdline_len, BOOT_COMMAND_LINE_SIZE-1));
if (console_ops.open && (console_ops.open() < 0))
exit();
diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
index b3218ce451bb..8aad3c55aeda 100644
--- a/arch/powerpc/boot/ops.h
+++ b/arch/powerpc/boot/ops.h
@@ -15,7 +15,7 @@
#include "types.h"
#include "string.h"
-#define COMMAND_LINE_SIZE 512
+#define BOOT_COMMAND_LINE_SIZE 2048
#define MAX_PATH_LEN 256
#define MAX_PROP_LEN 256 /* What should this be? */
diff --git a/arch/powerpc/boot/ps3.c b/arch/powerpc/boot/ps3.c
index 9954d98871d0..4ec2d86d3c50 100644
--- a/arch/powerpc/boot/ps3.c
+++ b/arch/powerpc/boot/ps3.c
@@ -47,13 +47,13 @@ BSS_STACK(4096);
* The buffer is put in it's own section so that tools may locate it easier.
*/
-static char cmdline[COMMAND_LINE_SIZE]
+static char cmdline[BOOT_COMMAND_LINE_SIZE]
__attribute__((__section__("__builtin_cmdline")));
static void prep_cmdline(void *chosen)
{
if (cmdline[0] == '\0')
- getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1);
+ getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1);
else
setprop_str(chosen, "bootargs", cmdline);
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index a2efdaa020b0..66ad7a74116f 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -41,14 +41,14 @@ struct opal_takeover_args {
* size except the last one in the list to be as well.
*/
struct opal_sg_entry {
- void *data;
- long length;
+ __be64 data;
+ __be64 length;
};
-/* sg list */
+/* SG list */
struct opal_sg_list {
- unsigned long num_entries;
- struct opal_sg_list *next;
+ __be64 length;
+ __be64 next;
struct opal_sg_entry entry[];
};
@@ -858,8 +858,8 @@ int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
uint32_t addr, __be32 *data, uint32_t sz);
-int64_t opal_read_elog(uint64_t buffer, size_t size, uint64_t log_id);
-int64_t opal_get_elog_size(uint64_t *log_id, size_t *size, uint64_t *elog_type);
+int64_t opal_read_elog(uint64_t buffer, uint64_t size, uint64_t log_id);
+int64_t opal_get_elog_size(__be64 *log_id, __be64 *size, __be64 *elog_type);
int64_t opal_write_elog(uint64_t buffer, uint64_t size, uint64_t offset);
int64_t opal_send_ack_elog(uint64_t log_id);
void opal_resend_pending_logs(void);
@@ -868,23 +868,24 @@ int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
int64_t opal_manage_flash(uint8_t op);
int64_t opal_update_flash(uint64_t blk_list);
int64_t opal_dump_init(uint8_t dump_type);
-int64_t opal_dump_info(uint32_t *dump_id, uint32_t *dump_size);
-int64_t opal_dump_info2(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type);
+int64_t opal_dump_info(__be32 *dump_id, __be32 *dump_size);
+int64_t opal_dump_info2(__be32 *dump_id, __be32 *dump_size, __be32 *dump_type);
int64_t opal_dump_read(uint32_t dump_id, uint64_t buffer);
int64_t opal_dump_ack(uint32_t dump_id);
int64_t opal_dump_resend_notification(void);
-int64_t opal_get_msg(uint64_t buffer, size_t size);
-int64_t opal_check_completion(uint64_t buffer, size_t size, uint64_t token);
+int64_t opal_get_msg(uint64_t buffer, uint64_t size);
+int64_t opal_check_completion(uint64_t buffer, uint64_t size, uint64_t token);
int64_t opal_sync_host_reboot(void);
int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer,
- size_t length);
+ uint64_t length);
int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer,
- size_t length);
+ uint64_t length);
int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data);
/* Internal functions */
-extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data);
+extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
+ int depth, void *data);
extern int early_init_dt_scan_recoverable_ranges(unsigned long node,
const char *uname, int depth, void *data);
@@ -893,10 +894,6 @@ extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
extern void hvc_opal_init_early(void);
-/* Internal functions */
-extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
- int depth, void *data);
-
extern int opal_notifier_register(struct notifier_block *nb);
extern int opal_notifier_unregister(struct notifier_block *nb);
@@ -906,9 +903,6 @@ extern void opal_notifier_enable(void);
extern void opal_notifier_disable(void);
extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val);
-extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
-extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
-
extern int __opal_async_get_token(void);
extern int opal_async_get_token_interruptible(void);
extern int __opal_async_release_token(int token);
@@ -916,8 +910,6 @@ extern int opal_async_release_token(int token);
extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg);
extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data);
-extern void hvc_opal_init_early(void);
-
struct rtc_time;
extern int opal_set_rtc_time(struct rtc_time *tm);
extern void opal_get_rtc_time(struct rtc_time *tm);
@@ -937,6 +929,10 @@ extern int opal_resync_timebase(void);
extern void opal_lpc_init(void);
+struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
+ unsigned long vmalloc_size);
+void opal_free_sg_list(struct opal_sg_list *sg);
+
#endif /* __ASSEMBLY__ */
#endif /* __OPAL_H */
diff --git a/arch/powerpc/include/uapi/asm/setup.h b/arch/powerpc/include/uapi/asm/setup.h
index 552df83f1a49..ae3fb68cb28e 100644
--- a/arch/powerpc/include/uapi/asm/setup.h
+++ b/arch/powerpc/include/uapi/asm/setup.h
@@ -1 +1,6 @@
-#include <asm-generic/setup.h>
+#ifndef _UAPI_ASM_POWERPC_SETUP_H
+#define _UAPI_ASM_POWERPC_SETUP_H
+
+#define COMMAND_LINE_SIZE 2048
+
+#endif /* _UAPI_ASM_POWERPC_SETUP_H */
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 3bd77edd7610..450850a49dce 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -120,6 +120,7 @@ EXPORT_SYMBOL(giveup_spe);
EXPORT_SYMBOL(flush_instruction_cache);
#endif
EXPORT_SYMBOL(flush_dcache_range);
+EXPORT_SYMBOL(flush_icache_range);
#ifdef CONFIG_SMP
#ifdef CONFIG_PPC32
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 2f3cdb01506d..658e89d2025b 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -705,7 +705,7 @@ static int __init rtas_flash_init(void)
if (rtas_token("ibm,update-flash-64-and-reboot") ==
RTAS_UNKNOWN_SERVICE) {
pr_info("rtas_flash: no firmware flash support\n");
- return 1;
+ return -EINVAL;
}
rtas_validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index ffbb871c2bd8..b031f932c0cc 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -242,6 +242,12 @@ kvm_novcpu_exit:
*/
.globl kvm_start_guest
kvm_start_guest:
+
+ /* Set runlatch bit the minute you wake up from nap */
+ mfspr r1, SPRN_CTRLF
+ ori r1, r1, 1
+ mtspr SPRN_CTRLT, r1
+
ld r2,PACATOC(r13)
li r0,KVM_HWTHREAD_IN_KVM
@@ -309,6 +315,11 @@ kvm_no_guest:
li r0, KVM_HWTHREAD_IN_NAP
stb r0, HSTATE_HWTHREAD_STATE(r13)
kvm_do_nap:
+ /* Clear the runlatch bit before napping */
+ mfspr r2, SPRN_CTRLF
+ clrrdi r2, r2, 1
+ mtspr SPRN_CTRLT, r2
+
li r3, LPCR_PECE0
mfspr r4, SPRN_LPCR
rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
@@ -1999,8 +2010,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
/*
* Take a nap until a decrementer or external or doobell interrupt
- * occurs, with PECE1, PECE0 and PECEDP set in LPCR
+ * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the
+ * runlatch bit before napping.
*/
+ mfspr r2, SPRN_CTRLF
+ clrrdi r2, r2, 1
+ mtspr SPRN_CTRLT, r2
+
li r0,1
stb r0,HSTATE_HWTHREAD_REQ(r13)
mfspr r5,SPRN_LPCR
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 3ea26c25590b..cf1d325eae8b 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -82,17 +82,14 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
va |= penc << 12;
va |= ssize << 8;
- /* Add AVAL part */
- if (psize != apsize) {
- /*
- * MPSS, 64K base page size and 16MB parge page size
- * We don't need all the bits, but rest of the bits
- * must be ignored by the processor.
- * vpn cover upto 65 bits of va. (0...65) and we need
- * 58..64 bits of va.
- */
- va |= (vpn & 0xfe);
- }
+ /*
+ * AVAL bits:
+ * We don't need all the bits, but rest of the bits
+ * must be ignored by the processor.
+ * vpn cover upto 65 bits of va. (0...65) and we need
+ * 58..64 bits of va.
+ */
+ va |= (vpn & 0xfe); /* AVAL */
va |= 1; /* L */
asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
: : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
@@ -133,17 +130,14 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
va |= penc << 12;
va |= ssize << 8;
- /* Add AVAL part */
- if (psize != apsize) {
- /*
- * MPSS, 64K base page size and 16MB parge page size
- * We don't need all the bits, but rest of the bits
- * must be ignored by the processor.
- * vpn cover upto 65 bits of va. (0...65) and we need
- * 58..64 bits of va.
- */
- va |= (vpn & 0xfe);
- }
+ /*
+ * AVAL bits:
+ * We don't need all the bits, but rest of the bits
+ * must be ignored by the processor.
+ * vpn cover upto 65 bits of va. (0...65) and we need
+ * 58..64 bits of va.
+ */
+ va |= (vpn & 0xfe);
va |= 1; /* L */
asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
: : "r"(va) : "memory");
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 297c91051413..e0766b82e165 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -155,16 +155,28 @@ static ssize_t read_offset_data(void *dest, size_t dest_len,
return copy_len;
}
-static unsigned long h_get_24x7_catalog_page(char page[static 4096],
- u32 version, u32 index)
+static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
+ unsigned long version,
+ unsigned long index)
{
- WARN_ON(!IS_ALIGNED((unsigned long)page, 4096));
+ pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
+ phys_4096,
+ version,
+ index);
+ WARN_ON(!IS_ALIGNED(phys_4096, 4096));
return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
- virt_to_phys(page),
+ phys_4096,
version,
index);
}
+static unsigned long h_get_24x7_catalog_page(char page[],
+ u64 version, u32 index)
+{
+ return h_get_24x7_catalog_page_(virt_to_phys(page),
+ version, index);
+}
+
static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
@@ -173,7 +185,7 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
ssize_t ret = 0;
size_t catalog_len = 0, catalog_page_len = 0, page_count = 0;
loff_t page_offset = 0;
- uint32_t catalog_version_num = 0;
+ uint64_t catalog_version_num = 0;
void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
struct hv_24x7_catalog_page_0 *page_0 = page;
if (!page)
@@ -185,7 +197,7 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
goto e_free;
}
- catalog_version_num = be32_to_cpu(page_0->version);
+ catalog_version_num = be64_to_cpu(page_0->version);
catalog_page_len = be32_to_cpu(page_0->length);
catalog_len = catalog_page_len * 4096;
@@ -208,8 +220,9 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
page, 4096, page_offset * 4096);
e_free:
if (hret)
- pr_err("h_get_24x7_catalog_page(ver=%d, page=%lld) failed: rc=%ld\n",
- catalog_version_num, page_offset, hret);
+ pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed:"
+ " rc=%ld\n",
+ catalog_version_num, page_offset, hret);
kfree(page);
pr_devel("catalog_read: offset=%lld(%lld) count=%zu(%zu) catalog_len=%zu(%zu) => %zd\n",
@@ -243,7 +256,7 @@ e_free: \
static DEVICE_ATTR_RO(_name)
PAGE_0_ATTR(catalog_version, "%lld\n",
- (unsigned long long)be32_to_cpu(page_0->version));
+ (unsigned long long)be64_to_cpu(page_0->version));
PAGE_0_ATTR(catalog_len, "%lld\n",
(unsigned long long)be32_to_cpu(page_0->length) * 4096);
static BIN_ATTR_RO(catalog, 0/* real length varies */);
@@ -485,13 +498,13 @@ static int hv_24x7_init(void)
struct hv_perf_caps caps;
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
- pr_info("not a virtualized system, not enabling\n");
+ pr_debug("not a virtualized system, not enabling\n");
return -ENODEV;
}
hret = hv_perf_caps_get(&caps);
if (hret) {
- pr_info("could not obtain capabilities, error 0x%80lx, not enabling\n",
+ pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
hret);
return -ENODEV;
}
diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
index 278ba7b9c2b5..c9d399a2df82 100644
--- a/arch/powerpc/perf/hv-gpci.c
+++ b/arch/powerpc/perf/hv-gpci.c
@@ -78,7 +78,7 @@ static ssize_t kernel_version_show(struct device *dev,
return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT);
}
-DEVICE_ATTR_RO(kernel_version);
+static DEVICE_ATTR_RO(kernel_version);
HV_CAPS_ATTR(version, "0x%x\n");
HV_CAPS_ATTR(ga, "%d\n");
HV_CAPS_ATTR(expanded, "%d\n");
@@ -273,13 +273,13 @@ static int hv_gpci_init(void)
struct hv_perf_caps caps;
if (!firmware_has_feature(FW_FEATURE_LPAR)) {
- pr_info("not a virtualized system, not enabling\n");
+ pr_debug("not a virtualized system, not enabling\n");
return -ENODEV;
}
hret = hv_perf_caps_get(&caps);
if (hret) {
- pr_info("could not obtain capabilities, error 0x%80lx, not enabling\n",
+ pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
hret);
return -ENODEV;
}
diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
index b9827b0d87e4..788a1977b9a5 100644
--- a/arch/powerpc/platforms/powernv/opal-dump.c
+++ b/arch/powerpc/platforms/powernv/opal-dump.c
@@ -209,89 +209,20 @@ static struct kobj_type dump_ktype = {
.default_attrs = dump_default_attrs,
};
-static void free_dump_sg_list(struct opal_sg_list *list)
-{
- struct opal_sg_list *sg1;
- while (list) {
- sg1 = list->next;
- kfree(list);
- list = sg1;
- }
- list = NULL;
-}
-
-static struct opal_sg_list *dump_data_to_sglist(struct dump_obj *dump)
-{
- struct opal_sg_list *sg1, *list = NULL;
- void *addr;
- int64_t size;
-
- addr = dump->buffer;
- size = dump->size;
-
- sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!sg1)
- goto nomem;
-
- list = sg1;
- sg1->num_entries = 0;
- while (size > 0) {
- /* Translate virtual address to physical address */
- sg1->entry[sg1->num_entries].data =
- (void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);
-
- if (size > PAGE_SIZE)
- sg1->entry[sg1->num_entries].length = PAGE_SIZE;
- else
- sg1->entry[sg1->num_entries].length = size;
-
- sg1->num_entries++;
- if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
- sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!sg1->next)
- goto nomem;
-
- sg1 = sg1->next;
- sg1->num_entries = 0;
- }
- addr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- return list;
-
-nomem:
- pr_err("%s : Failed to allocate memory\n", __func__);
- free_dump_sg_list(list);
- return NULL;
-}
-
-static void sglist_to_phy_addr(struct opal_sg_list *list)
-{
- struct opal_sg_list *sg, *next;
-
- for (sg = list; sg; sg = next) {
- next = sg->next;
- /* Don't translate NULL pointer for last entry */
- if (sg->next)
- sg->next = (struct opal_sg_list *)__pa(sg->next);
- else
- sg->next = NULL;
-
- /* Convert num_entries to length */
- sg->num_entries =
- sg->num_entries * sizeof(struct opal_sg_entry) + 16;
- }
-}
-
-static int64_t dump_read_info(uint32_t *id, uint32_t *size, uint32_t *type)
+static int64_t dump_read_info(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type)
{
+ __be32 id, size, type;
int rc;
- *type = 0xffffffff;
- rc = opal_dump_info2(id, size, type);
+ type = cpu_to_be32(0xffffffff);
+ rc = opal_dump_info2(&id, &size, &type);
if (rc == OPAL_PARAMETER)
- rc = opal_dump_info(id, size);
+ rc = opal_dump_info(&id, &size);
+
+ *dump_id = be32_to_cpu(id);
+ *dump_size = be32_to_cpu(size);
+ *dump_type = be32_to_cpu(type);
if (rc)
pr_warn("%s: Failed to get dump info (%d)\n",
@@ -314,15 +245,12 @@ static int64_t dump_read_data(struct dump_obj *dump)
}
/* Generate SG list */
- list = dump_data_to_sglist(dump);
+ list = opal_vmalloc_to_sg_list(dump->buffer, dump->size);
if (!list) {
rc = -ENOMEM;
goto out;
}
- /* Translate sg list addr to real address */
- sglist_to_phy_addr(list);
-
/* First entry address */
addr = __pa(list);
@@ -341,7 +269,7 @@ static int64_t dump_read_data(struct dump_obj *dump)
__func__, dump->id);
/* Free SG list */
- free_dump_sg_list(list);
+ opal_free_sg_list(list);
out:
return rc;
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index ef7bc2a97862..10268c41d830 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -238,18 +238,25 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
static void elog_work_fn(struct work_struct *work)
{
- size_t elog_size;
+ __be64 size;
+ __be64 id;
+ __be64 type;
+ uint64_t elog_size;
uint64_t log_id;
uint64_t elog_type;
int rc;
char name[2+16+1];
- rc = opal_get_elog_size(&log_id, &elog_size, &elog_type);
+ rc = opal_get_elog_size(&id, &size, &type);
if (rc != OPAL_SUCCESS) {
pr_err("ELOG: Opal log read failed\n");
return;
}
+ elog_size = be64_to_cpu(size);
+ log_id = be64_to_cpu(id);
+ elog_type = be64_to_cpu(type);
+
BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
if (elog_size >= OPAL_MAX_ERRLOG_SIZE)
diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c
index 714ef972406b..dc487ff04704 100644
--- a/arch/powerpc/platforms/powernv/opal-flash.c
+++ b/arch/powerpc/platforms/powernv/opal-flash.c
@@ -79,9 +79,6 @@
/* XXX: Assume candidate image size is <= 1GB */
#define MAX_IMAGE_SIZE 0x40000000
-/* Flash sg list version */
-#define SG_LIST_VERSION (1UL)
-
/* Image status */
enum {
IMAGE_INVALID,
@@ -131,11 +128,15 @@ static DEFINE_MUTEX(image_data_mutex);
*/
static inline void opal_flash_validate(void)
{
- struct validate_flash_t *args_buf = &validate_flash_data;
+ long ret;
+ void *buf = validate_flash_data.buf;
+ __be32 size, result;
- args_buf->status = opal_validate_flash(__pa(args_buf->buf),
- &(args_buf->buf_size),
- &(args_buf->result));
+ ret = opal_validate_flash(__pa(buf), &size, &result);
+
+ validate_flash_data.status = ret;
+ validate_flash_data.buf_size = be32_to_cpu(size);
+ validate_flash_data.result = be32_to_cpu(result);
}
/*
@@ -268,93 +269,11 @@ static ssize_t manage_store(struct kobject *kobj,
}
/*
- * Free sg list
- */
-static void free_sg_list(struct opal_sg_list *list)
-{
- struct opal_sg_list *sg1;
- while (list) {
- sg1 = list->next;
- kfree(list);
- list = sg1;
- }
- list = NULL;
-}
-
-/*
- * Build candidate image scatter gather list
- *
- * list format:
- * -----------------------------------
- * | VER (8) | Entry length in bytes |
- * -----------------------------------
- * | Pointer to next entry |
- * -----------------------------------
- * | Address of memory area 1 |
- * -----------------------------------
- * | Length of memory area 1 |
- * -----------------------------------
- * | ......... |
- * -----------------------------------
- * | ......... |
- * -----------------------------------
- * | Address of memory area N |
- * -----------------------------------
- * | Length of memory area N |
- * -----------------------------------
- */
-static struct opal_sg_list *image_data_to_sglist(void)
-{
- struct opal_sg_list *sg1, *list = NULL;
- void *addr;
- int size;
-
- addr = image_data.data;
- size = image_data.size;
-
- sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!sg1)
- return NULL;
-
- list = sg1;
- sg1->num_entries = 0;
- while (size > 0) {
- /* Translate virtual address to physical address */
- sg1->entry[sg1->num_entries].data =
- (void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);
-
- if (size > PAGE_SIZE)
- sg1->entry[sg1->num_entries].length = PAGE_SIZE;
- else
- sg1->entry[sg1->num_entries].length = size;
-
- sg1->num_entries++;
- if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
- sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!sg1->next) {
- pr_err("%s : Failed to allocate memory\n",
- __func__);
- goto nomem;
- }
-
- sg1 = sg1->next;
- sg1->num_entries = 0;
- }
- addr += PAGE_SIZE;
- size -= PAGE_SIZE;
- }
- return list;
-nomem:
- free_sg_list(list);
- return NULL;
-}
-
-/*
* OPAL update flash
*/
static int opal_flash_update(int op)
{
- struct opal_sg_list *sg, *list, *next;
+ struct opal_sg_list *list;
unsigned long addr;
int64_t rc = OPAL_PARAMETER;
@@ -364,30 +283,13 @@ static int opal_flash_update(int op)
goto flash;
}
- list = image_data_to_sglist();
+ list = opal_vmalloc_to_sg_list(image_data.data, image_data.size);
if (!list)
goto invalid_img;
/* First entry address */
addr = __pa(list);
- /* Translate sg list address to absolute */
- for (sg = list; sg; sg = next) {
- next = sg->next;
- /* Don't translate NULL pointer for last entry */
- if (sg->next)
- sg->next = (struct opal_sg_list *)__pa(sg->next);
- else
- sg->next = NULL;
-
- /*
- * Convert num_entries to version/length format
- * to satisfy OPAL.
- */
- sg->num_entries = (SG_LIST_VERSION << 56) |
- (sg->num_entries * sizeof(struct opal_sg_entry) + 16);
- }
-
pr_alert("FLASH: Image is %u bytes\n", image_data.size);
pr_alert("FLASH: Image update requested\n");
pr_alert("FLASH: Image will be updated during system reboot\n");
diff --git a/arch/powerpc/platforms/powernv/opal-sysparam.c b/arch/powerpc/platforms/powernv/opal-sysparam.c
index 6b614726baf2..d202f9bc3683 100644
--- a/arch/powerpc/platforms/powernv/opal-sysparam.c
+++ b/arch/powerpc/platforms/powernv/opal-sysparam.c
@@ -39,10 +39,11 @@ struct param_attr {
struct kobj_attribute kobj_attr;
};
-static int opal_get_sys_param(u32 param_id, u32 length, void *buffer)
+static ssize_t opal_get_sys_param(u32 param_id, u32 length, void *buffer)
{
struct opal_msg msg;
- int ret, token;
+ ssize_t ret;
+ int token;
token = opal_async_get_token_interruptible();
if (token < 0) {
@@ -59,7 +60,7 @@ static int opal_get_sys_param(u32 param_id, u32 length, void *buffer)
ret = opal_async_wait_response(token, &msg);
if (ret) {
- pr_err("%s: Failed to wait for the async response, %d\n",
+ pr_err("%s: Failed to wait for the async response, %zd\n",
__func__, ret);
goto out_token;
}
@@ -111,7 +112,7 @@ static ssize_t sys_param_show(struct kobject *kobj,
{
struct param_attr *attr = container_of(kobj_attr, struct param_attr,
kobj_attr);
- int ret;
+ ssize_t ret;
mutex_lock(&opal_sysparam_mutex);
ret = opal_get_sys_param(attr->param_id, attr->param_size,
@@ -121,9 +122,10 @@ static ssize_t sys_param_show(struct kobject *kobj,
memcpy(buf, param_data_buf, attr->param_size);
+ ret = attr->param_size;
out:
mutex_unlock(&opal_sysparam_mutex);
- return ret ? ret : attr->param_size;
+ return ret;
}
static ssize_t sys_param_store(struct kobject *kobj,
@@ -131,14 +133,20 @@ static ssize_t sys_param_store(struct kobject *kobj,
{
struct param_attr *attr = container_of(kobj_attr, struct param_attr,
kobj_attr);
- int ret;
+ ssize_t ret;
+
+ /* MAX_PARAM_DATA_LEN is sizeof(param_data_buf) */
+ if (count > MAX_PARAM_DATA_LEN)
+ count = MAX_PARAM_DATA_LEN;
mutex_lock(&opal_sysparam_mutex);
memcpy(param_data_buf, buf, count);
ret = opal_set_sys_param(attr->param_id, attr->param_size,
param_data_buf);
mutex_unlock(&opal_sysparam_mutex);
- return ret ? ret : count;
+ if (!ret)
+ ret = count;
+ return ret;
}
void __init opal_sys_param_init(void)
@@ -214,13 +222,13 @@ void __init opal_sys_param_init(void)
}
if (of_property_read_u32_array(sysparam, "param-len", size, count)) {
- pr_err("SYSPARAM: Missing propery param-len in the DT\n");
+ pr_err("SYSPARAM: Missing property param-len in the DT\n");
goto out_free_perm;
}
if (of_property_read_u8_array(sysparam, "param-perm", perm, count)) {
- pr_err("SYSPARAM: Missing propery param-perm in the DT\n");
+ pr_err("SYSPARAM: Missing property param-perm in the DT\n");
goto out_free_perm;
}
@@ -233,6 +241,12 @@ void __init opal_sys_param_init(void)
/* For each of the parameters, populate the parameter attributes */
for (i = 0; i < count; i++) {
+ if (size[i] > MAX_PARAM_DATA_LEN) {
+ pr_warn("SYSPARAM: Not creating parameter %d as size "
+ "exceeds buffer length\n", i);
+ continue;
+ }
+
sysfs_attr_init(&attr[i].kobj_attr.attr);
attr[i].param_id = id[i];
attr[i].param_size = size[i];
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 49d2f00019e5..360ad80c754c 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -242,14 +242,14 @@ void opal_notifier_update_evt(uint64_t evt_mask,
void opal_notifier_enable(void)
{
int64_t rc;
- uint64_t evt = 0;
+ __be64 evt = 0;
atomic_set(&opal_notifier_hold, 0);
/* Process pending events */
rc = opal_poll_events(&evt);
if (rc == OPAL_SUCCESS && evt)
- opal_do_notifier(evt);
+ opal_do_notifier(be64_to_cpu(evt));
}
void opal_notifier_disable(void)
@@ -529,7 +529,7 @@ static irqreturn_t opal_interrupt(int irq, void *data)
opal_handle_interrupt(virq_to_hw(irq), &events);
- opal_do_notifier(events);
+ opal_do_notifier(be64_to_cpu(events));
return IRQ_HANDLED;
}
@@ -638,3 +638,66 @@ void opal_shutdown(void)
/* Export this so that test modules can use it */
EXPORT_SYMBOL_GPL(opal_invalid_call);
+
+/* Convert a region of vmalloc memory to an opal sg list */
+struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
+ unsigned long vmalloc_size)
+{
+ struct opal_sg_list *sg, *first = NULL;
+ unsigned long i = 0;
+
+ sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!sg)
+ goto nomem;
+
+ first = sg;
+
+ while (vmalloc_size > 0) {
+ uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
+ uint64_t length = min(vmalloc_size, PAGE_SIZE);
+
+ sg->entry[i].data = cpu_to_be64(data);
+ sg->entry[i].length = cpu_to_be64(length);
+ i++;
+
+ if (i >= SG_ENTRIES_PER_NODE) {
+ struct opal_sg_list *next;
+
+ next = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!next)
+ goto nomem;
+
+ sg->length = cpu_to_be64(
+ i * sizeof(struct opal_sg_entry) + 16);
+ i = 0;
+ sg->next = cpu_to_be64(__pa(next));
+ sg = next;
+ }
+
+ vmalloc_addr += length;
+ vmalloc_size -= length;
+ }
+
+ sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);
+
+ return first;
+
+nomem:
+ pr_err("%s : Failed to allocate memory\n", __func__);
+ opal_free_sg_list(first);
+ return NULL;
+}
+
+void opal_free_sg_list(struct opal_sg_list *sg)
+{
+ while (sg) {
+ uint64_t next = be64_to_cpu(sg->next);
+
+ kfree(sg);
+
+ if (next)
+ sg = __va(next);
+ else
+ sg = NULL;
+ }
+}
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 3b2b4fb3585b..98824aa99173 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -343,7 +343,6 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
pci_name(dev));
continue;
}
- pci_dev_get(dev);
pdn->pcidev = dev;
pdn->pe_number = pe->pe_number;
pe->dma_weight += pnv_ioda_dma_weight(dev);
@@ -462,7 +461,7 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
pe = &phb->ioda.pe_array[pdn->pe_number];
WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
- set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
+ set_iommu_table_base(&pdev->dev, &pe->tce32_table);
}
static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 61cf8fa9c61b..8723d32632f5 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -162,18 +162,62 @@ static void pnv_shutdown(void)
}
#ifdef CONFIG_KEXEC
+static void pnv_kexec_wait_secondaries_down(void)
+{
+ int my_cpu, i, notified = -1;
+
+ my_cpu = get_cpu();
+
+ for_each_online_cpu(i) {
+ uint8_t status;
+ int64_t rc;
+
+ if (i == my_cpu)
+ continue;
+
+ for (;;) {
+ rc = opal_query_cpu_status(get_hard_smp_processor_id(i),
+ &status);
+ if (rc != OPAL_SUCCESS || status != OPAL_THREAD_STARTED)
+ break;
+ barrier();
+ if (i != notified) {
+ printk(KERN_INFO "kexec: waiting for cpu %d "
+ "(physical %d) to enter OPAL\n",
+ i, paca[i].hw_cpu_id);
+ notified = i;
+ }
+ }
+ }
+}
+
static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
{
xics_kexec_teardown_cpu(secondary);
- /* Return secondary CPUs to firmware on OPAL v3 */
- if (firmware_has_feature(FW_FEATURE_OPALv3) && secondary) {
+ /* On OPAL v3, we return all CPUs to firmware */
+
+ if (!firmware_has_feature(FW_FEATURE_OPALv3))
+ return;
+
+ if (secondary) {
+ /* Return secondary CPUs to firmware on OPAL v3 */
mb();
get_paca()->kexec_state = KEXEC_STATE_REAL_MODE;
mb();
/* Return the CPU to OPAL */
opal_return_cpu();
+ } else if (crash_shutdown) {
+ /*
+ * On crash, we don't wait for secondaries to go
+ * down as they might be unreachable or hung, so
+ * instead we just wait a bit and move on.
+ */
+ mdelay(1);
+ } else {
+ /* Primary waits for the secondaries to have reached OPAL */
+ pnv_kexec_wait_secondaries_down();
}
}
#endif /* CONFIG_KEXEC */
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 908672bdcea6..bf5fcd452168 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -30,6 +30,7 @@
#include <asm/cputhreads.h>
#include <asm/xics.h>
#include <asm/opal.h>
+#include <asm/runlatch.h>
#include "powernv.h"
@@ -156,7 +157,9 @@ static void pnv_smp_cpu_kill_self(void)
*/
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
while (!generic_check_cpu_restart(cpu)) {
+ ppc64_runlatch_off();
power7_nap();
+ ppc64_runlatch_on();
if (!generic_check_cpu_restart(cpu)) {
DBG("CPU%d Unexpected exit while offline !\n", cpu);
/* We may be getting an IPI, so we re-enable
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 9b8e05078a63..20d62975856f 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -88,13 +88,14 @@ void set_default_offline_state(int cpu)
static void rtas_stop_self(void)
{
- struct rtas_args args = {
- .token = cpu_to_be32(rtas_stop_self_token),
+ static struct rtas_args args = {
.nargs = 0,
.nret = 1,
.rets = &args.args[0],
};
+ args.token = cpu_to_be32(rtas_stop_self_token);
+
local_irq_disable();
BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 573b488fc48b..7f75c94af822 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -100,10 +100,10 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz
start_pfn = base >> PAGE_SHIFT;
- if (!pfn_valid(start_pfn)) {
- memblock_remove(base, memblock_size);
- return 0;
- }
+ lock_device_hotplug();
+
+ if (!pfn_valid(start_pfn))
+ goto out;
block_sz = memory_block_size_bytes();
sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
@@ -114,8 +114,10 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz
base += MIN_MEMORY_BLOCK_SIZE;
}
+out:
/* Update memory regions for memory remove */
memblock_remove(base, memblock_size);
+ unlock_device_hotplug();
return 0;
}
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
index 64603a10b863..4914fd3f41ec 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/sysdev/ppc4xx_pci.c
@@ -1058,7 +1058,7 @@ static int __init apm821xx_pciex_core_init(struct device_node *np)
return 1;
}
-static int apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
+static int __init apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
u32 val;
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 9c36dc398f90..452d3ebd9d0f 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -276,7 +276,6 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
case BPF_S_LD_W_IND:
case BPF_S_LD_H_IND:
case BPF_S_LD_B_IND:
- case BPF_S_LDX_B_MSH:
case BPF_S_LD_IMM:
case BPF_S_LD_MEM:
case BPF_S_MISC_TXA:
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index d1b7c377a234..ce6ad7e6a7d7 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -83,7 +83,9 @@ else
KBUILD_CFLAGS += -m64
# Don't autogenerate traditional x87, MMX or SSE instructions
- KBUILD_CFLAGS += -mno-mmx -mno-sse -mno-80387 -mno-fp-ret-in-387
+ KBUILD_CFLAGS += -mno-mmx -mno-sse
+ KBUILD_CFLAGS += $(call cc-option,-mno-80387)
+ KBUILD_CFLAGS += $(call cc-option,-mno-fp-ret-in-387)
# Use -mpreferred-stack-boundary=3 if supported.
KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 6ad4658de705..d23aa82e7a7b 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -3425,6 +3425,11 @@ int get_nr_irqs_gsi(void)
return nr_irqs_gsi;
}
+unsigned int arch_dynirq_lower_bound(unsigned int from)
+{
+ return from < nr_irqs_gsi ? nr_irqs_gsi : from;
+}
+
int __init arch_probe_nr_irqs(void)
{
int nr;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 7c87424d4140..619f7699487a 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -543,7 +543,8 @@ static int rapl_cpu_prepare(int cpu)
if (phys_id < 0)
return -1;
- if (!rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
+ /* protect rdmsrl() to handle virtualization */
+ if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
return -1;
pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
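
The RAPL hunk above flips the sense of the rdmsrl_safe() check: the helper returns 0 on success and non-zero when the MSR read faults, which it can under virtualization where MSR_RAPL_POWER_UNIT may not be implemented. A small sketch of that convention (the error code and wrapper name are illustrative only):

    #include <linux/errno.h>
    #include <asm/msr.h>           /* rdmsrl_safe(), MSR_RAPL_POWER_UNIT */

    /* Read an MSR that may not exist on this CPU or inside a guest. */
    static int read_rapl_units(u64 *units)
    {
            /* rdmsrl_safe() returns 0 on success, non-zero if the read faulted. */
            if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, units))
                    return -ENODEV;

            return 0;
    }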
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index f6584a90aba3..5edc34b5b951 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -26,6 +26,9 @@
#define TOPOLOGY_REGISTER_OFFSET 0x10
+/* Flag below is initialized once during vSMP PCI initialization. */
+static int irq_routing_comply = 1;
+
#if defined CONFIG_PCI && defined CONFIG_PARAVIRT
/*
* Interrupt control on vSMPowered systems:
@@ -101,6 +104,10 @@ static void __init set_vsmp_pv_ops(void)
#ifdef CONFIG_SMP
if (cap & ctl & BIT(8)) {
ctl &= ~BIT(8);
+
+ /* Interrupt routing set to ignore */
+ irq_routing_comply = 0;
+
#ifdef CONFIG_PROC_FS
/* Don't let users change irq affinity via procfs */
no_irq_affinity = 1;
@@ -218,7 +225,9 @@ static void vsmp_apic_post_init(void)
{
/* need to update phys_pkg_id */
apic->phys_pkg_id = apicid_phys_pkg_id;
- apic->vector_allocation_domain = fill_vector_allocation_domain;
+
+ if (!irq_routing_comply)
+ apic->vector_allocation_domain = fill_vector_allocation_domain;
}
void __init vsmp_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1f68c5831924..33e8c028842f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -503,7 +503,7 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
[number##_HIGH] = VMCS12_OFFSET(name)+4
-static const unsigned long shadow_read_only_fields[] = {
+static unsigned long shadow_read_only_fields[] = {
/*
* We do NOT shadow fields that are modified when L0
* traps and emulates any vmx instruction (e.g. VMPTRLD,
@@ -526,10 +526,10 @@ static const unsigned long shadow_read_only_fields[] = {
GUEST_LINEAR_ADDRESS,
GUEST_PHYSICAL_ADDRESS
};
-static const int max_shadow_read_only_fields =
+static int max_shadow_read_only_fields =
ARRAY_SIZE(shadow_read_only_fields);
-static const unsigned long shadow_read_write_fields[] = {
+static unsigned long shadow_read_write_fields[] = {
GUEST_RIP,
GUEST_RSP,
GUEST_CR0,
@@ -558,7 +558,7 @@ static const unsigned long shadow_read_write_fields[] = {
HOST_FS_SELECTOR,
HOST_GS_SELECTOR
};
-static const int max_shadow_read_write_fields =
+static int max_shadow_read_write_fields =
ARRAY_SIZE(shadow_read_write_fields);
static const unsigned short vmcs_field_to_offset_table[] = {
@@ -3009,6 +3009,41 @@ static void free_kvm_area(void)
}
}
+static void init_vmcs_shadow_fields(void)
+{
+ int i, j;
+
+ /* No checks for read only fields yet */
+
+ for (i = j = 0; i < max_shadow_read_write_fields; i++) {
+ switch (shadow_read_write_fields[i]) {
+ case GUEST_BNDCFGS:
+ if (!vmx_mpx_supported())
+ continue;
+ break;
+ default:
+ break;
+ }
+
+ if (j < i)
+ shadow_read_write_fields[j] =
+ shadow_read_write_fields[i];
+ j++;
+ }
+ max_shadow_read_write_fields = j;
+
+ /* shadowed fields guest access without vmexit */
+ for (i = 0; i < max_shadow_read_write_fields; i++) {
+ clear_bit(shadow_read_write_fields[i],
+ vmx_vmwrite_bitmap);
+ clear_bit(shadow_read_write_fields[i],
+ vmx_vmread_bitmap);
+ }
+ for (i = 0; i < max_shadow_read_only_fields; i++)
+ clear_bit(shadow_read_only_fields[i],
+ vmx_vmread_bitmap);
+}
+
static __init int alloc_kvm_area(void)
{
int cpu;
@@ -3039,6 +3074,8 @@ static __init int hardware_setup(void)
enable_vpid = 0;
if (!cpu_has_vmx_shadow_vmcs())
enable_shadow_vmcs = 0;
+ if (enable_shadow_vmcs)
+ init_vmcs_shadow_fields();
if (!cpu_has_vmx_ept() ||
!cpu_has_vmx_ept_4levels()) {
@@ -8803,14 +8840,6 @@ static int __init vmx_init(void)
memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
- /* shadowed read/write fields */
- for (i = 0; i < max_shadow_read_write_fields; i++) {
- clear_bit(shadow_read_write_fields[i], vmx_vmwrite_bitmap);
- clear_bit(shadow_read_write_fields[i], vmx_vmread_bitmap);
- }
- /* shadowed read only fields */
- for (i = 0; i < max_shadow_read_only_fields; i++)
- clear_bit(shadow_read_only_fields[i], vmx_vmread_bitmap);
/*
* Allow direct access to the PC debug port (it is often used for I/O
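
init_vmcs_shadow_fields() above uses a read/write-index pass to drop unsupported entries (GUEST_BNDCFGS when MPX is absent) from the shadow field array before clearing the vmread/vmwrite bitmap bits, which is also why the arrays and their max_* counters lose their const qualifiers. A generic sketch of that in-place compaction idiom, with the keep() predicate and names invented for illustration:

    #include <linux/types.h>

    /* Keep only entries that pass keep(); the array is compacted in place. */
    static size_t compact_fields(unsigned long *fields, size_t n,
                                 bool (*keep)(unsigned long))
    {
            size_t i, j;

            for (i = j = 0; i < n; i++) {
                    if (!keep(fields[i]))
                            continue;
                    if (j < i)
                            fields[j] = fields[i];
                    j++;
            }

            return j;       /* new element count */
    }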
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index c29c2c3ec0ad..b06f5f55ada9 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -170,6 +170,9 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
acpi_status status;
int ret;
+ if (pr->apic_id == -1)
+ return -ENODEV;
+
status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
return -ENODEV;
@@ -260,10 +263,8 @@ static int acpi_processor_get_info(struct acpi_device *device)
}
apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
- if (apic_id < 0) {
+ if (apic_id < 0)
acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
- return -ENODEV;
- }
pr->apic_id = apic_id;
cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d7d32c28829b..ad11ba4a412d 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -206,13 +206,13 @@ unlock:
spin_unlock_irqrestore(&ec->lock, flags);
}
-static int acpi_ec_sync_query(struct acpi_ec *ec);
+static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
{
if (state & ACPI_EC_FLAG_SCI) {
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
- return acpi_ec_sync_query(ec);
+ return acpi_ec_sync_query(ec, NULL);
}
return 0;
}
@@ -443,10 +443,8 @@ acpi_handle ec_get_handle(void)
EXPORT_SYMBOL(ec_get_handle);
-static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
-
/*
- * Clears stale _Q events that might have accumulated in the EC.
+ * Process _Q events that might have accumulated in the EC.
* Run with locked ec mutex.
*/
static void acpi_ec_clear(struct acpi_ec *ec)
@@ -455,7 +453,7 @@ static void acpi_ec_clear(struct acpi_ec *ec)
u8 value = 0;
for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
- status = acpi_ec_query_unlocked(ec, &value);
+ status = acpi_ec_sync_query(ec, &value);
if (status || !value)
break;
}
@@ -582,13 +580,18 @@ static void acpi_ec_run(void *cxt)
kfree(handler);
}
-static int acpi_ec_sync_query(struct acpi_ec *ec)
+static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data)
{
u8 value = 0;
int status;
struct acpi_ec_query_handler *handler, *copy;
- if ((status = acpi_ec_query_unlocked(ec, &value)))
+
+ status = acpi_ec_query_unlocked(ec, &value);
+ if (data)
+ *data = value;
+ if (status)
return status;
+
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
/* have custom handler for this bit */
@@ -612,7 +615,7 @@ static void acpi_ec_gpe_query(void *ec_cxt)
if (!ec)
return;
mutex_lock(&ec->mutex);
- acpi_ec_sync_query(ec);
+ acpi_ec_sync_query(ec, NULL);
mutex_unlock(&ec->mutex);
}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 8986b9f22781..62ec61e8f84a 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -52,6 +52,7 @@ static DEFINE_MUTEX(deferred_probe_mutex);
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static struct workqueue_struct *deferred_wq;
+static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
/**
* deferred_probe_work_func() - Retry probing devices in the active list.
@@ -135,6 +136,17 @@ static bool driver_deferred_probe_enable = false;
* This functions moves all devices from the pending list to the active
* list and schedules the deferred probe workqueue to process them. It
* should be called anytime a driver is successfully bound to a device.
+ *
+ * Note, there is a race condition in multi-threaded probe. In the case where
+ * more than one device is probing at the same time, it is possible for one
+ * probe to complete successfully while another is about to defer. If the second
+ * depends on the first, then it will get put on the pending list after the
+ * trigger event has already occurred and will be stuck there.
+ *
+ * The atomic 'deferred_trigger_count' is used to determine if a successful
+ * trigger has occurred in the midst of probing a driver. If the trigger count
+ * changes in the midst of a probe, then deferred processing should be triggered
+ * again.
*/
static void driver_deferred_probe_trigger(void)
{
@@ -147,6 +159,7 @@ static void driver_deferred_probe_trigger(void)
* into the active list so they can be retried by the workqueue
*/
mutex_lock(&deferred_probe_mutex);
+ atomic_inc(&deferred_trigger_count);
list_splice_tail_init(&deferred_probe_pending_list,
&deferred_probe_active_list);
mutex_unlock(&deferred_probe_mutex);
@@ -265,6 +278,7 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
static int really_probe(struct device *dev, struct device_driver *drv)
{
int ret = 0;
+ int local_trigger_count = atomic_read(&deferred_trigger_count);
atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
@@ -310,6 +324,9 @@ probe_failed:
/* Driver requested deferred probing */
dev_info(dev, "Driver %s requests probe deferral\n", drv->name);
driver_deferred_probe_add(dev);
+ /* Did a trigger occur while probing? Need to re-trigger if yes */
+ if (local_trigger_count != atomic_read(&deferred_trigger_count))
+ driver_deferred_probe_trigger();
} else if (ret != -ENODEV && ret != -ENXIO) {
/* driver matched but the probe failed */
printk(KERN_WARNING
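
The dd.c change closes the window described in the new comment by snapshotting deferred_trigger_count before the probe and re-triggering deferred processing if the counter moved while the probe was running. A condensed sketch of that snapshot-and-compare pattern, assuming a probe callback supplied by the caller; everything else here is sketch-only, not the driver-core code itself:

    #include <linux/atomic.h>
    #include <linux/errno.h>

    static atomic_t trigger_count = ATOMIC_INIT(0);

    static void trigger_deferred_probes(void)
    {
            atomic_inc(&trigger_count);
            /* real code: splice the pending list onto the active list, kick workqueue */
    }

    static int probe_one(int (*probe_fn)(void))
    {
            int snap = atomic_read(&trigger_count);
            int ret = probe_fn();

            if (ret == -EPROBE_DEFER) {
                    /* real code: driver_deferred_probe_add(dev) */
                    if (snap != atomic_read(&trigger_count))
                            trigger_deferred_probes();  /* a trigger ran while we probed */
            }

            return ret;
    }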
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index e714709704e4..5b47210889e0 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -13,6 +13,7 @@
#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
@@ -87,7 +88,11 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
return -ENXIO;
return dev->archdata.irqs[num];
#else
- struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num);
+ struct resource *r;
+ if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
+ return of_irq_get(dev->dev.of_node, num);
+
+ r = platform_get_resource(dev, IORESOURCE_IRQ, num);
return r ? r->start : -ENXIO;
#endif
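
With this change platform_get_irq() resolves the interrupt through of_irq_get() on DT systems, so it can now return -EPROBE_DEFER when the interrupt controller has not been probed yet; callers should simply propagate negative values. A sketch of the expected calling pattern in a driver probe (driver and handler names are illustrative):

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    static irqreturn_t demo_isr(int irq, void *data)
    {
            return IRQ_HANDLED;
    }

    static int demo_probe(struct platform_device *pdev)
    {
            int irq;

            irq = platform_get_irq(pdev, 0);
            if (irq < 0)
                    return irq;     /* may be -EPROBE_DEFER, just pass it up */

            return devm_request_irq(&pdev->dev, irq, demo_isr, 0,
                                    dev_name(&pdev->dev), pdev);
    }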
diff --git a/drivers/clk/versatile/clk-vexpress-osc.c b/drivers/clk/versatile/clk-vexpress-osc.c
index a535c7bf8574..422391242b39 100644
--- a/drivers/clk/versatile/clk-vexpress-osc.c
+++ b/drivers/clk/versatile/clk-vexpress-osc.c
@@ -100,6 +100,8 @@ void __init vexpress_osc_of_setup(struct device_node *node)
struct clk *clk;
u32 range[2];
+ vexpress_sysreg_of_early_init();
+
osc = kzalloc(sizeof(*osc), GFP_KERNEL);
if (!osc)
return;
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 57e823c44d2a..5163ec13429d 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -66,6 +66,7 @@ static int arch_timer_ppi[MAX_TIMER_PPI];
static struct clock_event_device __percpu *arch_timer_evt;
static bool arch_timer_use_virtual = true;
+static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
/*
@@ -263,7 +264,8 @@ static void __arch_timer_setup(unsigned type,
clk->features = CLOCK_EVT_FEAT_ONESHOT;
if (type == ARCH_CP15_TIMER) {
- clk->features |= CLOCK_EVT_FEAT_C3STOP;
+ if (arch_timer_c3stop)
+ clk->features |= CLOCK_EVT_FEAT_C3STOP;
clk->name = "arch_sys_timer";
clk->rating = 450;
clk->cpumask = cpumask_of(smp_processor_id());
@@ -665,6 +667,8 @@ static void __init arch_timer_init(struct device_node *np)
}
}
+ arch_timer_c3stop = !of_property_read_bool(np, "always-on");
+
arch_timer_register();
arch_timer_common_init();
}
diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/zevio-timer.c
index ca81809d159d..7ce442148c3f 100644
--- a/drivers/clocksource/zevio-timer.c
+++ b/drivers/clocksource/zevio-timer.c
@@ -212,4 +212,9 @@ error_free:
return ret;
}
-CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_add);
+static void __init zevio_timer_init(struct device_node *node)
+{
+ BUG_ON(zevio_timer_add(node));
+}
+
+CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index d00e5d1abd25..5c4369b5d834 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -242,7 +242,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
* Sets a new clock ratio.
*/
-static void longhaul_setstate(struct cpufreq_policy *policy,
+static int longhaul_setstate(struct cpufreq_policy *policy,
unsigned int table_index)
{
unsigned int mults_index;
@@ -258,10 +258,12 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
/* Safety precautions */
mult = mults[mults_index & 0x1f];
if (mult == -1)
- return;
+ return -EINVAL;
+
speed = calc_speed(mult);
if ((speed > highest_speed) || (speed < lowest_speed))
- return;
+ return -EINVAL;
+
/* Voltage transition before frequency transition? */
if (can_scale_voltage && longhaul_index < table_index)
dir = 1;
@@ -269,8 +271,6 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
freqs.old = calc_speed(longhaul_get_cpu_mult());
freqs.new = speed;
- cpufreq_freq_transition_begin(policy, &freqs);
-
pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
fsb, mult/10, mult%10, print_speed(speed/1000));
retry_loop:
@@ -385,12 +385,14 @@ retry_loop:
goto retry_loop;
}
}
- /* Report true CPU frequency */
- cpufreq_freq_transition_end(policy, &freqs, 0);
- if (!bm_timeout)
+ if (!bm_timeout) {
printk(KERN_INFO PFX "Warning: Timeout while waiting for "
"idle PCI bus.\n");
+ return -EBUSY;
+ }
+
+ return 0;
}
/*
@@ -631,9 +633,10 @@ static int longhaul_target(struct cpufreq_policy *policy,
unsigned int i;
unsigned int dir = 0;
u8 vid, current_vid;
+ int retval = 0;
if (!can_scale_voltage)
- longhaul_setstate(policy, table_index);
+ retval = longhaul_setstate(policy, table_index);
else {
/* On test system voltage transitions exceeding single
* step up or down were turning motherboard off. Both
@@ -648,7 +651,7 @@ static int longhaul_target(struct cpufreq_policy *policy,
while (i != table_index) {
vid = (longhaul_table[i].driver_data >> 8) & 0x1f;
if (vid != current_vid) {
- longhaul_setstate(policy, i);
+ retval = longhaul_setstate(policy, i);
current_vid = vid;
msleep(200);
}
@@ -657,10 +660,11 @@ static int longhaul_target(struct cpufreq_policy *policy,
else
i--;
}
- longhaul_setstate(policy, table_index);
+ retval = longhaul_setstate(policy, table_index);
}
+
longhaul_index = table_index;
- return 0;
+ return retval;
}
@@ -968,7 +972,15 @@ static void __exit longhaul_exit(void)
for (i = 0; i < numscales; i++) {
if (mults[i] == maxmult) {
+ struct cpufreq_freqs freqs;
+
+ freqs.old = policy->cur;
+ freqs.new = longhaul_table[i].frequency;
+ freqs.flags = 0;
+
+ cpufreq_freq_transition_begin(policy, &freqs);
longhaul_setstate(policy, i);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
break;
}
}
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index 49f120e1bc7b..78904e6ca4a0 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -138,22 +138,14 @@ static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
static int powernow_k6_target(struct cpufreq_policy *policy,
unsigned int best_i)
{
- struct cpufreq_freqs freqs;
if (clock_ratio[best_i].driver_data > max_multiplier) {
printk(KERN_ERR PFX "invalid target frequency\n");
return -EINVAL;
}
- freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
- freqs.new = busfreq * clock_ratio[best_i].driver_data;
-
- cpufreq_freq_transition_begin(policy, &freqs);
-
powernow_k6_set_cpu_multiplier(best_i);
- cpufreq_freq_transition_end(policy, &freqs, 0);
-
return 0;
}
@@ -227,9 +219,20 @@ have_busfreq:
static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
{
unsigned int i;
- for (i = 0; i < 8; i++) {
- if (i == max_multiplier)
+
+ for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
+ if (clock_ratio[i].driver_data == max_multiplier) {
+ struct cpufreq_freqs freqs;
+
+ freqs.old = policy->cur;
+ freqs.new = clock_ratio[i].frequency;
+ freqs.flags = 0;
+
+ cpufreq_freq_transition_begin(policy, &freqs);
powernow_k6_target(policy, i);
+ cpufreq_freq_transition_end(policy, &freqs, 0);
+ break;
+ }
}
return 0;
}
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index f911645c3f6d..e61e224475ad 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -269,8 +269,6 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
freqs.new = powernow_table[index].frequency;
- cpufreq_freq_transition_begin(policy, &freqs);
-
/* Now do the magic poking into the MSRs. */
if (have_a0 == 1) /* A0 errata 5 */
@@ -290,8 +288,6 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
if (have_a0 == 1)
local_irq_enable();
- cpufreq_freq_transition_end(policy, &freqs, 0);
-
return 0;
}
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index a1ca3dd04a8e..0af618abebaf 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -138,6 +138,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
struct cpufreq_frequency_table *table;
struct cpu_data *data;
unsigned int cpu = policy->cpu;
+ u64 transition_latency_hz;
np = of_get_cpu_node(cpu, NULL);
if (!np)
@@ -205,8 +206,10 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
for_each_cpu(i, per_cpu(cpu_mask, cpu))
per_cpu(cpu_data, i) = data;
+ transition_latency_hz = 12ULL * NSEC_PER_SEC;
policy->cpuinfo.transition_latency =
- (12ULL * NSEC_PER_SEC) / fsl_get_sys_freq();
+ do_div(transition_latency_hz, fsl_get_sys_freq());
+
of_node_put(np);
return 0;
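
The corenet-cpufreq hunk switches a 64-bit division to do_div() so 32-bit builds don't pull in __udivdi3. Worth keeping in mind when reading it: do_div(n, base) divides n in place, leaving the quotient in n, and its return value is the remainder, which callers typically discard or use separately. A minimal sketch of the semantics (the wrapper is illustrative):

    #include <linux/time.h>        /* NSEC_PER_SEC */
    #include <asm/div64.h>         /* do_div() */

    static u32 ns_per_tick(u32 sys_freq_hz)
    {
            u64 n = 12ULL * NSEC_PER_SEC;
            u32 rem;

            rem = do_div(n, sys_freq_hz);  /* n now holds the quotient */
            (void)rem;                     /* remainder, unused here */

            return (u32)n;
    }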
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index e930d4fe29c7..1ef5ab9c9d51 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -145,6 +145,7 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
plane->crtc = crtc;
plane->fb = crtc->primary->fb;
+ drm_framebuffer_reference(plane->fb);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index c786cd4f457b..2a3ad24276f8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -263,7 +263,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
buffer->sgt = sgt;
exynos_gem_obj->base.import_attach = attach;
- DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
+ DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
buffer->size);
return &exynos_gem_obj->base;
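
The two exynos DRM hunks switch dma_addr_t prints from 0x%x to %pad, the printk extension for dma_addr_t: it takes a pointer to the variable and prints the correct width whether dma_addr_t is 32- or 64-bit, which is what the old %x cast could not do. A tiny sketch (the function and message are illustrative):

    #include <linux/printk.h>
    #include <linux/types.h>

    static void show_dma_addr(dma_addr_t addr, size_t size)
    {
            /* %pad expects &addr, not addr itself. */
            pr_debug("dma_addr = %pad, size = 0x%zx\n", &addr, size);
    }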
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index eb73e3bf2a0c..4ac438187568 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1426,9 +1426,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (!dsi->reg_base) {
+ if (IS_ERR(dsi->reg_base)) {
dev_err(&pdev->dev, "failed to remap io region\n");
- return -EADDRNOTAVAIL;
+ return PTR_ERR(dsi->reg_base);
}
dsi->phy = devm_phy_get(&pdev->dev, "dsim");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 7afead9c3f30..852f2dadaebd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -220,7 +220,7 @@ static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos)
win_data->enabled = true;
- DRM_DEBUG_KMS("dma_addr = 0x%x\n", win_data->dma_addr);
+ DRM_DEBUG_KMS("dma_addr = %pad\n", &win_data->dma_addr);
if (ctx->vblank_on)
schedule_work(&ctx->work);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index ab5e93c30aa2..62a5c3627b90 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -50,7 +50,7 @@ bool intel_enable_ppgtt(struct drm_device *dev, bool full)
/* Full ppgtt disabled by default for now due to issues. */
if (full)
- return false; /* HAS_PPGTT(dev) */
+ return HAS_PPGTT(dev) && (i915.enable_ppgtt == 2);
else
return HAS_ALIASING_PPGTT(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 7753249b3a95..f98ba4e6e70b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1362,10 +1362,20 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
spin_lock(&dev_priv->irq_lock);
for (i = 1; i < HPD_NUM_PINS; i++) {
- WARN_ONCE(hpd[i] & hotplug_trigger &&
- dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
- "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
- hotplug_trigger, i, hpd[i]);
+ if (hpd[i] & hotplug_trigger &&
+ dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
+ /*
+ * On GMCH platforms the interrupt mask bits only
+ * prevent irq generation, not the setting of the
+ * hotplug bits itself. So only WARN about unexpected
+ * interrupts on saner platforms.
+ */
+ WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
+ "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
+ hotplug_trigger, i, hpd[i]);
+
+ continue;
+ }
if (!(hpd[i] & hotplug_trigger) ||
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 9f5b18d9d885..c77af69c2d8f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -827,6 +827,7 @@ enum punit_power_well {
# define MI_FLUSH_ENABLE (1 << 12)
# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
# define MODE_IDLE (1 << 9)
+# define STOP_RING (1 << 8)
#define GEN6_GT_MODE 0x20d0
#define GEN7_GT_MODE 0x7008
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index dae976f51d83..69bcc42a0e44 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9654,11 +9654,22 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(pipe_src_w);
PIPE_CONF_CHECK_I(pipe_src_h);
- PIPE_CONF_CHECK_I(gmch_pfit.control);
- /* pfit ratios are autocomputed by the hw on gen4+ */
- if (INTEL_INFO(dev)->gen < 4)
- PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
- PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+ /*
+ * FIXME: BIOS likes to set up a cloned config with lvds+external
+ * screen. Since we don't yet re-compute the pipe config when moving
+ * just the lvds port away to another pipe the sw tracking won't match.
+ *
+ * Proper atomic modesets with recomputed global state will fix this.
+ * Until then just don't check gmch state for inherited modes.
+ */
+ if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
+ PIPE_CONF_CHECK_I(gmch_pfit.control);
+ /* pfit ratios are autocomputed by the hw on gen4+ */
+ if (INTEL_INFO(dev)->gen < 4)
+ PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+ PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+ }
+
PIPE_CONF_CHECK_I(pch_pfit.enabled);
if (current_config->pch_pfit.enabled) {
PIPE_CONF_CHECK_I(pch_pfit.pos);
@@ -11616,6 +11627,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
base.head) {
memset(&crtc->config, 0, sizeof(crtc->config));
+ crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
+
crtc->active = dev_priv->display.get_pipe_config(crtc,
&crtc->config);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d2a55884ad52..dfa85289f28f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -3619,7 +3619,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
{
struct drm_connector *connector = &intel_connector->base;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct intel_encoder *intel_encoder = &intel_dig_port->base;
+ struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *fixed_mode = NULL;
bool has_dpcd;
@@ -3629,6 +3630,14 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (!is_edp(intel_dp))
return true;
+ /* The VDD bit needs a power domain reference, so if the bit is already
+ * enabled when we boot, grab this reference. */
+ if (edp_have_panel_vdd(intel_dp)) {
+ enum intel_display_power_domain power_domain;
+ power_domain = intel_display_port_power_domain(intel_encoder);
+ intel_display_power_get(dev_priv, power_domain);
+ }
+
/* Cache DPCD and EDID for edp. */
intel_edp_panel_vdd_on(intel_dp);
has_dpcd = intel_dp_get_dpcd(intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0542de982260..328b1a70264b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -236,7 +236,8 @@ struct intel_crtc_config {
* tracked with quirk flags so that fastboot and state checker can act
* accordingly.
*/
-#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
+#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
+#define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */
unsigned long quirks;
/* User requested mode, only valid as a starting point to
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index b4d44e62f0c7..fce4a0d93c0b 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -132,6 +132,16 @@ static int intelfb_create(struct drm_fb_helper *helper,
mutex_lock(&dev->struct_mutex);
+ if (intel_fb &&
+ (sizes->fb_width > intel_fb->base.width ||
+ sizes->fb_height > intel_fb->base.height)) {
+ DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
+ " releasing it\n",
+ intel_fb->base.width, intel_fb->base.height,
+ sizes->fb_width, sizes->fb_height);
+ drm_framebuffer_unreference(&intel_fb->base);
+ intel_fb = ifbdev->fb = NULL;
+ }
if (!intel_fb || WARN_ON(!intel_fb->obj)) {
DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
ret = intelfb_alloc(helper, sizes);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index b0413e190625..157267aa3561 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -821,11 +821,11 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
}
}
-static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
- if (!hdmi->has_hdmi_sink || IS_G4X(dev))
+ if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
return 165000;
else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
return 300000;
@@ -837,7 +837,8 @@ static enum drm_mode_status
intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
+ if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
+ true))
return MODE_CLOCK_HIGH;
if (mode->clock < 20000)
return MODE_CLOCK_LOW;
@@ -879,7 +880,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
- int portclock_limit = hdmi_portclock_limit(intel_hdmi);
+ int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
int desired_bpp;
if (intel_hdmi->color_range_auto) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6bc68bdcf433..79fb4cc2137c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -437,32 +437,41 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
I915_WRITE(HWS_PGA, addr);
}
-static int init_ring_common(struct intel_ring_buffer *ring)
+static bool stop_ring(struct intel_ring_buffer *ring)
{
- struct drm_device *dev = ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj = ring->obj;
- int ret = 0;
- u32 head;
+ struct drm_i915_private *dev_priv = to_i915(ring->dev);
- gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+ if (!IS_GEN2(ring->dev)) {
+ I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
+ if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
+ DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
+ return false;
+ }
+ }
- /* Stop the ring if it's running. */
I915_WRITE_CTL(ring, 0);
I915_WRITE_HEAD(ring, 0);
ring->write_tail(ring, 0);
- if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000))
- DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
- if (I915_NEED_GFX_HWS(dev))
- intel_ring_setup_status_page(ring);
- else
- ring_setup_phys_status_page(ring);
+ if (!IS_GEN2(ring->dev)) {
+ (void)I915_READ_CTL(ring);
+ I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+ }
- head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
+}
- /* G45 ring initialization fails to reset head to zero */
- if (head != 0) {
+static int init_ring_common(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj = ring->obj;
+ int ret = 0;
+
+ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+ if (!stop_ring(ring)) {
+ /* G45 ring initialization often fails to reset head to zero */
DRM_DEBUG_KMS("%s head not reset to zero "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
@@ -471,9 +480,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
I915_READ_TAIL(ring),
I915_READ_START(ring));
- I915_WRITE_HEAD(ring, 0);
-
- if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+ if (!stop_ring(ring)) {
DRM_ERROR("failed to set %s head to zero "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
@@ -481,9 +488,16 @@ static int init_ring_common(struct intel_ring_buffer *ring)
I915_READ_HEAD(ring),
I915_READ_TAIL(ring),
I915_READ_START(ring));
+ ret = -EIO;
+ goto out;
}
}
+ if (I915_NEED_GFX_HWS(dev))
+ intel_ring_setup_status_page(ring);
+ else
+ ring_setup_phys_status_page(ring);
+
/* Initialize the ring. This must happen _after_ we've cleared the ring
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 270a6a973438..2b91c4b4d34b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -34,6 +34,7 @@ struct intel_hw_status_page {
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
#define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
+#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
enum intel_ring_hangcheck_action {
HANGCHECK_IDLE = 0,
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 3e6c0f3ed592..ef9957dbac94 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -510,9 +510,8 @@ static void update_cursor(struct drm_crtc *crtc)
MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
} else {
/* disable cursor: */
- mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0);
- mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
- MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB));
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
+ mdp4_kms->blank_cursor_iova);
}
/* and drop the iova ref + obj rev when done scanning out: */
@@ -574,11 +573,9 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
if (old_bo) {
/* drop our previous reference: */
- msm_gem_put_iova(old_bo, mdp4_kms->id);
- drm_gem_object_unreference_unlocked(old_bo);
+ drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
}
- crtc_flush(crtc);
request_pending(crtc, PENDING_CURSOR);
return 0;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
index c740ccd1cc67..8edd531cb621 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
@@ -70,12 +70,12 @@ irqreturn_t mdp4_irq(struct msm_kms *kms)
VERB("status=%08x", status);
+ mdp_dispatch_irqs(mdp_kms, status);
+
for (id = 0; id < priv->num_crtcs; id++)
if (status & mdp4_crtc_vblank(priv->crtcs[id]))
drm_handle_vblank(dev, id);
- mdp_dispatch_irqs(mdp_kms, status);
-
return IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 272e707c9487..0bb4faa17523 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -144,6 +144,10 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
static void mdp4_destroy(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ if (mdp4_kms->blank_cursor_iova)
+ msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
+ if (mdp4_kms->blank_cursor_bo)
+ drm_gem_object_unreference(mdp4_kms->blank_cursor_bo);
kfree(mdp4_kms);
}
@@ -372,6 +376,23 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
+ mutex_lock(&dev->struct_mutex);
+ mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
+ mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
+ ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
+ dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
+ mdp4_kms->blank_cursor_bo = NULL;
+ goto fail;
+ }
+
+ ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
+ &mdp4_kms->blank_cursor_iova);
+ if (ret) {
+ dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
+ goto fail;
+ }
+
return kms;
fail:
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 66a4d31aec80..715520c54cde 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -44,6 +44,10 @@ struct mdp4_kms {
struct clk *lut_clk;
struct mdp_irq error_handler;
+
+ /* empty/blank cursor bo to use when cursor is "disabled" */
+ struct drm_gem_object *blank_cursor_bo;
+ uint32_t blank_cursor_iova;
};
#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 353d494a497f..f2b985bc2adf 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -71,11 +71,11 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
VERB("status=%08x", status);
+ mdp_dispatch_irqs(mdp_kms, status);
+
for (id = 0; id < priv->num_crtcs; id++)
if (status & mdp5_crtc_vblank(priv->crtcs[id]))
drm_handle_vblank(dev, id);
-
- mdp_dispatch_irqs(mdp_kms, status);
}
irqreturn_t mdp5_irq(struct msm_kms *kms)
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 6c6d7d4c9b4e..a752ab83b810 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -62,11 +62,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
dma_addr_t paddr;
int ret, size;
- /* only doing ARGB32 since this is what is needed to alpha-blend
- * with video overlays:
- */
sizes->surface_bpp = 32;
- sizes->surface_depth = 32;
+ sizes->surface_depth = 24;
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
sizes->surface_height, sizes->surface_bpp,
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 3da8264d3039..bb8026daebc9 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -118,8 +118,10 @@ static void put_pages(struct drm_gem_object *obj)
if (iommu_present(&platform_bus_type))
drm_gem_put_pages(obj, msm_obj->pages, true, false);
- else
+ else {
drm_mm_remove_node(msm_obj->vram_node);
+ drm_free_large(msm_obj->pages);
+ }
msm_obj->pages = NULL;
}
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 36c717af6cf9..edb871d7d395 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -312,7 +312,7 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
struct drm_device *drm = crtc->dev;
struct drm_plane *plane;
- list_for_each_entry(plane, &drm->mode_config.plane_list, head) {
+ drm_for_each_legacy_plane(plane, &drm->mode_config.plane_list) {
if (plane->crtc == crtc) {
tegra_plane_disable(plane);
plane->crtc = NULL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 931490b9cfed..87df0b3674fd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1214,14 +1214,36 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
SVGA3dCmdSurfaceDMA dma;
} *cmd;
int ret;
+ SVGA3dCmdSurfaceDMASuffix *suffix;
+ uint32_t bo_size;
cmd = container_of(header, struct vmw_dma_cmd, header);
+ suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
+ header->size - sizeof(*suffix));
+
+ /* Make sure device and verifier stay in sync. */
+ if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
+ DRM_ERROR("Invalid DMA suffix size.\n");
+ return -EINVAL;
+ }
+
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
&cmd->dma.guest.ptr,
&vmw_bo);
if (unlikely(ret != 0))
return ret;
+ /* Make sure DMA doesn't cross BO boundaries. */
+ bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
+ if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
+ DRM_ERROR("Invalid DMA offset.\n");
+ return -EINVAL;
+ }
+
+ bo_size -= cmd->dma.guest.ptr.offset;
+ if (unlikely(suffix->maximumOffset > bo_size))
+ suffix->maximumOffset = bo_size;
+
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, &cmd->dma.host.sid,
NULL);
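
The vmwgfx hunk validates a guest-controlled SurfaceDMA command: it checks that the suffix embedded at the end of the command has the expected size, rejects offsets beyond the backing buffer object, and clamps maximumOffset so the DMA cannot run past the BO. The general clamp-to-buffer pattern looks like this sketch, with made-up names rather than the driver's fields:

    #include <linux/errno.h>
    #include <linux/types.h>

    /* Reject offsets outside the buffer and clamp the reachable range. */
    static int clamp_dma_range(u32 bo_size, u32 offset, u32 *max_offset)
    {
            if (offset > bo_size)
                    return -EINVAL;

            if (*max_offset > bo_size - offset)
                    *max_offset = bo_size - offset;

            return 0;
    }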
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 6d02e3b06375..d76f0b70c6e0 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -365,12 +365,12 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
if (cpu_has_tjmax(c))
dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
} else {
- val = (eax >> 16) & 0x7f;
+ val = (eax >> 16) & 0xff;
/*
* If the TjMax is not plausible, an assumption
* will be used
*/
- if (val >= 85) {
+ if (val) {
dev_dbg(dev, "TjMax is %d degrees C\n", val);
return val * 1000;
}
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
index d4e8983fba53..23f38cf2c5cd 100644
--- a/drivers/infiniband/hw/cxgb4/Kconfig
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -1,10 +1,10 @@
config INFINIBAND_CXGB4
- tristate "Chelsio T4 RDMA Driver"
+ tristate "Chelsio T4/T5 RDMA Driver"
depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
select GENERIC_ALLOCATOR
---help---
- This is an iWARP/RDMA driver for the Chelsio T4 1GbE and
- 10GbE adapters.
+ This is an iWARP/RDMA driver for the Chelsio T4 and T5
+ 1GbE, 10GbE adapters and T5 40GbE adapter.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 185452abf32c..1f863a96a480 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -587,6 +587,10 @@ static int send_connect(struct c4iw_ep *ep)
opt2 |= SACK_EN(1);
if (wscale && enable_tcp_window_scaling)
opt2 |= WND_SCALE_EN(1);
+ if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+ opt2 |= T5_OPT_2_VALID;
+ opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+ }
t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
@@ -996,7 +1000,7 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
{
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
- state_set(&ep->com, ABORTING);
+ __state_set(&ep->com, ABORTING);
set_bit(ABORT_CONN, &ep->com.history);
return send_abort(ep, skb, gfp);
}
@@ -1154,7 +1158,7 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
return credits;
}
-static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
+static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
struct mpa_message *mpa;
struct mpa_v2_conn_params *mpa_v2_params;
@@ -1164,6 +1168,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
struct c4iw_qp_attributes attrs;
enum c4iw_qp_attr_mask mask;
int err;
+ int disconnect = 0;
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
@@ -1173,7 +1178,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
* will abort the connection.
*/
if (stop_ep_timer(ep))
- return;
+ return 0;
/*
* If we get more than the supported amount of private data
@@ -1195,7 +1200,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
* if we don't even have the mpa message, then bail.
*/
if (ep->mpa_pkt_len < sizeof(*mpa))
- return;
+ return 0;
mpa = (struct mpa_message *) ep->mpa_pkt;
/* Validate MPA header. */
@@ -1235,7 +1240,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
* We'll continue process when more data arrives.
*/
if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
- return;
+ return 0;
if (mpa->flags & MPA_REJECT) {
err = -ECONNREFUSED;
@@ -1337,9 +1342,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
attrs.layer_etype = LAYER_MPA | DDP_LLP;
attrs.ecode = MPA_NOMATCH_RTR;
attrs.next_state = C4IW_QP_STATE_TERMINATE;
+ attrs.send_term = 1;
err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
- C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
err = -ENOMEM;
+ disconnect = 1;
goto out;
}
@@ -1355,9 +1362,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
attrs.layer_etype = LAYER_MPA | DDP_LLP;
attrs.ecode = MPA_INSUFF_IRD;
attrs.next_state = C4IW_QP_STATE_TERMINATE;
+ attrs.send_term = 1;
err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
- C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
err = -ENOMEM;
+ disconnect = 1;
goto out;
}
goto out;
@@ -1366,7 +1375,7 @@ err:
send_abort(ep, skb, GFP_KERNEL);
out:
connect_reply_upcall(ep, err);
- return;
+ return disconnect;
}
static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
@@ -1524,6 +1533,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
unsigned int tid = GET_TID(hdr);
struct tid_info *t = dev->rdev.lldi.tids;
__u8 status = hdr->status;
+ int disconnect = 0;
ep = lookup_tid(t, tid);
if (!ep)
@@ -1539,7 +1549,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
switch (ep->com.state) {
case MPA_REQ_SENT:
ep->rcv_seq += dlen;
- process_mpa_reply(ep, skb);
+ disconnect = process_mpa_reply(ep, skb);
break;
case MPA_REQ_WAIT:
ep->rcv_seq += dlen;
@@ -1555,13 +1565,16 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
ep->com.state, ep->hwtid, status);
attrs.next_state = C4IW_QP_STATE_TERMINATE;
c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
- C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+ disconnect = 1;
break;
}
default:
break;
}
mutex_unlock(&ep->com.mutex);
+ if (disconnect)
+ c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
return 0;
}
@@ -2009,6 +2022,10 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
if (tcph->ece && tcph->cwr)
opt2 |= CCTRL_ECN(1);
}
+ if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+ opt2 |= T5_OPT_2_VALID;
+ opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+ }
rpl = cplhdr(skb);
INIT_TP_WR(rpl, ep->hwtid);
@@ -3482,9 +3499,9 @@ static void process_timeout(struct c4iw_ep *ep)
__func__, ep, ep->hwtid, ep->com.state);
abort = 0;
}
- mutex_unlock(&ep->com.mutex);
if (abort)
abort_connection(ep, NULL, GFP_KERNEL);
+ mutex_unlock(&ep->com.mutex);
c4iw_put_ep(&ep->com);
}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7b8c5806a09d..7474b490760a 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -435,6 +435,7 @@ struct c4iw_qp_attributes {
u8 ecode;
u16 sq_db_inc;
u16 rq_db_inc;
+ u8 send_term;
};
struct c4iw_qp {
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 7b5114cb486f..086f62f5dc9e 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1388,11 +1388,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
qhp->attr.layer_etype = attrs->layer_etype;
qhp->attr.ecode = attrs->ecode;
ep = qhp->ep;
- disconnect = 1;
- c4iw_get_ep(&qhp->ep->com);
- if (!internal)
+ if (!internal) {
+ c4iw_get_ep(&qhp->ep->com);
terminate = 1;
- else {
+ disconnect = 1;
+ } else {
+ terminate = qhp->attr.send_term;
ret = rdma_fini(rhp, qhp, ep);
if (ret)
goto err;
@@ -1776,11 +1777,15 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
/*
* Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
* ringing the queue db when we're in DB_FULL mode.
+ * Only allow this on T4 devices.
*/
attrs.sq_db_inc = attr->sq_psn;
attrs.rq_db_inc = attr->rq_psn;
mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
+ if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
+ (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
+ return -EINVAL;
return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index dc193c292671..6121ca08fe58 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -836,4 +836,18 @@ struct ulptx_idata {
#define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
#define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
+enum { /* TCP congestion control algorithms */
+ CONG_ALG_RENO,
+ CONG_ALG_TAHOE,
+ CONG_ALG_NEWRENO,
+ CONG_ALG_HIGHSPEED
+};
+
+#define S_CONG_CNTRL 14
+#define M_CONG_CNTRL 0x3
+#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
+#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
+
+#define T5_OPT_2_VALID (1 << 31)
+
#endif /* _T4FW_RI_API_H_ */
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 41be897df8d5..3899ba7821c5 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -41,6 +41,7 @@
#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30)
#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)
+#define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF
#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
#define ARMADA_375_PPI_CAUSE (0x10)
@@ -132,8 +133,7 @@ static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
struct msi_desc *desc)
{
struct msi_msg msg;
- irq_hw_number_t hwirq;
- int virq;
+ int virq, hwirq;
hwirq = armada_370_xp_alloc_msi();
if (hwirq < 0)
@@ -159,8 +159,19 @@ static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
+ unsigned long hwirq = d->hwirq;
+
irq_dispose_mapping(irq);
- armada_370_xp_free_msi(d->hwirq);
+ armada_370_xp_free_msi(hwirq);
+}
+
+static int armada_370_xp_check_msi_device(struct msi_chip *chip, struct pci_dev *dev,
+ int nvec, int type)
+{
+ /* We support MSI, but not MSI-X */
+ if (type == PCI_CAP_ID_MSI)
+ return 0;
+ return -EINVAL;
}
static struct irq_chip armada_370_xp_msi_irq_chip = {
@@ -201,6 +212,7 @@ static int armada_370_xp_msi_init(struct device_node *node,
msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
+ msi_chip->check_device = armada_370_xp_check_msi_device;
msi_chip->of_node = node;
armada_370_xp_msi_domain =
@@ -244,35 +256,18 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock);
static int armada_xp_set_affinity(struct irq_data *d,
const struct cpumask *mask_val, bool force)
{
- unsigned long reg;
- unsigned long new_mask = 0;
- unsigned long online_mask = 0;
- unsigned long count = 0;
irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ unsigned long reg, mask;
int cpu;
- for_each_cpu(cpu, mask_val) {
- new_mask |= 1 << cpu_logical_map(cpu);
- count++;
- }
-
- /*
- * Forbid mutlicore interrupt affinity
- * This is required since the MPIC HW doesn't limit
- * several CPUs from acknowledging the same interrupt.
- */
- if (count > 1)
- return -EINVAL;
-
- for_each_cpu(cpu, cpu_online_mask)
- online_mask |= 1 << cpu_logical_map(cpu);
+ /* Select a single core from the affinity mask which is online */
+ cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ mask = 1UL << cpu_logical_map(cpu);
raw_spin_lock(&irq_controller_lock);
-
reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
- reg = (reg & (~online_mask)) | new_mask;
+ reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
-
raw_spin_unlock(&irq_controller_lock);
return 0;
@@ -494,15 +489,6 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
#ifdef CONFIG_SMP
armada_xp_mpic_smp_cpu_init();
-
- /*
- * Set the default affinity from all CPUs to the boot cpu.
- * This is required since the MPIC doesn't limit several CPUs
- * from acknowledging the same interrupt.
- */
- cpumask_clear(irq_default_affinity);
- cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
-
#endif
armada_370_xp_msi_init(node, main_int_res.start);
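
The rewritten armada_xp_set_affinity() picks one online CPU out of the requested mask with cpumask_any_and() and programs only that CPU's bit, since the MPIC cannot safely let several CPUs acknowledge the same interrupt. For reference, cpumask_any_and() returns an arbitrary CPU in the intersection, or a value >= nr_cpu_ids when the intersection is empty; generic users of the idiom usually check for that case, as in this sketch (the empty-mask check is not part of the hunk above):

    #include <linux/cpumask.h>
    #include <linux/errno.h>

    static int pick_target_cpu(const struct cpumask *requested)
    {
            int cpu = cpumask_any_and(requested, cpu_online_mask);

            if (cpu >= nr_cpu_ids)  /* no online CPU in the requested mask */
                    return -EINVAL;

            return cpu;
    }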
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index fc817d28d1fe..3d15d16a7088 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -107,7 +107,7 @@ static int __init crossbar_of_init(struct device_node *node)
int i, size, max, reserved = 0, entry;
const __be32 *irqsr;
- cb = kzalloc(sizeof(struct cb_device *), GFP_KERNEL);
+ cb = kzalloc(sizeof(*cb), GFP_KERNEL);
if (!cb)
return -ENOMEM;
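
The irq-crossbar fix is the classic kzalloc(sizeof(struct cb_device *)) bug: the original allocated the size of a pointer rather than of the structure. Writing the size as sizeof(*ptr) keeps the allocation tied to the pointee type even if that type changes later. A minimal sketch with an invented structure:

    #include <linux/slab.h>

    struct demo_device {
            int num_irqs;
            void __iomem *base;
    };

    static struct demo_device *demo_alloc(void)
    {
            /* sizeof(*cb), never sizeof(struct demo_device *) */
            struct demo_device *cb = kzalloc(sizeof(*cb), GFP_KERNEL);

            return cb;      /* caller checks for NULL and eventually kfree()s it */
    }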
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1bf4a71919ec..9380be7b1895 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2488,6 +2488,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
} else {
inc_hit_counter(cache, bio);
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
!is_dirty(cache, lookup_result.cblock))
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 53728be84dee..13abade76ad9 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -232,6 +232,13 @@ struct thin_c {
struct bio_list deferred_bio_list;
struct bio_list retry_on_resume_list;
struct rb_root sort_bio_list; /* sorted list of deferred bios */
+
+ /*
+ * Ensures the thin is not destroyed until the worker has finished
+ * iterating the active_thins list.
+ */
+ atomic_t refcount;
+ struct completion can_destroy;
};
/*----------------------------------------------------------------*/
@@ -1486,6 +1493,45 @@ static void process_thin_deferred_bios(struct thin_c *tc)
blk_finish_plug(&plug);
}
+static void thin_get(struct thin_c *tc);
+static void thin_put(struct thin_c *tc);
+
+/*
+ * We can't hold rcu_read_lock() around code that can block. So we
+ * find a thin with the rcu lock held; bump a refcount; then drop
+ * the lock.
+ */
+static struct thin_c *get_first_thin(struct pool *pool)
+{
+ struct thin_c *tc = NULL;
+
+ rcu_read_lock();
+ if (!list_empty(&pool->active_thins)) {
+ tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
+ thin_get(tc);
+ }
+ rcu_read_unlock();
+
+ return tc;
+}
+
+static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
+{
+ struct thin_c *old_tc = tc;
+
+ rcu_read_lock();
+ list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
+ thin_get(tc);
+ thin_put(old_tc);
+ rcu_read_unlock();
+ return tc;
+ }
+ thin_put(old_tc);
+ rcu_read_unlock();
+
+ return NULL;
+}
+
static void process_deferred_bios(struct pool *pool)
{
unsigned long flags;
@@ -1493,10 +1539,11 @@ static void process_deferred_bios(struct pool *pool)
struct bio_list bios;
struct thin_c *tc;
- rcu_read_lock();
- list_for_each_entry_rcu(tc, &pool->active_thins, list)
+ tc = get_first_thin(pool);
+ while (tc) {
process_thin_deferred_bios(tc);
- rcu_read_unlock();
+ tc = get_next_thin(pool, tc);
+ }
/*
* If there are any deferred flush bios, we must commit
@@ -1578,7 +1625,7 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
{
struct noflush_work w;
- INIT_WORK(&w.worker, fn);
+ INIT_WORK_ONSTACK(&w.worker, fn);
w.tc = tc;
atomic_set(&w.complete, 0);
init_waitqueue_head(&w.wait);
@@ -3061,11 +3108,25 @@ static struct target_type pool_target = {
/*----------------------------------------------------------------
* Thin target methods
*--------------------------------------------------------------*/
+static void thin_get(struct thin_c *tc)
+{
+ atomic_inc(&tc->refcount);
+}
+
+static void thin_put(struct thin_c *tc)
+{
+ if (atomic_dec_and_test(&tc->refcount))
+ complete(&tc->can_destroy);
+}
+
static void thin_dtr(struct dm_target *ti)
{
struct thin_c *tc = ti->private;
unsigned long flags;
+ thin_put(tc);
+ wait_for_completion(&tc->can_destroy);
+
spin_lock_irqsave(&tc->pool->lock, flags);
list_del_rcu(&tc->list);
spin_unlock_irqrestore(&tc->pool->lock, flags);
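
thin_get()/thin_put() plus the can_destroy completion above form a teardown handshake: the destructor drops its own reference and then blocks until the last remaining put fires the completion. The sketch below (not part of the patch) is a standalone userspace analogue of that handshake, assuming pthreads in place of the kernel's atomic_t/completion API; it is not the dm-thin code.

/* Build with: cc -pthread refcount_teardown.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct thing {
	atomic_int refcount;        /* starts at 1: the constructor's reference */
	pthread_mutex_t lock;
	pthread_cond_t can_destroy; /* signalled when the count drops to zero */
	int done;
};

static void thing_get(struct thing *t)
{
	atomic_fetch_add(&t->refcount, 1);
}

static void thing_put(struct thing *t)
{
	if (atomic_fetch_sub(&t->refcount, 1) == 1) {  /* last reference gone */
		pthread_mutex_lock(&t->lock);
		t->done = 1;
		pthread_cond_signal(&t->can_destroy);
		pthread_mutex_unlock(&t->lock);
	}
}

static void *worker(void *arg)
{
	struct thing *t = arg;   /* the worker was handed its own reference */

	usleep(100 * 1000);      /* pretend to process while holding the ref */
	thing_put(t);
	return NULL;
}

int main(void)
{
	struct thing t = { .refcount = 1, .lock = PTHREAD_MUTEX_INITIALIZER,
			   .can_destroy = PTHREAD_COND_INITIALIZER };
	pthread_t tid;

	thing_get(&t);                        /* reference handed to the worker */
	pthread_create(&tid, NULL, worker, &t);

	/* destructor side: drop our reference, then wait for the rest */
	thing_put(&t);
	pthread_mutex_lock(&t.lock);
	while (!t.done)
		pthread_cond_wait(&t.can_destroy, &t.lock);
	pthread_mutex_unlock(&t.lock);

	pthread_join(tid, NULL);
	puts("all references dropped; safe to free");
	return 0;
}

In the patch, thin_ctr() starts the count at 1, the worker's get_first_thin()/get_next_thin() take the transient references, and thin_dtr() does the final put followed by wait_for_completion().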
@@ -3101,6 +3162,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
struct thin_c *tc;
struct dm_dev *pool_dev, *origin_dev;
struct mapped_device *pool_md;
+ unsigned long flags;
mutex_lock(&dm_thin_pool_table.mutex);
@@ -3191,9 +3253,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
mutex_unlock(&dm_thin_pool_table.mutex);
- spin_lock(&tc->pool->lock);
+ atomic_set(&tc->refcount, 1);
+ init_completion(&tc->can_destroy);
+
+ spin_lock_irqsave(&tc->pool->lock, flags);
list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
- spin_unlock(&tc->pool->lock);
+ spin_unlock_irqrestore(&tc->pool->lock, flags);
/*
* This synchronize_rcu() call is needed here otherwise we risk a
* wake_worker() call finding no bios to process (because the newly
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 796007a5e0e1..7a7bab8947ae 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -330,15 +330,17 @@ test_block_hash:
return r;
}
}
-
todo = 1 << v->data_dev_block_bits;
- while (io->iter.bi_size) {
+ do {
u8 *page;
+ unsigned len;
struct bio_vec bv = bio_iter_iovec(bio, io->iter);
page = kmap_atomic(bv.bv_page);
- r = crypto_shash_update(desc, page + bv.bv_offset,
- bv.bv_len);
+ len = bv.bv_len;
+ if (likely(len >= todo))
+ len = todo;
+ r = crypto_shash_update(desc, page + bv.bv_offset, len);
kunmap_atomic(page);
if (r < 0) {
@@ -346,8 +348,9 @@ test_block_hash:
return r;
}
- bio_advance_iter(bio, &io->iter, bv.bv_len);
- }
+ bio_advance_iter(bio, &io->iter, len);
+ todo -= len;
+ } while (todo);
if (!v->version) {
r = crypto_shash_update(desc, v->salt, v->salt_size);
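
The dm-verity change above clamps every hash update to the bytes still owed for the current block, so a bio_vec that runs past the block boundary no longer feeds extra data into the digest. The sketch below (not part of the patch) reproduces that clamping loop over an array of segments, with a trivial byte sum standing in for crypto_shash_update(); struct segment and sum_block() are illustrative names, not dm-verity code.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct segment {
	const uint8_t *data;
	size_t len;
};

/* Consume exactly 'todo' bytes spread across segments, clamping each step. */
static uint32_t sum_block(const struct segment *segs, size_t nsegs, size_t todo)
{
	uint32_t sum = 0;
	size_t i = 0, off = 0;

	while (todo && i < nsegs) {
		size_t len = segs[i].len - off;

		if (len > todo)          /* segment crosses the block boundary: clamp */
			len = todo;
		for (size_t j = 0; j < len; j++)
			sum += segs[i].data[off + j];
		todo -= len;
		off += len;
		if (off == segs[i].len) { /* advance to the next segment */
			i++;
			off = 0;
		}
	}
	return sum;
}

int main(void)
{
	uint8_t a[3] = { 1, 2, 3 }, b[5] = { 4, 5, 6, 7, 8 };
	struct segment segs[] = { { a, sizeof(a) }, { b, sizeof(b) } };

	/* A 4-byte "block": 1+2+3 from the first segment, 4 from the second. */
	printf("%u\n", sum_block(segs, 2, 4));   /* prints 10 */
	return 0;
}

In the patch, todo starts at the data block size (1 << data_dev_block_bits) and bio_advance_iter() does the segment/offset bookkeeping that the sketch handles by hand.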
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 9bcf2cf19357..5aeb89411350 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -364,7 +364,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
memset(r, 0, sizeof(*r));
/*
- * Get optional "interrupts-names" property to add a name
+ * Get optional "interrupt-names" property to add a name
* to the resource.
*/
of_property_read_string_index(dev, "interrupt-names", index,
@@ -380,6 +380,32 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
EXPORT_SYMBOL_GPL(of_irq_to_resource);
/**
+ * of_irq_get - Decode a node's IRQ and return it as a Linux irq number
+ * @dev: pointer to device tree node
+ * @index: zero-based index of the irq
+ *
+ * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
+ * is not yet created.
+ *
+ */
+int of_irq_get(struct device_node *dev, int index)
+{
+ int rc;
+ struct of_phandle_args oirq;
+ struct irq_domain *domain;
+
+ rc = of_irq_parse_one(dev, index, &oirq);
+ if (rc)
+ return rc;
+
+ domain = irq_find_host(oirq.np);
+ if (!domain)
+ return -EPROBE_DEFER;
+
+ return irq_create_of_mapping(&oirq);
+}
+
+/**
* of_irq_count - Count the number of IRQs a node uses
* @dev: pointer to device tree node
*/
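
The new of_irq_get() separates a malformed interrupt specifier (a hard error) from an interrupt controller whose irq domain simply has not been registered yet (-EPROBE_DEFER, so the caller may retry after the provider probes). The standalone sketch below (not part of the patch) shows that resolve-or-defer pattern against a made-up provider registry; the ERR_* constants, register_domain() and resolve_irq() are assumptions for illustration only.

#include <stdio.h>
#include <string.h>

#define ERR_INVALID  -22    /* bad specifier: give up              */
#define ERR_DEFER    -517   /* provider not ready yet: retry later */

static const char *registered_domains[8]; /* illustrative provider registry */
static int nr_domains;

static void register_domain(const char *name)
{
	registered_domains[nr_domains++] = name;
}

static int domain_registered(const char *name)
{
	for (int i = 0; i < nr_domains; i++)
		if (!strcmp(registered_domains[i], name))
			return 1;
	return 0;
}

/* Resolve a "specifier": fail hard if malformed, defer if provider missing. */
static int resolve_irq(const char *domain, int index)
{
	if (index < 0)                    /* parse failure: a real error */
		return ERR_INVALID;
	if (!domain_registered(domain))   /* controller not probed yet */
		return ERR_DEFER;
	return 100 + index;               /* pretend mapping to an irq number */
}

int main(void)
{
	printf("before: %d\n", resolve_irq("intc0", 1));  /* -517: defer and retry */
	register_domain("intc0");
	printf("after:  %d\n", resolve_irq("intc0", 1));  /* 101: resolved */
	printf("bad:    %d\n", resolve_irq("intc0", -1)); /* -22: do not retry */
	return 0;
}

The selftest added further down exercises exactly these two outcomes: testcase-device1 is expected to defer, while testcase-device2 must report a plain error rather than -EPROBE_DEFER.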
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 404d1daebefa..bd47fbc53dc9 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -168,7 +168,9 @@ struct platform_device *of_device_alloc(struct device_node *np,
rc = of_address_to_resource(np, i, res);
WARN_ON(rc);
}
- WARN_ON(of_irq_to_resource_table(np, res, num_irq) != num_irq);
+ if (of_irq_to_resource_table(np, res, num_irq) != num_irq)
+ pr_debug("not all legacy IRQ resources mapped for %s\n",
+ np->name);
}
dev->dev.of_node = of_node_get(np);
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
index ae4450070503..fe70b86bcffb 100644
--- a/drivers/of/selftest.c
+++ b/drivers/of/selftest.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/of_platform.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -427,6 +428,36 @@ static void __init of_selftest_match_node(void)
}
}
+static void __init of_selftest_platform_populate(void)
+{
+ int irq;
+ struct device_node *np;
+ struct platform_device *pdev;
+
+ np = of_find_node_by_path("/testcase-data");
+ of_platform_populate(np, of_default_bus_match_table, NULL, NULL);
+
+ /* Test that a missing irq domain returns -EPROBE_DEFER */
+ np = of_find_node_by_path("/testcase-data/testcase-device1");
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ selftest(0, "device 1 creation failed\n");
+ irq = platform_get_irq(pdev, 0);
+ if (irq != -EPROBE_DEFER)
+ selftest(0, "device deferred probe failed - %d\n", irq);
+
+ /* Test that a parsing failure does not return -EPROBE_DEFER */
+ np = of_find_node_by_path("/testcase-data/testcase-device2");
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ selftest(0, "device 2 creation failed\n");
+ irq = platform_get_irq(pdev, 0);
+ if (irq >= 0 || irq == -EPROBE_DEFER)
+ selftest(0, "device parsing error failed - %d\n", irq);
+
+ selftest(1, "passed");
+}
+
static int __init of_selftest(void)
{
struct device_node *np;
@@ -445,6 +476,7 @@ static int __init of_selftest(void)
of_selftest_parse_interrupts();
of_selftest_parse_interrupts_extended();
of_selftest_match_node();
+ of_selftest_platform_populate();
pr_info("end of selftest - %i passed, %i failed\n",
selftest_results.passed, selftest_results.failed);
return 0;
diff --git a/drivers/of/testcase-data/tests-interrupts.dtsi b/drivers/of/testcase-data/tests-interrupts.dtsi
index c843720bd3e5..da4695f60351 100644
--- a/drivers/of/testcase-data/tests-interrupts.dtsi
+++ b/drivers/of/testcase-data/tests-interrupts.dtsi
@@ -54,5 +54,18 @@
<&test_intmap1 1 2>;
};
};
+
+ testcase-device1 {
+ compatible = "testcase-device";
+ interrupt-parent = <&test_intc0>;
+ interrupts = <1>;
+ };
+
+ testcase-device2 {
+ compatible = "testcase-device";
+ interrupt-parent = <&test_intc2>;
+ interrupts = <1>; /* invalid specifier - too short */
+ };
};
+
};
diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c
index 92ed4b2e3c07..c862f9c0e9ce 100644
--- a/drivers/pinctrl/pinctrl-as3722.c
+++ b/drivers/pinctrl/pinctrl-as3722.c
@@ -64,7 +64,6 @@ struct as3722_pin_function {
};
struct as3722_gpio_pin_control {
- bool enable_gpio_invert;
unsigned mode_prop;
int io_function;
};
@@ -320,10 +319,8 @@ static int as3722_pinctrl_gpio_set_direction(struct pinctrl_dev *pctldev,
return mode;
}
- if (as_pci->gpio_control[offset].enable_gpio_invert)
- mode |= AS3722_GPIO_INV;
-
- return as3722_write(as3722, AS3722_GPIOn_CONTROL_REG(offset), mode);
+ return as3722_update_bits(as3722, AS3722_GPIOn_CONTROL_REG(offset),
+ AS3722_GPIO_MODE_MASK, mode);
}
static const struct pinmux_ops as3722_pinmux_ops = {
@@ -496,10 +493,18 @@ static void as3722_gpio_set(struct gpio_chip *chip, unsigned offset,
{
struct as3722_pctrl_info *as_pci = to_as_pci(chip);
struct as3722 *as3722 = as_pci->as3722;
- int en_invert = as_pci->gpio_control[offset].enable_gpio_invert;
+ int en_invert;
u32 val;
int ret;
+ ret = as3722_read(as3722, AS3722_GPIOn_CONTROL_REG(offset), &val);
+ if (ret < 0) {
+ dev_err(as_pci->dev,
+ "GPIO_CONTROL%d_REG read failed: %d\n", offset, ret);
+ return;
+ }
+ en_invert = !!(val & AS3722_GPIO_INV);
+
if (value)
val = (en_invert) ? 0 : AS3722_GPIOn_SIGNAL(offset);
else
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 81075f2a1d3f..2960557bfed9 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -810,6 +810,7 @@ static const struct pinconf_ops pcs_pinconf_ops = {
static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
unsigned pin_pos)
{
+ struct pcs_soc_data *pcs_soc = &pcs->socdata;
struct pinctrl_pin_desc *pin;
struct pcs_name *pn;
int i;
@@ -821,6 +822,18 @@ static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
return -ENOMEM;
}
+ if (pcs_soc->irq_enable_mask) {
+ unsigned val;
+
+ val = pcs->read(pcs->base + offset);
+ if (val & pcs_soc->irq_enable_mask) {
+ dev_dbg(pcs->dev, "irq enabled at boot for pin at %lx (%x), clearing\n",
+ (unsigned long)pcs->res->start + offset, val);
+ val &= ~pcs_soc->irq_enable_mask;
+ pcs->write(val, pcs->base + offset);
+ }
+ }
+
pin = &pcs->pins.pa[i];
pn = &pcs->names[i];
sprintf(pn->name, "%lx.%d",
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c
index c5e0f6973a3b..26ca6855f478 100644
--- a/drivers/pinctrl/pinctrl-tb10x.c
+++ b/drivers/pinctrl/pinctrl-tb10x.c
@@ -629,9 +629,8 @@ static int tb10x_gpio_request_enable(struct pinctrl_dev *pctl,
*/
for (i = 0; i < state->pinfuncgrpcnt; i++) {
const struct tb10x_pinfuncgrp *pfg = &state->pingroups[i];
- unsigned int port = pfg->port;
unsigned int mode = pfg->mode;
- int j;
+ int j, port = pfg->port;
/*
* Skip pin groups which are always mapped and don't need
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index 48093719167a..f5cd3f961808 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -4794,8 +4794,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_MSIOF0_SCK_B, 0,
/* IP5_23_21 [3] */
FN_WE1_N, FN_IERX, FN_CAN1_RX, FN_VI1_G4,
- FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B,
- FN_IERX_C, 0,
+ FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B, FN_IERX_C,
/* IP5_20_18 [3] */
FN_WE0_N, FN_IECLK, FN_CAN_CLK,
FN_VI2_VSYNC_N, FN_SCIFA0_TXD_B, FN_VI2_VSYNC_N_B, 0, 0,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 5186d70c49d4..7868bf3a0f91 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -5288,7 +5288,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
/* SEL_SCIF3 [2] */
FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3,
/* SEL_IEB [2] */
- FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2,
+ FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, 0,
/* SEL_MMC [1] */
FN_SEL_MMC_0, FN_SEL_MMC_1,
/* SEL_SCIF5 [1] */
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 9f611cbbc294..c31aa07b3ba5 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -83,8 +83,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
{
struct acpi_device *acpi_dev;
acpi_handle handle;
- struct acpi_buffer buffer;
- int ret;
+ int ret = 0;
pnp_dbg(&dev->dev, "set resources\n");
@@ -97,19 +96,26 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
if (WARN_ON_ONCE(acpi_dev != dev->data))
dev->data = acpi_dev;
- ret = pnpacpi_build_resource_template(dev, &buffer);
- if (ret)
- return ret;
- ret = pnpacpi_encode_resources(dev, &buffer);
- if (ret) {
+ if (acpi_has_method(handle, METHOD_NAME__SRS)) {
+ struct acpi_buffer buffer;
+
+ ret = pnpacpi_build_resource_template(dev, &buffer);
+ if (ret)
+ return ret;
+
+ ret = pnpacpi_encode_resources(dev, &buffer);
+ if (!ret) {
+ acpi_status status;
+
+ status = acpi_set_current_resources(handle, &buffer);
+ if (ACPI_FAILURE(status))
+ ret = -EIO;
+ }
kfree(buffer.pointer);
- return ret;
}
- if (ACPI_FAILURE(acpi_set_current_resources(handle, &buffer)))
- ret = -EINVAL;
- else if (acpi_bus_power_manageable(handle))
+ if (!ret && acpi_bus_power_manageable(handle))
ret = acpi_bus_set_power(handle, ACPI_STATE_D0);
- kfree(buffer.pointer);
+
return ret;
}
@@ -117,7 +123,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
{
struct acpi_device *acpi_dev;
acpi_handle handle;
- int ret;
+ acpi_status status;
dev_dbg(&dev->dev, "disable resources\n");
@@ -128,13 +134,15 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
}
/* acpi_unregister_gsi(pnp_irq(dev, 0)); */
- ret = 0;
if (acpi_bus_power_manageable(handle))
acpi_bus_set_power(handle, ACPI_STATE_D3_COLD);
- /* continue even if acpi_bus_set_power() fails */
- if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL)))
- ret = -ENODEV;
- return ret;
+
+ /* continue even if acpi_bus_set_power() fails */
+ status = acpi_evaluate_object(handle, "_DIS", NULL, NULL);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
+ return -ENODEV;
+
+ return 0;
}
#ifdef CONFIG_ACPI_SLEEP
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index 3736bc408adb..ebf0d6710b5a 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -335,7 +335,7 @@ static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
}
#endif
-#ifdef CONFIG_X86
+#ifdef CONFIG_PCI
/* Device IDs of parts that have 32KB MCH space */
static const unsigned int mch_quirk_devices[] = {
0x0154, /* Ivy Bridge */
@@ -440,7 +440,7 @@ static struct pnp_fixup pnp_fixups[] = {
#ifdef CONFIG_AMD_NB
{"PNP0c01", quirk_amd_mmconfig_area},
#endif
-#ifdef CONFIG_X86
+#ifdef CONFIG_PCI
{"PNP0c02", quirk_intel_mch},
#endif
{""}
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 9f0ea6cb6922..e3bf885f4a6c 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -541,18 +541,27 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
{
- do {
+ static int ntsm_unsupported;
+
+ while (true) {
memset(sei, 0, sizeof(*sei));
sei->request.length = 0x0010;
sei->request.code = 0x000e;
- sei->ntsm = ntsm;
+ if (!ntsm_unsupported)
+ sei->ntsm = ntsm;
if (chsc(sei))
break;
if (sei->response.code != 0x0001) {
- CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
- sei->response.code);
+ CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
+ sei->response.code, sei->ntsm);
+
+ if (sei->response.code == 3 && sei->ntsm) {
+ /* Fallback for old firmware. */
+ ntsm_unsupported = 1;
+ continue;
+ }
break;
}
@@ -568,7 +577,10 @@ static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
break;
}
- } while (sei->u.nt0_area.flags & 0x80);
+
+ if (!(sei->u.nt0_area.flags & 0x80))
+ break;
+ }
}
/*
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 7f0af4fcc001..6fd7d40b2c4d 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -8293,7 +8293,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
mpt2sas_base_free_resources(ioc);
pci_save_state(pdev);
- pci_disable_device(pdev);
pci_set_power_state(pdev, device_state);
return 0;
}
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 16bfd50cd3fe..db3b494e5926 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -750,8 +750,12 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
vscsi->affinity_hint_set = true;
} else {
- for (i = 0; i < vscsi->num_queues; i++)
+ for (i = 0; i < vscsi->num_queues; i++) {
+ if (!vscsi->req_vqs[i].vq)
+ continue;
+
virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
+ }
vscsi->affinity_hint_set = false;
}
diff --git a/fs/aio.c b/fs/aio.c
index 12a3de0ee6da..a0ed6c7d2cd2 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -112,6 +112,11 @@ struct kioctx {
struct work_struct free_work;
+ /*
+ * signals when all in-flight requests are done
+ */
+ struct completion *requests_done;
+
struct {
/*
* This counts the number of available slots in the ringbuffer,
@@ -508,6 +513,10 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
{
struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
+ /* At this point we know that there are no in-flight requests */

+ if (ctx->requests_done)
+ complete(ctx->requests_done);
+
INIT_WORK(&ctx->free_work, free_ioctx);
schedule_work(&ctx->free_work);
}
@@ -718,7 +727,8 @@ err:
* when the processes owning a context have all exited to encourage
* the rapid destruction of the kioctx.
*/
-static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
+static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
+ struct completion *requests_done)
{
if (!atomic_xchg(&ctx->dead, 1)) {
struct kioctx_table *table;
@@ -747,7 +757,11 @@ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
if (ctx->mmap_size)
vm_munmap(ctx->mmap_base, ctx->mmap_size);
+ ctx->requests_done = requests_done;
percpu_ref_kill(&ctx->users);
+ } else {
+ if (requests_done)
+ complete(requests_done);
}
}
@@ -809,7 +823,7 @@ void exit_aio(struct mm_struct *mm)
*/
ctx->mmap_size = 0;
- kill_ioctx(mm, ctx);
+ kill_ioctx(mm, ctx, NULL);
}
}
@@ -1185,7 +1199,7 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
if (!IS_ERR(ioctx)) {
ret = put_user(ioctx->user_id, ctxp);
if (ret)
- kill_ioctx(current->mm, ioctx);
+ kill_ioctx(current->mm, ioctx, NULL);
percpu_ref_put(&ioctx->users);
}
@@ -1203,8 +1217,22 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
struct kioctx *ioctx = lookup_ioctx(ctx);
if (likely(NULL != ioctx)) {
- kill_ioctx(current->mm, ioctx);
+ struct completion requests_done =
+ COMPLETION_INITIALIZER_ONSTACK(requests_done);
+
+ /* Pass requests_done to kill_ioctx() where it can be set
+ * in a thread-safe way. If we try to set it here then we have
+ * a race condition if two io_destroy() calls run simultaneously.
+ */
+ kill_ioctx(current->mm, ioctx, &requests_done);
percpu_ref_put(&ioctx->users);
+
+ /* Wait until all IO for the context is done. Otherwise the kernel
+ * keeps using user-space buffers even if the user thinks the context
+ * is destroyed.
+ */
+ wait_for_completion(&requests_done);
+
return 0;
}
pr_debug("EINVAL: io_destroy: invalid context id\n");
@@ -1299,10 +1327,8 @@ rw_common:
&iovec, compat)
: aio_setup_single_vector(req, rw, buf, &nr_segs,
iovec);
- if (ret)
- return ret;
-
- ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
+ if (!ret)
+ ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
if (ret < 0) {
if (iovec != &inline_vec)
kfree(iovec);
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
index 5a64ca4621f3..f23174fb9ec4 100644
--- a/include/asm-generic/fixmap.h
+++ b/include/asm-generic/fixmap.h
@@ -93,5 +93,8 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
#define set_fixmap_io(idx, phys) \
__set_fixmap(idx, phys, FIXMAP_PAGE_IO)
+#define set_fixmap_offset_io(idx, phys) \
+ __set_fixmap_offset(idx, phys, FIXMAP_PAGE_IO)
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_GENERIC_FIXMAP_H */
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
index d96deb443f18..94f9ea8abcae 100644
--- a/include/asm-generic/word-at-a-time.h
+++ b/include/asm-generic/word-at-a-time.h
@@ -50,7 +50,7 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
}
#ifndef zero_bytemask
-#define zero_bytemask(mask) (~0ul << __fls(mask) << 1)
+#define zero_bytemask(mask) (~1ul << __fls(mask))
#endif
#endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 9212b017bc72..ae9504b4b67d 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -535,6 +535,7 @@ static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_a
extern int ftrace_arch_read_dyn_info(char *buf, int size);
extern int skip_trace(unsigned long ip);
+extern void ftrace_module_init(struct module *mod);
extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
@@ -544,6 +545,7 @@ static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) {}
+static inline void ftrace_module_init(struct module *mod) {}
static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
return -EINVAL;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 8834a7e5b944..97ac926c78a7 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -210,7 +210,7 @@ extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
/**
* irq_set_affinity - Set the irq affinity of a given irq
* @irq: Interrupt to set affinity
- * @mask: cpumask
+ * @cpumask: cpumask
*
* Fails if cpumask does not contain an online CPU
*/
@@ -223,7 +223,7 @@ irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
/**
* irq_force_affinity - Force the irq affinity of a given irq
* @irq: Interrupt to set affinity
- * @mask: cpumask
+ * @cpumask: cpumask
*
* Same as irq_set_affinity, but without checking the mask against
* online cpus.
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 10a0b1ac4ea0..5c57efb863d0 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -603,6 +603,8 @@ static inline u32 irq_get_trigger_type(unsigned int irq)
return d ? irqd_get_trigger_type(d) : 0;
}
+unsigned int arch_dynirq_lower_bound(unsigned int from);
+
int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
struct module *owner);
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 3f23b4472c31..6404253d810d 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -44,11 +44,16 @@ extern void of_irq_init(const struct of_device_id *matches);
#ifdef CONFIG_OF_IRQ
extern int of_irq_count(struct device_node *dev);
+extern int of_irq_get(struct device_node *dev, int index);
#else
static inline int of_irq_count(struct device_node *dev)
{
return 0;
}
+static inline int of_irq_get(struct device_node *dev, int index)
+{
+ return 0;
+}
#endif
#if defined(CONFIG_OF)
diff --git a/include/trace/events/module.h b/include/trace/events/module.h
index 11fd51b413de..ed0b2c599a64 100644
--- a/include/trace/events/module.h
+++ b/include/trace/events/module.h
@@ -25,7 +25,7 @@ struct module;
{ (1UL << TAINT_OOT_MODULE), "O" }, \
{ (1UL << TAINT_FORCED_MODULE), "F" }, \
{ (1UL << TAINT_CRAP), "C" }, \
- { (1UL << TAINT_UNSIGNED_MODULE), "X" })
+ { (1UL << TAINT_UNSIGNED_MODULE), "E" })
TRACE_EVENT(module_load,
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index d55092ceee29..6b715c0af1b1 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -234,6 +234,11 @@ again:
goto again;
}
timer->base = new_base;
+ } else {
+ if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
+ cpu = this_cpu;
+ goto again;
+ }
}
return new_base;
}
@@ -569,6 +574,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
cpu_base->expires_next.tv64 = expires_next.tv64;
+ /*
+ * If a hang was detected in the last timer interrupt then we
+ * leave the hang delay active in the hardware. We want the
+ * system to make progress. That also prevents the following
+ * scenario:
+ * T1 expires 50ms from now
+ * T2 expires 5s from now
+ *
+ * T1 is removed, so this code is called and would reprogram
+ * the hardware to 5s from now. Any hrtimer_start after that
+ * will not reprogram the hardware due to hang_detected being
+ * set. So we'd effectively block all timers until the T2 event
+ * fires.
+ */
+ if (cpu_base->hang_detected)
+ return;
+
if (cpu_base->expires_next.tv64 != KTIME_MAX)
tick_program_event(cpu_base->expires_next, 1);
}
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index a7174617616b..bb07f2928f4b 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -363,6 +363,13 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
if (from > irq)
return -EINVAL;
from = irq;
+ } else {
+ /*
+ * For interrupts which are freely allocated the
+ * architecture can force a lower bound to the @from
+ * argument. x86 uses this to exclude the GSI space.
+ */
+ from = arch_dynirq_lower_bound(from);
}
mutex_lock(&sparse_irq_lock);
diff --git a/kernel/module.c b/kernel/module.c
index 11869408f79b..079c4615607d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -815,9 +815,6 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
return -EFAULT;
name[MODULE_NAME_LEN-1] = '\0';
- if (!(flags & O_NONBLOCK))
- pr_warn("waiting module removal not supported: please upgrade\n");
-
if (mutex_lock_interruptible(&module_mutex) != 0)
return -EINTR;
@@ -3271,6 +3268,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
dynamic_debug_setup(info->debug, info->num_debug);
+ /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
+ ftrace_module_init(mod);
+
/* Finally it's fully formed, ready to start executing. */
err = complete_formation(mod, info);
if (err)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b50990a5bea0..33e4648ae0e7 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -779,3 +779,8 @@ int __init __weak arch_early_irq_init(void)
{
return 0;
}
+
+unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
+{
+ return from;
+}
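
arch_dynirq_lower_bound() gets a __weak generic default above that returns its argument unchanged; an architecture that needs to reserve a range (the irqdesc.c comment cites x86 excluding the GSI space) links in a strong definition that overrides it. A minimal sketch of that weak-default idiom on GCC/Clang ELF toolchains follows (not part of the patch); the override shown in the comment is hypothetical.

#include <stdio.h>

/* Generic fallback: an arch-specific object may override it with a strong symbol. */
__attribute__((weak)) unsigned int arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}

/*
 * In a separate, arch-specific file an override would look like:
 *
 *   unsigned int arch_dynirq_lower_bound(unsigned int from)
 *   {
 *           return from < 64 ? 64 : from;   // keep dynamic IRQs above a reserved range
 *   }
 *
 * When that object is linked in, its strong definition replaces the weak one.
 */

int main(void)
{
	/* With only the weak default linked, the lower bound is unchanged. */
	printf("%u\n", arch_dynirq_lower_bound(16));   /* prints 16 */
	return 0;
}

__irq_alloc_descs() then applies whichever definition is linked to the 'from' argument for freely allocated interrupts, as the new irqdesc.c hunk shows.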
diff --git a/kernel/timer.c b/kernel/timer.c
index 87bd529879c2..3bb01a323b2a 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -838,7 +838,7 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
bit = find_last_bit(&mask, BITS_PER_LONG);
- mask = (1 << bit) - 1;
+ mask = (1UL << bit) - 1;
expires_limit = expires_limit & ~(mask);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1fd4b9479210..4a54a25afa2f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4330,16 +4330,11 @@ static void ftrace_init_module(struct module *mod,
ftrace_process_locs(mod, start, end);
}
-static int ftrace_module_notify_enter(struct notifier_block *self,
- unsigned long val, void *data)
+void ftrace_module_init(struct module *mod)
{
- struct module *mod = data;
-
- if (val == MODULE_STATE_COMING)
- ftrace_init_module(mod, mod->ftrace_callsites,
- mod->ftrace_callsites +
- mod->num_ftrace_callsites);
- return 0;
+ ftrace_init_module(mod, mod->ftrace_callsites,
+ mod->ftrace_callsites +
+ mod->num_ftrace_callsites);
}
static int ftrace_module_notify_exit(struct notifier_block *self,
@@ -4353,11 +4348,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
return 0;
}
#else
-static int ftrace_module_notify_enter(struct notifier_block *self,
- unsigned long val, void *data)
-{
- return 0;
-}
static int ftrace_module_notify_exit(struct notifier_block *self,
unsigned long val, void *data)
{
@@ -4365,11 +4355,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
}
#endif /* CONFIG_MODULES */
-struct notifier_block ftrace_module_enter_nb = {
- .notifier_call = ftrace_module_notify_enter,
- .priority = INT_MAX, /* Run before anything that can use kprobes */
-};
-
struct notifier_block ftrace_module_exit_nb = {
.notifier_call = ftrace_module_notify_exit,
.priority = INT_MIN, /* Run after anything that can remove kprobes */
@@ -4403,10 +4388,6 @@ void __init ftrace_init(void)
__start_mcount_loc,
__stop_mcount_loc);
- ret = register_module_notifier(&ftrace_module_enter_nb);
- if (ret)
- pr_warning("Failed to register trace ftrace module enter notifier\n");
-
ret = register_module_notifier(&ftrace_module_exit_nb);
if (ret)
pr_warning("Failed to register trace ftrace module exit notifier\n");
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 925f537f07d1..4747b476a030 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -77,7 +77,7 @@ event_triggers_call(struct ftrace_event_file *file, void *rec)
data->ops->func(data);
continue;
}
- filter = rcu_dereference(data->filter);
+ filter = rcu_dereference_sched(data->filter);
if (filter && !filter_match_preds(filter, rec))
continue;
if (data->cmd_ops->post_trigger) {
diff --git a/mm/vmacache.c b/mm/vmacache.c
index d4224b397c0e..1037a3bab505 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -81,10 +81,12 @@ struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
for (i = 0; i < VMACACHE_SIZE; i++) {
struct vm_area_struct *vma = current->vmacache[i];
- if (vma && vma->vm_start <= addr && vma->vm_end > addr) {
- BUG_ON(vma->vm_mm != mm);
+ if (!vma)
+ continue;
+ if (WARN_ON_ONCE(vma->vm_mm != mm))
+ break;
+ if (vma->vm_start <= addr && vma->vm_end > addr)
return vma;
- }
}
return NULL;
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 248b90abb882..480bbddbd801 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -1059,24 +1059,26 @@ static void azx_init_cmd_io(struct azx *chip)
/* reset the corb hw read pointer */
azx_writew(chip, CORBRP, ICH6_CORBRP_RST);
- for (timeout = 1000; timeout > 0; timeout--) {
- if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST)
- break;
- udelay(1);
- }
- if (timeout <= 0)
- dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
- azx_readw(chip, CORBRP));
+ if (!(chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)) {
+ for (timeout = 1000; timeout > 0; timeout--) {
+ if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST)
+ break;
+ udelay(1);
+ }
+ if (timeout <= 0)
+ dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
+ azx_readw(chip, CORBRP));
- azx_writew(chip, CORBRP, 0);
- for (timeout = 1000; timeout > 0; timeout--) {
- if (azx_readw(chip, CORBRP) == 0)
- break;
- udelay(1);
+ azx_writew(chip, CORBRP, 0);
+ for (timeout = 1000; timeout > 0; timeout--) {
+ if (azx_readw(chip, CORBRP) == 0)
+ break;
+ udelay(1);
+ }
+ if (timeout <= 0)
+ dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
+ azx_readw(chip, CORBRP));
}
- if (timeout <= 0)
- dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
- azx_readw(chip, CORBRP));
/* enable corb dma */
azx_writeb(chip, CORBCTL, ICH6_CORBCTL_RUN);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index d6bca62ef387..b540ad71eb0d 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -249,7 +249,8 @@ enum {
/* quirks for Nvidia */
#define AZX_DCAPS_PRESET_NVIDIA \
(AZX_DCAPS_NVIDIA_SNOOP | AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI |\
- AZX_DCAPS_ALIGN_BUFSIZE | AZX_DCAPS_NO_64BIT)
+ AZX_DCAPS_ALIGN_BUFSIZE | AZX_DCAPS_NO_64BIT |\
+ AZX_DCAPS_CORBRP_SELF_CLEAR)
#define AZX_DCAPS_PRESET_CTHDA \
(AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY)
diff --git a/sound/pci/hda/hda_priv.h b/sound/pci/hda/hda_priv.h
index ba38b819f984..4a7cb01fa912 100644
--- a/sound/pci/hda/hda_priv.h
+++ b/sound/pci/hda/hda_priv.h
@@ -189,6 +189,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */
+#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */
/* position fix mode */
enum {
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index c643dfc0a826..c1952c910339 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4621,6 +4621,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0667, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0668, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0669, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0674, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x067f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
diff --git a/sound/soc/codecs/alc5623.c b/sound/soc/codecs/alc5623.c
index f500905e9373..2acf82f4a08a 100644
--- a/sound/soc/codecs/alc5623.c
+++ b/sound/soc/codecs/alc5623.c
@@ -1018,13 +1018,13 @@ static int alc5623_i2c_probe(struct i2c_client *client,
dev_err(&client->dev, "failed to read vendor ID1: %d\n", ret);
return ret;
}
- vid1 = ((vid1 & 0xff) << 8) | (vid1 >> 8);
ret = regmap_read(alc5623->regmap, ALC5623_VENDOR_ID2, &vid2);
if (ret < 0) {
dev_err(&client->dev, "failed to read vendor ID2: %d\n", ret);
return ret;
}
+ vid2 >>= 8;
if ((vid1 != 0x10ec) || (vid2 != id->driver_data)) {
dev_err(&client->dev, "unknown or wrong codec\n");
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 460d35547a68..2213a037c893 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -1229,8 +1229,10 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
}
if (cs42l52->pdata.reset_gpio) {
- ret = gpio_request_one(cs42l52->pdata.reset_gpio,
- GPIOF_OUT_INIT_HIGH, "CS42L52 /RST");
+ ret = devm_gpio_request_one(&i2c_client->dev,
+ cs42l52->pdata.reset_gpio,
+ GPIOF_OUT_INIT_HIGH,
+ "CS42L52 /RST");
if (ret < 0) {
dev_err(&i2c_client->dev, "Failed to request /RST %d: %d\n",
cs42l52->pdata.reset_gpio, ret);
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index 0ee60a19a263..ae3717992d56 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -1443,8 +1443,10 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
i2c_set_clientdata(i2c_client, cs42l73);
if (cs42l73->pdata.reset_gpio) {
- ret = gpio_request_one(cs42l73->pdata.reset_gpio,
- GPIOF_OUT_INIT_HIGH, "CS42L73 /RST");
+ ret = devm_gpio_request_one(&i2c_client->dev,
+ cs42l73->pdata.reset_gpio,
+ GPIOF_OUT_INIT_HIGH,
+ "CS42L73 /RST");
if (ret < 0) {
dev_err(&i2c_client->dev, "Failed to request /RST %d: %d\n",
cs42l73->pdata.reset_gpio, ret);
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index b1835103e9b4..d7349bc89ad3 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -1399,7 +1399,6 @@ static int aic3x_probe(struct snd_soc_codec *codec)
}
aic3x_add_widgets(codec);
- list_add(&aic3x->list, &reset_list);
return 0;
@@ -1569,7 +1568,13 @@ static int aic3x_i2c_probe(struct i2c_client *i2c,
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_aic3x, &aic3x_dai, 1);
- return ret;
+
+ if (ret != 0)
+ goto err_gpio;
+
+ list_add(&aic3x->list, &reset_list);
+
+ return 0;
err_gpio:
if (gpio_is_valid(aic3x->gpio_reset) &&
diff --git a/sound/soc/fsl/fsl_spdif.h b/sound/soc/fsl/fsl_spdif.h
index b1266790d117..605a10b2112b 100644
--- a/sound/soc/fsl/fsl_spdif.h
+++ b/sound/soc/fsl/fsl_spdif.h
@@ -144,8 +144,8 @@ enum spdif_gainsel {
/* SPDIF Clock register */
#define STC_SYSCLK_DIV_OFFSET 11
-#define STC_SYSCLK_DIV_MASK (0x1ff << STC_TXCLK_SRC_OFFSET)
-#define STC_SYSCLK_DIV(x) ((((x) - 1) << STC_TXCLK_DIV_OFFSET) & STC_SYSCLK_DIV_MASK)
+#define STC_SYSCLK_DIV_MASK (0x1ff << STC_SYSCLK_DIV_OFFSET)
+#define STC_SYSCLK_DIV(x) ((((x) - 1) << STC_SYSCLK_DIV_OFFSET) & STC_SYSCLK_DIV_MASK)
#define STC_TXCLK_SRC_OFFSET 8
#define STC_TXCLK_SRC_MASK (0x7 << STC_TXCLK_SRC_OFFSET)
#define STC_TXCLK_SRC_SET(x) ((x << STC_TXCLK_SRC_OFFSET) & STC_TXCLK_SRC_MASK)
diff --git a/sound/soc/intel/sst-dsp-priv.h b/sound/soc/intel/sst-dsp-priv.h
index fe8e81aad646..30ca14a6a835 100644
--- a/sound/soc/intel/sst-dsp-priv.h
+++ b/sound/soc/intel/sst-dsp-priv.h
@@ -136,7 +136,7 @@ struct sst_module_data {
enum sst_data_type data_type; /* type of module data */
u32 size; /* size in bytes */
- u32 offset; /* offset in FW file */
+ int32_t offset; /* offset in FW file */
u32 data_offset; /* offset in ADSP memory space */
void *data; /* module data */
};
diff --git a/sound/soc/intel/sst-haswell-ipc.c b/sound/soc/intel/sst-haswell-ipc.c
index f46bb4ddde6f..50e4246d4b57 100644
--- a/sound/soc/intel/sst-haswell-ipc.c
+++ b/sound/soc/intel/sst-haswell-ipc.c
@@ -617,7 +617,7 @@ static void hsw_notification_work(struct work_struct *work)
case IPC_POSITION_CHANGED:
trace_ipc_notification("DSP stream position changed for",
stream->reply.stream_hw_id);
- sst_dsp_inbox_read(hsw->dsp, pos, sizeof(pos));
+ sst_dsp_inbox_read(hsw->dsp, pos, sizeof(*pos));
if (stream->notify_position)
stream->notify_position(stream, stream->pdata);
@@ -991,7 +991,8 @@ int sst_hsw_stream_get_volume(struct sst_hsw *hsw, struct sst_hsw_stream *stream
return -EINVAL;
sst_dsp_read(hsw->dsp, volume,
- stream->reply.volume_register_address[channel], sizeof(volume));
+ stream->reply.volume_register_address[channel],
+ sizeof(*volume));
return 0;
}
@@ -1609,7 +1610,7 @@ int sst_hsw_dx_set_state(struct sst_hsw *hsw,
trace_ipc_request("PM enter Dx state", state);
ret = ipc_tx_message_wait(hsw, header, &state_, sizeof(state_),
- dx, sizeof(dx));
+ dx, sizeof(*dx));
if (ret < 0) {
dev_err(hsw->dev, "ipc: error set dx state %d failed\n", state);
return ret;
diff --git a/sound/soc/jz4740/Makefile b/sound/soc/jz4740/Makefile
index be873c1b0c20..d32c540555c4 100644
--- a/sound/soc/jz4740/Makefile
+++ b/sound/soc/jz4740/Makefile
@@ -1,10 +1,8 @@
#
# Jz4740 Platform Support
#
-snd-soc-jz4740-objs := jz4740-pcm.o
snd-soc-jz4740-i2s-objs := jz4740-i2s.o
-obj-$(CONFIG_SND_JZ4740_SOC) += snd-soc-jz4740.o
obj-$(CONFIG_SND_JZ4740_SOC_I2S) += snd-soc-jz4740-i2s.o
# Jz4740 Machine Support
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 6232b7d307aa..4d0720ed5a90 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -258,7 +258,7 @@ static int rsnd_src_init(struct rsnd_mod *mod,
{
struct rsnd_src *src = rsnd_mod_to_src(mod);
- clk_enable(src->clk);
+ clk_prepare_enable(src->clk);
return 0;
}
@@ -269,7 +269,7 @@ static int rsnd_src_quit(struct rsnd_mod *mod,
{
struct rsnd_src *src = rsnd_mod_to_src(mod);
- clk_disable(src->clk);
+ clk_disable_unprepare(src->clk);
return 0;
}
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 4b7e20603dd7..1d8387c25bd8 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -171,7 +171,7 @@ static void rsnd_ssi_hw_start(struct rsnd_ssi *ssi,
u32 cr;
if (0 == ssi->usrcnt) {
- clk_enable(ssi->clk);
+ clk_prepare_enable(ssi->clk);
if (rsnd_dai_is_clk_master(rdai)) {
if (rsnd_ssi_clk_from_parent(ssi))
@@ -230,7 +230,7 @@ static void rsnd_ssi_hw_stop(struct rsnd_ssi *ssi,
rsnd_ssi_master_clk_stop(ssi);
}
- clk_disable(ssi->clk);
+ clk_disable_unprepare(ssi->clk);
}
dev_dbg(dev, "ssi%d hw stopped\n", rsnd_mod_id(&ssi->mod));
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index c8a780d0d057..7769b0a2bc5a 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -254,7 +254,6 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
{
struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);
- kfree(data->widget);
kfree(data->wlist);
kfree(data);
}
diff --git a/tools/lib/api/fs/debugfs.c b/tools/lib/api/fs/debugfs.c
index 7c4347962353..a74fba6d7743 100644
--- a/tools/lib/api/fs/debugfs.c
+++ b/tools/lib/api/fs/debugfs.c
@@ -12,8 +12,8 @@
char debugfs_mountpoint[PATH_MAX + 1] = "/sys/kernel/debug";
static const char * const debugfs_known_mountpoints[] = {
- "/sys/kernel/debug/",
- "/debug/",
+ "/sys/kernel/debug",
+ "/debug",
0,
};
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index baec7d887da4..b83184f2d484 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -4344,6 +4344,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
format, len_arg, arg);
trace_seq_terminate(&p);
trace_seq_puts(s, p.buffer);
+ trace_seq_destroy(&p);
arg = arg->next;
break;
default:
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 791c539374c7..feab94281634 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -876,8 +876,8 @@ struct event_filter {
struct event_filter *pevent_filter_alloc(struct pevent *pevent);
/* for backward compatibility */
-#define FILTER_NONE PEVENT_ERRNO__FILTER_NOT_FOUND
-#define FILTER_NOEXIST PEVENT_ERRNO__NO_FILTER
+#define FILTER_NONE PEVENT_ERRNO__NO_FILTER
+#define FILTER_NOEXIST PEVENT_ERRNO__FILTER_NOT_FOUND
#define FILTER_MISS PEVENT_ERRNO__FILTER_MISS
#define FILTER_MATCH PEVENT_ERRNO__FILTER_MATCH
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index e96923310d57..895edd32930c 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -589,7 +589,7 @@ $(GTK_OBJS): $(OUTPUT)%.o: %.c $(LIB_H)
$(QUIET_CC)$(CC) -o $@ -c -fPIC $(CFLAGS) $(GTK_CFLAGS) $<
$(OUTPUT)libperf-gtk.so: $(GTK_OBJS) $(PERFLIBS)
- $(QUIET_LINK)$(CC) -o $@ -shared $(ALL_LDFLAGS) $(filter %.o,$^) $(GTK_LIBS)
+ $(QUIET_LINK)$(CC) -o $@ -shared $(LDFLAGS) $(filter %.o,$^) $(GTK_LIBS)
$(OUTPUT)builtin-help.o: builtin-help.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
diff --git a/tools/perf/arch/x86/tests/dwarf-unwind.c b/tools/perf/arch/x86/tests/dwarf-unwind.c
index b602ad93ce63..83bc2385e6d3 100644
--- a/tools/perf/arch/x86/tests/dwarf-unwind.c
+++ b/tools/perf/arch/x86/tests/dwarf-unwind.c
@@ -23,9 +23,10 @@ static int sample_ustack(struct perf_sample *sample,
sp = (unsigned long) regs[PERF_REG_X86_SP];
- map = map_groups__find(&thread->mg, MAP__FUNCTION, (u64) sp);
+ map = map_groups__find(&thread->mg, MAP__VARIABLE, (u64) sp);
if (!map) {
pr_debug("failed to get stack map\n");
+ free(buf);
return -1;
}
diff --git a/tools/perf/arch/x86/tests/regs_load.S b/tools/perf/arch/x86/tests/regs_load.S
index 99167bf644ea..60875d5c556c 100644
--- a/tools/perf/arch/x86/tests/regs_load.S
+++ b/tools/perf/arch/x86/tests/regs_load.S
@@ -1,4 +1,3 @@
-
#include <linux/linkage.h>
#define AX 0
@@ -90,3 +89,10 @@ ENTRY(perf_regs_load)
ret
ENDPROC(perf_regs_load)
#endif
+
+/*
+ * We need to provide a note.GNU-stack section, saying that we want a
+ * non-executable stack. Otherwise the final linking will assume that
+ * the ELF stack should not be restricted at all and set it RWX.
+ */
+.section .note.GNU-stack,"",@progbits
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index ee21fa95ebcf..802cf544202b 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -34,6 +34,14 @@ ifeq ($(ARCH),arm)
LIBUNWIND_LIBS = -lunwind -lunwind-arm
endif
+# So far there's only x86 libdw unwind support merged in perf.
+# Disable it on all other architectures in case libdw unwind
+# support is detected on the system. Add supported architectures
+# to the check.
+ifneq ($(ARCH),x86)
+ NO_LIBDW_DWARF_UNWIND := 1
+endif
+
ifeq ($(LIBUNWIND_LIBS),)
NO_LIBUNWIND := 1
else
@@ -109,6 +117,10 @@ CFLAGS += -Wall
CFLAGS += -Wextra
CFLAGS += -std=gnu99
+# Enforce a non-executable stack, as we may regress (again) in the future by
+# adding assembler files missing the .GNU-stack linker note.
+LDFLAGS += -Wl,-z,noexecstack
+
EXTLIBS = -lelf -lpthread -lrt -lm -ldl
ifneq ($(OUTPUT),)
@@ -186,7 +198,10 @@ VF_FEATURE_TESTS = \
stackprotector-all \
timerfd \
libunwind-debug-frame \
- bionic
+ bionic \
+ liberty \
+ liberty-z \
+ cplus-demangle
# Set FEATURE_CHECK_(C|LD)FLAGS-all for all CORE_FEATURE_TESTS features.
# If in the future we need per-feature checks/flags for features not
@@ -504,7 +519,21 @@ else
endif
ifeq ($(feature-libbfd), 1)
- EXTLIBS += -lbfd -lz -liberty
+ EXTLIBS += -lbfd
+
+ # call all detections now so we get correct
+ # status in VF output
+ $(call feature_check,liberty)
+ $(call feature_check,liberty-z)
+ $(call feature_check,cplus-demangle)
+
+ ifeq ($(feature-liberty), 1)
+ EXTLIBS += -liberty
+ else
+ ifeq ($(feature-liberty-z), 1)
+ EXTLIBS += -liberty -lz
+ endif
+ endif
endif
ifdef NO_DEMANGLE
@@ -515,15 +544,10 @@ else
CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
else
ifneq ($(feature-libbfd), 1)
- $(call feature_check,liberty)
- ifeq ($(feature-liberty), 1)
- EXTLIBS += -lbfd -liberty
- else
- $(call feature_check,liberty-z)
- ifeq ($(feature-liberty-z), 1)
- EXTLIBS += -lbfd -liberty -lz
- else
- $(call feature_check,cplus-demangle)
+ ifneq ($(feature-liberty), 1)
+ ifneq ($(feature-liberty-z), 1)
+ # we have neither HAVE_CPLUS_DEMANGLE_SUPPORT
+ # nor any of the 'bfd iberty z' trinity
ifeq ($(feature-cplus-demangle), 1)
EXTLIBS += -liberty
CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index 5daeae1cb4c0..2f92d6e7ee00 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -46,6 +46,7 @@ make_install_man := install-man
make_install_html := install-html
make_install_info := install-info
make_install_pdf := install-pdf
+make_static := LDFLAGS=-static
# all the NO_* variable combined
make_minimal := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1
@@ -87,6 +88,7 @@ run += make_install_bin
# run += make_install_info
# run += make_install_pdf
run += make_minimal
+run += make_static
ifneq ($(call has,ctags),)
run += make_tags
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index a53cd0b8c151..27c2a5efe450 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -717,7 +717,7 @@ static char *get_kernel_version(const char *root_dir)
}
static int map_groups__set_modules_path_dir(struct map_groups *mg,
- const char *dir_name)
+ const char *dir_name, int depth)
{
struct dirent *dent;
DIR *dir = opendir(dir_name);
@@ -742,7 +742,15 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg,
!strcmp(dent->d_name, ".."))
continue;
- ret = map_groups__set_modules_path_dir(mg, path);
+ /* Do not follow top-level source and build symlinks */
+ if (depth == 0) {
+ if (!strcmp(dent->d_name, "source") ||
+ !strcmp(dent->d_name, "build"))
+ continue;
+ }
+
+ ret = map_groups__set_modules_path_dir(mg, path,
+ depth + 1);
if (ret < 0)
goto out;
} else {
@@ -786,11 +794,11 @@ static int machine__set_modules_path(struct machine *machine)
if (!version)
return -1;
- snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
+ snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
machine->root_dir, version);
free(version);
- return map_groups__set_modules_path_dir(&machine->kmaps, modules_path);
+ return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}
static int machine__create_module(void *arg, const char *name, u64 start)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 47b29834a6b6..56ff9bebb577 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -548,11 +548,10 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
u32 val;
u32 *reg;
- offset >>= 1;
reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
- vcpu->vcpu_id, offset);
+ vcpu->vcpu_id, offset >> 1);
- if (offset & 2)
+ if (offset & 4)
val = *reg >> 16;
else
val = *reg & 0xffff;
@@ -561,13 +560,13 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
vgic_reg_access(mmio, &val, offset,
ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
if (mmio->is_write) {
- if (offset < 4) {
+ if (offset < 8) {
*reg = ~0U; /* Force PPIs/SGIs to 1 */
return false;
}
val = vgic_cfg_compress(val);
- if (offset & 2) {
+ if (offset & 4) {
*reg &= 0xffff;
*reg |= val << 16;
} else {
@@ -916,6 +915,7 @@ static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
case 0:
if (!target_cpus)
return;
+ break;
case 1:
target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
@@ -1667,10 +1667,11 @@ static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
if (addr + size < addr)
return -EINVAL;
+ *ioaddr = addr;
ret = vgic_ioaddr_overlap(kvm);
if (ret)
- return ret;
- *ioaddr = addr;
+ *ioaddr = VGIC_ADDR_UNDEF;
+
return ret;
}
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 8db43701016f..bf06577fea51 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -395,7 +395,8 @@ static int assigned_device_enable_host_msix(struct kvm *kvm,
if (dev->entries_nr == 0)
return r;
- r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
+ r = pci_enable_msix_exact(dev->dev,
+ dev->host_msix_entries, dev->entries_nr);
if (r)
return r;
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 10df100c4514..06e6401d6ef4 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -101,7 +101,7 @@ static void async_pf_execute(struct work_struct *work)
if (waitqueue_active(&vcpu->wq))
wake_up_interruptible(&vcpu->wq);
- mmdrop(mm);
+ mmput(mm);
kvm_put_kvm(vcpu->kvm);
}
@@ -118,7 +118,7 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
flush_work(&work->work);
#else
if (cancel_work_sync(&work->work)) {
- mmdrop(work->mm);
+ mmput(work->mm);
kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
kmem_cache_free(async_pf_cache, work);
}
@@ -183,7 +183,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
work->addr = hva;
work->arch = *arch;
work->mm = current->mm;
- atomic_inc(&work->mm->mm_count);
+ atomic_inc(&work->mm->mm_users);
kvm_get_kvm(work->vcpu->kvm);
/* this can't really happen otherwise gfn_to_pfn_async
@@ -201,7 +201,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
return 1;
retry_sync:
kvm_put_kvm(work->vcpu->kvm);
- mmdrop(work->mm);
+ mmput(work->mm);
kmem_cache_free(async_pf_cache, work);
return 0;
}