-rw-r--r--  Documentation/ABI/stable/sysfs-devices-system-xen_memory | 9
-rw-r--r--  Documentation/admin-guide/cgroup-v2.rst | 8
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 12
-rw-r--r--  Documentation/arm64/elf_hwcaps.txt | 12
-rw-r--r--  Documentation/arm64/hugetlbpage.txt | 38
-rw-r--r--  Documentation/arm64/silicon-errata.txt | 1
-rw-r--r--  Documentation/blockdev/README.DAC960 | 756
-rw-r--r--  Documentation/blockdev/zram.txt | 2
-rw-r--r--  Documentation/core-api/idr.rst | 2
-rw-r--r--  Documentation/device-mapper/dm-raid.txt | 4
-rw-r--r--  Documentation/device-mapper/log-writes.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/ata/ahci-platform.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/ata/brcm,sata-brcm.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/input/gpio-keys.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/macb.txt | 1
-rw-r--r--  Documentation/driver-api/fpga/fpga-mgr.rst | 5
-rw-r--r--  Documentation/fb/uvesafb.txt | 5
-rw-r--r--  Documentation/filesystems/vfs.txt | 21
-rw-r--r--  Documentation/media/uapi/dvb/video_function_calls.rst | 1
-rw-r--r--  Documentation/networking/ip-sysctl.txt | 2
-rw-r--r--  Documentation/process/changes.rst | 2
-rw-r--r--  Documentation/process/code-of-conduct-interpretation.rst | 156
-rw-r--r--  Documentation/process/code-of-conduct.rst | 86
-rw-r--r--  Documentation/process/code-of-conflict.rst | 28
-rw-r--r--  Documentation/process/index.rst | 3
-rw-r--r--  Documentation/virtual/kvm/api.txt | 12
-rw-r--r--  Documentation/x86/earlyprintk.txt | 25
-rw-r--r--  LICENSES/other/CC-BY-SA-4.0 | 397
-rw-r--r--  MAINTAINERS | 115
-rw-r--r--  Makefile | 34
-rw-r--r--  arch/arc/Kconfig | 2
-rw-r--r--  arch/arc/Makefile | 26
-rw-r--r--  arch/arc/kernel/process.c | 20
-rw-r--r--  arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts | 2
-rw-r--r--  arch/arm/boot/dts/bcm63138.dtsi | 14
-rw-r--r--  arch/arm/boot/dts/imx53-qsb-common.dtsi | 11
-rw-r--r--  arch/arm/boot/dts/sama5d3_emac.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/stm32mp157c.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/sun8i-r40.dtsi | 3
-rw-r--r--  arch/arm/include/asm/io.h | 15
-rw-r--r--  arch/arm/include/asm/kvm_arm.h | 1
-rw-r--r--  arch/arm/include/asm/kvm_host.h | 1
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h | 5
-rw-r--r--  arch/arm/kernel/vmlinux.lds.h | 2
-rw-r--r--  arch/arm/kvm/coproc.c | 8
-rw-r--r--  arch/arm/mm/ioremap.c | 2
-rw-r--r--  arch/arm/tools/syscall.tbl | 1
-rw-r--r--  arch/arm64/Kconfig | 34
-rw-r--r--  arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts | 2
-rw-r--r--  arch/arm64/include/asm/assembler.h | 7
-rw-r--r--  arch/arm64/include/asm/cache.h | 40
-rw-r--r--  arch/arm64/include/asm/compat.h | 1
-rw-r--r--  arch/arm64/include/asm/compiler.h | 30
-rw-r--r--  arch/arm64/include/asm/cpucaps.h | 7
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 9
-rw-r--r--  arch/arm64/include/asm/cputype.h | 2
-rw-r--r--  arch/arm64/include/asm/daifflags.h | 15
-rw-r--r--  arch/arm64/include/asm/esr.h | 77
-rw-r--r--  arch/arm64/include/asm/io.h | 9
-rw-r--r--  arch/arm64/include/asm/jump_label.h | 4
-rw-r--r--  arch/arm64/include/asm/kernel-pgtable.h | 2
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 1
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 2
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 15
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 5
-rw-r--r--  arch/arm64/include/asm/mmu.h | 3
-rw-r--r--  arch/arm64/include/asm/mmu_context.h | 17
-rw-r--r--  arch/arm64/include/asm/page.h | 2
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h | 2
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 48
-rw-r--r--  arch/arm64/include/asm/processor.h | 11
-rw-r--r--  arch/arm64/include/asm/ptrace.h | 1
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 44
-rw-r--r--  arch/arm64/include/asm/tlb.h | 34
-rw-r--r--  arch/arm64/include/asm/tlbflush.h | 112
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 1
-rw-r--r--  arch/arm64/include/asm/xen/events.h | 2
-rw-r--r--  arch/arm64/include/uapi/asm/hwcap.h | 1
-rw-r--r--  arch/arm64/include/uapi/asm/ptrace.h | 1
-rw-r--r--  arch/arm64/kernel/Makefile | 1
-rw-r--r--  arch/arm64/kernel/cpu_errata.c | 96
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 195
-rw-r--r--  arch/arm64/kernel/cpuinfo.c | 11
-rw-r--r--  arch/arm64/kernel/crash_core.c | 19
-rw-r--r--  arch/arm64/kernel/entry.S | 18
-rw-r--r--  arch/arm64/kernel/head.S | 40
-rw-r--r--  arch/arm64/kernel/machine_kexec.c | 11
-rw-r--r--  arch/arm64/kernel/perf_event.c | 7
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c | 2
-rw-r--r--  arch/arm64/kernel/process.c | 4
-rw-r--r--  arch/arm64/kernel/psci.c | 1
-rw-r--r--  arch/arm64/kernel/setup.c | 60
-rw-r--r--  arch/arm64/kernel/sleep.S | 1
-rw-r--r--  arch/arm64/kernel/ssbd.c | 24
-rw-r--r--  arch/arm64/kernel/suspend.c | 4
-rw-r--r--  arch/arm64/kernel/traps.c | 211
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 35
-rw-r--r--  arch/arm64/kvm/guest.c | 55
-rw-r--r--  arch/arm64/kvm/hyp-init.S | 3
-rw-r--r--  arch/arm64/kvm/hyp/switch.c | 9
-rw-r--r--  arch/arm64/kvm/hyp/sysreg-sr.c | 11
-rw-r--r--  arch/arm64/lib/Makefile | 4
-rw-r--r--  arch/arm64/lib/crc32.S | 60
-rw-r--r--  arch/arm64/mm/context.c | 11
-rw-r--r--  arch/arm64/mm/dump.c | 6
-rw-r--r--  arch/arm64/mm/fault.c | 38
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 50
-rw-r--r--  arch/arm64/mm/init.c | 2
-rw-r--r--  arch/arm64/mm/kasan_init.c | 2
-rw-r--r--  arch/arm64/mm/mmu.c | 46
-rw-r--r--  arch/arm64/mm/numa.c | 13
-rw-r--r--  arch/arm64/mm/proc.S | 11
-rw-r--r--  arch/hexagon/include/asm/bitops.h | 4
-rw-r--r--  arch/hexagon/kernel/dma.c | 2
-rw-r--r--  arch/m68k/emu/nfblock.c | 2
-rw-r--r--  arch/m68k/include/asm/atafd.h | 13
-rw-r--r--  arch/m68k/include/asm/atafdreg.h | 80
-rw-r--r--  arch/mips/include/asm/kvm_host.h | 1
-rw-r--r--  arch/mips/include/asm/mach-lantiq/xway/xway_dma.h | 1
-rw-r--r--  arch/mips/include/asm/processor.h | 10
-rw-r--r--  arch/mips/kernel/process.c | 25
-rw-r--r--  arch/mips/kernel/setup.c | 48
-rw-r--r--  arch/mips/kernel/vdso.c | 18
-rw-r--r--  arch/mips/kvm/mmu.c | 10
-rw-r--r--  arch/mips/lantiq/xway/dma.c | 4
-rw-r--r--  arch/mips/lib/memset.S | 4
-rw-r--r--  arch/parisc/kernel/unwind.c | 2
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h | 5
-rw-r--r--  arch/powerpc/include/asm/iommu.h | 2
-rw-r--r--  arch/powerpc/include/asm/mmu_context.h | 1
-rw-r--r--  arch/powerpc/include/asm/setup.h | 1
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 4
-rw-r--r--  arch/powerpc/kernel/iommu.c | 25
-rw-r--r--  arch/powerpc/kernel/process.c | 10
-rw-r--r--  arch/powerpc/kernel/tm.S | 20
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_radix.c | 107
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio_hv.c | 39
-rw-r--r--  arch/powerpc/lib/checksum_64.S | 3
-rw-r--r--  arch/powerpc/lib/code-patching.c | 14
-rw-r--r--  arch/powerpc/mm/init_64.c | 49
-rw-r--r--  arch/powerpc/mm/mem.c | 2
-rw-r--r--  arch/powerpc/mm/mmu_context_iommu.c | 34
-rw-r--r--  arch/powerpc/mm/numa.c | 12
-rw-r--r--  arch/powerpc/mm/pkeys.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda-tce.c | 2
-rw-r--r--  arch/riscv/include/asm/asm-prototypes.h | 7
-rw-r--r--  arch/riscv/kernel/setup.c | 9
-rw-r--r--  arch/s390/crypto/paes_s390.c | 2
-rw-r--r--  arch/s390/include/asm/mmu.h | 8
-rw-r--r--  arch/s390/include/asm/sclp.h | 3
-rw-r--r--  arch/s390/kernel/early_printk.c | 2
-rw-r--r--  arch/s390/kernel/swsusp.S | 8
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 6
-rw-r--r--  arch/s390/kvm/priv.c | 30
-rw-r--r--  arch/s390/kvm/vsie.c | 3
-rw-r--r--  arch/s390/mm/gmap.c | 4
-rw-r--r--  arch/sparc/include/asm/cpudata_64.h | 2
-rw-r--r--  arch/sparc/include/uapi/asm/unistd.h | 3
-rw-r--r--  arch/sparc/kernel/kgdb_32.c | 2
-rw-r--r--  arch/sparc/kernel/kgdb_64.c | 2
-rw-r--r--  arch/sparc/kernel/perf_event.c | 26
-rw-r--r--  arch/sparc/kernel/rtrap_64.S | 3
-rw-r--r--  arch/sparc/kernel/systbls_32.S | 2
-rw-r--r--  arch/sparc/kernel/systbls_64.S | 4
-rw-r--r--  arch/sparc/kernel/viohs.c | 12
-rw-r--r--  arch/sparc/vdso/Makefile | 8
-rw-r--r--  arch/sparc/vdso/vclock_gettime.c | 12
-rw-r--r--  arch/sparc/vdso/vma.c | 4
-rw-r--r--  arch/um/drivers/ubd_kern.c | 236
-rw-r--r--  arch/x86/boot/compressed/Makefile | 1
-rw-r--r--  arch/x86/boot/compressed/mem_encrypt.S | 19
-rw-r--r--  arch/x86/crypto/aegis128-aesni-glue.c | 1
-rw-r--r--  arch/x86/crypto/aegis128l-aesni-glue.c | 1
-rw-r--r--  arch/x86/crypto/aegis256-aesni-glue.c | 1
-rw-r--r--  arch/x86/crypto/morus1280-sse2-glue.c | 1
-rw-r--r--  arch/x86/crypto/morus640-sse2-glue.c | 1
-rw-r--r--  arch/x86/entry/entry_32.S | 13
-rw-r--r--  arch/x86/entry/entry_64.S | 13
-rw-r--r--  arch/x86/entry/vdso/Makefile | 16
-rw-r--r--  arch/x86/entry/vdso/vclock_gettime.c | 26
-rw-r--r--  arch/x86/events/amd/uncore.c | 10
-rw-r--r--  arch/x86/events/intel/lbr.c | 4
-rw-r--r--  arch/x86/events/intel/uncore_snbep.c | 14
-rw-r--r--  arch/x86/hyperv/hv_apic.c | 8
-rw-r--r--  arch/x86/include/asm/atomic.h | 12
-rw-r--r--  arch/x86/include/asm/atomic64_32.h | 8
-rw-r--r--  arch/x86/include/asm/atomic64_64.h | 12
-rw-r--r--  arch/x86/include/asm/fixmap.h | 10
-rw-r--r--  arch/x86/include/asm/fpu/internal.h | 2
-rw-r--r--  arch/x86/include/asm/hyperv-tlfs.h | 16
-rw-r--r--  arch/x86/include/asm/io.h | 12
-rw-r--r--  arch/x86/include/asm/kdebug.h | 12
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 27
-rw-r--r--  arch/x86/include/asm/mem_encrypt.h | 7
-rw-r--r--  arch/x86/include/asm/percpu.h | 8
-rw-r--r--  arch/x86/include/asm/perf_event.h | 8
-rw-r--r--  arch/x86/include/asm/pgtable-2level.h | 9
-rw-r--r--  arch/x86/include/asm/pgtable.h | 2
-rw-r--r--  arch/x86/include/asm/pgtable_64.h | 23
-rw-r--r--  arch/x86/include/asm/pgtable_types.h | 2
-rw-r--r--  arch/x86/include/asm/uv/uv.h | 6
-rw-r--r--  arch/x86/include/asm/xen/events.h | 2
-rw-r--r--  arch/x86/include/uapi/asm/kvm.h | 1
-rw-r--r--  arch/x86/kernel/apic/vector.c | 2
-rw-r--r--  arch/x86/kernel/apm_32.c | 2
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 2
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt.h | 23
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c | 27
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c | 20
-rw-r--r--  arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 89
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c | 24
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c | 17
-rw-r--r--  arch/x86/kernel/dumpstack.c | 11
-rw-r--r--  arch/x86/kernel/eisa.c | 10
-rw-r--r--  arch/x86/kernel/fpu/signal.c | 1
-rw-r--r--  arch/x86/kernel/head64.c | 20
-rw-r--r--  arch/x86/kernel/head_64.S | 16
-rw-r--r--  arch/x86/kernel/kvmclock.c | 52
-rw-r--r--  arch/x86/kernel/paravirt.c | 4
-rw-r--r--  arch/x86/kernel/pci-swiotlb.c | 2
-rw-r--r--  arch/x86/kernel/process_32.c | 4
-rw-r--r--  arch/x86/kernel/process_64.c | 12
-rw-r--r--  arch/x86/kernel/time.c | 2
-rw-r--r--  arch/x86/kernel/topology.c | 4
-rw-r--r--  arch/x86/kernel/tsc.c | 12
-rw-r--r--  arch/x86/kernel/vmlinux.lds.S | 19
-rw-r--r--  arch/x86/kvm/lapic.c | 49
-rw-r--r--  arch/x86/kvm/mmu.c | 59
-rw-r--r--  arch/x86/kvm/svm.c | 32
-rw-r--r--  arch/x86/kvm/vmx.c | 300
-rw-r--r--  arch/x86/kvm/x86.c | 131
-rw-r--r--  arch/x86/kvm/x86.h | 2
-rw-r--r--  arch/x86/mm/init.c | 4
-rw-r--r--  arch/x86/mm/mem_encrypt.c | 24
-rw-r--r--  arch/x86/mm/pgtable.c | 27
-rw-r--r--  arch/x86/platform/efi/efi_32.c | 3
-rw-r--r--  arch/x86/xen/enlighten.c | 1
-rw-r--r--  arch/x86/xen/enlighten_pvh.c | 1
-rw-r--r--  arch/x86/xen/mmu_pv.c | 8
-rw-r--r--  arch/x86/xen/platform-pci-unplug.c | 1
-rw-r--r--  arch/x86/xen/pmu.c | 3
-rw-r--r--  arch/xtensa/Kconfig | 1
-rw-r--r--  arch/xtensa/Makefile | 4
-rw-r--r--  arch/xtensa/platforms/iss/setup.c | 25
-rw-r--r--  block/Kconfig | 10
-rw-r--r--  block/Kconfig.iosched | 3
-rw-r--r--  block/Makefile | 1
-rw-r--r--  block/bfq-cgroup.c | 4
-rw-r--r--  block/bfq-iosched.c | 291
-rw-r--r--  block/bfq-iosched.h | 53
-rw-r--r--  block/bfq-wf2q.c | 49
-rw-r--r--  block/bio-integrity.c | 12
-rw-r--r--  block/bio.c | 220
-rw-r--r--  block/blk-cgroup.c | 127
-rw-r--r--  block/blk-core.c | 280
-rw-r--r--  block/blk-flush.c | 6
-rw-r--r--  block/blk-integrity.c | 12
-rw-r--r--  block/blk-iolatency.c | 230
-rw-r--r--  block/blk-lib.c | 28
-rw-r--r--  block/blk-merge.c | 88
-rw-r--r--  block/blk-mq-debugfs.c | 13
-rw-r--r--  block/blk-mq-sched.h | 4
-rw-r--r--  block/blk-mq-tag.c | 78
-rw-r--r--  block/blk-mq.c | 215
-rw-r--r--  block/blk-pm.c | 216
-rw-r--r--  block/blk-pm.h | 69
-rw-r--r--  block/blk-softirq.c | 5
-rw-r--r--  block/blk-stat.c | 1
-rw-r--r--  block/blk-throttle.c | 54
-rw-r--r--  block/blk-wbt.c | 2
-rw-r--r--  block/blk.h | 73
-rw-r--r--  block/bounce.c | 41
-rw-r--r--  block/cfq-iosched.c | 16
-rw-r--r--  block/elevator.c | 24
-rw-r--r--  block/genhd.c | 25
-rw-r--r--  block/kyber-iosched.c | 547
-rw-r--r--  block/partition-generic.c | 6
-rw-r--r--  drivers/android/binder_alloc.c | 43
-rw-r--r--  drivers/ata/Kconfig | 5
-rw-r--r--  drivers/ata/ahci.h | 2
-rw-r--r--  drivers/ata/ahci_brcm.c | 8
-rw-r--r--  drivers/ata/ahci_platform.c | 15
-rw-r--r--  drivers/ata/ahci_sunxi.c | 3
-rw-r--r--  drivers/ata/libahci_platform.c | 54
-rw-r--r--  drivers/ata/libata-core.c | 14
-rw-r--r--  drivers/ata/libata-scsi.c | 4
-rw-r--r--  drivers/ata/pata_atiixp.c | 2
-rw-r--r--  drivers/ata/pata_ep93xx.c | 8
-rw-r--r--  drivers/base/firmware_loader/main.c | 35
-rw-r--r--  drivers/base/power/main.c | 5
-rw-r--r--  drivers/block/DAC960.c | 7229
-rw-r--r--  drivers/block/DAC960.h | 4414
-rw-r--r--  drivers/block/Kconfig | 13
-rw-r--r--  drivers/block/Makefile | 1
-rw-r--r--  drivers/block/amiflop.c | 318
-rw-r--r--  drivers/block/aoe/aoe.h | 5
-rw-r--r--  drivers/block/aoe/aoeblk.c | 70
-rw-r--r--  drivers/block/aoe/aoecmd.c | 19
-rw-r--r--  drivers/block/aoe/aoedev.c | 15
-rw-r--r--  drivers/block/ataflop.c | 273
-rw-r--r--  drivers/block/drbd/Kconfig | 1
-rw-r--r--  drivers/block/drbd/drbd_int.h | 15
-rw-r--r--  drivers/block/drbd/drbd_main.c | 16
-rw-r--r--  drivers/block/drbd/drbd_nl.c | 39
-rw-r--r--  drivers/block/drbd/drbd_protocol.h | 4
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 35
-rw-r--r--  drivers/block/drbd/drbd_req.c | 2
-rw-r--r--  drivers/block/drbd/drbd_worker.c | 65
-rw-r--r--  drivers/block/floppy.c | 71
-rw-r--r--  drivers/block/loop.c | 5
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 49
-rw-r--r--  drivers/block/null_blk.h | 17
-rw-r--r--  drivers/block/null_blk_main.c | 154
-rw-r--r--  drivers/block/null_blk_zoned.c | 34
-rw-r--r--  drivers/block/paride/pcd.c | 88
-rw-r--r--  drivers/block/paride/pd.c | 94
-rw-r--r--  drivers/block/paride/pf.c | 56
-rw-r--r--  drivers/block/pktcdvd.c | 2
-rw-r--r--  drivers/block/ps3disk.c | 88
-rw-r--r--  drivers/block/ps3vram.c | 2
-rw-r--r--  drivers/block/rsxx/core.c | 2
-rw-r--r--  drivers/block/rsxx/cregs.c | 2
-rw-r--r--  drivers/block/rsxx/dev.c | 2
-rw-r--r--  drivers/block/rsxx/dma.c | 52
-rw-r--r--  drivers/block/skd_main.c | 69
-rw-r--r--  drivers/block/sunvdc.c | 16
-rw-r--r--  drivers/block/swim.c | 106
-rw-r--r--  drivers/block/swim3.c | 211
-rw-r--r--  drivers/block/sx8.c | 166
-rw-r--r--  drivers/block/umem.c | 42
-rw-r--r--  drivers/block/virtio_blk.c | 68
-rw-r--r--  drivers/block/xen-blkfront.c | 6
-rw-r--r--  drivers/block/xsysace.c | 80
-rw-r--r--  drivers/block/z2ram.c | 87
-rw-r--r--  drivers/block/zram/Kconfig | 2
-rw-r--r--  drivers/block/zram/zram_drv.c | 28
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 2
-rw-r--r--  drivers/bluetooth/hci_qca.c | 2
-rw-r--r--  drivers/cdrom/cdrom.c | 29
-rw-r--r--  drivers/cdrom/gdrom.c | 174
-rw-r--r--  drivers/char/Kconfig | 4
-rw-r--r--  drivers/char/ipmi/ipmi_bt_sm.c | 92
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 53
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 17
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 32
-rw-r--r--  drivers/char/ipmi/kcs_bmc.c | 7
-rw-r--r--  drivers/char/random.c | 11
-rw-r--r--  drivers/clk/sunxi-ng/ccu-sun4i-a10.c | 10
-rw-r--r--  drivers/clk/x86/clk-pmc-atom.c | 18
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 15
-rw-r--r--  drivers/clocksource/timer-atmel-pit.c | 20
-rw-r--r--  drivers/clocksource/timer-fttmr010.c | 18
-rw-r--r--  drivers/clocksource/timer-ti-32k.c | 3
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-kryo.c | 4
-rw-r--r--  drivers/crypto/caam/caamalg.c | 8
-rw-r--r--  drivers/crypto/ccp/psp-dev.c | 46
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.c | 32
-rw-r--r--  drivers/crypto/chelsio/chcr_crypto.h | 2
-rw-r--r--  drivers/crypto/inside-secure/safexcel.c | 8
-rw-r--r--  drivers/crypto/mxs-dcp.c | 53
-rw-r--r--  drivers/crypto/qat/qat_c3xxx/adf_drv.c | 6
-rw-r--r--  drivers/crypto/qat/qat_c3xxxvf/adf_drv.c | 6
-rw-r--r--  drivers/crypto/qat/qat_c62x/adf_drv.c | 6
-rw-r--r--  drivers/crypto/qat/qat_c62xvf/adf_drv.c | 6
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 6
-rw-r--r--  drivers/crypto/qat/qat_dh895xccvf/adf_drv.c | 6
-rw-r--r--  drivers/dax/device.c | 6
-rw-r--r--  drivers/dma/mic_x100_dma.c | 4
-rw-r--r--  drivers/firmware/arm_scmi/perf.c | 8
-rw-r--r--  drivers/firmware/efi/Kconfig | 9
-rw-r--r--  drivers/fpga/dfl-fme-pr.c | 2
-rw-r--r--  drivers/fpga/dfl-fme-region.c | 4
-rw-r--r--  drivers/fpga/fpga-bridge.c | 2
-rw-r--r--  drivers/fpga/of-fpga-region.c | 3
-rw-r--r--  drivers/gpio/gpiolib.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 21
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 149
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c | 12
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 2
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c | 1
-rw-r--r--  drivers/gpu/drm/arm/malidp_hw.c | 25
-rw-r--r--  drivers/gpu/drm/arm/malidp_hw.h | 3
-rw-r--r--  drivers/gpu/drm/arm/malidp_mw.c | 25
-rw-r--r--  drivers/gpu/drm/arm/malidp_regs.h | 2
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 7
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 12
-rw-r--r--  drivers/gpu/drm/drm_client.c | 35
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 5
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c | 4
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 98
-rw-r--r--  drivers/gpu/drm/drm_lease.c | 6
-rw-r--r--  drivers/gpu/drm/drm_panel.c | 10
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 5
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 27
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_iommu.h | 34
-rw-r--r--  drivers/gpu/drm/i2c/tda9950.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 17
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c | 28
-rw-r--r--  drivers/gpu/drm/i915/gvt/opregion.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 88
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 33
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 8
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 228
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 82
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 110
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 44
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 57
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_vga.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c | 54
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c | 24
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | 2
-rw-r--r--  drivers/gpu/drm/pl111/pl111_vexpress.c | 3
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_dotclock.c | 12
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.c | 1
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c | 1
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_mixer.c | 24
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_tcon_top.c | 1
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 8
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c | 25
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 42
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 25
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 24
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c | 2
-rw-r--r--  drivers/hid/hid-apple.c | 9
-rw-r--r--  drivers/hid/hid-core.c | 5
-rw-r--r--  drivers/hid/hid-ids.h | 7
-rw-r--r--  drivers/hid/hid-input.c | 5
-rw-r--r--  drivers/hid/hid-multitouch.c | 19
-rw-r--r--  drivers/hid/hid-saitek.c | 2
-rw-r--r--  drivers/hid/hid-sensor-hub.c | 23
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c | 34
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/hw-ish.h | 2
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/pci-ish.c | 2
-rw-r--r--  drivers/hv/connection.c | 8
-rw-r--r--  drivers/hv/vmbus_drv.c | 3
-rw-r--r--  drivers/hwmon/nct6775.c | 72
-rw-r--r--  drivers/hwmon/npcm750-pwm-fan.c | 2
-rw-r--r--  drivers/hwmon/raspberrypi-hwmon.c | 1
-rw-r--r--  drivers/hwtracing/intel_th/core.c | 16
-rw-r--r--  drivers/hwtracing/intel_th/pci.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-designware-master.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-isch.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-qcom-geni.c | 22
-rw-r--r--  drivers/i2c/busses/i2c-rcar.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-scmi.c | 1
-rw-r--r--  drivers/i2c/i2c-core-base.c | 2
-rw-r--r--  drivers/ide/ide-cd.c | 2
-rw-r--r--  drivers/ide/ide-gd.c | 2
-rw-r--r--  drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c | 13
-rw-r--r--  drivers/iio/temperature/maxim_thermocouple.c | 1
-rw-r--r--  drivers/infiniband/core/cache.c | 68
-rw-r--r--  drivers/infiniband/core/cma.c | 12
-rw-r--r--  drivers/infiniband/core/rdma_core.c | 2
-rw-r--r--  drivers/infiniband/core/ucm.c | 3
-rw-r--r--  drivers/infiniband/core/ucma.c | 11
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 68
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 6
-rw-r--r--  drivers/infiniband/core/uverbs_uapi.c | 1
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c | 93
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_fp.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 6
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c | 6
-rw-r--r--  drivers/infiniband/hw/hfi1/pcie.c | 11
-rw-r--r--  drivers/infiniband/hw/hfi1/pio.c | 51
-rw-r--r--  drivers/infiniband/hw/hfi1/pio.h | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/user_sdma.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c | 8
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c | 8
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c | 5
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 12
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 2
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 6
-rw-r--r--  drivers/input/evdev.c | 1
-rw-r--r--  drivers/input/joystick/xpad.c | 3
-rw-r--r--  drivers/input/keyboard/atakbd.c | 74
-rw-r--r--  drivers/input/misc/uinput.c | 3
-rw-r--r--  drivers/input/mouse/elan_i2c_core.c | 1
-rw-r--r--  drivers/input/mouse/elantech.c | 2
-rw-r--r--  drivers/input/mousedev.c | 1
-rw-r--r--  drivers/input/serio/i8042.c | 29
-rw-r--r--  drivers/input/touchscreen/egalax_ts.c | 6
-rw-r--r--  drivers/iommu/amd_iommu.c | 8
-rw-r--r--  drivers/iommu/intel-iommu.c | 6
-rw-r--r--  drivers/iommu/intel-pasid.h | 2
-rw-r--r--  drivers/iommu/rockchip-iommu.c | 6
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 4
-rw-r--r--  drivers/lightnvm/Kconfig | 3
-rw-r--r--  drivers/lightnvm/core.c | 334
-rw-r--r--  drivers/lightnvm/pblk-cache.c | 1
-rw-r--r--  drivers/lightnvm/pblk-core.c | 587
-rw-r--r--  drivers/lightnvm/pblk-gc.c | 11
-rw-r--r--  drivers/lightnvm/pblk-init.c | 321
-rw-r--r--  drivers/lightnvm/pblk-map.c | 13
-rw-r--r--  drivers/lightnvm/pblk-rb.c | 110
-rw-r--r--  drivers/lightnvm/pblk-read.c | 86
-rw-r--r--  drivers/lightnvm/pblk-recovery.c | 471
-rw-r--r--  drivers/lightnvm/pblk-rl.c | 5
-rw-r--r--  drivers/lightnvm/pblk-sysfs.c | 12
-rw-r--r--  drivers/lightnvm/pblk-trace.h | 145
-rw-r--r--  drivers/lightnvm/pblk-write.c | 90
-rw-r--r--  drivers/lightnvm/pblk.h | 221
-rw-r--r--  drivers/md/bcache/alloc.c | 2
-rw-r--r--  drivers/md/bcache/bcache.h | 3
-rw-r--r--  drivers/md/bcache/btree.c | 2
-rw-r--r--  drivers/md/bcache/closure.h | 3
-rw-r--r--  drivers/md/bcache/debug.c | 2
-rw-r--r--  drivers/md/bcache/extents.c | 2
-rw-r--r--  drivers/md/bcache/journal.c | 6
-rw-r--r--  drivers/md/bcache/request.c | 9
-rw-r--r--  drivers/md/bcache/request.h | 2
-rw-r--r--  drivers/md/bcache/super.c | 121
-rw-r--r--  drivers/md/bcache/sysfs.c | 2
-rw-r--r--  drivers/md/dm-cache-metadata.c | 4
-rw-r--r--  drivers/md/dm-cache-target.c | 14
-rw-r--r--  drivers/md/dm-crypt.c | 10
-rw-r--r--  drivers/md/dm-flakey.c | 2
-rw-r--r--  drivers/md/dm-integrity.c | 7
-rw-r--r--  drivers/md/dm-linear.c | 8
-rw-r--r--  drivers/md/dm-mpath.c | 14
-rw-r--r--  drivers/md/dm-raid.c | 154
-rw-r--r--  drivers/md/dm-thin-metadata.c | 34
-rw-r--r--  drivers/md/dm-thin.c | 73
-rw-r--r--  drivers/md/dm-verity-target.c | 24
-rw-r--r--  drivers/md/dm.c | 27
-rw-r--r--  drivers/md/raid0.c | 2
-rw-r--r--  drivers/media/i2c/mt9v111.c | 41
-rw-r--r--  drivers/media/platform/Kconfig | 2
-rw-r--r--  drivers/media/platform/qcom/camss/camss-csid.c | 1
-rw-r--r--  drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c | 1
-rw-r--r--  drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c | 1
-rw-r--r--  drivers/media/platform/qcom/camss/camss-csiphy.c | 1
-rw-r--r--  drivers/media/platform/qcom/camss/camss-ispif.c | 5
-rw-r--r--  drivers/media/platform/qcom/camss/camss-vfe-4-1.c | 1
-rw-r--r--  drivers/media/platform/qcom/camss/camss-vfe-4-7.c | 1
-rw-r--r--  drivers/media/platform/qcom/camss/camss.c | 15
-rw-r--r--  drivers/media/usb/dvb-usb-v2/af9035.c | 6
-rw-r--r--  drivers/media/v4l2-core/v4l2-event.c | 38
-rw-r--r--  drivers/media/v4l2-core/v4l2-fh.c | 2
-rw-r--r--  drivers/memory/ti-aemif.c | 2
-rw-r--r--  drivers/memstick/core/ms_block.c | 2
-rw-r--r--  drivers/memstick/core/mspro_block.c | 2
-rw-r--r--  drivers/mfd/omap-usb-host.c | 11
-rw-r--r--  drivers/misc/hmc6352.c | 2
-rw-r--r--  drivers/misc/ibmvmc.c | 2
-rw-r--r--  drivers/misc/mei/bus.c | 12
-rw-r--r--  drivers/misc/mei/client.c | 2
-rw-r--r--  drivers/misc/mei/hbm.c | 9
-rw-r--r--  drivers/mmc/core/block.c | 12
-rw-r--r--  drivers/mmc/core/host.c | 2
-rw-r--r--  drivers/mmc/core/slot-gpio.c | 2
-rw-r--r--  drivers/mmc/host/meson-mx-sdio.c | 8
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c | 1
-rw-r--r--  drivers/mmc/host/renesas_sdhi_sys_dmac.c | 3
-rw-r--r--  drivers/mtd/devices/m25p80.c | 26
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 102
-rw-r--r--  drivers/mtd/mtdpart.c | 5
-rw-r--r--  drivers/mtd/nand/raw/denali.c | 6
-rw-r--r--  drivers/mtd/nand/raw/marvell_nand.c | 4
-rw-r--r--  drivers/mux/adgs1408.c | 2
-rw-r--r--  drivers/net/appletalk/ipddp.c | 8
-rw-r--r--  drivers/net/bonding/bond_main.c | 76
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 4
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 14
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.h | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_atu.c | 2
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c | 24
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.c | 14
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.h | 8
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 126
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.h | 11
-rw-r--r--  drivers/net/ethernet/amd/declance.c | 10
-rw-r--r--  drivers/net/ethernet/apple/bmac.c | 4
-rw-r--r--  drivers/net/ethernet/apple/mace.c | 4
-rw-r--r--  drivers/net/ethernet/apple/macmace.c | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 32
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 28
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 16
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 54
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 20
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 7
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 9
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 17
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 1
-rw-r--r--  drivers/net/ethernet/cirrus/ep93xx_eth.c | 2
-rw-r--r--  drivers/net/ethernet/cirrus/mac89x0.c | 4
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 4
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 24
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 48
-rw-r--r--  drivers/net/ethernet/hp/hp100.c | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c | 20
-rw-r--r--  drivers/net/ethernet/i825xx/ether1.c | 5
-rw-r--r--  drivers/net/ethernet/i825xx/lib82596.c | 4
-rw-r--r--  drivers/net/ethernet/i825xx/sun3_82586.c | 6
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 14
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 15
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 16
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k.h | 3
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 3
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 22
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 26
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 27
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 30
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 25
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 37
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 21
-rw-r--r--  drivers/net/ethernet/lantiq_etop.c | 1
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 13
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 40
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 43
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 65
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 76
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 16
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c | 6
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 6
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_board.c | 12
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 57
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 11
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 35
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 23
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 45
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 15
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 24
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.c | 15
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 12
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 45
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_7k.c | 76
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.c | 110
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.h | 5
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 7
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 108
-rw-r--r--  drivers/net/ethernet/renesas/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/renesas/Makefile | 1
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h | 5
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 11
-rw-r--r--  drivers/net/ethernet/renesas/ravb_ptp.c | 8
-rw-r--r--  drivers/net/ethernet/seeq/ether3.c | 5
-rw-r--r--  drivers/net/ethernet/seeq/sgiseeq.c | 3
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 26
-rw-r--r--  drivers/net/ethernet/sfc/falcon/efx.c | 26
-rw-r--r--  drivers/net/ethernet/sgi/ioc3-eth.c | 4
-rw-r--r--  drivers/net/ethernet/sgi/meth.c | 2
-rw-r--r--  drivers/net/ethernet/socionext/netsec.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 238
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 5
-rw-r--r--  drivers/net/ethernet/ti/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/wiznet/w5100.c | 2
-rw-r--r--  drivers/net/ethernet/wiznet/w5300.c | 2
-rw-r--r--  drivers/net/geneve.c | 14
-rw-r--r--  drivers/net/hamradio/yam.c | 4
-rw-r--r--  drivers/net/hyperv/netvsc.c | 3
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 67
-rw-r--r--  drivers/net/ieee802154/adf7242.c | 3
-rw-r--r--  drivers/net/ieee802154/ca8210.c | 6
-rw-r--r--  drivers/net/ieee802154/mcr20a.c | 8
-rw-r--r--  drivers/net/phy/phy_device.c | 12
-rw-r--r--  drivers/net/phy/phylink.c | 48
-rw-r--r--  drivers/net/phy/sfp-bus.c | 4
-rw-r--r--  drivers/net/phy/sfp.c | 9
-rw-r--r--  drivers/net/ppp/pppoe.c | 3
-rw-r--r--  drivers/net/team/team.c | 6
-rw-r--r--  drivers/net/tun.c | 80
-rw-r--r--  drivers/net/usb/asix_common.c | 3
-rw-r--r--  drivers/net/usb/ax88179_178a.c | 3
-rw-r--r--  drivers/net/usb/lan78xx.c | 17
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 45
-rw-r--r--  drivers/net/usb/r8152.c | 3
-rw-r--r--  drivers/net/usb/smsc75xx.c | 4
-rw-r--r--  drivers/net/usb/smsc95xx.c | 3
-rw-r--r--  drivers/net/usb/sr9800.c | 3
-rw-r--r--  drivers/net/veth.c | 4
-rw-r--r--  drivers/net/virtio_net.c | 19
-rw-r--r--  drivers/net/vxlan.c | 15
-rw-r--r--  drivers/net/wimax/i2400m/control.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/b43/dma.c | 6
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/1000.c | 1
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 36
-rw-r--r--  drivers/net/wireless/marvell/libertas/if_sdio.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/main.c | 3
-rw-r--r--  drivers/net/wireless/mediatek/mt76/usb.c | 10
-rw-r--r--  drivers/net/xen-netback/common.h | 3
-rw-r--r--  drivers/net/xen-netback/hash.c | 51
-rw-r--r--  drivers/net/xen-netback/interface.c | 3
-rw-r--r--  drivers/net/xen-netfront.c | 32
-rw-r--r--  drivers/nvdimm/blk.c | 2
-rw-r--r--  drivers/nvdimm/btt.c | 2
-rw-r--r--  drivers/nvdimm/pmem.c | 2
-rw-r--r--  drivers/nvme/host/core.c | 49
-rw-r--r--  drivers/nvme/host/fabrics.c | 37
-rw-r--r--  drivers/nvme/host/fabrics.h | 2
-rw-r--r--  drivers/nvme/host/fc.c | 153
-rw-r--r--  drivers/nvme/host/lightnvm.c | 137
-rw-r--r--  drivers/nvme/host/multipath.c | 85
-rw-r--r--  drivers/nvme/host/nvme.h | 35
-rw-r--r--  drivers/nvme/host/pci.c | 9
-rw-r--r--  drivers/nvme/host/rdma.c | 78
-rw-r--r--  drivers/nvme/host/trace.h | 28
-rw-r--r--  drivers/nvme/target/admin-cmd.c | 8
-rw-r--r--  drivers/nvme/target/core.c | 3
-rw-r--r--  drivers/nvme/target/discovery.c | 6
-rw-r--r--  drivers/nvme/target/fc.c | 136
-rw-r--r--  drivers/nvme/target/fcloop.c | 1
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c | 9
-rw-r--r--  drivers/nvme/target/io-cmd-file.c | 3
-rw-r--r--  drivers/nvme/target/nvmet.h | 1
-rw-r--r--  drivers/nvme/target/rdma.c | 46
-rw-r--r--  drivers/of/base.c | 3
-rw-r--r--  drivers/of/unittest.c | 26
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.c | 8
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.h | 3
-rw-r--r--  drivers/pci/controller/pci-hyperv.c | 39
-rw-r--r--  drivers/pci/controller/pci-mvebu.c | 52
-rw-r--r--  drivers/pci/controller/pcie-cadence.c | 4
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 11
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 18
-rw-r--r--  drivers/pci/pci.c | 30
-rw-r--r--  drivers/pci/probe.c | 5
-rw-r--r--  drivers/pci/quirks.c | 6
-rw-r--r--  drivers/pci/switch/switchtec.c | 4
-rw-r--r--  drivers/perf/arm_pmu.c | 8
-rw-r--r--  drivers/perf/arm_pmu_platform.c | 6
-rw-r--r--  drivers/pinctrl/cirrus/pinctrl-madera-core.c | 2
-rw-r--r--  drivers/pinctrl/intel/pinctrl-cannonlake.c | 35
-rw-r--r--  drivers/pinctrl/intel/pinctrl-intel.c | 143
-rw-r--r--  drivers/pinctrl/pinctrl-amd.c | 33
-rw-r--r--  drivers/pinctrl/pinctrl-ingenic.c | 4
-rw-r--r--  drivers/pinctrl/pinctrl-mcp23s08.c | 13
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-msm.c | 24
-rw-r--r--  drivers/platform/chrome/cros_ec_proto.c | 2
-rw-r--r--  drivers/platform/x86/alienware-wmi.c | 1
-rw-r--r--  drivers/platform/x86/dell-smbios-wmi.c | 1
-rw-r--r--  drivers/ptp/ptp_chardev.c | 4
-rw-r--r--  drivers/regulator/bd71837-regulator.c | 19
-rw-r--r--  drivers/regulator/core.c | 4
-rw-r--r--  drivers/regulator/of_regulator.c | 2
-rw-r--r--  drivers/s390/block/dasd_genhd.c | 2
-rw-r--r--  drivers/s390/block/dcssblk.c | 2
-rw-r--r--  drivers/s390/block/scm_blk.c | 2
-rw-r--r--  drivers/s390/char/sclp_early_core.c | 11
-rw-r--r--  drivers/s390/cio/vfio_ccw_cp.c | 2
-rw-r--r--  drivers/s390/cio/vfio_ccw_drv.c | 24
-rw-r--r--  drivers/s390/cio/vfio_ccw_fsm.c | 6
-rw-r--r--  drivers/s390/cio/vfio_ccw_ops.c | 4
-rw-r--r--  drivers/s390/cio/vfio_ccw_private.h | 2
-rw-r--r--  drivers/s390/crypto/ap_bus.c | 86
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 13
-rw-r--r--  drivers/s390/net/qeth_core_mpc.c | 33
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h | 4
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 2
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 2
-rw-r--r--  drivers/sbus/char/openprom.c | 11
-rw-r--r--  drivers/sbus/char/oradax.c | 3
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 5
-rw-r--r--  drivers/scsi/ipr.c | 106
-rw-r--r--  drivers/scsi/ipr.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 15
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 11
-rw-r--r--  drivers/scsi/qedi/qedi.h | 7
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 29
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 4
-rw-r--r--  drivers/scsi/scsi_lib.c | 11
-rw-r--r--  drivers/scsi/scsi_pm.c | 1
-rw-r--r--  drivers/scsi/sd.c | 9
-rw-r--r--  drivers/scsi/sr.c | 3
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 7
-rw-r--r--  drivers/soc/fsl/qbman/bman_ccsr.c | 11
-rw-r--r--  drivers/soc/fsl/qbman/qman.c | 3
-rw-r--r--  drivers/soc/fsl/qbman/qman_ccsr.c | 11
-rw-r--r--  drivers/soc/fsl/qbman/qman_portal.c | 8
-rw-r--r--  drivers/soc/fsl/qe/ucc.c | 2
-rw-r--r--  drivers/soundwire/stream.c | 23
-rw-r--r--  drivers/spi/spi-fsl-dspi.c | 6
-rw-r--r--  drivers/spi/spi-gpio.c | 4
-rw-r--r--  drivers/spi/spi-rspi.c | 34
-rw-r--r--  drivers/spi/spi-sh-msiof.c | 28
-rw-r--r--  drivers/spi/spi-tegra20-slink.c | 31
-rw-r--r--  drivers/spi/spi.c | 13
-rw-r--r--  drivers/staging/erofs/Kconfig | 2
-rw-r--r--  drivers/staging/erofs/super.c | 4
-rw-r--r--  drivers/staging/fbtft/TODO | 4
-rw-r--r--  drivers/staging/gasket/TODO | 13
-rw-r--r--  drivers/staging/media/mt9t031/Kconfig | 6
-rw-r--r--  drivers/staging/vboxvideo/vbox_drv.c | 7
-rw-r--r--  drivers/staging/vboxvideo/vbox_mode.c | 5
-rw-r--r--  drivers/staging/wilc1000/Makefile | 3
-rw-r--r--  drivers/staging/wilc1000/linux_wlan.c | 6
-rw-r--r--  drivers/staging/wilc1000/wilc_debugfs.c | 7
-rw-r--r--  drivers/staging/wilc1000/wilc_wlan.c | 6
-rw-r--r--  drivers/staging/wilc1000/wilc_wlan_if.h | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 31
-rw-r--r--  drivers/target/iscsi/iscsi_target_auth.c | 45
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 149
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.h | 2
-rw-r--r--  drivers/target/target_core_spc.c | 6
-rw-r--r--  drivers/thunderbolt/icm.c | 49
-rw-r--r--  drivers/thunderbolt/nhi.c | 2
-rw-r--r--  drivers/tty/hvc/hvc_console.c | 38
-rw-r--r--  drivers/tty/serial/8250/8250_dw.c | 4
-rw-r--r--  drivers/tty/serial/cpm_uart/cpm_uart_core.c | 10
-rw-r--r--  drivers/tty/serial/fsl_lpuart.c | 3
-rw-r--r--  drivers/tty/serial/imx.c | 8
-rw-r--r--  drivers/tty/serial/mvebu-uart.c | 1
-rw-r--r--  drivers/tty/serial/qcom_geni_serial.c | 4
-rw-r--r--  drivers/tty/serial/sh-sci.c | 56
-rw-r--r--  drivers/tty/tty_io.c | 11
-rw-r--r--  drivers/tty/vt/vt_ioctl.c | 4
-rw-r--r--  drivers/usb/class/cdc-acm.c | 95
-rw-r--r--  drivers/usb/class/cdc-acm.h | 1
-rw-r--r--  drivers/usb/common/common.c | 25
-rw-r--r--  drivers/usb/common/roles.c | 15
-rw-r--r--  drivers/usb/core/devio.c | 24
-rw-r--r--  drivers/usb/core/driver.c | 28
-rw-r--r--  drivers/usb/core/hcd-pci.c | 2
-rw-r--r--  drivers/usb/core/message.c | 11
-rw-r--r--  drivers/usb/core/of.c | 26
-rw-r--r--  drivers/usb/core/quirks.c | 10
-rw-r--r--  drivers/usb/core/usb.c | 2
-rw-r--r--  drivers/usb/dwc2/platform.c | 4
-rw-r--r--  drivers/usb/dwc3/dwc3-of-simple.c | 10
-rw-r--r--  drivers/usb/dwc3/dwc3-pci.c | 4
-rw-r--r--  drivers/usb/dwc3/gadget.c | 1
-rw-r--r--  drivers/usb/gadget/function/f_mass_storage.c | 3
-rw-r--r--  drivers/usb/gadget/udc/fotg210-udc.c | 15
-rw-r--r--  drivers/usb/gadget/udc/net2280.c | 16
-rw-r--r--  drivers/usb/gadget/udc/renesas_usb3.c | 5
-rw-r--r--  drivers/usb/host/u132-hcd.c | 2
-rw-r--r--  drivers/usb/host/xhci-mem.c | 4
-rw-r--r--  drivers/usb/host/xhci-mtk.c | 4
-rw-r--r--  drivers/usb/host/xhci-pci.c | 8
-rw-r--r--  drivers/usb/host/xhci-plat.c | 27
-rw-r--r--  drivers/usb/host/xhci.c | 30
-rw-r--r--  drivers/usb/misc/uss720.c | 4
-rw-r--r--  drivers/usb/misc/yurex.c | 8
-rw-r--r--  drivers/usb/mtu3/mtu3_core.c | 6
-rw-r--r--  drivers/usb/mtu3/mtu3_hw_regs.h | 1
-rw-r--r--  drivers/usb/musb/musb_dsps.c | 12
-rw-r--r--  drivers/usb/roles/intel-xhci-usb-role-switch.c | 2
-rw-r--r--  drivers/usb/serial/io_ti.h | 2
-rw-r--r--  drivers/usb/serial/option.c | 15
-rw-r--r--  drivers/usb/serial/ti_usb_3410_5052.c | 2
-rw-r--r--  drivers/usb/serial/usb-serial-simple.c | 3
-rw-r--r--  drivers/usb/storage/scsiglue.c | 9
-rw-r--r--  drivers/usb/storage/uas.c | 21
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 7
-rw-r--r--  drivers/usb/typec/bus.c | 7
-rw-r--r--  drivers/usb/typec/class.c | 1
-rw-r--r--  drivers/usb/typec/mux.c | 17
-rw-r--r--  drivers/usb/usbip/vhci_hcd.c | 57
-rw-r--r--  drivers/video/fbdev/aty/atyfb.h | 3
-rw-r--r--  drivers/video/fbdev/aty/atyfb_base.c | 7
-rw-r--r--  drivers/video/fbdev/aty/mach64_ct.c | 10
-rw-r--r--  drivers/video/fbdev/efifb.c | 6
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c | 5
-rw-r--r--  drivers/video/fbdev/pxa168fb.c | 6
-rw-r--r--  drivers/video/fbdev/stifb.c | 2
-rw-r--r--  drivers/xen/Kconfig | 10
-rw-r--r--  drivers/xen/biomerge.c | 3
-rw-r--r--  drivers/xen/cpu_hotplug.c | 15
-rw-r--r--  drivers/xen/events/events_base.c | 2
-rw-r--r--  drivers/xen/gntdev.c | 26
-rw-r--r--  drivers/xen/grant-table.c | 27
-rw-r--r--  drivers/xen/manage.c | 6
-rw-r--r--  drivers/xen/mem-reservation.c | 4
-rw-r--r--  drivers/xen/xen-acpi-pad.c | 1
-rw-r--r--  drivers/xen/xen-balloon.c | 3
-rw-r--r--  fs/afs/cell.c | 17
-rw-r--r--  fs/afs/dynroot.c | 2
-rw-r--r--  fs/afs/internal.h | 4
-rw-r--r--  fs/afs/main.c | 2
-rw-r--r--  fs/afs/proc.c | 7
-rw-r--r--  fs/afs/rxrpc.c | 2
-rw-r--r--  fs/buffer.c | 10
-rw-r--r--  fs/cachefiles/namei.c | 2
-rw-r--r--  fs/cifs/Kconfig | 1
-rw-r--r--  fs/cifs/cifsglob.h | 1
-rw-r--r--  fs/cifs/cifssmb.c | 11
-rw-r--r--  fs/cifs/connect.c | 13
-rw-r--r--  fs/cifs/misc.c | 8
-rw-r--r--  fs/cifs/readdir.c | 11
-rw-r--r--  fs/cifs/smb2ops.c | 2
-rw-r--r--  fs/cifs/smb2pdu.c | 29
-rw-r--r--  fs/cifs/transport.c | 21
-rw-r--r--  fs/dax.c | 27
-rw-r--r--  fs/ext2/inode.c | 2
-rw-r--r--  fs/ext4/dir.c | 20
-rw-r--r--  fs/ext4/ext4.h | 20
-rw-r--r--  fs/ext4/inline.c | 4
-rw-r--r--  fs/ext4/inode.c | 20
-rw-r--r--  fs/ext4/mmp.c | 1
-rw-r--r--  fs/ext4/namei.c | 6
-rw-r--r--  fs/ext4/page-io.c | 2
-rw-r--r--  fs/ext4/resize.c | 23
-rw-r--r--  fs/ext4/super.c | 4
-rw-r--r--  fs/fat/fatent.c | 1
-rw-r--r--  fs/fscache/cookie.c | 31
-rw-r--r--  fs/fscache/internal.h | 1
-rw-r--r--  fs/fscache/main.c | 4
-rw-r--r--  fs/gfs2/bmap.c | 2
-rw-r--r--  fs/ioctl.c | 2
-rw-r--r--  fs/iomap.c | 2
-rw-r--r--  fs/nfs/nfs4proc.c | 31
-rw-r--r--  fs/nfs/nfs4state.c | 2
-rw-r--r--  fs/nfs/nfs4trace.h | 4
-rw-r--r--  fs/nfs/pnfs.c | 26
-rw-r--r--  fs/nfsd/vfs.c | 3
-rw-r--r--  fs/ocfs2/buffer_head_io.c | 1
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 4
-rw-r--r--  fs/ocfs2/dlmglue.c | 2
-rw-r--r--  fs/ocfs2/refcounttree.c | 16
-rw-r--r--  fs/overlayfs/copy_up.c | 2
-rw-r--r--  fs/overlayfs/file.c | 25
-rw-r--r--  fs/overlayfs/inode.c | 10
-rw-r--r--  fs/overlayfs/namei.c | 2
-rw-r--r--  fs/overlayfs/overlayfs.h | 4
-rw-r--r--  fs/overlayfs/super.c | 26
-rw-r--r--  fs/overlayfs/util.c | 3
-rw-r--r--  fs/proc/base.c | 14
-rw-r--r--  fs/proc/kcore.c | 1
-rw-r--r--  fs/pstore/ram.c | 29
-rw-r--r--  fs/pstore/ram_core.c | 17
-rw-r--r--  fs/read_write.c | 17
-rw-r--r--  fs/ubifs/super.c | 11
-rw-r--r--  fs/ubifs/xattr.c | 24
-rw-r--r--  fs/xattr.c | 24
-rw-r--r--  fs/xfs/libxfs/xfs_attr.c | 28
-rw-r--r--  fs/xfs/libxfs/xfs_attr_remote.c | 10
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c | 24
-rw-r--r--  fs/xfs/libxfs/xfs_format.h | 2
-rw-r--r--  fs/xfs/libxfs/xfs_inode_buf.c | 30
-rw-r--r--  fs/xfs/scrub/alloc.c | 1
-rw-r--r--  fs/xfs/scrub/inode.c | 4
-rw-r--r--  fs/xfs/xfs_bmap_util.c | 20
-rw-r--r--  fs/xfs/xfs_buf_item.c | 119
-rw-r--r--  fs/xfs/xfs_buf_item.h | 1
-rw-r--r--  fs/xfs/xfs_inode.c | 10
-rw-r--r--  fs/xfs/xfs_iops.c | 12
-rw-r--r--  fs/xfs/xfs_log_recover.c | 10
-rw-r--r--  fs/xfs/xfs_reflink.c | 337
-rw-r--r--  fs/xfs/xfs_trace.h | 1
-rw-r--r--  fs/xfs/xfs_trans.c | 10
-rw-r--r--  fs/xfs/xfs_trans_buf.c | 99
-rw-r--r--  include/asm-generic/io.h | 3
-rw-r--r--  include/asm-generic/tlb.h | 86
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 6
-rw-r--r--  include/drm/drm_atomic.h | 11
-rw-r--r--  include/drm/drm_client.h | 5
-rw-r--r--  include/drm/drm_drv.h | 2
-rw-r--r--  include/drm/drm_edid.h | 6
-rw-r--r--  include/drm/drm_panel.h | 1
-rw-r--r--  include/linux/amifd.h | 63
-rw-r--r--  include/linux/amifdreg.h | 82
-rw-r--r--  include/linux/bio.h | 78
-rw-r--r--  include/linux/blk-cgroup.h | 145
-rw-r--r--  include/linux/blk-mq.h | 4
-rw-r--r--  include/linux/blk-pm.h | 24
-rw-r--r--  include/linux/blk_types.h | 1
-rw-r--r--  include/linux/blkdev.h | 166
-rw-r--r--  include/linux/bvec.h | 3
-rw-r--r--  include/linux/cgroup-defs.h | 1
-rw-r--r--  include/linux/cgroup.h | 2
-rw-r--r--  include/linux/compat.h | 3
-rw-r--r--  include/linux/compiler-gcc.h | 14
-rw-r--r--  include/linux/compiler_types.h | 8
-rw-r--r--  include/linux/elevator.h | 2
-rw-r--r--  include/linux/fpga/fpga-mgr.h | 20
-rw-r--r--  include/linux/fs.h | 22
-rw-r--r--  include/linux/genhd.h | 10
-rw-r--r--  include/linux/gpio/driver.h | 7
-rw-r--r--  include/linux/hid.h | 1
-rw-r--r--  include/linux/huge_mm.h | 2
-rw-r--r--  include/linux/hugetlb.h | 14
-rw-r--r--  include/linux/kvm_host.h | 2
-rw-r--r--  include/linux/lightnvm.h | 166
-rw-r--r--  include/linux/mfd/da9063/pdata.h | 16
-rw-r--r--  include/linux/mfd/rohm-bd718x7.h | 33
-rw-r--r--  include/linux/mlx5/driver.h | 16
-rw-r--r--  include/linux/mlx5/transobj.h | 2
-rw-r--r--  include/linux/mm.h | 6
-rw-r--r--  include/linux/mm_types.h | 2
-rw-r--r--  include/linux/mm_types_task.h | 2
-rw-r--r--  include/linux/mmzone.h | 10
-rw-r--r--  include/linux/mod_devicetable.h | 1
-rw-r--r--  include/linux/module.h | 3
-rw-r--r--  include/linux/mtd/blktrans.h | 5
-rw-r--r--  include/linux/netdevice.h | 10
-rw-r--r--  include/linux/netfilter.h | 2
-rw-r--r--  include/linux/netpoll.h | 5
-rw-r--r--  include/linux/nvme.h | 1
-rw-r--r--  include/linux/pci.h | 3
-rw-r--r--  include/linux/percpu-refcount.h | 1
-rw-r--r--  include/linux/perf/arm_pmu.h | 1
-rw-r--r--  include/linux/regulator/machine.h | 6
-rw-r--r--  include/linux/serial_sci.h | 1
-rw-r--r--  include/linux/spi/spi-mem.h | 7
-rw-r--r--  include/linux/stmmac.h | 1
-rw-r--r--  include/linux/suspend.h | 2
-rw-r--r--  include/linux/timekeeping.h | 4
-rw-r--r--  include/linux/tracepoint-defs.h | 6
-rw-r--r--  include/linux/tracepoint.h | 36
-rw-r--r--  include/linux/uio.h | 2
-rw-r--r--  include/linux/vga_switcheroo.h | 3
-rw-r--r--  include/linux/virtio_net.h | 18
-rw-r--r--  include/linux/vm_event_item.h | 1
-rw-r--r--  include/linux/vmacache.h | 5
-rw-r--r--  include/linux/writeback.h | 5
-rw-r--r--  include/media/v4l2-fh.h | 4
-rw-r--r--  include/net/bonding.h | 7
-rw-r--r--  include/net/cfg80211.h | 2
-rw-r--r--  include/net/devlink.h | 12
-rw-r--r--  include/net/dst.h | 10
-rw-r--r--  include/net/inet_sock.h | 6
-rw-r--r--  include/net/ip6_fib.h | 4
-rw-r--r--  include/net/ip_fib.h | 1
-rw-r--r--  include/net/netfilter/nf_conntrack_timeout.h | 2
-rw-r--r--  include/net/netlink.h | 2
-rw-r--r--  include/net/nfc/hci.h | 2
-rw-r--r--  include/net/sctp/sm.h | 2
-rw-r--r--  include/net/sctp/structs.h | 2
-rw-r--r--  include/net/tls.h | 19
-rw-r--r--  include/soc/fsl/bman.h | 8
-rw-r--r--  include/soc/fsl/qman.h | 8
-rw-r--r--  include/sound/hdaudio.h | 1
-rw-r--r--  include/sound/soc-dapm.h | 1
-rw-r--r--  include/trace/events/kyber.h | 96
-rw-r--r--  include/trace/events/migrate.h | 27
-rw-r--r--  include/trace/events/rxrpc.h | 5
-rw-r--r--  include/uapi/asm-generic/hugetlb_encode.h | 2
-rw-r--r--  include/uapi/linux/keyctl.h | 2
-rw-r--r--  include/uapi/linux/kvm.h | 1
-rw-r--r--  include/uapi/linux/memfd.h | 2
-rw-r--r--  include/uapi/linux/mman.h | 2
-rw-r--r--  include/uapi/linux/perf_event.h | 2
-rw-r--r--  include/uapi/linux/sctp.h | 1
-rw-r--r--  include/uapi/linux/shm.h | 2
-rw-r--r--  include/uapi/linux/smc_diag.h | 25
-rw-r--r--  include/uapi/linux/udp.h | 1
-rw-r--r--  include/uapi/sound/skl-tplg-interface.h | 106
-rw-r--r--  include/xen/mem-reservation.h | 7
-rw-r--r--  include/xen/xen.h | 4
-rw-r--r--  ipc/shm.c | 2
-rw-r--r--  kernel/bpf/btf.c | 2
-rw-r--r--  kernel/bpf/local_storage.c | 5
-rw-r--r--  kernel/bpf/sockmap.c | 91
-rw-r--r--  kernel/bpf/verifier.c | 12
-rw-r--r--  kernel/bpf/xskmap.c | 10
-rw-r--r--  kernel/cgroup/cgroup.c | 73
-rw-r--r--  kernel/cpu.c | 11
-rw-r--r--  kernel/events/core.c | 32
-rw-r--r--  kernel/events/hw_breakpoint.c | 13
-rw-r--r--  kernel/jump_label.c | 2
-rw-r--r--  kernel/locking/lockdep.c | 1
-rw-r--r--  kernel/locking/mutex.c | 3
-rw-r--r--  kernel/locking/test-ww_mutex.c | 12
-rw-r--r--  kernel/pid.c | 2
-rw-r--r--  kernel/power/suspend.c | 6
-rw-r--r--  kernel/printk/printk.c | 12
-rw-r--r--  kernel/sched/core.c | 2
-rw-r--r--  kernel/sched/deadline.c | 2
-rw-r--r--  kernel/sched/debug.c | 6
-rw-r--r--  kernel/sched/fair.c | 154
-rw-r--r--  kernel/sched/sched.h | 5
-rw-r--r--  kernel/sched/topology.c | 5
-rw-r--r--  kernel/signal.c | 14
-rw-r--r--  kernel/sys.c | 3
-rw-r--r--  kernel/time/clocksource.c | 40
-rw-r--r--  kernel/trace/blktrace.c | 4
-rw-r--r--  kernel/trace/preemptirq_delay_test.c | 10
-rw-r--r--  kernel/trace/ring_buffer.c | 2
-rw-r--r--  kernel/trace/trace_events_hist.c | 32
-rw-r--r--  kernel/tracepoint.c | 24
-rw-r--r--  lib/Makefile | 1
-rw-r--r--  lib/bch.c | 17
-rw-r--r--  lib/crc32.c | 11
-rw-r--r--  lib/percpu-refcount.c | 28
-rw-r--r--  lib/test_ida.c | 4
-rw-r--r--  lib/vsprintf.c | 2
-rw-r--r--  lib/xz/xz_crc32.c | 1
-rw-r--r--  lib/xz/xz_private.h | 4
-rw-r--r--  mm/Kconfig | 1
-rw-r--r--  mm/Makefile | 9
-rw-r--r--  mm/debug.c | 4
-rw-r--r--  mm/fadvise.c | 81
-rw-r--r--  mm/gup_benchmark.c | 3
-rw-r--r--  mm/huge_memory.c | 18
-rw-r--r--  mm/hugetlb.c | 90
-rw-r--r--  mm/madvise.c | 2
-rw-r--r--  mm/memory.c | 247
-rw-r--r--  mm/migrate.c | 62
-rw-r--r--  mm/mmap.c | 2
-rw-r--r--  mm/mmu_gather.c | 261
-rw-r--r--  mm/mremap.c | 30
-rw-r--r--  mm/page_alloc.c | 12
-rw-r--r--  mm/page_io.c | 2
-rw-r--r--  mm/percpu.c | 1
-rw-r--r--  mm/readahead.c | 45
-rw-r--r--  mm/rmap.c | 42
-rw-r--r--  mm/shmem.c | 2
-rw-r--r--  mm/vmacache.c | 38
-rw-r--r--  mm/vmscan.c | 18
-rw-r--r--  mm/vmstat.c | 4
-rw-r--r--  net/batman-adv/bat_v_elp.c | 10
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 10
-rw-r--r--  net/batman-adv/gateway_client.c | 11
-rw-r--r--  net/batman-adv/main.h | 2
-rw-r--r--  net/batman-adv/network-coding.c | 27
-rw-r--r--  net/batman-adv/soft-interface.c | 25
-rw-r--r--  net/batman-adv/sysfs.c | 30
-rw-r--r--  net/batman-adv/translation-table.c | 6
-rw-r--r--  net/batman-adv/tvlv.c | 8
-rw-r--r--  net/bluetooth/mgmt.c | 7
-rw-r--r--  net/bluetooth/smp.c | 45
-rw-r--r--  net/bluetooth/smp.h | 3
-rw-r--r--  net/bpfilter/bpfilter_kern.c | 10
-rw-r--r--  net/bridge/br_netfilter_hooks.c | 3
-rw-r--r--  net/core/dev.c | 28
-rw-r--r--  net/core/devlink.c | 46
-rw-r--r--  net/core/ethtool.c | 21
-rw-r--r--  net/core/filter.c | 3
-rw-r--r--  net/core/neighbour.c | 16
-rw-r--r--  net/core/netpoll.c | 39
-rw-r--r--  net/core/rtnetlink.c | 43
-rw-r--r--  net/core/skbuff.c | 20
-rw-r--r--  net/dccp/input.c | 4
-rw-r--r--  net/dccp/ipv4.c | 4
-rw-r--r--  net/ipv4/af_inet.c | 1
-rw-r--r--  net/ipv4/fib_frontend.c | 12
-rw-r--r--  net/ipv4/fib_semantics.c | 50
-rw-r--r--  net/ipv4/inet_connection_sock.c | 5
-rw-r--r--  net/ipv4/ip_fragment.c | 1
-rw-r--r--  net/ipv4/ip_gre.c | 5
-rw-r--r--  net/ipv4/ip_sockglue.c | 3
-rw-r--r--  net/ipv4/ip_tunnel.c | 9
-rw-r--r--  net/ipv4/ipmr_base.c | 2
-rw-r--r--  net/ipv4/netfilter/Kconfig | 8
-rw-r--r--  net/ipv4/route.c | 7
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 6
-rw-r--r--  net/ipv4/tcp.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 8
-rw-r--r--  net/ipv4/tcp_ipv4.c | 4
-rw-r--r--  net/ipv4/udp.c | 51
-rw-r--r--  net/ipv4/xfrm4_input.c | 1
-rw-r--r--  net/ipv4/xfrm4_mode_transport.c | 4
-rw-r--r--  net/ipv6/addrconf.c | 10
-rw-r--r--  net/ipv6/ip6_fib.c | 2
-rw-r--r--  net/ipv6/ip6_offload.c | 1
-rw-r--r--  net/ipv6/ip6_output.c | 6
-rw-r--r--  net/ipv6/ip6_tunnel.c | 23
-rw-r--r--  net/ipv6/mcast.c | 16
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 1
-rw-r--r--  net/ipv6/raw.c | 29
-rw-r--r--  net/ipv6/route.c | 70
-rw-r--r--  net/ipv6/udp.c | 63
-rw-r--r--  net/ipv6/xfrm6_input.c | 1
-rw-r--r--  net/ipv6/xfrm6_mode_transport.c | 4
-rw-r--r--  net/ipv6/xfrm6_output.c | 2
-rw-r--r--  net/ipv6/xfrm6_policy.c | 4
-rw-r--r--  net/iucv/af_iucv.c | 38
-rw-r--r--  net/iucv/iucv.c | 2
-rw-r--r--  net/llc/llc_conn.c | 1
-rw-r--r--  net/mac80211/cfg.c | 2
-rw-r--r--  net/mac80211/iface.c | 3
-rw-r--r--  net/mac80211/mesh.h | 3
-rw-r--r--  net/mac80211/mesh_hwmp.c | 9
-rw-r--r--  net/mac80211/status.c | 11
-rw-r--r--  net/mac80211/tdls.c | 8
-rw-r--r--  net/mac80211/tx.c | 6
-rw-r--r--  net/mpls/af_mpls.c | 6
-rw-r--r--  net/netfilter/Kconfig | 12
-rw-r--r--  net/netfilter/nf_conntrack_proto.c | 26
-rw-r--r--  net/netfilter/nf_conntrack_proto_dccp.c | 19
-rw-r--r--  net/netfilter/nf_conntrack_proto_generic.c | 8
-rw-r--r--  net/netfilter/nf_conntrack_proto_gre.c | 8
-rw-r--r--  net/netfilter/nf_conntrack_proto_icmp.c | 8
-rw-r--r--  net/netfilter/nf_conntrack_proto_icmpv6.c | 8
-rw-r--r--  net/netfilter/nf_conntrack_proto_sctp.c | 21
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 23
-rw-r--r--  net/netfilter/nf_conntrack_proto_udp.c | 21
-rw-r--r--  net/netfilter/nf_tables_api.c | 1
-rw-r--r--  net/netfilter/nfnetlink_cttimeout.c | 6
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 1
-rw-r--r--  net/netfilter/nft_ct.c | 59
-rw-r--r--  net/netfilter/nft_osf.c | 2
-rw-r--r--  net/netfilter/nft_set_rbtree.c | 28
-rw-r--r--  net/netfilter/xt_CHECKSUM.c | 22
-rw-r--r--  net/netfilter/xt_cluster.c | 14
-rw-r--r--  net/netfilter/xt_hashlimit.c | 18
-rw-r--r--  net/netfilter/xt_socket.c | 4
-rw-r--r--  net/netlabel/netlabel_unlabeled.c | 3
-rw-r--r--  net/nfc/hci/core.c | 10
-rw-r--r--  net/openvswitch/conntrack.c | 10
-rw-r--r--  net/packet/af_packet.c | 11
-rw-r--r--  net/rds/bind.c | 5
-rw-r--r--  net/rds/ib.h | 2
-rw-r--r--  net/rds/send.c | 13
-rw-r--r--  net/rxrpc/ar-internal.h | 55
-rw-r--r--  net/rxrpc/call_accept.c | 68
-rw-r--r--  net/rxrpc/call_object.c | 7
-rw-r--r--  net/rxrpc/conn_client.c | 14
-rw-r--r--  net/rxrpc/conn_event.c | 26
-rw-r--r--  net/rxrpc/conn_object.c | 14
-rw-r--r--  net/rxrpc/input.c | 329
-rw-r--r--  net/rxrpc/local_object.c | 62
-rw-r--r--  net/rxrpc/output.c | 57
-rw-r--r--  net/rxrpc/peer_event.c | 52
-rw-r--r--  net/rxrpc/peer_object.c | 77
-rw-r--r--  net/rxrpc/protocol.h | 15
-rw-r--r--  net/sched/act_ipt.c | 2
-rw-r--r--  net/sched/act_sample.c | 2
-rw-r--r--  net/sched/act_tunnel_key.c | 28
-rw-r--r--  net/sched/cls_api.c | 15
-rw-r--r--  net/sched/cls_u32.c | 10
-rw-r--r--  net/sched/sch_api.c | 27
-rw-r--r--  net/sched/sch_cake.c | 2
-rw-r--r--  net/sctp/associola.c | 3
-rw-r--r--  net/sctp/input.c | 1
-rw-r--r--  net/sctp/output.c | 6
-rw-r--r--  net/sctp/outqueue.c | 2
-rw-r--r--  net/sctp/socket.c | 17
-rw-r--r--  net/sctp/transport.c | 12
-rw-r--r--  net/smc/af_smc.c | 26
-rw-r--r--  net/smc/smc_clc.c | 14
-rw-r--r--  net/smc/smc_close.c | 14
-rw-r--r--  net/smc/smc_pnet.c | 2
-rw-r--r--  net/socket.c | 33
-rw-r--r--  net/tipc/bearer.c | 12
-rw-r--r--  net/tipc/group.c | 1
-rw-r--r--  net/tipc/link.c | 69
-rw-r--r--  net/tipc/link.h | 3
-rw-r--r--  net/tipc/name_distr.c | 4
-rw-r--r--  net/tipc/netlink_compat.c | 5
-rw-r--r--  net/tipc/node.c | 14
-rw-r--r--  net/tipc/socket.c | 36
-rw-r--r--  net/tipc/socket.h | 1
-rw-r--r--  net/tls/tls_device.c | 6
-rw-r--r--  net/tls/tls_device_fallback.c | 2
-rw-r--r--  net/tls/tls_main.c | 22
-rw-r--r--  net/tls/tls_sw.c | 27
-rw-r--r--  net/wireless/nl80211.c | 20
-rw-r--r--  net/wireless/reg.c | 8
-rw-r--r--  net/wireless/scan.c | 58
-rw-r--r--  net/wireless/wext-compat.c | 14
-rw-r--r--  net/xdp/xsk.c | 2
-rw-r--r--  net/xfrm/xfrm_input.c | 1
-rw-r--r--  net/xfrm/xfrm_interface.c | 3
-rw-r--r--  net/xfrm/xfrm_output.c | 4
-rw-r--r--  net/xfrm/xfrm_policy.c | 12
-rw-r--r--  net/xfrm/xfrm_user.c | 15
-rw-r--r--  samples/Kconfig | 1
-rw-r--r--  scripts/Makefile.build | 2
-rwxr-xr-x  scripts/depmod.sh | 5
-rw-r--r--  scripts/kconfig/Makefile | 1
-rw-r--r--  scripts/kconfig/check-pkgconfig.sh | 8
-rwxr-xr-x  scripts/kconfig/gconf-cfg.sh | 7
-rwxr-xr-x  scripts/kconfig/mconf-cfg.sh | 25
-rw-r--r--  scripts/kconfig/mconf.c | 1
-rw-r--r--  scripts/kconfig/nconf-cfg.sh | 25
-rwxr-xr-x  scripts/kconfig/qconf-cfg.sh | 7
-rwxr-xr-x  scripts/setlocalversion | 2
-rw-r--r--  scripts/subarch.include | 13
-rw-r--r--  security/Kconfig | 2
-rw-r--r--  security/keys/dh.c | 2
-rw-r--r--  sound/firewire/bebob/bebob.c | 2
-rw-r--r--  sound/firewire/bebob/bebob_maudio.c | 28
-rw-r--r--  sound/firewire/digi00x/digi00x.c | 1
-rw-r--r--  sound/firewire/fireface/ff-protocol-ff400.c | 9
-rw-r--r--  sound/firewire/fireworks/fireworks.c | 2
-rw-r--r--  sound/firewire/oxfw/oxfw.c | 10
-rw-r--r--  sound/firewire/tascam/tascam.c | 1
-rw-r--r--  sound/hda/hdac_controller.c | 15
-rw-r--r--  sound/hda/hdac_i915.c | 4
-rw-r--r--  sound/pci/emu10k1/emufx.c | 2
-rw-r--r--  sound/pci/hda/hda_intel.c | 86
-rw-r--r--  sound/pci/hda/hda_intel.h | 1
-rw-r--r--  sound/pci/hda/patch_realtek.c | 1
-rw-r--r--  sound/soc/amd/acp-pcm-dma.c | 21
-rw-r--r--  sound/soc/codecs/cs4265.c | 4
-rw-r--r--  sound/soc/codecs/max98373.c | 3
-rw-r--r--  sound/soc/codecs/rt5514.c | 8
-rw-r--r--  sound/soc/codecs/rt5682.c | 8
-rw-r--r--  sound/soc/codecs/sigmadsp.c | 3
-rw-r--r--  sound/soc/codecs/tas6424.c | 12
-rw-r--r--  sound/soc/codecs/wm8804-i2c.c | 15
-rw-r--r--  sound/soc/codecs/wm9712.c | 2
-rw-r--r--  sound/soc/intel/boards/bytcr_rt5640.c | 26
-rw-r--r--  sound/soc/intel/skylake/skl.c | 2
-rw-r--r--  sound/soc/qcom/qdsp6/q6routing.c | 4
-rw-r--r--  sound/soc/sh/rcar/adg.c | 5
-rw-r--r--  sound/soc/sh/rcar/core.c | 21
-rw-r--r--  sound/soc/sh/rcar/dma.c | 4
-rw-r--r--  sound/soc/sh/rcar/rsnd.h | 7
-rw-r--r--  sound/soc/sh/rcar/ssi.c | 16
-rw-r--r--  sound/soc/soc-core.c | 4
-rw-r--r--  sound/soc/soc-dapm.c | 4
-rw-r--r--  tools/arch/arm/include/uapi/asm/kvm.h | 13
-rw-r--r--  tools/arch/arm64/include/uapi/asm/kvm.h | 13
-rw-r--r--  tools/arch/s390/include/uapi/asm/kvm.h | 5
-rw-r--r--  tools/arch/x86/include/uapi/asm/kvm.h | 38
-rw-r--r--  tools/hv/hv_fcopy_daemon.c | 1
-rw-r--r--  tools/hv/hv_kvp_daemon.c | 2
-rw-r--r--  tools/include/linux/lockdep.h | 3
-rw-r--r--  tools/include/linux/nmi.h | 0
-rw-r--r--  tools/include/tools/libc_compat.h | 2
-rw-r--r--  tools/include/uapi/asm-generic/unistd.h | 4
-rw-r--r--  tools/include/uapi/drm/drm.h | 9
-rw-r--r--  tools/include/uapi/linux/if_link.h | 17
-rw-r--r--  tools/include/uapi/linux/kvm.h | 7
-rw-r--r--  tools/include/uapi/linux/perf_event.h | 2
-rw-r--r--  tools/include/uapi/linux/vhost.h | 18
-rwxr-xr-x  tools/kvm/kvm_stat/kvm_stat | 61
-rw-r--r--  tools/lib/api/fs/tracing_path.c | 4
-rw-r--r--  tools/lib/bpf/Build | 2
-rw-r--r--  tools/lib/bpf/libbpf.c | 20
-rw-r--r--  tools/lib/bpf/str_error.c | 18
-rw-r--r--  tools/lib/bpf/str_error.h | 6
-rw-r--r--  tools/perf/Documentation/Makefile | 2
-rw-r--r--  tools/perf/Makefile.config | 2
-rw-r--r--  tools/perf/Makefile.perf | 16
-rw-r--r--  tools/perf/arch/arm64/Makefile | 5
-rwxr-xr-x  tools/perf/arch/arm64/entry/syscalls/mksyscalltbl | 6
-rw-r--r--  tools/perf/arch/powerpc/util/sym-handling.c | 4
-rw-r--r--  tools/perf/arch/x86/include/arch-tests.h | 1
-rw-r--r--  tools/perf/arch/x86/tests/Build | 1
-rw-r--r--  tools/perf/arch/x86/tests/arch-tests.c | 6
-rw-r--r--  tools/perf/arch/x86/tests/bp-modify.c | 213
-rw-r--r--  tools/perf/builtin-report.c | 1
-rw-r--r--  tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json | 16
-rw-r--r--  tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json | 16
-rw-r--r--  tools/perf/scripts/python/export-to-postgresql.py | 9
-rw-r--r--  tools/perf/scripts/python/export-to-sqlite.py | 6
-rw-r--r--  tools/perf/util/annotate.c | 32
-rw-r--r--  tools/perf/util/annotate.h | 1
-rw-r--r--  tools/perf/util/event.c | 22
-rw-r--r--  tools/perf/util/evsel.c | 8
-rw-r--r--  tools/perf/util/machine.c | 8
-rw-r--r--  tools/perf/util/map.c | 11
-rw-r--r--  tools/perf/util/pmu.c | 13
-rw-r--r--  tools/perf/util/setup.py | 2
-rw-r--r--  tools/perf/util/srcline.c | 3
-rw-r--r--  tools/perf/util/trace-event-info.c | 2
-rw-r--r--  tools/perf/util/trace-event-parse.c | 7
-rw-r--r--  tools/testing/selftests/android/Makefile | 2
-rw-r--r--  tools/testing/selftests/android/config (renamed from tools/testing/selftests/android/ion/config) | 0
-rw-r--r--  tools/testing/selftests/android/ion/Makefile | 2
-rw-r--r--  tools/testing/selftests/bpf/test_maps.c | 10
-rw-r--r--  tools/testing/selftests/cgroup/.gitignore | 1
-rw-r--r--  tools/testing/selftests/cgroup/cgroup_util.c | 38
-rw-r--r--  tools/testing/selftests/cgroup/cgroup_util.h | 1
-rw-r--r--  tools/testing/selftests/cgroup/test_memcontrol.c | 205
-rwxr-xr-x  tools/testing/selftests/drivers/usb/usbip/usbip_test.sh | 4
-rw-r--r--  tools/testing/selftests/efivarfs/config | 1
-rw-r--r--  tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc | 80
-rw-r--r--  tools/testing/selftests/futex/functional/Makefile | 1
-rw-r--r--  tools/testing/selftests/gpio/Makefile | 7
-rw-r--r--  tools/testing/selftests/kselftest.h | 1
-rw-r--r--  tools/testing/selftests/kvm/.gitignore | 1
-rw-r--r--  tools/testing/selftests/kvm/Makefile | 12
-rw-r--r--  tools/testing/selftests/kvm/include/kvm_util.h | 4
-rw-r--r--  tools/testing/selftests/kvm/lib/kvm_util.c | 89
-rw-r--r--  tools/testing/selftests/kvm/platform_info_test.c | 110
-rw-r--r--  tools/testing/selftests/lib.mk | 12
-rw-r--r--  tools/testing/selftests/memory-hotplug/config | 1
-rw-r--r--  tools/testing/selftests/net/Makefile | 1
-rwxr-xr-x  tools/testing/selftests/net/pmtu.sh | 4
-rw-r--r--  tools/testing/selftests/net/reuseport_bpf.c | 13
-rwxr-xr-x  tools/testing/selftests/net/rtnetlink.sh | 2
-rw-r--r--  tools/testing/selftests/net/tls.c | 49
-rwxr-xr-x  tools/testing/selftests/net/udpgso_bench.sh | 2
-rw-r--r--  tools/testing/selftests/networking/timestamping/Makefile | 1
-rw-r--r--tools/testing/selftests/powerpc/alignment/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/cache_shape/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/copyloops/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/dscr/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/math/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/mm/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/pmu/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/primitives/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/ptrace/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/signal/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/stringloops/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/switch_endian/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/syscalls/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/tm/Makefile1
-rw-r--r--tools/testing/selftests/powerpc/vphn/Makefile1
-rw-r--r--tools/testing/selftests/rseq/param_test.c19
-rw-r--r--tools/testing/selftests/vm/Makefile4
-rw-r--r--tools/testing/selftests/x86/test_vdso.c172
-rw-r--r--virt/kvm/arm/arm.c4
-rw-r--r--virt/kvm/arm/mmu.c21
-rw-r--r--virt/kvm/arm/trace.h15
1479 files changed, 19638 insertions, 24932 deletions
diff --git a/Documentation/ABI/stable/sysfs-devices-system-xen_memory b/Documentation/ABI/stable/sysfs-devices-system-xen_memory
index caa311d59ac1..6d83f95a8a8e 100644
--- a/Documentation/ABI/stable/sysfs-devices-system-xen_memory
+++ b/Documentation/ABI/stable/sysfs-devices-system-xen_memory
@@ -75,3 +75,12 @@ Contact: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Description:
Amount (in KiB) of low (or normal) memory in the
balloon.
+
+What: /sys/devices/system/xen_memory/xen_memory0/scrub_pages
+Date: September 2018
+KernelVersion: 4.20
+Contact: xen-devel@lists.xenproject.org
+Description:
+		Control scrubbing pages before returning them to Xen for use
+		by other domains. Can be set with the xen_scrub_pages cmdline
+		parameter. Default value controlled by CONFIG_XEN_SCRUB_PAGES_DEFAULT.
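For illustration, a minimal userspace sketch of flipping this knob from C
(assuming the attribute accepts "0"/"1", which the description implies but
does not spell out):

    /* Toggle Xen page scrubbing via the sysfs attribute above. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path =
            "/sys/devices/system/xen_memory/xen_memory0/scrub_pages";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (write(fd, "1", 1) != 1) /* "1" enables scrubbing (assumed) */
            perror("write");
        close(fd);
        return 0;
    }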
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 184193bcb262..caf36105a1c7 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1857,8 +1857,10 @@ following two functions.
wbc_init_bio(@wbc, @bio)
Should be called for each bio carrying writeback data and
- associates the bio with the inode's owner cgroup. Can be
- called anytime between bio allocation and submission.
+ associates the bio with the inode's owner cgroup and the
+ corresponding request queue. This must be called after
+ a queue (device) has been associated with the bio and
+ before submission.
wbc_account_io(@wbc, @page, @bytes)
Should be called for each data segment being written out.
@@ -1877,7 +1879,7 @@ the configuration, the bio may be executed at a lower priority and if
the writeback session is holding shared resources, e.g. a journal
entry, may lead to priority inversion. There is no one easy solution
for the problem. Filesystems can try to work around specific problem
-cases by skipping wbc_init_bio() or using bio_associate_blkcg()
+cases by skipping wbc_init_bio() or using bio_associate_create_blkg()
directly.
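The calling order above is easier to see in code. A hedged sketch of a
filesystem writeback path (a kernel fragment under assumptions, not a
standalone program; allocation sizes and error handling are elided):

    /* 1. Associate a device, and thus a request queue, with the bio. */
    bio = bio_alloc(GFP_NOFS, nr_vecs);
    bio_set_dev(bio, inode->i_sb->s_bdev);
    /* 2. Only now may the bio be tied to the inode's owner cgroup. */
    wbc_init_bio(wbc, bio);
    /* 3. Account each data segment, then submit. */
    wbc_account_io(wbc, page, PAGE_SIZE);
    submit_bio(bio);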
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 9871e649ffef..92eb1f42240d 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3523,6 +3523,12 @@
ramdisk_size= [RAM] Sizes of RAM disks in kilobytes
See Documentation/blockdev/ramdisk.txt.
+ random.trust_cpu={on,off}
+ [KNL] Enable or disable trusting the use of the
+ CPU's random number generator (if available) to
+ fully seed the kernel's CRNG. Default is controlled
+ by CONFIG_RANDOM_TRUST_CPU.
+
ras=option[,option,...] [KNL] RAS-specific options
cec_disable [X86]
@@ -4994,6 +5000,12 @@
Disables the PV optimizations forcing the HVM guest to
run as generic HVM guest with no PV drivers.
+ xen_scrub_pages= [XEN]
+ Boolean option to control scrubbing pages before giving them back
+		to Xen, for use by other domains. Can also be changed at runtime
+		with /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
+		Default value controlled by CONFIG_XEN_SCRUB_PAGES_DEFAULT.
+
xirc2ps_cs= [NET,PCMCIA]
Format:
<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
diff --git a/Documentation/arm64/elf_hwcaps.txt b/Documentation/arm64/elf_hwcaps.txt
index d6aff2c5e9e2..ea819ae024dd 100644
--- a/Documentation/arm64/elf_hwcaps.txt
+++ b/Documentation/arm64/elf_hwcaps.txt
@@ -78,11 +78,11 @@ HWCAP_EVTSTRM
HWCAP_AES
- Functionality implied by ID_AA64ISAR1_EL1.AES == 0b0001.
+ Functionality implied by ID_AA64ISAR0_EL1.AES == 0b0001.
HWCAP_PMULL
- Functionality implied by ID_AA64ISAR1_EL1.AES == 0b0010.
+ Functionality implied by ID_AA64ISAR0_EL1.AES == 0b0010.
HWCAP_SHA1
@@ -153,7 +153,7 @@ HWCAP_ASIMDDP
HWCAP_SHA512
- Functionality implied by ID_AA64ISAR0_EL1.SHA2 == 0b0002.
+ Functionality implied by ID_AA64ISAR0_EL1.SHA2 == 0b0010.
HWCAP_SVE
@@ -173,8 +173,12 @@ HWCAP_USCAT
HWCAP_ILRCPC
- Functionality implied by ID_AA64ISR1_EL1.LRCPC == 0b0002.
+ Functionality implied by ID_AA64ISAR1_EL1.LRCPC == 0b0010.
HWCAP_FLAGM
Functionality implied by ID_AA64ISAR0_EL1.TS == 0b0001.
+
+HWCAP_SSBS
+
+ Functionality implied by ID_AA64PFR1_EL1.SSBS == 0b0010.
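Userspace discovers these capabilities through the ELF auxiliary vector
rather than by reading the ID registers directly. A small sketch (the
HWCAP_SSBS fallback value below is an assumption for older headers):

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP_SSBS
    #define HWCAP_SSBS (1UL << 28) /* assumed arm64 hwcap bit */
    #endif

    int main(void)
    {
        unsigned long hwcaps = getauxval(AT_HWCAP);

        printf("SSBS %s\n", (hwcaps & HWCAP_SSBS) ? "present" : "absent");
        return 0;
    }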
diff --git a/Documentation/arm64/hugetlbpage.txt b/Documentation/arm64/hugetlbpage.txt
new file mode 100644
index 000000000000..cfae87dc653b
--- /dev/null
+++ b/Documentation/arm64/hugetlbpage.txt
@@ -0,0 +1,38 @@
+HugeTLBpage on ARM64
+====================
+
+Hugepages rely on making efficient use of TLBs to improve the performance of
+address translations. The benefit depends on both -
+
+ - the size of hugepages
+ - the size of entries supported by the TLBs
+
+The ARM64 port supports two flavours of hugepages.
+
+1) Block mappings at the pud/pmd level
+--------------------------------------
+
+These are regular hugepages where a pmd or a pud page table entry points to a
+block of memory. Regardless of the supported size of entries in TLB, block
+mappings reduce the depth of page table walk needed to translate hugepage
+addresses.
+
+2) Using the Contiguous bit
+---------------------------
+
+The architecture provides a contiguous bit in the translation table entries
+(D4.5.3, ARM DDI 0487C.a) that hints to the MMU that the entry is one of a
+contiguous set of entries that can be cached in a single TLB entry.
+
+The contiguous bit is used in Linux to increase the mapping size at the pmd and
+pte (last) level. The number of supported contiguous entries varies by page size
+and level of the page table.
+
+
+The following hugepage sizes are supported -
+
+ CONT PTE PMD CONT PMD PUD
+ -------- --- -------- ---
+ 4K: 64K 2M 32M 1G
+ 16K: 2M 32M 1G
+ 64K: 2M 512M 16G
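For example, with a 4K base page size a 2M hugepage is a single PMD block
mapping. A minimal sketch requesting one (hugepages must be reserved
beforehand, e.g. via /proc/sys/vm/nr_hugepages; the MAP_HUGE_* fallback
definitions are assumptions for older headers):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    #ifndef MAP_HUGE_SHIFT
    #define MAP_HUGE_SHIFT 26
    #endif
    #ifndef MAP_HUGE_2MB
    #define MAP_HUGE_2MB (21 << MAP_HUGE_SHIFT) /* log2(2M) == 21 */
    #endif

    int main(void)
    {
        size_t len = 2UL * 1024 * 1024;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
                       MAP_HUGE_2MB, -1, 0);

        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        ((char *)p)[0] = 1; /* touch to fault the hugepage in */
        munmap(p, len);
        return 0;
    }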
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
index 3b2f2dd82225..76ccded8b74c 100644
--- a/Documentation/arm64/silicon-errata.txt
+++ b/Documentation/arm64/silicon-errata.txt
@@ -56,6 +56,7 @@ stable kernels.
| ARM | Cortex-A72 | #853709 | N/A |
| ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
| ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
+| ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 |
| ARM | MMU-500 | #841119,#826419 | N/A |
| | | | |
| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
diff --git a/Documentation/blockdev/README.DAC960 b/Documentation/blockdev/README.DAC960
deleted file mode 100644
index bd85fb9dc6e5..000000000000
--- a/Documentation/blockdev/README.DAC960
+++ /dev/null
@@ -1,756 +0,0 @@
- Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
-
- Version 2.2.11 for Linux 2.2.19
- Version 2.4.11 for Linux 2.4.12
-
- PRODUCTION RELEASE
-
- 11 October 2001
-
- Leonard N. Zubkoff
- Dandelion Digital
- lnz@dandelion.com
-
- Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
-
-
- INTRODUCTION
-
-Mylex, Inc. designs and manufactures a variety of high performance PCI RAID
-controllers. Mylex Corporation is located at 34551 Ardenwood Blvd., Fremont,
-California 94555, USA and can be reached at 510.796.6100 or on the World Wide
-Web at http://www.mylex.com. Mylex Technical Support can be reached by
-electronic mail at mylexsup@us.ibm.com, by voice at 510.608.2400, or by FAX at
-510.745.7715. Contact information for offices in Europe and Japan is available
-on their Web site.
-
-The latest information on Linux support for DAC960 PCI RAID Controllers, as
-well as the most recent release of this driver, will always be available from
-my Linux Home Page at URL "http://www.dandelion.com/Linux/". The Linux DAC960
-driver supports all current Mylex PCI RAID controllers including the new
-eXtremeRAID 2000/3000 and AcceleRAID 352/170/160 models which have an entirely
-new firmware interface from the older eXtremeRAID 1100, AcceleRAID 150/200/250,
-and DAC960PJ/PG/PU/PD/PL. See below for a complete controller list as well as
-minimum firmware version requirements. For simplicity, in most places this
-documentation refers to DAC960 generically rather than explicitly listing all
-the supported models.
-
-Driver bug reports should be sent via electronic mail to "lnz@dandelion.com".
-Please include with the bug report the complete configuration messages reported
-by the driver at startup, along with any subsequent system messages relevant to
-the controller's operation, and a detailed description of your system's
-hardware configuration. Driver bugs are actually quite rare; if you encounter
-problems with disks being marked offline, for example, please contact Mylex
-Technical Support as the problem is related to the hardware configuration
-rather than the Linux driver.
-
-Please consult the RAID controller documentation for detailed information
-regarding installation and configuration of the controllers. This document
-primarily provides information specific to the Linux support.
-
-
- DRIVER FEATURES
-
-The DAC960 RAID controllers are supported solely as high performance RAID
-controllers, not as interfaces to arbitrary SCSI devices. The Linux DAC960
-driver operates at the block device level, the same level as the SCSI and IDE
-drivers. Unlike other RAID controllers currently supported on Linux, the
-DAC960 driver is not dependent on the SCSI subsystem, and hence avoids all the
-complexity and unnecessary code that would be associated with an implementation
-as a SCSI driver. The DAC960 driver is designed for as high a performance as
-possible with no compromises or extra code for compatibility with lower
-performance devices. The DAC960 driver includes extensive error logging and
-online configuration management capabilities. Except for initial configuration
-of the controller and adding new disk drives, most everything can be handled
-from Linux while the system is operational.
-
-The DAC960 driver is architected to support up to 8 controllers per system.
-Each DAC960 parallel SCSI controller can support up to 15 disk drives per
-channel, for a maximum of 60 drives on a four channel controller; the fibre
-channel eXtremeRAID 3000 controller supports up to 125 disk drives per loop for
-a total of 250 drives. The drives installed on a controller are divided into
-one or more "Drive Groups", and then each Drive Group is subdivided further
-into 1 to 32 "Logical Drives". Each Logical Drive has a specific RAID Level
-and caching policy associated with it, and it appears to Linux as a single
-block device. Logical Drives are further subdivided into up to 7 partitions
-through the normal Linux and PC disk partitioning schemes. Logical Drives are
-also known as "System Drives", and Drive Groups are also called "Packs". Both
-terms are in use in the Mylex documentation; I have chosen to standardize on
-the more generic "Logical Drive" and "Drive Group".
-
-DAC960 RAID disk devices are named in the style of the obsolete Device File
-System (DEVFS). The device corresponding to Logical Drive D on Controller C
-is referred to as /dev/rd/cCdD, and the partitions are called /dev/rd/cCdDp1
-through /dev/rd/cCdDp7. For example, partition 3 of Logical Drive 5 on
-Controller 2 is referred to as /dev/rd/c2d5p3. Note that unlike with SCSI
-disks the device names will not change in the event of a disk drive failure.
-The DAC960 driver is assigned major numbers 48 - 55 with one major number per
-controller. The 8 bits of minor number are divided into 5 bits for the Logical
-Drive and 3 bits for the partition.
-
-
- SUPPORTED DAC960/AcceleRAID/eXtremeRAID PCI RAID CONTROLLERS
-
-The following list comprises the supported DAC960, AcceleRAID, and eXtremeRAID
-PCI RAID Controllers as of the date of this document. It is recommended that
-anyone purchasing a Mylex PCI RAID Controller not in the following table
-contact the author beforehand to verify that it is or will be supported.
-
-eXtremeRAID 3000
- 1 Wide Ultra-2/LVD SCSI channel
- 2 External Fibre FC-AL channels
- 233MHz StrongARM SA 110 Processor
- 64 Bit 33MHz PCI (backward compatible with 32 Bit PCI slots)
- 32MB/64MB ECC SDRAM Memory
-
-eXtremeRAID 2000
- 4 Wide Ultra-160 LVD SCSI channels
- 233MHz StrongARM SA 110 Processor
- 64 Bit 33MHz PCI (backward compatible with 32 Bit PCI slots)
- 32MB/64MB ECC SDRAM Memory
-
-AcceleRAID 352
- 2 Wide Ultra-160 LVD SCSI channels
- 100MHz Intel i960RN RISC Processor
- 64 Bit 33MHz PCI (backward compatible with 32 Bit PCI slots)
- 32MB/64MB ECC SDRAM Memory
-
-AcceleRAID 170
- 1 Wide Ultra-160 LVD SCSI channel
- 100MHz Intel i960RM RISC Processor
- 16MB/32MB/64MB ECC SDRAM Memory
-
-AcceleRAID 160 (AcceleRAID 170LP)
- 1 Wide Ultra-160 LVD SCSI channel
- 100MHz Intel i960RS RISC Processor
- Built in 16M ECC SDRAM Memory
- PCI Low Profile Form Factor - fit for 2U height
-
-eXtremeRAID 1100 (DAC1164P)
- 3 Wide Ultra-2/LVD SCSI channels
- 233MHz StrongARM SA 110 Processor
- 64 Bit 33MHz PCI (backward compatible with 32 Bit PCI slots)
- 16MB/32MB/64MB Parity SDRAM Memory with Battery Backup
-
-AcceleRAID 250 (DAC960PTL1)
- Uses onboard Symbios SCSI chips on certain motherboards
- Also includes one onboard Wide Ultra-2/LVD SCSI Channel
- 66MHz Intel i960RD RISC Processor
- 4MB/8MB/16MB/32MB/64MB/128MB ECC EDO Memory
-
-AcceleRAID 200 (DAC960PTL0)
- Uses onboard Symbios SCSI chips on certain motherboards
- Includes no onboard SCSI Channels
- 66MHz Intel i960RD RISC Processor
- 4MB/8MB/16MB/32MB/64MB/128MB ECC EDO Memory
-
-AcceleRAID 150 (DAC960PRL)
- Uses onboard Symbios SCSI chips on certain motherboards
- Also includes one onboard Wide Ultra-2/LVD SCSI Channel
- 33MHz Intel i960RP RISC Processor
- 4MB Parity EDO Memory
-
-DAC960PJ 1/2/3 Wide Ultra SCSI-3 Channels
- 66MHz Intel i960RD RISC Processor
- 4MB/8MB/16MB/32MB/64MB/128MB ECC EDO Memory
-
-DAC960PG 1/2/3 Wide Ultra SCSI-3 Channels
- 33MHz Intel i960RP RISC Processor
- 4MB/8MB ECC EDO Memory
-
-DAC960PU 1/2/3 Wide Ultra SCSI-3 Channels
- Intel i960CF RISC Processor
- 4MB/8MB EDRAM or 2MB/4MB/8MB/16MB/32MB DRAM Memory
-
-DAC960PD 1/2/3 Wide Fast SCSI-2 Channels
- Intel i960CF RISC Processor
- 4MB/8MB EDRAM or 2MB/4MB/8MB/16MB/32MB DRAM Memory
-
-DAC960PL 1/2/3 Wide Fast SCSI-2 Channels
- Intel i960 RISC Processor
- 2MB/4MB/8MB/16MB/32MB DRAM Memory
-
-DAC960P 1/2/3 Wide Fast SCSI-2 Channels
- Intel i960 RISC Processor
- 2MB/4MB/8MB/16MB/32MB DRAM Memory
-
-For the eXtremeRAID 2000/3000 and AcceleRAID 352/170/160, firmware version
-6.00-01 or above is required.
-
-For the eXtremeRAID 1100, firmware version 5.06-0-52 or above is required.
-
-For the AcceleRAID 250, 200, and 150, firmware version 4.06-0-57 or above is
-required.
-
-For the DAC960PJ and DAC960PG, firmware version 4.06-0-00 or above is required.
-
-For the DAC960PU, DAC960PD, DAC960PL, and DAC960P, either firmware version
-3.51-0-04 or above is required (for dual Flash ROM controllers), or firmware
-version 2.73-0-00 or above is required (for single Flash ROM controllers)
-
-Please note that not all SCSI disk drives are suitable for use with DAC960
-controllers, and only particular firmware versions of any given model may
-actually function correctly. Similarly, not all motherboards have a BIOS that
-properly initializes the AcceleRAID 250, AcceleRAID 200, AcceleRAID 150,
-DAC960PJ, and DAC960PG because the Intel i960RD/RP is a multi-function device.
-If in doubt, contact Mylex RAID Technical Support (mylexsup@us.ibm.com) to
-verify compatibility. Mylex makes available a hard disk compatibility list at
-http://www.mylex.com/support/hdcomp/hd-lists.html.
-
-
- DRIVER INSTALLATION
-
-This distribution was prepared for Linux kernel version 2.2.19 or 2.4.12.
-
-To install the DAC960 RAID driver, you may use the following commands,
-replacing "/usr/src" with wherever you keep your Linux kernel source tree:
-
- cd /usr/src
- tar -xvzf DAC960-2.2.11.tar.gz (or DAC960-2.4.11.tar.gz)
- mv README.DAC960 linux/Documentation
- mv DAC960.[ch] linux/drivers/block
- patch -p0 < DAC960.patch (if DAC960.patch is included)
- cd linux
- make config
- make bzImage (or zImage)
-
-Then install "arch/x86/boot/bzImage" or "arch/x86/boot/zImage" as your
-standard kernel, run lilo if appropriate, and reboot.
-
-To create the necessary devices in /dev, the "make_rd" script included in
-"DAC960-Utilities.tar.gz" from http://www.dandelion.com/Linux/ may be used.
-LILO 21 and FDISK v2.9 include DAC960 support; also included in this archive
-are patches to LILO 20 and FDISK v2.8 that add DAC960 support, along with
-statically linked executables of LILO and FDISK. This modified version of LILO
-will allow booting from a DAC960 controller and/or mounting the root file
-system from a DAC960.
-
-Red Hat Linux 6.0 and SuSE Linux 6.1 include support for Mylex PCI RAID
-controllers. Installing directly onto a DAC960 may be problematic from other
-Linux distributions until their installation utilities are updated.
-
-
- INSTALLATION NOTES
-
-Before installing Linux or adding DAC960 logical drives to an existing Linux
-system, the controller must first be configured to provide one or more logical
-drives using the BIOS Configuration Utility or DACCF. Please note that since
-there are only at most 6 usable partitions on each logical drive, systems
-requiring more partitions should subdivide a drive group into multiple logical
-drives, each of which can have up to 6 usable partitions. Also, note that with
-large disk arrays it is advisable to enable the 8GB BIOS Geometry (255/63)
-rather than accepting the default 2GB BIOS Geometry (128/32); failing to so do
-will cause the logical drive geometry to have more than 65535 cylinders which
-will make it impossible for FDISK to be used properly. The 8GB BIOS Geometry
-can be enabled by configuring the DAC960 BIOS, which is accessible via Alt-M
-during the BIOS initialization sequence.
-
-For maximum performance and the most efficient E2FSCK performance, it is
-recommended that EXT2 file systems be built with a 4KB block size and 16 block
-stride to match the DAC960 controller's 64KB default stripe size. The command
-"mke2fs -b 4096 -R stride=16 <device>" is appropriate. Unless there will be a
-large number of small files on the file systems, it is also beneficial to add
-the "-i 16384" option to increase the bytes per inode parameter thereby
-reducing the file system metadata. Finally, on systems that will only be run
-with Linux 2.2 or later kernels it is beneficial to enable sparse superblocks
-with the "-s 1" option.
-
-
- DAC960 ANNOUNCEMENTS MAILING LIST
-
-The DAC960 Announcements Mailing List provides a forum for informing Linux
-users of new driver releases and other announcements regarding Linux support
-for DAC960 PCI RAID Controllers. To join the mailing list, send a message to
-"dac960-announce-request@dandelion.com" with the line "subscribe" in the
-message body.
-
-
- CONTROLLER CONFIGURATION AND STATUS MONITORING
-
-The DAC960 RAID controllers running firmware 4.06 or above include a Background
-Initialization facility so that system downtime is minimized both for initial
-installation and subsequent configuration of additional storage. The BIOS
-Configuration Utility (accessible via Alt-R during the BIOS initialization
-sequence) is used to quickly configure the controller, and then the logical
-drives that have been created are available for immediate use even while they
-are still being initialized by the controller. The primary need for online
-configuration and status monitoring is then to avoid system downtime when disk
-drives fail and must be replaced. Mylex's online monitoring and configuration
-utilities are being ported to Linux and will become available at some point in
-the future. Note that with a SAF-TE (SCSI Accessed Fault-Tolerant Enclosure)
-enclosure, the controller is able to rebuild failed drives automatically as
-soon as a drive replacement is made available.
-
-The primary interfaces for controller configuration and status monitoring are
-special files created in the /proc/rd/... hierarchy along with the normal
-system console logging mechanism. Whenever the system is operating, the DAC960
-driver queries each controller for status information every 10 seconds, and
-checks for additional conditions every 60 seconds. The initial status of each
-controller is always available for controller N in /proc/rd/cN/initial_status,
-and the current status as of the last status monitoring query is available in
-/proc/rd/cN/current_status. In addition, status changes are also logged by the
-driver to the system console and will appear in the log files maintained by
-syslog. The progress of asynchronous rebuild or consistency check operations
-is also available in /proc/rd/cN/current_status, and progress messages are
-logged to the system console at most every 60 seconds.
-
-Starting with the 2.2.3/2.0.3 versions of the driver, the status information
-available in /proc/rd/cN/initial_status and /proc/rd/cN/current_status has been
-augmented to include the vendor, model, revision, and serial number (if
-available) for each physical device found connected to the controller:
-
-***** DAC960 RAID Driver Version 2.2.3 of 19 August 1999 *****
-Copyright 1998-1999 by Leonard N. Zubkoff <lnz@dandelion.com>
-Configuring Mylex DAC960PRL PCI RAID Controller
- Firmware Version: 4.07-0-07, Channels: 1, Memory Size: 16MB
- PCI Bus: 1, Device: 4, Function: 1, I/O Address: Unassigned
- PCI Address: 0xFE300000 mapped at 0xA0800000, IRQ Channel: 21
- Controller Queue Depth: 128, Maximum Blocks per Command: 128
- Driver Queue Depth: 127, Maximum Scatter/Gather Segments: 33
- Stripe Size: 64KB, Segment Size: 8KB, BIOS Geometry: 255/63
- SAF-TE Enclosure Management Enabled
- Physical Devices:
- 0:0 Vendor: IBM Model: DRVS09D Revision: 0270
- Serial Number: 68016775HA
- Disk Status: Online, 17928192 blocks
- 0:1 Vendor: IBM Model: DRVS09D Revision: 0270
- Serial Number: 68004E53HA
- Disk Status: Online, 17928192 blocks
- 0:2 Vendor: IBM Model: DRVS09D Revision: 0270
- Serial Number: 13013935HA
- Disk Status: Online, 17928192 blocks
- 0:3 Vendor: IBM Model: DRVS09D Revision: 0270
- Serial Number: 13016897HA
- Disk Status: Online, 17928192 blocks
- 0:4 Vendor: IBM Model: DRVS09D Revision: 0270
- Serial Number: 68019905HA
- Disk Status: Online, 17928192 blocks
- 0:5 Vendor: IBM Model: DRVS09D Revision: 0270
- Serial Number: 68012753HA
- Disk Status: Online, 17928192 blocks
- 0:6 Vendor: ESG-SHV Model: SCA HSBP M6 Revision: 0.61
- Logical Drives:
- /dev/rd/c0d0: RAID-5, Online, 89640960 blocks, Write Thru
- No Rebuild or Consistency Check in Progress
-
-To simplify the monitoring process for custom software, the special file
-/proc/rd/status returns "OK" when all DAC960 controllers in the system are
-operating normally and no failures have occurred, or "ALERT" if any logical
-drives are offline or critical or any non-standby physical drives are dead.
-
-Configuration commands for controller N are available via the special file
-/proc/rd/cN/user_command. A human readable command can be written to this
-special file to initiate a configuration operation, and the results of the
-operation can then be read back from the special file in addition to being
-logged to the system console. The shell command sequence
-
- echo "<configuration-command>" > /proc/rd/c0/user_command
- cat /proc/rd/c0/user_command
-
-is typically used to execute configuration commands. The configuration
-commands are:
-
- flush-cache
-
- The "flush-cache" command flushes the controller's cache. The system
- automatically flushes the cache at shutdown or if the driver module is
- unloaded, so this command is only needed to be certain a write back cache
- is flushed to disk before the system is powered off by a command to a UPS.
- Note that the flush-cache command also stops an asynchronous rebuild or
- consistency check, so it should not be used except when the system is being
- halted.
-
- kill <channel>:<target-id>
-
- The "kill" command marks the physical drive <channel>:<target-id> as DEAD.
- This command is provided primarily for testing, and should not be used
- during normal system operation.
-
- make-online <channel>:<target-id>
-
- The "make-online" command changes the physical drive <channel>:<target-id>
- from status DEAD to status ONLINE. In cases where multiple physical drives
- have been killed simultaneously, this command may be used to bring all but
- one of them back online, after which a rebuild to the final drive is
- necessary.
-
- Warning: make-online should only be used on a dead physical drive that is
- an active part of a drive group, never on a standby drive. The command
- should never be used on a dead drive that is part of a critical logical
- drive; rebuild should be used if only a single drive is dead.
-
- make-standby <channel>:<target-id>
-
- The "make-standby" command changes physical drive <channel>:<target-id>
- from status DEAD to status STANDBY. It should only be used in cases where
- a dead drive was replaced after an automatic rebuild was performed onto a
- standby drive. It cannot be used to add a standby drive to the controller
- configuration if one was not created initially; the BIOS Configuration
- Utility must be used for that currently.
-
- rebuild <channel>:<target-id>
-
- The "rebuild" command initiates an asynchronous rebuild onto physical drive
- <channel>:<target-id>. It should only be used when a dead drive has been
- replaced.
-
- check-consistency <logical-drive-number>
-
- The "check-consistency" command initiates an asynchronous consistency check
- of <logical-drive-number> with automatic restoration. It can be used
- whenever it is desired to verify the consistency of the redundancy
- information.
-
- cancel-rebuild
- cancel-consistency-check
-
- The "cancel-rebuild" and "cancel-consistency-check" commands cancel any
- rebuild or consistency check operations previously initiated.
-
-
- EXAMPLE I - DRIVE FAILURE WITHOUT A STANDBY DRIVE
-
-The following annotated logs demonstrate the controller configuration and and
-online status monitoring capabilities of the Linux DAC960 Driver. The test
-configuration comprises 6 1GB Quantum Atlas I disk drives on two channels of a
-DAC960PJ controller. The physical drives are configured into a single drive
-group without a standby drive, and the drive group has been configured into two
-logical drives, one RAID-5 and one RAID-6. Note that these logs are from an
-earlier version of the driver and the messages have changed somewhat with newer
-releases, but the functionality remains similar. First, here is the current
-status of the RAID configuration:
-
-gwynedd:/u/lnz# cat /proc/rd/c0/current_status
-***** DAC960 RAID Driver Version 2.0.0 of 23 March 1999 *****
-Copyright 1998-1999 by Leonard N. Zubkoff <lnz@dandelion.com>
-Configuring Mylex DAC960PJ PCI RAID Controller
- Firmware Version: 4.06-0-08, Channels: 3, Memory Size: 8MB
- PCI Bus: 0, Device: 19, Function: 1, I/O Address: Unassigned
- PCI Address: 0xFD4FC000 mapped at 0x8807000, IRQ Channel: 9
- Controller Queue Depth: 128, Maximum Blocks per Command: 128
- Driver Queue Depth: 127, Maximum Scatter/Gather Segments: 33
- Stripe Size: 64KB, Segment Size: 8KB, BIOS Geometry: 255/63
- Physical Devices:
- 0:1 - Disk: Online, 2201600 blocks
- 0:2 - Disk: Online, 2201600 blocks
- 0:3 - Disk: Online, 2201600 blocks
- 1:1 - Disk: Online, 2201600 blocks
- 1:2 - Disk: Online, 2201600 blocks
- 1:3 - Disk: Online, 2201600 blocks
- Logical Drives:
- /dev/rd/c0d0: RAID-5, Online, 5498880 blocks, Write Thru
- /dev/rd/c0d1: RAID-6, Online, 3305472 blocks, Write Thru
- No Rebuild or Consistency Check in Progress
-
-gwynedd:/u/lnz# cat /proc/rd/status
-OK
-
-The above messages indicate that everything is healthy, and /proc/rd/status
-returns "OK" indicating that there are no problems with any DAC960 controller
-in the system. For demonstration purposes, while I/O is active Physical Drive
-1:1 is now disconnected, simulating a drive failure. The failure is noted by
-the driver within 10 seconds of the controller's having detected it, and the
-driver logs the following console status messages indicating that Logical
-Drives 0 and 1 are now CRITICAL as a result of Physical Drive 1:1 being DEAD:
-
-DAC960#0: Physical Drive 1:2 Error Log: Sense Key = 6, ASC = 29, ASCQ = 02
-DAC960#0: Physical Drive 1:3 Error Log: Sense Key = 6, ASC = 29, ASCQ = 02
-DAC960#0: Physical Drive 1:1 killed because of timeout on SCSI command
-DAC960#0: Physical Drive 1:1 is now DEAD
-DAC960#0: Logical Drive 0 (/dev/rd/c0d0) is now CRITICAL
-DAC960#0: Logical Drive 1 (/dev/rd/c0d1) is now CRITICAL
-
-The Sense Keys logged here are just Check Condition / Unit Attention conditions
-arising from a SCSI bus reset that is forced by the controller during its error
-recovery procedures. Concurrently with the above, the driver status available
-from /proc/rd also reflects the drive failure. The status message in
-/proc/rd/status has changed from "OK" to "ALERT":
-
-gwynedd:/u/lnz# cat /proc/rd/status
-ALERT
-
-and /proc/rd/c0/current_status has been updated:
-
-gwynedd:/u/lnz# cat /proc/rd/c0/current_status
- ...
- Physical Devices:
- 0:1 - Disk: Online, 2201600 blocks
- 0:2 - Disk: Online, 2201600 blocks
- 0:3 - Disk: Online, 2201600 blocks
- 1:1 - Disk: Dead, 2201600 blocks
- 1:2 - Disk: Online, 2201600 blocks
- 1:3 - Disk: Online, 2201600 blocks
- Logical Drives:
- /dev/rd/c0d0: RAID-5, Critical, 5498880 blocks, Write Thru
- /dev/rd/c0d1: RAID-6, Critical, 3305472 blocks, Write Thru
- No Rebuild or Consistency Check in Progress
-
-Since there are no standby drives configured, the system can continue to access
-the logical drives in a performance degraded mode until the failed drive is
-replaced and a rebuild operation completed to restore the redundancy of the
-logical drives. Once Physical Drive 1:1 is replaced with a properly
-functioning drive, or if the physical drive was killed without having failed
-(e.g., due to electrical problems on the SCSI bus), the user can instruct the
-controller to initiate a rebuild operation onto the newly replaced drive:
-
-gwynedd:/u/lnz# echo "rebuild 1:1" > /proc/rd/c0/user_command
-gwynedd:/u/lnz# cat /proc/rd/c0/user_command
-Rebuild of Physical Drive 1:1 Initiated
-
-The echo command instructs the controller to initiate an asynchronous rebuild
-operation onto Physical Drive 1:1, and the status message that results from the
-operation is then available for reading from /proc/rd/c0/user_command, as well
-as being logged to the console by the driver.
-
-Within 10 seconds of this command the driver logs the initiation of the
-asynchronous rebuild operation:
-
-DAC960#0: Rebuild of Physical Drive 1:1 Initiated
-DAC960#0: Physical Drive 1:1 Error Log: Sense Key = 6, ASC = 29, ASCQ = 01
-DAC960#0: Physical Drive 1:1 is now WRITE-ONLY
-DAC960#0: Rebuild in Progress: Logical Drive 0 (/dev/rd/c0d0) 1% completed
-
-and /proc/rd/c0/current_status is updated:
-
-gwynedd:/u/lnz# cat /proc/rd/c0/current_status
- ...
- Physical Devices:
- 0:1 - Disk: Online, 2201600 blocks
- 0:2 - Disk: Online, 2201600 blocks
- 0:3 - Disk: Online, 2201600 blocks
- 1:1 - Disk: Write-Only, 2201600 blocks
- 1:2 - Disk: Online, 2201600 blocks
- 1:3 - Disk: Online, 2201600 blocks
- Logical Drives:
- /dev/rd/c0d0: RAID-5, Critical, 5498880 blocks, Write Thru
- /dev/rd/c0d1: RAID-6, Critical, 3305472 blocks, Write Thru
- Rebuild in Progress: Logical Drive 0 (/dev/rd/c0d0) 6% completed
-
-As the rebuild progresses, the current status in /proc/rd/c0/current_status is
-updated every 10 seconds:
-
-gwynedd:/u/lnz# cat /proc/rd/c0/current_status
- ...
- Physical Devices:
- 0:1 - Disk: Online, 2201600 blocks
- 0:2 - Disk: Online, 2201600 blocks
- 0:3 - Disk: Online, 2201600 blocks
- 1:1 - Disk: Write-Only, 2201600 blocks
- 1:2 - Disk: Online, 2201600 blocks
- 1:3 - Disk: Online, 2201600 blocks
- Logical Drives:
- /dev/rd/c0d0: RAID-5, Critical, 5498880 blocks, Write Thru
- /dev/rd/c0d1: RAID-6, Critical, 3305472 blocks, Write Thru
- Rebuild in Progress: Logical Drive 0 (/dev/rd/c0d0) 15% completed
-
-and every minute a progress message is logged to the console by the driver:
-
-DAC960#0: Rebuild in Progress: Logical Drive 0 (/dev/rd/c0d0) 32% completed
-DAC960#0: Rebuild in Progress: Logical Drive 0 (/dev/rd/c0d0) 63% completed
-DAC960#0: Rebuild in Progress: Logical Drive 0 (/dev/rd/c0d0) 94% completed
-DAC960#0: Rebuild in Progress: Logical Drive 1 (/dev/rd/c0d1) 94% completed
-
-Finally, the rebuild completes successfully. The driver logs the status of the
-logical and physical drives and the rebuild completion:
-
-DAC960#0: Rebuild Completed Successfully
-DAC960#0: Physical Drive 1:1 is now ONLINE
-DAC960#0: Logical Drive 0 (/dev/rd/c0d0) is now ONLINE
-DAC960#0: Logical Drive 1 (/dev/rd/c0d1) is now ONLINE
-
-/proc/rd/c0/current_status is updated:
-
-gwynedd:/u/lnz# cat /proc/rd/c0/current_status
- ...
- Physical Devices:
- 0:1 - Disk: Online, 2201600 blocks
- 0:2 - Disk: Online, 2201600 blocks
- 0:3 - Disk: Online, 2201600 blocks
- 1:1 - Disk: Online, 2201600 blocks
- 1:2 - Disk: Online, 2201600 blocks
- 1:3 - Disk: Online, 2201600 blocks
- Logical Drives:
- /dev/rd/c0d0: RAID-5, Online, 5498880 blocks, Write Thru
- /dev/rd/c0d1: RAID-6, Online, 3305472 blocks, Write Thru
- Rebuild Completed Successfully
-
-and /proc/rd/status indicates that everything is healthy once again:
-
-gwynedd:/u/lnz# cat /proc/rd/status
-OK
-
-
- EXAMPLE II - DRIVE FAILURE WITH A STANDBY DRIVE
-
-The following annotated logs demonstrate the controller configuration and and
-online status monitoring capabilities of the Linux DAC960 Driver. The test
-configuration comprises 6 1GB Quantum Atlas I disk drives on two channels of a
-DAC960PJ controller. The physical drives are configured into a single drive
-group with a standby drive, and the drive group has been configured into two
-logical drives, one RAID-5 and one RAID-6. Note that these logs are from an
-earlier version of the driver and the messages have changed somewhat with newer
-releases, but the functionality remains similar. First, here is the current
-status of the RAID configuration:
-
-gwynedd:/u/lnz# cat /proc/rd/c0/current_status
-***** DAC960 RAID Driver Version 2.0.0 of 23 March 1999 *****
-Copyright 1998-1999 by Leonard N. Zubkoff <lnz@dandelion.com>
-Configuring Mylex DAC960PJ PCI RAID Controller
- Firmware Version: 4.06-0-08, Channels: 3, Memory Size: 8MB
- PCI Bus: 0, Device: 19, Function: 1, I/O Address: Unassigned
- PCI Address: 0xFD4FC000 mapped at 0x8807000, IRQ Channel: 9
- Controller Queue Depth: 128, Maximum Blocks per Command: 128
- Driver Queue Depth: 127, Maximum Scatter/Gather Segments: 33
- Stripe Size: 64KB, Segment Size: 8KB, BIOS Geometry: 255/63
- Physical Devices:
- 0:1 - Disk: Online, 2201600 blocks
- 0:2 - Disk: Online, 2201600 blocks
- 0:3 - Disk: Online, 2201600 blocks
- 1:1 - Disk: Online, 2201600 blocks
- 1:2 - Disk: Online, 2201600 blocks
- 1:3 - Disk: Standby, 2201600 blocks
- Logical Drives:
- /dev/rd/c0d0: RAID-5, Online, 4399104 blocks, Write Thru
- /dev/rd/c0d1: RAID-6, Online, 2754560 blocks, Write Thru
- No Rebuild or Consistency Check in Progress
-
-gwynedd:/u/lnz# cat /proc/rd/status
-OK
-
-The above messages indicate that everything is healthy, and /proc/rd/status
-returns "OK" indicating that there are no problems with any DAC960 controller
-in the system. For demonstration purposes, while I/O is active Physical Drive
-1:2 is now disconnected, simulating a drive failure. The failure is noted by
-the driver within 10 seconds of the controller's having detected it, and the
-driver logs the following console status messages:
-
-DAC960#0: Physical Drive 1:1 Error Log: Sense Key = 6, ASC = 29, ASCQ = 02
-DAC960#0: Physical Drive 1:3 Error Log: Sense Key = 6, ASC = 29, ASCQ = 02
-DAC960#0: Physical Drive 1:2 killed because of timeout on SCSI command
-DAC960#0: Physical Drive 1:2 is now DEAD
-DAC960#0: Physical Drive 1:2 killed because it was removed
-DAC960#0: Logical Drive 0 (/dev/rd/c0d0) is now CRITICAL
-DAC960#0: Logical Drive 1 (/dev/rd/c0d1) is now CRITICAL
-
-Since a standby drive is configured, the controller automatically begins
-rebuilding onto the standby drive:
-
-DAC960#0: Physical Drive 1:3 is now WRITE-ONLY
-DAC960#0: Rebuild in Progress: Logical Drive 0 (/dev/rd/c0d0) 4% completed
-
-Concurrently with the above, the driver status available from /proc/rd also
-reflects the drive failure and automatic rebuild. The status message in
-/proc/rd/status has changed from "OK" to "ALERT":
-
-gwynedd:/u/lnz# cat /proc/rd/status
-ALERT
-
-and /proc/rd/c0/current_status has been updated:
-
-gwynedd:/u/lnz# cat /proc/rd/c0/current_status
- ...
- Physical Devices:
- 0:1 - Disk: Online, 2201600 blocks
- 0:2 - Disk: Online, 2201600 blocks
- 0:3 - Disk: Online, 2201600 blocks
- 1:1 - Disk: Online, 2201600 blocks
- 1:2 - Disk: Dead, 2201600 blocks
- 1:3 - Disk: Write-Only, 2201600 blocks
- Logical Drives:
- /dev/rd/c0d0: RAID-5, Critical, 4399104 blocks, Write Thru
- /dev/rd/c0d1: RAID-6, Critical, 2754560 blocks, Write Thru
- Rebuild in Progress: Logical Drive 0 (/dev/rd/c0d0) 4% completed
-
-As the rebuild progresses, the current status in /proc/rd/c0/current_status is
-updated every 10 seconds:
-
-gwynedd:/u/lnz# cat /proc/rd/c0/current_status
- ...
- Physical Devices:
- 0:1 - Disk: Online, 2201600 blocks
- 0:2 - Disk: Online, 2201600 blocks
- 0:3 - Disk: Online, 2201600 blocks
- 1:1 - Disk: Online, 2201600 blocks
- 1:2 - Disk: Dead, 2201600 blocks
- 1:3 - Disk: Write-Only, 2201600 blocks
- Logical Drives:
- /dev/rd/c0d0: RAID-5, Critical, 4399104 blocks, Write Thru
- /dev/rd/c0d1: RAID-6, Critical, 2754560 blocks, Write Thru
- Rebuild in Progress: Logical Drive 0 (/dev/rd/c0d0) 40% completed
-
-and every minute a progress message is logged on the console by the driver:
-
-DAC960#0: Rebuild in Progress: Logical Drive 0 (/dev/rd/c0d0) 40% completed
-DAC960#0: Rebuild in Progress: Logical Drive 0 (/dev/rd/c0d0) 76% completed
-DAC960#0: Rebuild in Progress: Logical Drive 1 (/dev/rd/c0d1) 66% completed
-DAC960#0: Rebuild in Progress: Logical Drive 1 (/dev/rd/c0d1) 84% completed
-
-Finally, the rebuild completes successfully. The driver logs the status of the
-logical and physical drives and the rebuild completion:
-
-DAC960#0: Rebuild Completed Successfully
-DAC960#0: Physical Drive 1:3 is now ONLINE
-DAC960#0: Logical Drive 0 (/dev/rd/c0d0) is now ONLINE
-DAC960#0: Logical Drive 1 (/dev/rd/c0d1) is now ONLINE
-
-/proc/rd/c0/current_status is updated:
-
-***** DAC960 RAID Driver Version 2.0.0 of 23 March 1999 *****
-Copyright 1998-1999 by Leonard N. Zubkoff <lnz@dandelion.com>
-Configuring Mylex DAC960PJ PCI RAID Controller
- Firmware Version: 4.06-0-08, Channels: 3, Memory Size: 8MB
- PCI Bus: 0, Device: 19, Function: 1, I/O Address: Unassigned
- PCI Address: 0xFD4FC000 mapped at 0x8807000, IRQ Channel: 9
- Controller Queue Depth: 128, Maximum Blocks per Command: 128
- Driver Queue Depth: 127, Maximum Scatter/Gather Segments: 33
- Stripe Size: 64KB, Segment Size: 8KB, BIOS Geometry: 255/63
- Physical Devices:
- 0:1 - Disk: Online, 2201600 blocks
- 0:2 - Disk: Online, 2201600 blocks
- 0:3 - Disk: Online, 2201600 blocks
- 1:1 - Disk: Online, 2201600 blocks
- 1:2 - Disk: Dead, 2201600 blocks
- 1:3 - Disk: Online, 2201600 blocks
- Logical Drives:
- /dev/rd/c0d0: RAID-5, Online, 4399104 blocks, Write Thru
- /dev/rd/c0d1: RAID-6, Online, 2754560 blocks, Write Thru
- Rebuild Completed Successfully
-
-and /proc/rd/status indicates that everything is healthy once again:
-
-gwynedd:/u/lnz# cat /proc/rd/status
-OK
-
-Note that the absence of a viable standby drive does not create an "ALERT"
-status. Once dead Physical Drive 1:2 has been replaced, the controller must be
-told that this has occurred and that the newly replaced drive should become the
-new standby drive:
-
-gwynedd:/u/lnz# echo "make-standby 1:2" > /proc/rd/c0/user_command
-gwynedd:/u/lnz# cat /proc/rd/c0/user_command
-Make Standby of Physical Drive 1:2 Succeeded
-
-The echo command instructs the controller to make Physical Drive 1:2 into a
-standby drive, and the status message that results from the operation is then
-available for reading from /proc/rd/c0/user_command, as well as being logged to
-the console by the driver. Within 60 seconds of this command the driver logs:
-
-DAC960#0: Physical Drive 1:2 Error Log: Sense Key = 6, ASC = 29, ASCQ = 01
-DAC960#0: Physical Drive 1:2 is now STANDBY
-DAC960#0: Make Standby of Physical Drive 1:2 Succeeded
-
-and /proc/rd/c0/current_status is updated:
-
-gwynedd:/u/lnz# cat /proc/rd/c0/current_status
- ...
- Physical Devices:
- 0:1 - Disk: Online, 2201600 blocks
- 0:2 - Disk: Online, 2201600 blocks
- 0:3 - Disk: Online, 2201600 blocks
- 1:1 - Disk: Online, 2201600 blocks
- 1:2 - Disk: Standby, 2201600 blocks
- 1:3 - Disk: Online, 2201600 blocks
- Logical Drives:
- /dev/rd/c0d0: RAID-5, Online, 4399104 blocks, Write Thru
- /dev/rd/c0d1: RAID-6, Online, 2754560 blocks, Write Thru
- Rebuild Completed Successfully
diff --git a/Documentation/blockdev/zram.txt b/Documentation/blockdev/zram.txt
index 875b2b56b87f..3c1b5ab54bc0 100644
--- a/Documentation/blockdev/zram.txt
+++ b/Documentation/blockdev/zram.txt
@@ -190,7 +190,7 @@ whitespace:
notify_free Depending on device usage scenario it may account
a) the number of pages freed because of swap slot free
notifications or b) the number of pages freed because of
- REQ_DISCARD requests sent by bio. The former ones are
+ REQ_OP_DISCARD requests sent by bio. The former ones are
sent to a swap block device when a swap slot is freed,
which implies that this disk is being used as a swap disk.
The latter ones are sent by filesystem mounted with
diff --git a/Documentation/core-api/idr.rst b/Documentation/core-api/idr.rst
index d351e880a2f6..a2738050c4f0 100644
--- a/Documentation/core-api/idr.rst
+++ b/Documentation/core-api/idr.rst
@@ -1,4 +1,4 @@
-.. SPDX-License-Identifier: CC-BY-SA-4.0
+.. SPDX-License-Identifier: GPL-2.0+
=============
ID Allocation
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index 390c145f01d7..52a719b49afd 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -348,3 +348,7 @@ Version History
1.13.1 Fix deadlock caused by early md_stop_writes(). Also fix size an
state races.
1.13.2 Fix raid redundancy validation and avoid keeping raid set frozen
+1.14.0 Fix reshape race on small devices. Fix stripe adding reshape
+ deadlock/potential data corruption. Update superblock when
+ specific devices are requested via rebuild. Fix RAID leg
+ rebuild errors.
diff --git a/Documentation/device-mapper/log-writes.txt b/Documentation/device-mapper/log-writes.txt
index f4ebcbaf50f3..b638d124be6a 100644
--- a/Documentation/device-mapper/log-writes.txt
+++ b/Documentation/device-mapper/log-writes.txt
@@ -38,7 +38,7 @@ inconsistent file system.
Any REQ_FUA requests bypass this flushing mechanism and are logged as soon as
they complete as those requests will obviously bypass the device cache.
-Any REQ_DISCARD requests are treated like WRITE requests. Otherwise we would
+Any REQ_OP_DISCARD requests are treated like WRITE requests. Otherwise we would
have all the DISCARD requests, and then the WRITE requests and then the FLUSH
request. Consider the following example:
diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
index 5d5bd456d9d9..e30fd106df4f 100644
--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
@@ -10,6 +10,7 @@ PHYs.
Required properties:
- compatible : compatible string, one of:
- "allwinner,sun4i-a10-ahci"
+ - "allwinner,sun8i-r40-ahci"
- "brcm,iproc-ahci"
- "hisilicon,hisi-ahci"
- "cavium,octeon-7130-ahci"
@@ -31,8 +32,10 @@ Optional properties:
- clocks : a list of phandle + clock specifier pairs
- resets : a list of phandle + reset specifier pairs
- target-supply : regulator for SATA target power
+- phy-supply : regulator for PHY power
- phys : reference to the SATA PHY node
- phy-names : must be "sata-phy"
+- ahci-supply : regulator for AHCI controller
- ports-implemented : Mask that indicates which ports that the HBA supports
are available for software to use. Useful if PORTS_IMPL
is not programmed by the BIOS, which is true with
@@ -42,12 +45,13 @@ Required properties when using sub-nodes:
- #address-cells : number of cells to encode an address
- #size-cells : number of cells representing the size of an address
+For allwinner,sun8i-r40-ahci, the resets property must be present.
Sub-nodes required properties:
- reg : the port number
And at least one of the following properties:
- phys : reference to the SATA PHY node
-- target-supply : regulator for SATA target power
+- target-supply : regulator for SATA target power
Examples:
sata@ffe08000 {
diff --git a/Documentation/devicetree/bindings/ata/brcm,sata-brcm.txt b/Documentation/devicetree/bindings/ata/brcm,sata-brcm.txt
index 0a5b3b47f217..7713a413c6a7 100644
--- a/Documentation/devicetree/bindings/ata/brcm,sata-brcm.txt
+++ b/Documentation/devicetree/bindings/ata/brcm,sata-brcm.txt
@@ -9,6 +9,7 @@ Required properties:
"brcm,bcm7445-ahci"
"brcm,bcm-nsp-ahci"
"brcm,sata3-ahci"
+ "brcm,bcm63138-ahci"
- reg : register mappings for AHCI and SATA_TOP_CTRL
- reg-names : "ahci" and "top-ctrl"
- interrupts : interrupt mapping for SATA IRQ
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.txt b/Documentation/devicetree/bindings/input/gpio-keys.txt
index 996ce84352cb..7cccc49b6bea 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys.txt
+++ b/Documentation/devicetree/bindings/input/gpio-keys.txt
@@ -1,4 +1,4 @@
-Device-Tree bindings for input/gpio_keys.c keyboard driver
+Device-Tree bindings for input/keyboard/gpio_keys.c keyboard driver
Required properties:
- compatible = "gpio-keys";
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index 457d5ae16f23..3e17ac1d5d58 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -10,6 +10,7 @@ Required properties:
Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
the Cadence GEM, or the generic form: "cdns,gem".
Use "atmel,sama5d2-gem" for the GEM IP (10/100) available on Atmel sama5d2 SoCs.
+ Use "atmel,sama5d3-macb" for the 10/100Mbit IP available on Atmel sama5d3 SoCs.
Use "atmel,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
diff --git a/Documentation/driver-api/fpga/fpga-mgr.rst b/Documentation/driver-api/fpga/fpga-mgr.rst
index 4b3825da48d9..82b6dbbd31cd 100644
--- a/Documentation/driver-api/fpga/fpga-mgr.rst
+++ b/Documentation/driver-api/fpga/fpga-mgr.rst
@@ -184,6 +184,11 @@ API for implementing a new FPGA Manager driver
API for programming an FPGA
---------------------------
+FPGA Manager flags
+
+.. kernel-doc:: include/linux/fpga/fpga-mgr.h
+ :doc: FPGA Manager flags
+
.. kernel-doc:: include/linux/fpga/fpga-mgr.h
:functions: fpga_image_info
diff --git a/Documentation/fb/uvesafb.txt b/Documentation/fb/uvesafb.txt
index f6362d88763b..aa924196c366 100644
--- a/Documentation/fb/uvesafb.txt
+++ b/Documentation/fb/uvesafb.txt
@@ -15,7 +15,8 @@ than x86. Check the v86d documentation for a list of currently supported
arches.
v86d source code can be downloaded from the following website:
- http://dev.gentoo.org/~spock/projects/uvesafb
+
+ https://github.com/mjanusz/v86d
Please refer to the v86d documentation for detailed configuration and
installation instructions.
@@ -177,7 +178,7 @@ from the Video BIOS if you set pixclock to 0 in fb_var_screeninfo.
--
Michal Januszewski <spock@gentoo.org>
- Last updated: 2009-03-30
+ Last updated: 2017-10-10
Documentation of the uvesafb options is loosely based on vesafb.txt.
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 4b2084d0f1fb..a6c6a8af48a2 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -848,7 +848,7 @@ struct file_operations
----------------------
This describes how the VFS can manipulate an open file. As of kernel
-4.1, the following members are defined:
+4.18, the following members are defined:
struct file_operations {
struct module *owner;
@@ -858,11 +858,11 @@ struct file_operations {
ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
int (*iterate) (struct file *, struct dir_context *);
+ int (*iterate_shared) (struct file *, struct dir_context *);
__poll_t (*poll) (struct file *, struct poll_table_struct *);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
- int (*mremap)(struct file *, struct vm_area_struct *);
int (*open) (struct inode *, struct file *);
int (*flush) (struct file *, fl_owner_t id);
int (*release) (struct inode *, struct file *);
@@ -882,6 +882,10 @@ struct file_operations {
#ifndef CONFIG_MMU
unsigned (*mmap_capabilities)(struct file *);
#endif
+ ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int);
+ int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t, u64);
+ int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, u64);
+ int (*fadvise)(struct file *, loff_t, loff_t, int);
};
Again, all methods are called without any locks being held, unless
@@ -899,6 +903,9 @@ otherwise noted.
iterate: called when the VFS needs to read the directory contents
+  iterate_shared: called when the VFS needs to read the directory contents
+	when the filesystem supports concurrent directory iterators
+
poll: called by the VFS when a process wants to check if there is
activity on this file and (optionally) go to sleep until there
is activity. Called by the select(2) and poll(2) system calls
@@ -951,6 +958,16 @@ otherwise noted.
fallocate: called by the VFS to preallocate blocks or punch a hole.
+ copy_file_range: called by the copy_file_range(2) system call.
+
+ clone_file_range: called by the ioctl(2) system call for FICLONERANGE and
+ FICLONE commands.
+
+ dedupe_file_range: called by the ioctl(2) system call for FIDEDUPERANGE
+ command.
+
+ fadvise: possibly called by the fadvise64() system call.
+
Note that the file operations are implemented by the specific
filesystem in which the inode resides. When opening a device node
(character or block special) most filesystems will call special
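As an illustration of how the new hooks are reached from userspace, a
sketch that drives ->copy_file_range through the copy_file_range(2)
wrapper (glibc 2.27 or later is assumed; filesystems without the hook
fall back to a generic copy in the kernel):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        int in, out;
        ssize_t n;

        if (argc != 3) {
            fprintf(stderr, "usage: %s <src> <dst>\n", argv[0]);
            return 1;
        }
        in = open(argv[1], O_RDONLY);
        out = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
        if (in < 0 || out < 0) {
            perror("open");
            return 1;
        }
        do {    /* copy up to 1 MiB per call until EOF */
            n = copy_file_range(in, NULL, out, NULL, 1 << 20, 0);
        } while (n > 0);
        if (n < 0)
            perror("copy_file_range");
        return n < 0;
    }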
diff --git a/Documentation/media/uapi/dvb/video_function_calls.rst b/Documentation/media/uapi/dvb/video_function_calls.rst
index 3f4f6c9ffad7..a4222b6cd2d3 100644
--- a/Documentation/media/uapi/dvb/video_function_calls.rst
+++ b/Documentation/media/uapi/dvb/video_function_calls.rst
@@ -33,4 +33,3 @@ Video Function Calls
video-clear-buffer
video-set-streamtype
video-set-format
- video-set-attributes
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 8313a636dd53..960de8fe3f40 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -425,7 +425,7 @@ tcp_mtu_probing - INTEGER
1 - Disabled by default, enabled when an ICMP black hole detected
2 - Always enabled, use initial MSS of tcp_base_mss.
-tcp_probe_interval - INTEGER
+tcp_probe_interval - UNSIGNED INTEGER
Controls how often to start TCP Packetization-Layer Path MTU
Discovery reprobe. The default is reprobing every 10 minutes as
per RFC4821.
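The current value can be read back through procfs; a trivial sketch (the
path simply mirrors the sysctl name):

    #include <stdio.h>

    int main(void)
    {
        unsigned int secs;
        FILE *f = fopen("/proc/sys/net/ipv4/tcp_probe_interval", "r");

        if (!f || fscanf(f, "%u", &secs) != 1) {
            perror("tcp_probe_interval");
            return 1;
        }
        printf("reprobe every %u seconds\n", secs);
        fclose(f);
        return 0;
    }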
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index 61f918b10a0c..d1bf143b446f 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -86,7 +86,7 @@ pkg-config
The build system, as of 4.18, requires pkg-config to check for installed
kconfig tools and to determine flags settings for use in
-'make {menu,n,g,x}config'. Previously pkg-config was being used but not
+'make {g,x}config'. Previously pkg-config was being used but not
verified or documented.
Flex
diff --git a/Documentation/process/code-of-conduct-interpretation.rst b/Documentation/process/code-of-conduct-interpretation.rst
new file mode 100644
index 000000000000..e899f14a4ba2
--- /dev/null
+++ b/Documentation/process/code-of-conduct-interpretation.rst
@@ -0,0 +1,156 @@
+.. _code_of_conduct_interpretation:
+
+Linux Kernel Contributor Covenant Code of Conduct Interpretation
+================================================================
+
+The :ref:`code_of_conduct` is a general document meant to
+provide a set of rules for almost any open source community. Every
+open source community is unique and the Linux kernel is no exception.
+Because of this, this document describes how we in the Linux kernel
+community will interpret it. We also do not expect this interpretation
+to be static over time, and will adjust it as needed.
+
+The Linux kernel development effort is a very personal process compared
+to "traditional" ways of developing software. Your contributions and
+the ideas behind them will be carefully reviewed, often resulting in
+critique and criticism. The review will almost always require
+improvements before the material can be included in the
+kernel. Know that this happens because everyone involved wants to see
+the best possible solution for the overall success of Linux. This
+development process has been proven to create the most robust operating
+system kernel ever, and we do not want to do anything to cause the
+quality of submission and eventual result to ever decrease.
+
+Maintainers
+-----------
+
+The Code of Conduct uses the term "maintainers" numerous times. In the
+kernel community, a "maintainer" is anyone who is responsible for a
+subsystem, driver, or file, and is listed in the MAINTAINERS file in the
+kernel source tree.
+
+Responsibilities
+----------------
+
+The Code of Conduct mentions rights and responsibilities for
+maintainers, and this needs some further clarification.
+
+First and foremost, it is a reasonable expectation to have maintainers
+lead by example.
+
+That being said, our community is vast and broad, and there is no new
+requirement for maintainers to unilaterally handle how other people
+behave in the parts of the community where they are active. That
+responsibility is upon all of us, and ultimately the Code of Conduct
+documents final escalation paths in case of unresolved concerns
+regarding conduct issues.
+
+Maintainers should be willing to help when problems occur, and work with
+others in the community when needed. Do not be afraid to reach out to
+the Technical Advisory Board (TAB) or other maintainers if you're
+uncertain how to handle situations that come up. It will not be
+considered a violation report unless you want it to be. If you are
+uncertain about approaching the TAB or any other maintainers, please
+reach out to our conflict mediator, Mishi Choudhary <mishi@linux.com>.
+
+In the end, "be kind to each other" is really the goal for everybody.
+We know everyone is human and we all fail at times, but the
+primary goal for all of us should be to work toward amicable resolutions
+of problems. Enforcement of the code of conduct will only be a last
+resort option.
+
+Our goal of creating a robust and technically advanced operating system
+and the technical complexity involved naturally require expertise and
+decision-making.
+
+The required expertise varies depending on the area of contribution. It
+is determined mainly by context and technical complexity and only
+secondarily by the expectations of contributors and maintainers.
+
+Both the expertise expectations and decision-making are subject to
+discussion, but at the very end there is a basic necessity to be able to
+make decisions in order to make progress. This prerogative is in the
+hands of maintainers and the project's leadership and is expected to be used
+in good faith.
+
+As a consequence, setting expertise expectations, making decisions and
+rejecting unsuitable contributions are not viewed as a violation of the
+Code of Conduct.
+
+While maintainers are in general welcoming to newcomers, their capacity
+for helping contributors overcome the entry hurdles is limited, so they
+have to set priorities. This, also, is not to be seen as a violation of
+the Code of Conduct. The kernel community is aware of that and provides
+entry-level programs in various forms like kernelnewbies.org.
+
+Scope
+-----
+
+The Linux kernel community primarily interacts on a set of public email
+lists distributed around a number of different servers controlled by a
+number of different companies or individuals. All of these lists are
+defined in the MAINTAINERS file in the kernel source tree. Any emails
+sent to those mailing lists are considered covered by the Code of
+Conduct.
+
+Developers who use the kernel.org bugzilla, and other subsystem bugzilla
+or bug tracking tools, should follow the guidelines of the Code of
+Conduct. The Linux kernel community does not have an "official" project
+email address, or "official" social media address. Any activity
+performed using a kernel.org email account must follow the Code of
+Conduct as published for kernel.org, just as any individual using a
+corporate email account must follow the specific rules of that
+corporation.
+
+The Code of Conduct does not prohibit continuing to include names, email
+addresses, and associated comments in mailing list messages, kernel
+change log messages, or code comments.
+
+Interaction in other forums is covered by whatever rules apply to said
+forums and is in general not covered by the Code of Conduct. Exceptions
+may be considered for extreme circumstances.
+
+Contributions submitted for the kernel should use appropriate language.
+Content that predates the Code of Conduct will not be
+addressed now as a violation. Inappropriate language can be seen as a
+bug, though; such bugs will be fixed more quickly if any interested
+parties submit patches to that effect. Expressions that are currently
+part of the user/kernel API, or reflect terminology used in published
+standards or specifications, are not considered bugs.
+
+Enforcement
+-----------
+
+The address listed in the Code of Conduct goes to the Code of Conduct
+Committee. The exact members receiving these emails at any given time
+are listed at https://kernel.org/code-of-conduct.html.  Members cannot
+access reports made before they joined or after they have left the
+committee.
+
+The initial Code of Conduct Committee consists of volunteer members of
+the TAB, as well as a professional mediator acting as a neutral third
+party. The first task of the committee is to establish documented
+processes, which will be made public.
+
+Any member of the committee, including the mediator, can be contacted
+directly if a reporter does not wish to include the full committee in a
+complaint or concern.
+
+The Code of Conduct Committee reviews the cases according to the
+processes (see above) and consults with the TAB as needed and
+appropriate, for instance to request and receive information about the
+kernel community.
+
+Any decisions by the committee will be brought to the TAB, for
+implementation of enforcement with the relevant maintainers if needed.
+A decision by the Code of Conduct Committee can be overturned by the TAB
+by a two-thirds vote.
+
+At quarterly intervals, the Code of Conduct Committee and TAB will
+provide a report summarizing the anonymised reports that the Code of
+Conduct Committee has received and their status, as well as details of any
+overridden decisions, including complete and identifiable voting details.
+
+We expect to establish a different process for Code of Conduct Committee
+staffing beyond the bootstrap period. This document will be updated
+with that information when this occurs.
diff --git a/Documentation/process/code-of-conduct.rst b/Documentation/process/code-of-conduct.rst
new file mode 100644
index 000000000000..be50294aebd5
--- /dev/null
+++ b/Documentation/process/code-of-conduct.rst
@@ -0,0 +1,86 @@
+.. _code_of_conduct:
+
+Contributor Covenant Code of Conduct
+++++++++++++++++++++++++++++++++++++
+
+Our Pledge
+==========
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, sex characteristics, gender identity and
+expression, level of experience, education, socio-economic status, nationality,
+personal appearance, race, religion, or sexual identity and orientation.
+
+Our Standards
+=============
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others’ private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+
+Our Responsibilities
+====================
+
+Maintainers are responsible for clarifying the standards of acceptable behavior
+and are expected to take appropriate and fair corrective action in response to
+any instances of unacceptable behavior.
+
+Maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, or to ban temporarily or permanently any
+contributor for other behaviors that they deem inappropriate, threatening,
+offensive, or harmful.
+
+Scope
+=====
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+Enforcement
+===========
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the Code of Conduct Committee at
+<conduct@kernel.org>. All complaints will be reviewed and investigated
+and will result in a response that is deemed necessary and appropriate
+to the circumstances. The Code of Conduct Committee is obligated to
+maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted
+separately.
+
+Attribution
+===========
+
+This Code of Conduct is adapted from the Contributor Covenant, version 1.4,
+available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
+
+Interpretation
+==============
+
+See the :ref:`code_of_conduct_interpretation` document for how the Linux
+kernel community will be interpreting this document.
diff --git a/Documentation/process/code-of-conflict.rst b/Documentation/process/code-of-conflict.rst
deleted file mode 100644
index 47b6de763203..000000000000
--- a/Documentation/process/code-of-conflict.rst
+++ /dev/null
@@ -1,28 +0,0 @@
-Code of Conflict
-----------------
-
-The Linux kernel development effort is a very personal process compared
-to "traditional" ways of developing software. Your code and ideas
-behind it will be carefully reviewed, often resulting in critique and
-criticism. The review will almost always require improvements to the
-code before it can be included in the kernel. Know that this happens
-because everyone involved wants to see the best possible solution for
-the overall success of Linux. This development process has been proven
-to create the most robust operating system kernel ever, and we do not
-want to do anything to cause the quality of submission and eventual
-result to ever decrease.
-
-If however, anyone feels personally abused, threatened, or otherwise
-uncomfortable due to this process, that is not acceptable. If so,
-please contact the Linux Foundation's Technical Advisory Board at
-<tab@lists.linux-foundation.org>, or the individual members, and they
-will work to resolve the issue to the best of their ability. For more
-information on who is on the Technical Advisory Board and what their
-role is, please see:
-
- - http://www.linuxfoundation.org/projects/linux/tab
-
-As a reviewer of code, please strive to keep things civil and focused on
-the technical issues involved. We are all humans, and frustrations can
-be high on both sides of the process. Try to keep in mind the immortal
-words of Bill and Ted, "Be excellent to each other."
diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst
index 37bd0628b6ee..42691e2880eb 100644
--- a/Documentation/process/index.rst
+++ b/Documentation/process/index.rst
@@ -20,7 +20,8 @@ Below are the essential guides that every developer should read.
:maxdepth: 1
howto
- code-of-conflict
+ code-of-conduct
+ code-of-conduct-interpretation
development-process
submitting-patches
coding-style
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index c664064f76fb..647f94128a85 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -4510,7 +4510,8 @@ Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits.
Architectures: s390
Parameters: none
Returns: 0 on success, -EINVAL if the hpage module parameter was not set
- or cmma is enabled
+ or cmma is enabled, or the VM has the KVM_VM_S390_UCONTROL
+ flag set
With this capability the KVM support for memory backing with 1m pages
through hugetlbfs can be enabled for a VM. After the capability is
@@ -4521,6 +4522,15 @@ hpage module parameter is not set to 1, -EINVAL is returned.
While it is generally possible to create a huge page backed VM without
this capability, the VM will not be able to run.
+7.14 KVM_CAP_MSR_PLATFORM_INFO
+
+Architectures: x86
+Parameters: args[0] whether feature should be enabled or not
+
+With this capability, a guest may read the MSR_PLATFORM_INFO MSR. Otherwise,
+a #GP would be raised when the guest tries to access it. Currently, this
+capability does not allow the guest to write to this MSR.
+
8. Other capabilities.
----------------------
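
As a sketch of how userspace might opt in, assuming a VM file descriptor
obtained from KVM_CREATE_VM (the helper name is invented here and error
handling is elided):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int enable_msr_platform_info(int vm_fd)
{
        struct kvm_enable_cap cap = {
                .cap = KVM_CAP_MSR_PLATFORM_INFO,
                .args[0] = 1,   /* non-zero: allow the guest to read the MSR */
        };

        return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}
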
diff --git a/Documentation/x86/earlyprintk.txt b/Documentation/x86/earlyprintk.txt
index 688e3eeed21d..46933e06c972 100644
--- a/Documentation/x86/earlyprintk.txt
+++ b/Documentation/x86/earlyprintk.txt
@@ -35,25 +35,25 @@ and two USB cables, connected like this:
( If your system does not list a debug port capability then you probably
won't be able to use the USB debug key. )
- b.) You also need a Netchip USB debug cable/key:
+ b.) You also need a NetChip USB debug cable/key:
http://www.plxtech.com/products/NET2000/NET20DC/default.asp
- This is a small blue plastic connector with two USB connections,
+ This is a small blue plastic connector with two USB connections;
it draws power from its USB connections.
c.) You need a second client/console system with a high speed USB 2.0
port.
- d.) The Netchip device must be plugged directly into the physical
+ d.) The NetChip device must be plugged directly into the physical
debug port on the "host/target" system. You cannot use a USB hub in
between the physical debug port and the "host/target" system.
The EHCI debug controller is bound to a specific physical USB
- port and the Netchip device will only work as an early printk
+ port and the NetChip device will only work as an early printk
device in this port. The EHCI host controllers are electrically
wired such that the EHCI debug controller is hooked up to the
- first physical and there is no way to change this via software.
+ first physical port and there is no way to change this via software.
You can find the physical port through experimentation by trying
each physical port on the system and rebooting. Or you can try
and use lsusb or look at the kernel info messages emitted by the
@@ -65,9 +65,9 @@ and two USB cables, connected like this:
to the hardware vendor, because there is no reason not to wire
this port into one of the physically accessible ports.
- e.) It is also important to note, that many versions of the Netchip
+  e.) It is also important to note that many versions of the NetChip
device require the "client/console" system to be plugged into the
- right and side of the device (with the product logo facing up and
+ right hand side of the device (with the product logo facing up and
       readable left to right). The reason is that the 5 volt
power supply is taken from only one side of the device and it
must be the side that does not get rebooted.
@@ -81,13 +81,18 @@ and two USB cables, connected like this:
CONFIG_EARLY_PRINTK_DBGP=y
And you need to add the boot command line: "earlyprintk=dbgp".
+
(If you are using Grub, append it to the 'kernel' line in
- /etc/grub.conf)
+ /etc/grub.conf. If you are using Grub2 on a BIOS firmware system,
+ append it to the 'linux' line in /boot/grub2/grub.cfg. If you are
+ using Grub2 on an EFI firmware system, append it to the 'linux'
+ or 'linuxefi' line in /boot/grub2/grub.cfg or
+ /boot/efi/EFI/<distro>/grub.cfg.)
On systems with more than one EHCI debug controller you must
specify the correct EHCI debug controller number. The ordering
comes from the PCI bus enumeration of the EHCI controllers. The
- default with no number argument is "0" the first EHCI debug
+ default with no number argument is "0" or the first EHCI debug
controller. To use the second EHCI debug controller, you would
use the command line: "earlyprintk=dbgp1"
@@ -111,7 +116,7 @@ and two USB cables, connected like this:
see the raw output.
c.) On Nvidia Southbridge based systems: the kernel will try to probe
- and find out which port has debug device connected.
+ and find out which port has a debug device connected.
3. Testing that it works fine:
diff --git a/LICENSES/other/CC-BY-SA-4.0 b/LICENSES/other/CC-BY-SA-4.0
deleted file mode 100644
index f9158e831e79..000000000000
--- a/LICENSES/other/CC-BY-SA-4.0
+++ /dev/null
@@ -1,397 +0,0 @@
-Valid-License-Identifier: CC-BY-SA-4.0
-SPDX-URL: https://spdx.org/licenses/CC-BY-SA-4.0
-Usage-Guide:
- To use the Creative Commons Attribution Share Alike 4.0 International
- license put the following SPDX tag/value pair into a comment according to
- the placement guidelines in the licensing rules documentation:
- SPDX-License-Identifier: CC-BY-SA-4.0
-License-Text:
-
-Creative Commons Attribution-ShareAlike 4.0 International
-
-Creative Commons Corporation ("Creative Commons") is not a law firm and
-does not provide legal services or legal advice. Distribution of Creative
-Commons public licenses does not create a lawyer-client or other
-relationship. Creative Commons makes its licenses and related information
-available on an "as-is" basis. Creative Commons gives no warranties
-regarding its licenses, any material licensed under their terms and
-conditions, or any related information. Creative Commons disclaims all
-liability for damages resulting from their use to the fullest extent
-possible.
-
-Using Creative Commons Public Licenses
-
-Creative Commons public licenses provide a standard set of terms and
-conditions that creators and other rights holders may use to share original
-works of authorship and other material subject to copyright and certain
-other rights specified in the public license below. The following
-considerations are for informational purposes only, are not exhaustive, and
-do not form part of our licenses.
-
-Considerations for licensors: Our public licenses are intended for use by
-those authorized to give the public permission to use material in ways
-otherwise restricted by copyright and certain other rights. Our licenses
-are irrevocable. Licensors should read and understand the terms and
-conditions of the license they choose before applying it. Licensors should
-also secure all rights necessary before applying our licenses so that the
-public can reuse the material as expected. Licensors should clearly mark
-any material not subject to the license. This includes other CC-licensed
-material, or material used under an exception or limitation to
-copyright. More considerations for licensors :
-wiki.creativecommons.org/Considerations_for_licensors
-
-Considerations for the public: By using one of our public licenses, a
-licensor grants the public permission to use the licensed material under
-specified terms and conditions. If the licensor's permission is not
-necessary for any reason - for example, because of any applicable exception
-or limitation to copyright - then that use is not regulated by the
-license. Our licenses grant only permissions under copyright and certain
-other rights that a licensor has authority to grant. Use of the licensed
-material may still be restricted for other reasons, including because
-others have copyright or other rights in the material. A licensor may make
-special requests, such as asking that all changes be marked or described.
-
-Although not required by our licenses, you are encouraged to respect those
-requests where reasonable. More considerations for the public :
-wiki.creativecommons.org/Considerations_for_licensees
-
-Creative Commons Attribution-ShareAlike 4.0 International Public License
-
-By exercising the Licensed Rights (defined below), You accept and agree to
-be bound by the terms and conditions of this Creative Commons
-Attribution-ShareAlike 4.0 International Public License ("Public
-License"). To the extent this Public License may be interpreted as a
-contract, You are granted the Licensed Rights in consideration of Your
-acceptance of these terms and conditions, and the Licensor grants You such
-rights in consideration of benefits the Licensor receives from making the
-Licensed Material available under these terms and conditions.
-
-Section 1 - Definitions.
-
- a. Adapted Material means material subject to Copyright and Similar
- Rights that is derived from or based upon the Licensed Material and
- in which the Licensed Material is translated, altered, arranged,
- transformed, or otherwise modified in a manner requiring permission
- under the Copyright and Similar Rights held by the Licensor. For
- purposes of this Public License, where the Licensed Material is a
- musical work, performance, or sound recording, Adapted Material is
- always produced where the Licensed Material is synched in timed
- relation with a moving image.
-
- b. Adapter's License means the license You apply to Your Copyright and
- Similar Rights in Your contributions to Adapted Material in
- accordance with the terms and conditions of this Public License.
-
- c. BY-SA Compatible License means a license listed at
- creativecommons.org/compatiblelicenses, approved by Creative Commons
- as essentially the equivalent of this Public License.
-
- d. Copyright and Similar Rights means copyright and/or similar rights
- closely related to copyright including, without limitation,
- performance, broadcast, sound recording, and Sui Generis Database
- Rights, without regard to how the rights are labeled or
- categorized. For purposes of this Public License, the rights
- specified in Section 2(b)(1)-(2) are not Copyright and Similar
- Rights.
-
- e. Effective Technological Measures means those measures that, in the
- absence of proper authority, may not be circumvented under laws
- fulfilling obligations under Article 11 of the WIPO Copyright Treaty
- adopted on December 20, 1996, and/or similar international
- agreements.
-
- f. Exceptions and Limitations means fair use, fair dealing, and/or any
- other exception or limitation to Copyright and Similar Rights that
- applies to Your use of the Licensed Material.
-
- g. License Elements means the license attributes listed in the name of
- a Creative Commons Public License. The License Elements of this
- Public License are Attribution and ShareAlike.
-
- h. Licensed Material means the artistic or literary work, database, or
- other material to which the Licensor applied this Public License.
-
- i. Licensed Rights means the rights granted to You subject to the terms
- and conditions of this Public License, which are limited to all
- Copyright and Similar Rights that apply to Your use of the Licensed
- Material and that the Licensor has authority to license.
-
- j. Licensor means the individual(s) or entity(ies) granting rights
- under this Public License.
-
- k. Share means to provide material to the public by any means or
- process that requires permission under the Licensed Rights, such as
- reproduction, public display, public performance, distribution,
- dissemination, communication, or importation, and to make material
- available to the public including in ways that members of the public
- may access the material from a place and at a time individually
- chosen by them.
-
- l. Sui Generis Database Rights means rights other than copyright
- resulting from Directive 96/9/EC of the European Parliament and of
- the Council of 11 March 1996 on the legal protection of databases,
- as amended and/or succeeded, as well as other essentially equivalent
- rights anywhere in the world. m. You means the individual or entity
- exercising the Licensed Rights under this Public License. Your has a
- corresponding meaning.
-
-Section 2 - Scope.
-
- a. License grant.
-
- 1. Subject to the terms and conditions of this Public License, the
- Licensor hereby grants You a worldwide, royalty-free,
- non-sublicensable, non-exclusive, irrevocable license to
- exercise the Licensed Rights in the Licensed Material to:
-
- A. reproduce and Share the Licensed Material, in whole or in part; and
-
- B. produce, reproduce, and Share Adapted Material.
-
- 2. Exceptions and Limitations. For the avoidance of doubt, where
- Exceptions and Limitations apply to Your use, this Public
- License does not apply, and You do not need to comply with its
- terms and conditions.
-
- 3. Term. The term of this Public License is specified in Section 6(a).
-
- 4. Media and formats; technical modifications allowed. The Licensor
- authorizes You to exercise the Licensed Rights in all media and
- formats whether now known or hereafter created, and to make
- technical modifications necessary to do so. The Licensor waives
- and/or agrees not to assert any right or authority to forbid You
- from making technical modifications necessary to exercise the
- Licensed Rights, including technical modifications necessary to
- circumvent Effective Technological Measures. For purposes of
- this Public License, simply making modifications authorized by
- this Section 2(a)(4) never produces Adapted Material.
-
- 5. Downstream recipients.
-
- A. Offer from the Licensor - Licensed Material. Every recipient
- of the Licensed Material automatically receives an offer
- from the Licensor to exercise the Licensed Rights under the
- terms and conditions of this Public License.
-
- B. Additional offer from the Licensor - Adapted Material. Every
- recipient of Adapted Material from You automatically
- receives an offer from the Licensor to exercise the Licensed
- Rights in the Adapted Material under the conditions of the
- Adapter's License You apply.
-
- C. No downstream restrictions. You may not offer or impose any
- additional or different terms or conditions on, or apply any
- Effective Technological Measures to, the Licensed Material
- if doing so restricts exercise of the Licensed Rights by any
- recipient of the Licensed Material.
-
- 6. No endorsement. Nothing in this Public License constitutes or
- may be construed as permission to assert or imply that You are,
- or that Your use of the Licensed Material is, connected with, or
- sponsored, endorsed, or granted official status by, the Licensor
- or others designated to receive attribution as provided in
- Section 3(a)(1)(A)(i).
-
- b. Other rights.
-
- 1. Moral rights, such as the right of integrity, are not licensed
- under this Public License, nor are publicity, privacy, and/or
- other similar personality rights; however, to the extent
- possible, the Licensor waives and/or agrees not to assert any
- such rights held by the Licensor to the limited extent necessary
- to allow You to exercise the Licensed Rights, but not otherwise.
-
- 2. Patent and trademark rights are not licensed under this Public
- License.
-
- 3. To the extent possible, the Licensor waives any right to collect
- royalties from You for the exercise of the Licensed Rights,
- whether directly or through a collecting society under any
- voluntary or waivable statutory or compulsory licensing
- scheme. In all other cases the Licensor expressly reserves any
- right to collect such royalties.
-
-Section 3 - License Conditions.
-
-Your exercise of the Licensed Rights is expressly made subject to the
-following conditions.
-
- a. Attribution.
-
- 1. If You Share the Licensed Material (including in modified form),
- You must:
-
- A. retain the following if it is supplied by the Licensor with
- the Licensed Material:
-
- i. identification of the creator(s) of the Licensed
- Material and any others designated to receive
- attribution, in any reasonable manner requested by the
- Licensor (including by pseudonym if designated);
-
- ii. a copyright notice;
-
- iii. a notice that refers to this Public License;
-
- iv. a notice that refers to the disclaimer of warranties;
-
- v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable;
-
- B. indicate if You modified the Licensed Material and retain an
- indication of any previous modifications; and
-
- C. indicate the Licensed Material is licensed under this Public
- License, and include the text of, or the URI or hyperlink to,
- this Public License.
-
- 2. You may satisfy the conditions in Section 3(a)(1) in any
- reasonable manner based on the medium, means, and context in
- which You Share the Licensed Material. For example, it may be
- reasonable to satisfy the conditions by providing a URI or
- hyperlink to a resource that includes the required information.
-
- 3. If requested by the Licensor, You must remove any of the
- information required by Section 3(a)(1)(A) to the extent
- reasonably practicable. b. ShareAlike.In addition to the
- conditions in Section 3(a), if You Share Adapted Material You
- produce, the following conditions also apply.
-
- 1. The Adapter's License You apply must be a Creative Commons
- license with the same License Elements, this version or
- later, or a BY-SA Compatible License.
-
- 2. You must include the text of, or the URI or hyperlink to, the
- Adapter's License You apply. You may satisfy this condition
- in any reasonable manner based on the medium, means, and
- context in which You Share Adapted Material.
-
- 3. You may not offer or impose any additional or different terms
- or conditions on, or apply any Effective Technological
- Measures to, Adapted Material that restrict exercise of the
- rights granted under the Adapter's License You apply.
-
-Section 4 - Sui Generis Database Rights.
-
-Where the Licensed Rights include Sui Generis Database Rights that apply to
-Your use of the Licensed Material:
-
- a. for the avoidance of doubt, Section 2(a)(1) grants You the right to
- extract, reuse, reproduce, and Share all or a substantial portion of
- the contents of the database;
-
- b. if You include all or a substantial portion of the database contents
- in a database in which You have Sui Generis Database Rights, then
- the database in which You have Sui Generis Database Rights (but not
- its individual contents) is Adapted Material, including for purposes
- of Section 3(b); and
-
- c. You must comply with the conditions in Section 3(a) if You Share all
- or a substantial portion of the contents of the database.
-
- For the avoidance of doubt, this Section 4 supplements and does not
- replace Your obligations under this Public License where the Licensed
- Rights include other Copyright and Similar Rights.
-
-Section 5 - Disclaimer of Warranties and Limitation of Liability.
-
- a. Unless otherwise separately undertaken by the Licensor, to the
- extent possible, the Licensor offers the Licensed Material as-is and
- as-available, and makes no representations or warranties of any kind
- concerning the Licensed Material, whether express, implied,
- statutory, or other. This includes, without limitation, warranties
- of title, merchantability, fitness for a particular purpose,
- non-infringement, absence of latent or other defects, accuracy, or
- the presence or absence of errors, whether or not known or
- discoverable. Where disclaimers of warranties are not allowed in
- full or in part, this disclaimer may not apply to You.
-
- b. To the extent possible, in no event will the Licensor be liable to
- You on any legal theory (including, without limitation, negligence)
- or otherwise for any direct, special, indirect, incidental,
- consequential, punitive, exemplary, or other losses, costs,
- expenses, or damages arising out of this Public License or use of
- the Licensed Material, even if the Licensor has been advised of the
- possibility of such losses, costs, expenses, or damages. Where a
- limitation of liability is not allowed in full or in part, this
- limitation may not apply to You.
-
- c. The disclaimer of warranties and limitation of liability provided
- above shall be interpreted in a manner that, to the extent possible,
- most closely approximates an absolute disclaimer and waiver of all
- liability.
-
-Section 6 - Term and Termination.
-
- a. This Public License applies for the term of the Copyright and
- Similar Rights licensed here. However, if You fail to comply with
- this Public License, then Your rights under this Public License
- terminate automatically.
-
- b. Where Your right to use the Licensed Material has terminated under
- Section 6(a), it reinstates:
-
- 1. automatically as of the date the violation is cured, provided it
- is cured within 30 days of Your discovery of the violation; or
-
- 2. upon express reinstatement by the Licensor.
-
- c. For the avoidance of doubt, this Section 6(b) does not affect any
- right the Licensor may have to seek remedies for Your violations of
- this Public License.
-
- d. For the avoidance of doubt, the Licensor may also offer the Licensed
- Material under separate terms or conditions or stop distributing the
- Licensed Material at any time; however, doing so will not terminate
- this Public License.
-
- e. Sections 1, 5, 6, 7, and 8 survive termination of this Public License.
-
-Section 7 - Other Terms and Conditions.
-
- a. The Licensor shall not be bound by any additional or different terms
- or conditions communicated by You unless expressly agreed.
-
- b. Any arrangements, understandings, or agreements regarding the
- Licensed Material not stated herein are separate from and
- independent of the terms and conditions of this Public License.
-
-Section 8 - Interpretation.
-
- a. For the avoidance of doubt, this Public License does not, and shall
- not be interpreted to, reduce, limit, restrict, or impose conditions
- on any use of the Licensed Material that could lawfully be made
- without permission under this Public License.
-
- b. To the extent possible, if any provision of this Public License is
- deemed unenforceable, it shall be automatically reformed to the
- minimum extent necessary to make it enforceable. If the provision
- cannot be reformed, it shall be severed from this Public License
- without affecting the enforceability of the remaining terms and
- conditions.
-
- c. No term or condition of this Public License will be waived and no
- failure to comply consented to unless expressly agreed to by the
- Licensor.
-
- d. Nothing in this Public License constitutes or may be interpreted as
- a limitation upon, or waiver of, any privileges and immunities that
- apply to the Licensor or You, including from the legal processes of
- any jurisdiction or authority.
-
-Creative Commons is not a party to its public licenses. Notwithstanding,
-Creative Commons may elect to apply one of its public licenses to material
-it publishes and in those instances will be considered the "Licensor." The
-text of the Creative Commons public licenses is dedicated to the public
-domain under the CC0 Public Domain Dedication. Except for the limited
-purpose of indicating that material is shared under a Creative Commons
-public license or as otherwise permitted by the Creative Commons policies
-published at creativecommons.org/policies, Creative Commons does not
-authorize the use of the trademark "Creative Commons" or any other
-trademark or logo of Creative Commons without its prior written consent
-including, without limitation, in connection with any unauthorized
-modifications to any of its public licenses or any other arrangements,
-understandings, or agreements concerning use of licensed material. For the
-avoidance of doubt, this paragraph does not form part of the public
-licenses.
-
-Creative Commons may be contacted at creativecommons.org.
diff --git a/MAINTAINERS b/MAINTAINERS
index d870cb57c887..c393746b0209 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -324,7 +324,6 @@ F: Documentation/ABI/testing/sysfs-bus-acpi
F: Documentation/ABI/testing/configfs-acpi
F: drivers/pci/*acpi*
F: drivers/pci/*/*acpi*
-F: drivers/pci/*/*/*acpi*
F: tools/power/acpi/
ACPI APEI
@@ -1251,7 +1250,7 @@ N: meson
ARM/Annapurna Labs ALPINE ARCHITECTURE
M: Tsahee Zidenberg <tsahee@annapurnalabs.com>
-M: Antoine Tenart <antoine.tenart@free-electrons.com>
+M: Antoine Tenart <antoine.tenart@bootlin.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-alpine/
@@ -2956,7 +2955,6 @@ F: include/linux/bcm963xx_tag.h
BROADCOM BNX2 GIGABIT ETHERNET DRIVER
M: Rasesh Mody <rasesh.mody@cavium.com>
-M: Harish Patil <harish.patil@cavium.com>
M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
S: Supported
@@ -2977,6 +2975,7 @@ F: drivers/scsi/bnx2i/
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
M: Ariel Elior <ariel.elior@cavium.com>
+M: Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
M: everest-linux-l2@cavium.com
L: netdev@vger.kernel.org
S: Supported
@@ -3007,6 +3006,14 @@ S: Supported
F: drivers/gpio/gpio-brcmstb.c
F: Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
+BROADCOM BRCMSTB I2C DRIVER
+M: Kamal Dasu <kdasu.kdev@gmail.com>
+L: linux-i2c@vger.kernel.org
+L: bcm-kernel-feedback-list@broadcom.com
+S: Supported
+F: drivers/i2c/busses/i2c-brcmstb.c
+F: Documentation/devicetree/bindings/i2c/i2c-brcmstb.txt
+
BROADCOM BRCMSTB USB2 and USB3 PHY DRIVER
M: Al Cooper <alcooperx@gmail.com>
L: linux-kernel@vger.kernel.org
@@ -3674,6 +3681,12 @@ S: Maintained
F: Documentation/devicetree/bindings/media/coda.txt
F: drivers/media/platform/coda/
+CODE OF CONDUCT
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+S: Supported
+F: Documentation/process/code-of-conduct.rst
+F: Documentation/process/code-of-conduct-interpretation.rst
+
COMMON CLK FRAMEWORK
M: Michael Turquette <mturquette@baylibre.com>
M: Stephen Boyd <sboyd@kernel.org>
@@ -5470,7 +5483,8 @@ S: Odd Fixes
F: drivers/net/ethernet/agere/
ETHERNET BRIDGE
-M: Stephen Hemminger <stephen@networkplumber.org>
+M: Roopa Prabhu <roopa@cumulusnetworks.com>
+M: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
L: bridge@lists.linux-foundation.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
W: http://www.linuxfoundation.org/en/Net:Bridge
@@ -5625,6 +5639,8 @@ F: lib/fault-inject.c
FBTFT Framebuffer drivers
M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+L: dri-devel@lists.freedesktop.org
+L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/staging/fbtft/
@@ -6060,7 +6076,7 @@ F: Documentation/gcc-plugins.txt
GASKET DRIVER FRAMEWORK
M: Rob Springer <rspringer@google.com>
-M: John Joseph <jnjoseph@google.com>
+M: Todd Poynor <toddpoynor@google.com>
M: Ben Chan <benchan@chromium.org>
S: Maintained
F: drivers/staging/gasket/
@@ -7016,6 +7032,20 @@ F: drivers/crypto/vmx/aes*
F: drivers/crypto/vmx/ghash*
F: drivers/crypto/vmx/ppc-xlate.pl
+IBM Power PCI Hotplug Driver for RPA-compliant PPC64 platform
+M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+L: linux-pci@vger.kernel.org
+L: linuxppc-dev@lists.ozlabs.org
+S: Supported
+F: drivers/pci/hotplug/rpaphp*
+
+IBM Power IO DLPAR Driver for RPA-compliant PPC64 platform
+M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
+L: linux-pci@vger.kernel.org
+L: linuxppc-dev@lists.ozlabs.org
+S: Supported
+F: drivers/pci/hotplug/rpadlpar*
+
IBM ServeRAID RAID DRIVER
S: Orphan
F: drivers/scsi/ips.*
@@ -8300,7 +8330,7 @@ F: include/linux/libata.h
F: Documentation/devicetree/bindings/ata/
LIBLOCKDEP
-M: Sasha Levin <alexander.levin@verizon.com>
+M: Sasha Levin <alexander.levin@microsoft.com>
S: Maintained
F: tools/lib/lockdep/
@@ -8582,7 +8612,6 @@ F: include/linux/spinlock*.h
F: arch/*/include/asm/spinlock*.h
F: include/linux/rwlock*.h
F: include/linux/mutex*.h
-F: arch/*/include/asm/mutex*.h
F: include/linux/rwsem*.h
F: arch/*/include/asm/rwsem.h
F: include/linux/seqlock.h
@@ -9642,7 +9671,8 @@ MIPS/LOONGSON2 ARCHITECTURE
M: Jiaxun Yang <jiaxun.yang@flygoat.com>
L: linux-mips@linux-mips.org
S: Maintained
-F: arch/mips/loongson64/*{2e/2f}*
+F: arch/mips/loongson64/fuloong-2e/
+F: arch/mips/loongson64/lemote-2f/
F: arch/mips/include/asm/mach-loongson64/
F: drivers/*/*loongson2*
F: drivers/*/*/*loongson2*
@@ -9682,6 +9712,19 @@ S: Maintained
F: arch/arm/boot/dts/mmp*
F: arch/arm/mach-mmp/
+MMU GATHER AND TLB INVALIDATION
+M: Will Deacon <will.deacon@arm.com>
+M: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
+M: Andrew Morton <akpm@linux-foundation.org>
+M: Nick Piggin <npiggin@gmail.com>
+M: Peter Zijlstra <peterz@infradead.org>
+L: linux-arch@vger.kernel.org
+L: linux-mm@kvack.org
+S: Maintained
+F: arch/*/include/asm/tlb.h
+F: include/asm-generic/tlb.h
+F: mm/mmu_gather.c
+
MN88472 MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
L: linux-media@vger.kernel.org
@@ -9700,13 +9743,6 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
S: Maintained
F: drivers/media/dvb-frontends/mn88473*
-PCI DRIVER FOR MOBIVEIL PCIE IP
-M: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
-L: linux-pci@vger.kernel.org
-S: Supported
-F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
-F: drivers/pci/controller/pcie-mobiveil.c
-
MODULE SUPPORT
M: Jessica Yu <jeyu@kernel.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
@@ -9856,7 +9892,7 @@ M: Peter Rosin <peda@axentia.se>
S: Maintained
F: Documentation/ABI/testing/sysfs-class-mux*
F: Documentation/devicetree/bindings/mux/
-F: include/linux/dt-bindings/mux/
+F: include/dt-bindings/mux/
F: include/linux/mux/
F: drivers/mux/
@@ -10113,7 +10149,6 @@ L: netdev@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git
S: Maintained
-F: net/core/flow.c
F: net/xfrm/
F: net/key/
F: net/ipv4/xfrm*
@@ -10933,7 +10968,7 @@ M: Willy Tarreau <willy@haproxy.com>
M: Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
S: Odd Fixes
F: Documentation/auxdisplay/lcd-panel-cgram.txt
-F: drivers/misc/panel.c
+F: drivers/auxdisplay/panel.c
PARALLEL PORT SUBSYSTEM
M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
@@ -11121,6 +11156,13 @@ F: include/uapi/linux/switchtec_ioctl.h
F: include/linux/switchtec.h
F: drivers/ntb/hw/mscc/
+PCI DRIVER FOR MOBIVEIL PCIE IP
+M: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+L: linux-pci@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
+F: drivers/pci/controller/pcie-mobiveil.c
+
PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
M: Jason Cooper <jason@lakedaemon.net>
@@ -11154,7 +11196,7 @@ F: drivers/pci/controller/dwc/pci-exynos.c
PCI DRIVER FOR SYNOPSYS DESIGNWARE
M: Jingoo Han <jingoohan1@gmail.com>
-M: Joao Pinto <Joao.Pinto@synopsys.com>
+M: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
L: linux-pci@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -11187,8 +11229,14 @@ F: tools/pci/
PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC
M: Russell Currey <ruscur@russell.cc>
+M: Sam Bobroff <sbobroff@linux.ibm.com>
+M: Oliver O'Halloran <oohall@gmail.com>
L: linuxppc-dev@lists.ozlabs.org
S: Supported
+F: Documentation/PCI/pci-error-recovery.txt
+F: drivers/pci/pcie/aer.c
+F: drivers/pci/pcie/dpc.c
+F: drivers/pci/pcie/err.c
F: Documentation/powerpc/eeh-pci-error-recovery.txt
F: arch/powerpc/kernel/eeh*.c
F: arch/powerpc/platforms/*/eeh*.c
@@ -11346,10 +11394,10 @@ S: Maintained
F: drivers/platform/x86/peaq-wmi.c
PER-CPU MEMORY ALLOCATOR
+M: Dennis Zhou <dennis@kernel.org>
M: Tejun Heo <tj@kernel.org>
M: Christoph Lameter <cl@linux.com>
-M: Dennis Zhou <dennisszhou@gmail.com>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/dennis/percpu.git
S: Maintained
F: include/linux/percpu*.h
F: mm/percpu*.c
@@ -11957,7 +12005,7 @@ F: Documentation/scsi/LICENSE.qla4xxx
F: drivers/scsi/qla4xxx/
QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M: Harish Patil <harish.patil@cavium.com>
+M: Shahed Shaikh <Shahed.Shaikh@cavium.com>
M: Manish Chopra <manish.chopra@cavium.com>
M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
@@ -11965,7 +12013,6 @@ S: Supported
F: drivers/net/ethernet/qlogic/qlcnic/
QLOGIC QLGE 10Gb ETHERNET DRIVER
-M: Harish Patil <harish.patil@cavium.com>
M: Manish Chopra <manish.chopra@cavium.com>
M: Dept-GELinuxNICDev@cavium.com
L: netdev@vger.kernel.org
@@ -12244,6 +12291,7 @@ F: Documentation/networking/rds.txt
RDT - RESOURCE ALLOCATION
M: Fenghua Yu <fenghua.yu@intel.com>
+M: Reinette Chatre <reinette.chatre@intel.com>
L: linux-kernel@vger.kernel.org
S: Supported
F: arch/x86/kernel/cpu/intel_rdt*
@@ -13040,7 +13088,7 @@ SELINUX SECURITY MODULE
M: Paul Moore <paul@paul-moore.com>
M: Stephen Smalley <sds@tycho.nsa.gov>
M: Eric Paris <eparis@parisplace.org>
-L: selinux@tycho.nsa.gov (moderated for non-subscribers)
+L: selinux@vger.kernel.org
W: https://selinuxproject.org
W: https://github.com/SELinuxProject
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/selinux.git
@@ -13433,9 +13481,8 @@ F: drivers/i2c/busses/i2c-synquacer.c
F: Documentation/devicetree/bindings/i2c/i2c-synquacer.txt
SOCIONEXT UNIPHIER SOUND DRIVER
-M: Katsuhiro Suzuki <suzuki.katsuhiro@socionext.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
-S: Maintained
+S: Orphan
F: sound/soc/uniphier/
SOEKRIS NET48XX LED SUPPORT
@@ -13468,8 +13515,8 @@ L: linux-arm-kernel@lists.infradead.org
S: Maintained
F: Documentation/devicetree/bindings/arm/firmware/sdei.txt
F: drivers/firmware/arm_sdei.c
-F: include/linux/sdei.h
-F: include/uapi/linux/sdei.h
+F: include/linux/arm_sdei.h
+F: include/uapi/linux/arm_sdei.h
SOFTWARE RAID (Multiple Disks) SUPPORT
M: Shaohua Li <shli@kernel.org>
@@ -15373,7 +15420,7 @@ S: Maintained
UVESAFB DRIVER
M: Michal Januszewski <spock@gentoo.org>
L: linux-fbdev@vger.kernel.org
-W: http://dev.gentoo.org/~spock/projects/uvesafb/
+W: https://github.com/mjanusz/v86d
S: Maintained
F: Documentation/fb/uvesafb.txt
F: drivers/video/fbdev/uvesafb.*
@@ -15897,6 +15944,7 @@ F: net/x25/
X86 ARCHITECTURE (32-BIT AND 64-BIT)
M: Thomas Gleixner <tglx@linutronix.de>
M: Ingo Molnar <mingo@redhat.com>
+M: Borislav Petkov <bp@alien8.de>
R: "H. Peter Anvin" <hpa@zytor.com>
M: x86@kernel.org
L: linux-kernel@vger.kernel.org
@@ -15925,6 +15973,15 @@ M: Borislav Petkov <bp@alien8.de>
S: Maintained
F: arch/x86/kernel/cpu/microcode/*
+X86 MM
+M: Dave Hansen <dave.hansen@linux.intel.com>
+M: Andy Lutomirski <luto@kernel.org>
+M: Peter Zijlstra <peterz@infradead.org>
+L: linux-kernel@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/mm
+S: Maintained
+F: arch/x86/mm/
+
X86 PLATFORM DRIVERS
M: Darren Hart <dvhart@infradead.org>
M: Andy Shevchenko <andy@infradead.org>
diff --git a/Makefile b/Makefile
index 19948e556941..69fa5c0310d8 100644
--- a/Makefile
+++ b/Makefile
@@ -2,8 +2,8 @@
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 0
-EXTRAVERSION = -rc2
-NAME = Merciless Moray
+EXTRAVERSION =
+NAME = "People's Front"
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
@@ -299,19 +299,7 @@ KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION
-# SUBARCH tells the usermode build what the underlying arch is. That is set
-# first, and if a usermode build is happening, the "ARCH=um" on the command
-# line overrides the setting of ARCH below. If a native build is happening,
-# then ARCH is assigned, getting whatever value it gets normally, and
-# SUBARCH is subsequently ignored.
-
-SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
- -e s/sun4u/sparc64/ \
- -e s/arm.*/arm/ -e s/sa110/arm/ \
- -e s/s390x/s390/ -e s/parisc64/parisc/ \
- -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
- -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
- -e s/riscv.*/riscv/)
+include scripts/subarch.include
# Cross compiling and selecting different set of gcc/bin-utils
# ---------------------------------------------------------------------------
@@ -495,13 +483,15 @@ endif
ifeq ($(cc-name),clang)
ifneq ($(CROSS_COMPILE),)
CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%))
-GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
+GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
+CLANG_PREFIX := --prefix=$(GCC_TOOLCHAIN_DIR)
+GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
endif
ifneq ($(GCC_TOOLCHAIN),)
CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
endif
-KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
-KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
+KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
endif
@@ -616,6 +606,11 @@ CFLAGS_GCOV := -fprofile-arcs -ftest-coverage \
$(call cc-disable-warning,maybe-uninitialized,)
export CFLAGS_GCOV
+# The arch Makefiles can override CC_FLAGS_FTRACE. We may also append it later.
+ifdef CONFIG_FUNCTION_TRACER
+ CC_FLAGS_FTRACE := -pg
+endif
+
# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
# values of the respective KBUILD_* variables
ARCH_CPPFLAGS :=
@@ -755,9 +750,6 @@ KBUILD_CFLAGS += $(call cc-option, -femit-struct-debug-baseonly) \
endif
ifdef CONFIG_FUNCTION_TRACER
-ifndef CC_FLAGS_FTRACE
-CC_FLAGS_FTRACE := -pg
-endif
ifdef CONFIG_FTRACE_MCOUNT_RECORD
# gcc 5 supports generating the mcount tables directly
ifeq ($(call cc-option-yn,-mrecord-mcount),y)
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 3d9bdecfa52d..e98c6b8e6186 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -149,7 +149,7 @@ config ARC_CPU_770
Support for ARC770 core introduced with Rel 4.10 (Summer 2011)
This core has a bunch of cool new features:
-MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4)
- Shared Address Spaces (for sharing TLB entires in MMU)
+ Shared Address Spaces (for sharing TLB entries in MMU)
-Caches: New Prog Model, Region Flush
-Insns: endian swap, load-locked/store-conditional, time-stamp-ctr
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 99cce77ab98f..644815c0516e 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -6,33 +6,11 @@
# published by the Free Software Foundation.
#
-ifeq ($(CROSS_COMPILE),)
-ifndef CONFIG_CPU_BIG_ENDIAN
-CROSS_COMPILE := arc-linux-
-else
-CROSS_COMPILE := arceb-linux-
-endif
-endif
-
KBUILD_DEFCONFIG := nsim_700_defconfig
cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
-cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs
-
-is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
-
-ifdef CONFIG_ISA_ARCOMPACT
-ifeq ($(is_700), 0)
- $(error Toolchain not configured for ARCompact builds)
-endif
-endif
-
-ifdef CONFIG_ISA_ARCV2
-ifeq ($(is_700), 1)
- $(error Toolchain not configured for ARCv2 builds)
-endif
-endif
+cflags-$(CONFIG_ISA_ARCV2) += -mcpu=hs38
ifdef CONFIG_ARC_CURR_IN_REG
# For a global register definition, make sure it gets passed to every file
@@ -79,7 +57,7 @@ cflags-$(disable_small_data) += -mno-sdata -fcall-used-gp
cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian
ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
-LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
+LIBGCC = $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
# Modules with short calls might break for calls into builtin-kernel
KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 4674541eba3f..8ce6e7235915 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -241,6 +241,26 @@ int copy_thread(unsigned long clone_flags,
task_thread_info(current)->thr_ptr;
}
+
+ /*
+ * setup usermode thread pointer #1:
+ * when child is picked by scheduler, __switch_to() uses @c_callee to
+ * populate usermode callee regs: this works (despite being in a kernel
+ * function) since special return path for child @ret_from_fork()
+ * ensures those regs are not clobbered all the way to RTIE to usermode
+ */
+ c_callee->r25 = task_thread_info(p)->thr_ptr;
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ /*
+ * setup usermode thread pointer #2:
+ * however for this special use of r25 in kernel, __switch_to() sets
+ * r25 for kernel needs and only in the final return path is usermode
+ * r25 setup, from pt_regs->user_r25. So set that up as well
+ */
+ c_regs->user_r25 = c_callee->r25;
+#endif
+
return 0;
}
diff --git a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
index b10dccd0958f..3b1baa8605a7 100644
--- a/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_ptc_ek.dts
@@ -11,6 +11,7 @@
#include "sama5d2-pinfunc.h"
#include <dt-bindings/mfd/atmel-flexcom.h>
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/at91.h>
/ {
model = "Atmel SAMA5D2 PTC EK";
@@ -299,6 +300,7 @@
<PIN_PA30__NWE_NANDWE>,
<PIN_PB2__NRD_NANDOE>;
bias-pull-up;
+ atmel,drive-strength = <ATMEL_PIO_DRVSTR_ME>;
};
ale_cle_rdy_cs {
diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi
index 43ee992ccdcf..6df61518776f 100644
--- a/arch/arm/boot/dts/bcm63138.dtsi
+++ b/arch/arm/boot/dts/bcm63138.dtsi
@@ -106,21 +106,23 @@
global_timer: timer@1e200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0x1e200 0x20>;
- interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
clocks = <&axi_clk>;
};
local_timer: local-timer@1e600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0x1e600 0x20>;
- interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
+ IRQ_TYPE_EDGE_RISING)>;
clocks = <&axi_clk>;
};
twd_watchdog: watchdog@1e620 {
compatible = "arm,cortex-a9-twd-wdt";
reg = <0x1e620 0x20>;
- interrupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
+ IRQ_TYPE_LEVEL_HIGH)>;
};
armpll: armpll {
@@ -158,7 +160,7 @@
serial0: serial@600 {
compatible = "brcm,bcm6345-uart";
reg = <0x600 0x1b>;
- interrupts = <GIC_SPI 32 0>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&periph_clk>;
clock-names = "periph";
status = "disabled";
@@ -167,7 +169,7 @@
serial1: serial@620 {
compatible = "brcm,bcm6345-uart";
reg = <0x620 0x1b>;
- interrupts = <GIC_SPI 33 0>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&periph_clk>;
clock-names = "periph";
status = "disabled";
@@ -180,7 +182,7 @@
reg = <0x2000 0x600>, <0xf0 0x10>;
reg-names = "nand", "nand-int-base";
status = "disabled";
- interrupts = <GIC_SPI 38 0>;
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "nand";
};
diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
index 7423d462d1e4..50dde84b72ed 100644
--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
@@ -123,6 +123,17 @@
};
};
+&cpu0 {
+ /* CPU rated to 1GHz, not 1.2GHz as per the default settings */
+ operating-points = <
+ /* kHz uV */
+ 166666 850000
+ 400000 900000
+ 800000 1050000
+ 1000000 1200000
+ >;
+};
+
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
diff --git a/arch/arm/boot/dts/sama5d3_emac.dtsi b/arch/arm/boot/dts/sama5d3_emac.dtsi
index 7cb235ef0fb6..6e9e1c2f9def 100644
--- a/arch/arm/boot/dts/sama5d3_emac.dtsi
+++ b/arch/arm/boot/dts/sama5d3_emac.dtsi
@@ -41,7 +41,7 @@
};
macb1: ethernet@f802c000 {
- compatible = "cdns,at91sam9260-macb", "cdns,macb";
+ compatible = "atmel,sama5d3-macb", "cdns,at91sam9260-macb", "cdns,macb";
reg = <0xf802c000 0x100>;
interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/stm32mp157c.dtsi b/arch/arm/boot/dts/stm32mp157c.dtsi
index 661be948ab74..185541a5b69f 100644
--- a/arch/arm/boot/dts/stm32mp157c.dtsi
+++ b/arch/arm/boot/dts/stm32mp157c.dtsi
@@ -1078,8 +1078,8 @@
interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&rcc SPI6_K>;
resets = <&rcc SPI6_R>;
- dmas = <&mdma1 34 0x0 0x40008 0x0 0x0 0>,
- <&mdma1 35 0x0 0x40002 0x0 0x0 0>;
+ dmas = <&mdma1 34 0x0 0x40008 0x0 0x0>,
+ <&mdma1 35 0x0 0x40002 0x0 0x0>;
dma-names = "rx", "tx";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sun8i-r40.dtsi b/arch/arm/boot/dts/sun8i-r40.dtsi
index ffd9f00f74a4..5f547c161baf 100644
--- a/arch/arm/boot/dts/sun8i-r40.dtsi
+++ b/arch/arm/boot/dts/sun8i-r40.dtsi
@@ -800,8 +800,7 @@
};
hdmi_phy: hdmi-phy@1ef0000 {
- compatible = "allwinner,sun8i-r40-hdmi-phy",
- "allwinner,sun50i-a64-hdmi-phy";
+ compatible = "allwinner,sun8i-r40-hdmi-phy";
reg = <0x01ef0000 0x10000>;
clocks = <&ccu CLK_BUS_HDMI1>, <&ccu CLK_HDMI_SLOW>,
<&ccu 7>, <&ccu 16>;
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 2cfbc531f63b..6b51826ab3d1 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -28,7 +28,6 @@
#include <asm/byteorder.h>
#include <asm/memory.h>
#include <asm-generic/pci_iomap.h>
-#include <xen/xen.h>
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
@@ -459,20 +458,6 @@ extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
#include <asm-generic/io.h>
-/*
- * can the hardware map this into one segment or not, given no other
- * constraints.
- */
-#define BIOVEC_MERGEABLE(vec1, vec2) \
- ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
-
-struct bio_vec;
-extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
- const struct bio_vec *vec2);
-#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
- (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
- (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
-
#ifdef CONFIG_MMU
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 3ab8b3781bfe..2d43dca29c72 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -161,6 +161,7 @@
#else
#define VTTBR_X (5 - KVM_T0SZ)
#endif
+#define VTTBR_CNP_BIT _AC(1, UL)
#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
#define VTTBR_VMID_SHIFT _AC(48, ULL)
#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 79906cecb091..3ad482d2f1eb 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -223,7 +223,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events);
#define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 265ea9cf7df7..847f01fa429d 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -355,6 +355,11 @@ static inline int hyp_map_aux_data(void)
#define kvm_phys_to_vttbr(addr) (addr)
+static inline bool kvm_cpu_has_cnp(void)
+{
+ return false;
+}
+
#endif /* !__ASSEMBLY__ */
#endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/kernel/vmlinux.lds.h b/arch/arm/kernel/vmlinux.lds.h
index ae5fdff18406..8247bc15addc 100644
--- a/arch/arm/kernel/vmlinux.lds.h
+++ b/arch/arm/kernel/vmlinux.lds.h
@@ -49,6 +49,8 @@
#define ARM_DISCARD \
*(.ARM.exidx.exit.text) \
*(.ARM.extab.exit.text) \
+ *(.ARM.exidx.text.exit) \
+ *(.ARM.extab.text.exit) \
ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) \
ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) \
ARM_EXIT_DISCARD(EXIT_TEXT) \
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 450c7a4fbc8a..cb094e55dc5f 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -478,15 +478,15 @@ static const struct coproc_reg cp15_regs[] = {
/* ICC_SGI1R */
{ CRm64(12), Op1( 0), is64, access_gic_sgi},
- /* ICC_ASGI1R */
- { CRm64(12), Op1( 1), is64, access_gic_sgi},
- /* ICC_SGI0R */
- { CRm64(12), Op1( 2), is64, access_gic_sgi},
/* VBAR: swapped by interrupt.S. */
{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
NULL, reset_val, c12_VBAR, 0x00000000 },
+ /* ICC_ASGI1R */
+ { CRm64(12), Op1( 1), is64, access_gic_sgi},
+ /* ICC_SGI0R */
+ { CRm64(12), Op1( 2), is64, access_gic_sgi},
/* ICC_SRE */
{ CRn(12), CRm(12), Op1( 0), Op2(5), is32, access_gic_sre },
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index fc91205ff46c..5bf9443cfbaa 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -473,7 +473,7 @@ void pci_ioremap_set_mem_type(int mem_type)
int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
- BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
+ BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);
return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
PCI_IO_VIRT_BASE + offset + SZ_64K,
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index fbc74b5fa3ed..8edf93b4490f 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -413,3 +413,4 @@
396 common pkey_free sys_pkey_free
397 common statx sys_statx
398 common rseq sys_rseq
+399 common io_pgetevents sys_io_pgetevents
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1b1a0e95c751..a8ae30fab508 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -75,6 +75,7 @@ config ARM64
select CLONE_BACKWARDS
select COMMON_CLK
select CPU_PM if (SUSPEND || CPU_IDLE)
+ select CRC32
select DCACHE_WORD_ACCESS
select DMA_DIRECT_OPS
select EDAC_SUPPORT
@@ -142,6 +143,7 @@ config ARM64
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RCU_TABLE_FREE
+ select HAVE_RCU_TABLE_INVALIDATE
select HAVE_RSEQ
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
@@ -479,6 +481,19 @@ config ARM64_ERRATUM_1024718
If unsure, say Y.
+config ARM64_ERRATUM_1188873
+ bool "Cortex-A76: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
+ default y
+ select ARM_ARCH_TIMER_OOL_WORKAROUND
+ help
+	  This option adds a workaround for ARM Cortex-A76 erratum 1188873.
+
+ Affected Cortex-A76 cores (r0p0, r1p0, r2p0) could cause
+ register corruption when accessing the timer registers from
+ AArch32 userspace.
+
+ If unsure, say Y.
+
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
@@ -769,9 +784,6 @@ source kernel/Kconfig.hz
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y
-config ARCH_HAS_HOLES_MEMORYMODEL
- def_bool y if SPARSEMEM
-
config ARCH_SPARSEMEM_ENABLE
def_bool y
select SPARSEMEM_VMEMMAP_ENABLE
@@ -786,7 +798,7 @@ config ARCH_FLATMEM_ENABLE
def_bool !NUMA
config HAVE_ARCH_PFN_VALID
- def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM
+ def_bool y
config HW_PERF_EVENTS
def_bool y
@@ -1132,6 +1144,20 @@ config ARM64_RAS_EXTN
and access the new registers if the system supports the extension.
Platform RAS features may additionally depend on firmware support.
+config ARM64_CNP
+ bool "Enable support for Common Not Private (CNP) translations"
+ default y
+ depends on ARM64_PAN || !ARM64_SW_TTBR0_PAN
+ help
+ Common Not Private (CNP) allows translation table entries to
+ be shared between different PEs in the same inner shareable
+ domain, so the hardware can use this fact to optimise the
+ caching of such entries in the TLB.
+
+ Selecting this option allows the CNP feature to be detected
+ at runtime, and does not affect PEs that do not implement
+ this feature.
+
endmenu
config ARM64_SVE
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
index ceffc40810ee..48daec7f78ba 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
+++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
@@ -46,6 +46,7 @@
pinctrl-0 = <&mmc0_pins>;
vmmc-supply = <&reg_cldo1>;
cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>;
+ bus-width = <4>;
status = "okay";
};
@@ -56,6 +57,7 @@
vqmmc-supply = <&reg_bldo2>;
non-removable;
cap-mmc-hw-reset;
+ bus-width = <8>;
status = "okay";
};
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 0bcc98dbba56..6142402c2eb4 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -286,12 +286,11 @@ alternative_endif
ldr \rd, [\rn, #MM_CONTEXT_ID]
.endm
/*
- * read_ctr - read CTR_EL0. If the system has mismatched
- * cache line sizes, provide the system wide safe value
- * from arm64_ftr_reg_ctrel0.sys_val
+ * read_ctr - read CTR_EL0. If the system has mismatched register fields,
+ * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
*/
.macro read_ctr, reg
-alternative_if_not ARM64_MISMATCHED_CACHE_LINE_SIZE
+alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
mrs \reg, ctr_el0 // read CTR
nop
alternative_else
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 5ee5bca8c24b..13dd42c3ad4e 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -40,6 +40,15 @@
#define L1_CACHE_SHIFT (6)
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#define CLIDR_LOUU_SHIFT 27
+#define CLIDR_LOC_SHIFT 24
+#define CLIDR_LOUIS_SHIFT 21
+
+#define CLIDR_LOUU(clidr) (((clidr) >> CLIDR_LOUU_SHIFT) & 0x7)
+#define CLIDR_LOC(clidr) (((clidr) >> CLIDR_LOC_SHIFT) & 0x7)
+#define CLIDR_LOUIS(clidr) (((clidr) >> CLIDR_LOUIS_SHIFT) & 0x7)
+
/*
* Memory returned by kmalloc() may be used for DMA, so we must make
* sure that all such allocations are cache aligned. Otherwise,
@@ -84,6 +93,37 @@ static inline int cache_line_size(void)
return cwg ? 4 << cwg : ARCH_DMA_MINALIGN;
}
+/*
+ * Read the effective value of CTR_EL0.
+ *
+ * According to ARM ARM for ARMv8-A (ARM DDI 0487C.a),
+ * section D10.2.33 "CTR_EL0, Cache Type Register":
+ *
+ * CTR_EL0.IDC reports the data cache clean requirements for
+ * instruction to data coherence.
+ *
+ * 0 - dcache clean to PoU is required unless:
+ * (CLIDR_EL1.LoC == 0) || (CLIDR_EL1.LoUIS == 0 && CLIDR_EL1.LoUU == 0)
+ * 1 - dcache clean to PoU is not required for i-to-d coherence.
+ *
+ * This routine provides the CTR_EL0 with the IDC field updated to the
+ * effective state.
+ */
+static inline u32 __attribute_const__ read_cpuid_effective_cachetype(void)
+{
+ u32 ctr = read_cpuid_cachetype();
+
+ if (!(ctr & BIT(CTR_IDC_SHIFT))) {
+ u64 clidr = read_sysreg(clidr_el1);
+
+ if (CLIDR_LOC(clidr) == 0 ||
+ (CLIDR_LOUIS(clidr) == 0 && CLIDR_LOUU(clidr) == 0))
+ ctr |= BIT(CTR_IDC_SHIFT);
+ }
+
+ return ctr;
+}
+
#endif /* __ASSEMBLY__ */
#endif
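
The new read_cpuid_effective_cachetype() can be exercised in isolation. Below is a minimal userspace sketch of the same logic, assuming CTR_EL0.IDC sits at bit 28 and feeding the register values in as plain parameters rather than mrs reads.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1UL << (n))
#define CTR_IDC_SHIFT		28	/* assumed CTR_EL0.IDC position */

#define CLIDR_LOUU_SHIFT	27
#define CLIDR_LOC_SHIFT		24
#define CLIDR_LOUIS_SHIFT	21

#define CLIDR_LOUU(clidr)	(((clidr) >> CLIDR_LOUU_SHIFT) & 0x7)
#define CLIDR_LOC(clidr)	(((clidr) >> CLIDR_LOC_SHIFT) & 0x7)
#define CLIDR_LOUIS(clidr)	(((clidr) >> CLIDR_LOUIS_SHIFT) & 0x7)

/* Mirrors read_cpuid_effective_cachetype(): upgrade IDC when CLIDR
 * proves no d-cache clean to PoU is needed for i-to-d coherence. */
static uint32_t effective_ctr(uint32_t ctr, uint64_t clidr)
{
	if (!(ctr & BIT(CTR_IDC_SHIFT)) &&
	    (CLIDR_LOC(clidr) == 0 ||
	     (CLIDR_LOUIS(clidr) == 0 && CLIDR_LOUU(clidr) == 0)))
		ctr |= BIT(CTR_IDC_SHIFT);

	return ctr;
}

int main(void)
{
	/* raw IDC = 0, but CLIDR reports LoUIS == LoUU == 0: IDC upgraded */
	printf("%d\n", !!(effective_ctr(0, 0x02000000) & BIT(CTR_IDC_SHIFT)));
	return 0;
}
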
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 1a037b94eba1..cee28a05ee98 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -159,6 +159,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
}
#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
+#define COMPAT_MINSIGSTKSZ 2048
static inline void __user *arch_compat_alloc_user_space(long len)
{
diff --git a/arch/arm64/include/asm/compiler.h b/arch/arm64/include/asm/compiler.h
deleted file mode 100644
index ee35fd0f2236..000000000000
--- a/arch/arm64/include/asm/compiler.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Based on arch/arm/include/asm/compiler.h
- *
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_COMPILER_H
-#define __ASM_COMPILER_H
-
-/*
- * This is used to ensure the compiler did actually allocate the register we
- * asked it for some inline assembly sequences. Apparently we can't trust the
- * compiler from one version to another so a bit of paranoia won't hurt. This
- * string is meant to be concatenated with the inline asm string and will
- * cause compilation to stop on mismatch. (for details, see gcc PR 15089)
- */
-#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
-
-#endif /* __ASM_COMPILER_H */
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index ae1f70450fb2..6e2d254c09eb 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -33,7 +33,7 @@
#define ARM64_WORKAROUND_CAVIUM_27456 12
#define ARM64_HAS_32BIT_EL0 13
#define ARM64_HARDEN_EL2_VECTORS 14
-#define ARM64_MISMATCHED_CACHE_LINE_SIZE 15
+#define ARM64_HAS_CNP 15
#define ARM64_HAS_NO_FPSIMD 16
#define ARM64_WORKAROUND_REPEAT_TLBI 17
#define ARM64_WORKAROUND_QCOM_FALKOR_E1003 18
@@ -51,7 +51,10 @@
#define ARM64_SSBD 30
#define ARM64_MISMATCHED_CACHE_TYPE 31
#define ARM64_HAS_STAGE2_FWB 32
+#define ARM64_HAS_CRC32 33
+#define ARM64_SSBS 34
+#define ARM64_WORKAROUND_1188873 35
-#define ARM64_NCAPS 33
+#define ARM64_NCAPS 36
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 1717ba1db35d..6db48d90ad63 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -262,7 +262,7 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
/*
* CPU feature detected at boot time based on system-wide value of a
* feature. It is safe for a late CPU to have this feature even though
- * the system hasn't enabled it, although the featuer will not be used
+ * the system hasn't enabled it, although the feature will not be used
* by Linux in this case. If the system has enabled this feature already,
* then every late CPU must have it.
*/
@@ -508,6 +508,12 @@ static inline bool system_supports_sve(void)
cpus_have_const_cap(ARM64_SVE);
}
+static inline bool system_supports_cnp(void)
+{
+ return IS_ENABLED(CONFIG_ARM64_CNP) &&
+ cpus_have_const_cap(ARM64_HAS_CNP);
+}
+
#define ARM64_SSBD_UNKNOWN -1
#define ARM64_SSBD_FORCE_DISABLE 0
#define ARM64_SSBD_KERNEL 1
@@ -530,6 +536,7 @@ void arm64_set_ssbd_mitigation(bool state);
static inline void arm64_set_ssbd_mitigation(bool state) {}
#endif
+extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index ea690b3562af..12f93e4d2452 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -86,6 +86,7 @@
#define ARM_CPU_PART_CORTEX_A75 0xD0A
#define ARM_CPU_PART_CORTEX_A35 0xD04
#define ARM_CPU_PART_CORTEX_A55 0xD05
+#define ARM_CPU_PART_CORTEX_A76 0xD0B
#define APM_CPU_PART_POTENZA 0x000
@@ -110,6 +111,7 @@
#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
#define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35)
#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
+#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index 22e4c83de5a5..8d91f2233135 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -36,11 +36,8 @@ static inline unsigned long local_daif_save(void)
{
unsigned long flags;
- asm volatile(
- "mrs %0, daif // local_daif_save\n"
- : "=r" (flags)
- :
- : "memory");
+ flags = arch_local_save_flags();
+
local_daif_mask();
return flags;
@@ -60,11 +57,9 @@ static inline void local_daif_restore(unsigned long flags)
{
if (!arch_irqs_disabled_flags(flags))
trace_hardirqs_on();
- asm volatile(
- "msr daif, %0 // local_daif_restore"
- :
- : "r" (flags)
- : "memory");
+
+ arch_local_irq_restore(flags);
+
if (arch_irqs_disabled_flags(flags))
trace_hardirqs_off();
}
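
The rewritten helpers above reuse arch_local_save_flags()/arch_local_irq_restore() instead of open-coded mrs/msr. A toy model of the resulting save/mask/restore flow, with DAIF reduced to a global variable; the 0x3c0 constant is the D|A|I|F bit mask and is an assumption spelled out here, not taken from the patch.

#include <stdio.h>

static unsigned long daif;	/* stand-in for the PSTATE.DAIF bits */

static unsigned long arch_local_save_flags(void) { return daif; }
static void arch_local_irq_restore(unsigned long flags) { daif = flags; }
static void local_daif_mask(void) { daif |= 0x3c0; /* set D, A, I, F */ }

static unsigned long local_daif_save(void)
{
	unsigned long flags = arch_local_save_flags();

	local_daif_mask();
	return flags;
}

int main(void)
{
	unsigned long flags;

	daif = 0x0;			/* everything unmasked */
	flags = local_daif_save();
	/* ... critical section runs with all exceptions masked ... */
	arch_local_irq_restore(flags);
	printf("daif = 0x%lx\n", daif);	/* back to 0x0 */
	return 0;
}
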
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index ce70c3ffb993..676de2ec1762 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -137,6 +137,8 @@
#define ESR_ELx_CV (UL(1) << 24)
#define ESR_ELx_COND_SHIFT (20)
#define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT)
+#define ESR_ELx_WFx_ISS_TI (UL(1) << 0)
+#define ESR_ELx_WFx_ISS_WFI (UL(0) << 0)
#define ESR_ELx_WFx_ISS_WFE (UL(1) << 0)
#define ESR_ELx_xVC_IMM_MASK ((1UL << 16) - 1)
@@ -148,6 +150,9 @@
#define DISR_EL1_ESR_MASK (ESR_ELx_AET | ESR_ELx_EA | ESR_ELx_FSC)
/* ESR value templates for specific events */
+#define ESR_ELx_WFx_MASK (ESR_ELx_EC_MASK | ESR_ELx_WFx_ISS_TI)
+#define ESR_ELx_WFx_WFI_VAL ((ESR_ELx_EC_WFx << ESR_ELx_EC_SHIFT) | \
+ ESR_ELx_WFx_ISS_WFI)
/* BRK instruction trap from AArch64 state */
#define ESR_ELx_VAL_BRK64(imm) \
@@ -187,6 +192,8 @@
#define ESR_ELx_SYS64_ISS_SYS_OP_MASK (ESR_ELx_SYS64_ISS_SYS_MASK | \
ESR_ELx_SYS64_ISS_DIR_MASK)
+#define ESR_ELx_SYS64_ISS_RT(esr) \
+ (((esr) & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT)
/*
* User space cache operations have the following sysreg encoding
* in System instructions.
@@ -206,6 +213,18 @@
#define ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL \
(ESR_ELx_SYS64_ISS_SYS_VAL(1, 3, 1, 7, 0) | \
ESR_ELx_SYS64_ISS_DIR_WRITE)
+/*
+ * User space MRS operations which are supported for emulation
+ * have the following sysreg encoding in System instructions.
+ * op0 = 3, op1 = 0, crn = 0, {crm = 0, 4-7}, READ (L = 1)
+ */
+#define ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK (ESR_ELx_SYS64_ISS_OP0_MASK | \
+ ESR_ELx_SYS64_ISS_OP1_MASK | \
+ ESR_ELx_SYS64_ISS_CRN_MASK | \
+ ESR_ELx_SYS64_ISS_DIR_MASK)
+#define ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL \
+ (ESR_ELx_SYS64_ISS_SYS_VAL(3, 0, 0, 0, 0) | \
+ ESR_ELx_SYS64_ISS_DIR_READ)
#define ESR_ELx_SYS64_ISS_SYS_CTR ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 1, 0, 0)
#define ESR_ELx_SYS64_ISS_SYS_CTR_READ (ESR_ELx_SYS64_ISS_SYS_CTR | \
@@ -249,6 +268,64 @@
#define ESR_ELx_FP_EXC_TFV (UL(1) << 23)
+/*
+ * ISS field definitions for CP15 accesses
+ */
+#define ESR_ELx_CP15_32_ISS_DIR_MASK 0x1
+#define ESR_ELx_CP15_32_ISS_DIR_READ 0x1
+#define ESR_ELx_CP15_32_ISS_DIR_WRITE 0x0
+
+#define ESR_ELx_CP15_32_ISS_RT_SHIFT 5
+#define ESR_ELx_CP15_32_ISS_RT_MASK (UL(0x1f) << ESR_ELx_CP15_32_ISS_RT_SHIFT)
+#define ESR_ELx_CP15_32_ISS_CRM_SHIFT 1
+#define ESR_ELx_CP15_32_ISS_CRM_MASK (UL(0xf) << ESR_ELx_CP15_32_ISS_CRM_SHIFT)
+#define ESR_ELx_CP15_32_ISS_CRN_SHIFT 10
+#define ESR_ELx_CP15_32_ISS_CRN_MASK (UL(0xf) << ESR_ELx_CP15_32_ISS_CRN_SHIFT)
+#define ESR_ELx_CP15_32_ISS_OP1_SHIFT 14
+#define ESR_ELx_CP15_32_ISS_OP1_MASK (UL(0x7) << ESR_ELx_CP15_32_ISS_OP1_SHIFT)
+#define ESR_ELx_CP15_32_ISS_OP2_SHIFT 17
+#define ESR_ELx_CP15_32_ISS_OP2_MASK (UL(0x7) << ESR_ELx_CP15_32_ISS_OP2_SHIFT)
+
+#define ESR_ELx_CP15_32_ISS_SYS_MASK (ESR_ELx_CP15_32_ISS_OP1_MASK | \
+ ESR_ELx_CP15_32_ISS_OP2_MASK | \
+ ESR_ELx_CP15_32_ISS_CRN_MASK | \
+ ESR_ELx_CP15_32_ISS_CRM_MASK | \
+ ESR_ELx_CP15_32_ISS_DIR_MASK)
+#define ESR_ELx_CP15_32_ISS_SYS_VAL(op1, op2, crn, crm) \
+ (((op1) << ESR_ELx_CP15_32_ISS_OP1_SHIFT) | \
+ ((op2) << ESR_ELx_CP15_32_ISS_OP2_SHIFT) | \
+ ((crn) << ESR_ELx_CP15_32_ISS_CRN_SHIFT) | \
+ ((crm) << ESR_ELx_CP15_32_ISS_CRM_SHIFT))
+
+#define ESR_ELx_CP15_64_ISS_DIR_MASK 0x1
+#define ESR_ELx_CP15_64_ISS_DIR_READ 0x1
+#define ESR_ELx_CP15_64_ISS_DIR_WRITE 0x0
+
+#define ESR_ELx_CP15_64_ISS_RT_SHIFT 5
+#define ESR_ELx_CP15_64_ISS_RT_MASK (UL(0x1f) << ESR_ELx_CP15_64_ISS_RT_SHIFT)
+
+#define ESR_ELx_CP15_64_ISS_RT2_SHIFT 10
+#define ESR_ELx_CP15_64_ISS_RT2_MASK (UL(0x1f) << ESR_ELx_CP15_64_ISS_RT2_SHIFT)
+
+#define ESR_ELx_CP15_64_ISS_OP1_SHIFT 16
+#define ESR_ELx_CP15_64_ISS_OP1_MASK (UL(0xf) << ESR_ELx_CP15_64_ISS_OP1_SHIFT)
+#define ESR_ELx_CP15_64_ISS_CRM_SHIFT 1
+#define ESR_ELx_CP15_64_ISS_CRM_MASK (UL(0xf) << ESR_ELx_CP15_64_ISS_CRM_SHIFT)
+
+#define ESR_ELx_CP15_64_ISS_SYS_VAL(op1, crm) \
+ (((op1) << ESR_ELx_CP15_64_ISS_OP1_SHIFT) | \
+ ((crm) << ESR_ELx_CP15_64_ISS_CRM_SHIFT))
+
+#define ESR_ELx_CP15_64_ISS_SYS_MASK (ESR_ELx_CP15_64_ISS_OP1_MASK | \
+ ESR_ELx_CP15_64_ISS_CRM_MASK | \
+ ESR_ELx_CP15_64_ISS_DIR_MASK)
+
+#define ESR_ELx_CP15_64_ISS_SYS_CNTVCT (ESR_ELx_CP15_64_ISS_SYS_VAL(1, 14) | \
+ ESR_ELx_CP15_64_ISS_DIR_READ)
+
+#define ESR_ELx_CP15_32_ISS_SYS_CNTFRQ (ESR_ELx_CP15_32_ISS_SYS_VAL(0, 0, 14, 0) |\
+ ESR_ELx_CP15_32_ISS_DIR_READ)
+
#ifndef __ASSEMBLY__
#include <asm/types.h>
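
The new ISS accessors above are all shift-and-mask. A self-contained sketch that decodes a hand-built 64-bit CP15 access ISS with the same definitions; the sample value is synthetic, assembled with the same shifts, not captured from a real trap.

#include <stdint.h>
#include <stdio.h>

#define UL(x)	((unsigned long)(x))

#define ESR_ELx_CP15_64_ISS_DIR_MASK	0x1
#define ESR_ELx_CP15_64_ISS_RT_SHIFT	5
#define ESR_ELx_CP15_64_ISS_RT_MASK	(UL(0x1f) << ESR_ELx_CP15_64_ISS_RT_SHIFT)
#define ESR_ELx_CP15_64_ISS_RT2_SHIFT	10
#define ESR_ELx_CP15_64_ISS_RT2_MASK	(UL(0x1f) << ESR_ELx_CP15_64_ISS_RT2_SHIFT)
#define ESR_ELx_CP15_64_ISS_OP1_SHIFT	16
#define ESR_ELx_CP15_64_ISS_OP1_MASK	(UL(0xf) << ESR_ELx_CP15_64_ISS_OP1_SHIFT)
#define ESR_ELx_CP15_64_ISS_CRM_SHIFT	1
#define ESR_ELx_CP15_64_ISS_CRM_MASK	(UL(0xf) << ESR_ELx_CP15_64_ISS_CRM_SHIFT)

int main(void)
{
	/* synthetic CNTVCT-style read: op1 = 1, crm = 14, rt = 0, rt2 = 1 */
	unsigned long iss = (1UL << ESR_ELx_CP15_64_ISS_OP1_SHIFT) |
			    (14UL << ESR_ELx_CP15_64_ISS_CRM_SHIFT) |
			    (1UL << ESR_ELx_CP15_64_ISS_RT2_SHIFT) | 0x1;

	printf("op1=%lu crm=%lu rt=%lu rt2=%lu read=%lu\n",
	       (iss & ESR_ELx_CP15_64_ISS_OP1_MASK) >> ESR_ELx_CP15_64_ISS_OP1_SHIFT,
	       (iss & ESR_ELx_CP15_64_ISS_CRM_MASK) >> ESR_ELx_CP15_64_ISS_CRM_SHIFT,
	       (iss & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT,
	       (iss & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT,
	       iss & ESR_ELx_CP15_64_ISS_DIR_MASK);
	return 0;
}
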
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 35b2e50f17fb..9f8b915af3a7 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -31,8 +31,6 @@
#include <asm/alternative.h>
#include <asm/cpufeature.h>
-#include <xen/xen.h>
-
/*
* Generic IO read/write. These perform native-endian accesses.
*/
@@ -205,12 +203,5 @@ extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
extern int devmem_is_allowed(unsigned long pfn);
-struct bio_vec;
-extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
- const struct bio_vec *vec2);
-#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
- (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
- (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
-
#endif /* __KERNEL__ */
#endif /* __ASM_IO_H */
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index 1b5e0e843c3a..7e2b3e360086 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -28,7 +28,7 @@
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
- asm goto("1: nop\n\t"
+ asm_volatile_goto("1: nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align 3\n\t"
".quad 1b, %l[l_yes], %c0\n\t"
@@ -42,7 +42,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
- asm goto("1: b %l[l_yes]\n\t"
+ asm_volatile_goto("1: b %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align 3\n\t"
".quad 1b, %l[l_yes], %c0\n\t"
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index a780f6714b44..850e2122d53f 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -97,7 +97,7 @@
+ EARLY_PGDS((vstart), (vend)) /* each PGDIR needs a next level page table */ \
+ EARLY_PUDS((vstart), (vend)) /* each PUD needs a next level page table */ \
+ EARLY_PMDS((vstart), (vend))) /* each PMD needs a next level page table */
-#define SWAPPER_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR + TEXT_OFFSET, _end))
+#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR + TEXT_OFFSET, _end))
#define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index aa45df752a16..b476bc46f0ab 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -175,6 +175,7 @@
#define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS)
#define VTTBR_X (VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA)
+#define VTTBR_CNP_BIT (UL(1))
#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)
#define VTTBR_VMID_SHIFT (UL(48))
#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 6106a85ae0be..21247870def7 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -335,7 +335,7 @@ static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
u32 esr = kvm_vcpu_get_hsr(vcpu);
- return (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+ return ESR_ELx_SYS64_ISS_RT(esr);
}
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f26055f2306e..2842bf149029 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -61,8 +61,7 @@ struct kvm_arch {
u64 vmid_gen;
u32 vmid;
- /* 1-level 2nd stage table and lock */
- spinlock_t pgd_lock;
+ /* 1-level 2nd stage table, protected by kvm->mmu_lock */
pgd_t *pgd;
/* VTTBR value associated with above pgd and vmid */
@@ -357,7 +356,6 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events);
#define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
@@ -389,6 +387,8 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
+void __kvm_enable_ssbs(void);
+
static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
unsigned long hyp_stack_ptr,
unsigned long vector_ptr)
@@ -409,6 +409,15 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
*/
BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
+
+ /*
+ * Disabling SSBD on a non-VHE system requires us to enable SSBS
+ * at EL2.
+ */
+ if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) &&
+ arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
+ kvm_call_hyp(__kvm_enable_ssbs);
+ }
}
static inline bool kvm_arch_check_sve_has_vhe(void)
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index d6fff7de5539..64337afbf124 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -517,5 +517,10 @@ static inline int hyp_map_aux_data(void)
#define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
+static inline bool kvm_cpu_has_cnp(void)
+{
+ return system_supports_cnp();
+}
+
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index dd320df0d026..7689c7aa1d77 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -95,5 +95,8 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
extern void mark_linear_text_alias_ro(void);
+#define INIT_MM_CONTEXT(name) \
+ .pgd = init_pg_dir,
+
#endif /* !__ASSEMBLY__ */
#endif
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 39ec0b8a689e..1e58bf58c22b 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -147,12 +147,25 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp)
extern ttbr_replace_func idmap_cpu_replace_ttbr1;
ttbr_replace_func *replace_phys;
- phys_addr_t pgd_phys = virt_to_phys(pgdp);
+ /* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
+ phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));
+
+ if (system_supports_cnp() && !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
+ /*
+ * cpu_replace_ttbr1() is used when there's a boot CPU
+			 * up (i.e. the cpufeature framework is not up yet) and
+			 * later, only when we enable CNP via cpufeature's
+ * enable() callback.
+ * Also we rely on the cpu_hwcap bit being set before
+ * calling the enable() function.
+ */
+ ttbr1 |= TTBR_CNP_BIT;
+ }
replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
cpu_install_idmap();
- replace_phys(pgd_phys);
+ replace_phys(ttbr1);
cpu_uninstall_idmap();
}
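
A quick sketch of the TTBR1 value being built above: the CNP bit lives in bit 0 of the TTBR, which phys_to_ttbr() keeps clear on 52-bit-PA configurations precisely so it can be ORed in here. The phys_to_ttbr() step is elided and the physical address is a made-up sample.

#include <stdint.h>
#include <stdio.h>

#define TTBR_CNP_BIT	(1UL << 0)

static uint64_t make_ttbr1(uint64_t pgd_phys, int has_cnp)
{
	uint64_t ttbr1 = pgd_phys;	/* phys_to_ttbr() elided */

	if (has_cnp)
		ttbr1 |= TTBR_CNP_BIT;
	return ttbr1;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)make_ttbr1(0x40a73000, 1));
	return 0;
}
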
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 60d02c81a3a2..c88a3cb117a1 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -37,9 +37,7 @@ extern void clear_page(void *to);
typedef struct page *pgtable_t;
-#ifdef CONFIG_HAVE_ARCH_PFN_VALID
extern int pfn_valid(unsigned long);
-#endif
#include <asm/memory.h>
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index fd208eac9f2a..1d7d8da2ef9b 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -211,6 +211,8 @@
#define PHYS_MASK_SHIFT (CONFIG_ARM64_PA_BITS)
#define PHYS_MASK ((UL(1) << PHYS_MASK_SHIFT) - 1)
+#define TTBR_CNP_BIT (UL(1) << 0)
+
/*
* TCR flags.
*/
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 1bdeca8918a6..50b1ef8584c0 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -360,6 +360,7 @@ static inline int pmd_protnone(pmd_t pmd)
#define pmd_present(pmd) pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd) pte_young(pmd_pte(pmd))
+#define pmd_valid(pmd) pte_valid(pmd_pte(pmd))
#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
@@ -428,10 +429,33 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
PUD_TYPE_TABLE)
#endif
+extern pgd_t init_pg_dir[PTRS_PER_PGD];
+extern pgd_t init_pg_end[];
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
+
+extern void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);
+
+static inline bool in_swapper_pgdir(void *addr)
+{
+ return ((unsigned long)addr & PAGE_MASK) ==
+ ((unsigned long)swapper_pg_dir & PAGE_MASK);
+}
+
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
+#ifdef __PAGETABLE_PMD_FOLDED
+ if (in_swapper_pgdir(pmdp)) {
+ set_swapper_pgd((pgd_t *)pmdp, __pgd(pmd_val(pmd)));
+ return;
+ }
+#endif /* __PAGETABLE_PMD_FOLDED */
+
WRITE_ONCE(*pmdp, pmd);
- dsb(ishst);
+
+ if (pmd_valid(pmd))
+ dsb(ishst);
}
static inline void pmd_clear(pmd_t *pmdp)
@@ -477,11 +501,21 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
#define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) (!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud) pte_present(pud_pte(pud))
+#define pud_valid(pud) pte_valid(pud_pte(pud))
static inline void set_pud(pud_t *pudp, pud_t pud)
{
+#ifdef __PAGETABLE_PUD_FOLDED
+ if (in_swapper_pgdir(pudp)) {
+ set_swapper_pgd((pgd_t *)pudp, __pgd(pud_val(pud)));
+ return;
+ }
+#endif /* __PAGETABLE_PUD_FOLDED */
+
WRITE_ONCE(*pudp, pud);
- dsb(ishst);
+
+ if (pud_valid(pud))
+ dsb(ishst);
}
static inline void pud_clear(pud_t *pudp)
@@ -532,6 +566,11 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
+ if (in_swapper_pgdir(pgdp)) {
+ set_swapper_pgd(pgdp, pgd);
+ return;
+ }
+
WRITE_ONCE(*pgdp, pgd);
dsb(ishst);
}
@@ -712,11 +751,6 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
}
#endif
-extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
-extern pgd_t swapper_pg_end[];
-extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
-extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
-
/*
* Encode and decode a swap entry:
* bits 0-1: present (must be zero)
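
The in_swapper_pgdir() test added above is a simple page-mask comparison: two pointers land in the same page iff they agree once the low PAGE_SHIFT bits are masked off. A userspace sketch, assuming 4k pages and using made-up sample addresses in place of the real kernel symbol:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))

static uintptr_t swapper_pg_dir;	/* stand-in for the kernel symbol */

static bool in_swapper_pgdir(const void *addr)
{
	return ((uintptr_t)addr & PAGE_MASK) == (swapper_pg_dir & PAGE_MASK);
}

int main(void)
{
	swapper_pg_dir = 0x09e73000UL;
	printf("%d %d\n",
	       in_swapper_pgdir((void *)0x09e73ff8UL),	/* same page: 1 */
	       in_swapper_pgdir((void *)0x09e74000UL));	/* next page: 0 */
	return 0;
}
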
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 79657ad91397..2bf6691371c2 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -174,6 +174,10 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
{
start_thread_common(regs, pc);
regs->pstate = PSR_MODE_EL0t;
+
+ if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
+ regs->pstate |= PSR_SSBS_BIT;
+
regs->sp = sp;
}
@@ -190,6 +194,9 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
regs->pstate |= PSR_AA32_E_BIT;
#endif
+ if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
+ regs->pstate |= PSR_AA32_SSBS_BIT;
+
regs->compat_sp = sp;
}
#endif
@@ -244,10 +251,6 @@ static inline void spin_lock_prefetch(const void *ptr)
#endif
-void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused);
-void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused);
-void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused);
-
extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
extern void __init minsigstksz_setup(void);
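
The start_thread() change above gives new user threads PSR_SSBS_BIT (mitigation relaxed) unless SSBD was force-enabled. A sketch of that decision; ARM64_SSBD_FORCE_ENABLE = 2 is an assumption extrapolated from the ARM64_SSBD_* values visible in cpufeature.h earlier in this series.

#include <stdint.h>
#include <stdio.h>

#define PSR_MODE_EL0t		0x0
#define PSR_SSBS_BIT		0x00001000
#define ARM64_SSBD_FORCE_ENABLE	2	/* assumed ordering */

static uint64_t initial_pstate(int ssbd_state)
{
	uint64_t pstate = PSR_MODE_EL0t;

	/* leave speculation unrestricted unless the mitigation is forced on */
	if (ssbd_state != ARM64_SSBD_FORCE_ENABLE)
		pstate |= PSR_SSBS_BIT;

	return pstate;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)initial_pstate(0));
	printf("0x%llx\n",
	       (unsigned long long)initial_pstate(ARM64_SSBD_FORCE_ENABLE));
	return 0;
}
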
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 177b851ca6d9..6bc43889d11e 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -50,6 +50,7 @@
#define PSR_AA32_I_BIT 0x00000080
#define PSR_AA32_A_BIT 0x00000100
#define PSR_AA32_E_BIT 0x00000200
+#define PSR_AA32_SSBS_BIT 0x00800000
#define PSR_AA32_DIT_BIT 0x01000000
#define PSR_AA32_Q_BIT 0x08000000
#define PSR_AA32_V_BIT 0x10000000
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index c1470931b897..0c909c4a932f 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -20,7 +20,6 @@
#ifndef __ASM_SYSREG_H
#define __ASM_SYSREG_H
-#include <asm/compiler.h>
#include <linux/stringify.h>
/*
@@ -84,13 +83,26 @@
#endif /* CONFIG_BROKEN_GAS_INST */
-#define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
-#define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3)
+/*
+ * Instructions for modifying PSTATE fields.
+ * As per Arm ARM for v8-A, Section "C.5.1.3 op0 == 0b00, architectural hints,
+ * barriers and CLREX, and PSTATE access", ARM DDI 0487 C.a, system instructions
+ * for accessing PSTATE fields have the following encoding:
+ * Op0 = 0, CRn = 4
+ * Op1, Op2 encodes the PSTATE field modified and defines the constraints.
+ * CRm = Imm4 for the instruction.
+ * Rt = 0x1f
+ */
+#define pstate_field(op1, op2) ((op1) << Op1_shift | (op2) << Op2_shift)
+#define PSTATE_Imm_shift CRm_shift
+
+#define PSTATE_PAN pstate_field(0, 4)
+#define PSTATE_UAO pstate_field(0, 3)
+#define PSTATE_SSBS pstate_field(3, 1)
-#define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM | \
- (!!x)<<8 | 0x1f)
-#define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM | \
- (!!x)<<8 | 0x1f)
+#define SET_PSTATE_PAN(x) __emit_inst(0xd500401f | PSTATE_PAN | ((!!x) << PSTATE_Imm_shift))
+#define SET_PSTATE_UAO(x) __emit_inst(0xd500401f | PSTATE_UAO | ((!!x) << PSTATE_Imm_shift))
+#define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift))
#define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
#define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2)
@@ -419,6 +431,7 @@
#define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7)
/* Common SCTLR_ELx flags. */
+#define SCTLR_ELx_DSSBS (1UL << 44)
#define SCTLR_ELx_EE (1 << 25)
#define SCTLR_ELx_IESB (1 << 21)
#define SCTLR_ELx_WXN (1 << 19)
@@ -439,7 +452,7 @@
(1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \
(1 << 17) | (1 << 20) | (1 << 24) | (1 << 26) | \
(1 << 27) | (1 << 30) | (1 << 31) | \
- (0xffffffffUL << 32))
+ (0xffffefffUL << 32))
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ENDIAN_SET_EL2 SCTLR_ELx_EE
@@ -453,7 +466,7 @@
#define SCTLR_EL2_SET (SCTLR_ELx_IESB | ENDIAN_SET_EL2 | SCTLR_EL2_RES1)
#define SCTLR_EL2_CLEAR (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
- ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
+ SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff
#error "Inconsistent SCTLR_EL2 set/clear bits"
@@ -477,7 +490,7 @@
(1 << 29))
#define SCTLR_EL1_RES0 ((1 << 6) | (1 << 10) | (1 << 13) | (1 << 17) | \
(1 << 27) | (1 << 30) | (1 << 31) | \
- (0xffffffffUL << 32))
+ (0xffffefffUL << 32))
#ifdef CONFIG_CPU_BIG_ENDIAN
#define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
@@ -489,12 +502,12 @@
#define SCTLR_EL1_SET (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA |\
SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I |\
- SCTLR_EL1_DZE | SCTLR_EL1_UCT | SCTLR_EL1_NTWI |\
+ SCTLR_EL1_DZE | SCTLR_EL1_UCT |\
SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\
ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1)
#define SCTLR_EL1_CLEAR (SCTLR_ELx_A | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD |\
SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
- SCTLR_EL1_RES0)
+ SCTLR_ELx_DSSBS | SCTLR_EL1_NTWI | SCTLR_EL1_RES0)
#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff
#error "Inconsistent SCTLR_EL1 set/clear bits"
@@ -544,6 +557,13 @@
#define ID_AA64PFR0_EL0_64BIT_ONLY 0x1
#define ID_AA64PFR0_EL0_32BIT_64BIT 0x2
+/* id_aa64pfr1 */
+#define ID_AA64PFR1_SSBS_SHIFT 4
+
+#define ID_AA64PFR1_SSBS_PSTATE_NI 0
+#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1
+#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
+
/* id_aa64mmfr0 */
#define ID_AA64MMFR0_TGRAN4_SHIFT 28
#define ID_AA64MMFR0_TGRAN64_SHIFT 24
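
The SET_PSTATE_*() rework above factors the common "msr pstate-field, #imm" encoding out of the per-field macros. A sketch reproducing the arithmetic; the Op1/Op2/CRm shift values (16, 5, 8) follow the sys_reg() encoding elsewhere in sysreg.h and are restated here as assumptions.

#include <stdint.h>
#include <stdio.h>

#define Op1_shift	16
#define Op2_shift	5
#define CRm_shift	8

#define pstate_field(op1, op2)	((op1) << Op1_shift | (op2) << Op2_shift)
#define PSTATE_Imm_shift	CRm_shift

#define PSTATE_PAN	pstate_field(0, 4)
#define PSTATE_SSBS	pstate_field(3, 1)

/* 0xd500401f is "msr <pstate-field>, #imm" with CRn = 4 and Rt = 0x1f */
static uint32_t set_pstate(uint32_t field, int x)
{
	return 0xd500401f | field | ((!!x) << PSTATE_Imm_shift);
}

int main(void)
{
	printf("SET_PSTATE_PAN(1)  = 0x%08x\n", set_pstate(PSTATE_PAN, 1));
	printf("SET_PSTATE_SSBS(0) = 0x%08x\n", set_pstate(PSTATE_SSBS, 0));
	return 0;
}
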
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index a3233167be60..106fdc951b6e 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -22,16 +22,10 @@
#include <linux/pagemap.h>
#include <linux/swap.h>
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-
-#define tlb_remove_entry(tlb, entry) tlb_remove_table(tlb, entry)
static inline void __tlb_remove_table(void *_table)
{
free_page_and_swap_cache((struct page *)_table);
}
-#else
-#define tlb_remove_entry(tlb, entry) tlb_remove_page(tlb, entry)
-#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
static void tlb_flush(struct mmu_gather *tlb);
@@ -40,36 +34,35 @@ static void tlb_flush(struct mmu_gather *tlb);
static inline void tlb_flush(struct mmu_gather *tlb)
{
struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
+ bool last_level = !tlb->freed_tables;
+ unsigned long stride = tlb_get_unmap_size(tlb);
/*
- * The ASID allocator will either invalidate the ASID or mark
- * it as used.
+ * If we're tearing down the address space then we only care about
+ * invalidating the walk-cache, since the ASID allocator won't
+ * reallocate our ASID without invalidating the entire TLB.
*/
- if (tlb->fullmm)
+ if (tlb->fullmm) {
+ if (!last_level)
+ flush_tlb_mm(tlb->mm);
return;
+ }
- /*
- * The intermediate page table levels are already handled by
- * the __(pte|pmd|pud)_free_tlb() functions, so last level
- * TLBI is sufficient here.
- */
- __flush_tlb_range(&vma, tlb->start, tlb->end, true);
+ __flush_tlb_range(&vma, tlb->start, tlb->end, stride, last_level);
}
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long addr)
{
- __flush_tlb_pgtable(tlb->mm, addr);
pgtable_page_dtor(pte);
- tlb_remove_entry(tlb, pte);
+ tlb_remove_table(tlb, pte);
}
#if CONFIG_PGTABLE_LEVELS > 2
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
unsigned long addr)
{
- __flush_tlb_pgtable(tlb->mm, addr);
- tlb_remove_entry(tlb, virt_to_page(pmdp));
+ tlb_remove_table(tlb, virt_to_page(pmdp));
}
#endif
@@ -77,8 +70,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
unsigned long addr)
{
- __flush_tlb_pgtable(tlb->mm, addr);
- tlb_remove_entry(tlb, virt_to_page(pudp));
+ tlb_remove_table(tlb, virt_to_page(pudp));
}
#endif
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index a4a1901140ee..c3c0387aee18 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -70,43 +70,73 @@
})
/*
- * TLB Management
- * ==============
+ * TLB Invalidation
+ * ================
*
- * The TLB specific code is expected to perform whatever tests it needs
- * to determine if it should invalidate the TLB for each call. Start
- * addresses are inclusive and end addresses are exclusive; it is safe to
- * round these addresses down.
+ * This header file implements the low-level TLB invalidation routines
+ * (sometimes referred to as "flushing" in the kernel) for arm64.
*
- * flush_tlb_all()
+ * Every invalidation operation uses the following template:
+ *
+ * DSB ISHST // Ensure prior page-table updates have completed
+ * TLBI ... // Invalidate the TLB
+ * DSB ISH // Ensure the TLB invalidation has completed
+ * if (invalidated kernel mappings)
+ * ISB // Discard any instructions fetched from the old mapping
+ *
+ *
+ * The following functions form part of the "core" TLB invalidation API,
+ * as documented in Documentation/core-api/cachetlb.rst:
*
- * Invalidate the entire TLB.
+ * flush_tlb_all()
+ * Invalidate the entire TLB (kernel + user) on all CPUs
*
* flush_tlb_mm(mm)
+ * Invalidate an entire user address space on all CPUs.
+ * The 'mm' argument identifies the ASID to invalidate.
+ *
+ * flush_tlb_range(vma, start, end)
+ * Invalidate the virtual-address range '[start, end)' on all
+ * CPUs for the user address space corresponding to 'vma->mm'.
+ * Note that this operation also invalidates any walk-cache
+ * entries associated with translations for the specified address
+ * range.
+ *
+ * flush_tlb_kernel_range(start, end)
+ * Same as flush_tlb_range(..., start, end), but applies to
+ * kernel mappings rather than a particular user address space.
+ * Whilst not explicitly documented, this function is used when
+ * unmapping pages from vmalloc/io space.
+ *
+ * flush_tlb_page(vma, addr)
+ * Invalidate a single user mapping for address 'addr' in the
+ * address space corresponding to 'vma->mm'. Note that this
+ * operation only invalidates a single, last-level page-table
+ * entry and therefore does not affect any walk-caches.
*
- * Invalidate all TLB entries in a particular address space.
- * - mm - mm_struct describing address space
*
- * flush_tlb_range(mm,start,end)
+ * Next, we have some undocumented invalidation routines that you probably
+ * don't want to call unless you know what you're doing:
*
- * Invalidate a range of TLB entries in the specified address
- * space.
- * - mm - mm_struct describing address space
- * - start - start address (may not be aligned)
- * - end - end address (exclusive, may not be aligned)
+ * local_flush_tlb_all()
+ * Same as flush_tlb_all(), but only applies to the calling CPU.
*
- * flush_tlb_page(vaddr,vma)
+ * __flush_tlb_kernel_pgtable(addr)
+ * Invalidate a single kernel mapping for address 'addr' on all
+ * CPUs, ensuring that any walk-cache entries associated with the
+ * translation are also invalidated.
*
- * Invalidate the specified page in the specified address range.
- * - vaddr - virtual address (may not be aligned)
- * - vma - vma_struct describing address range
+ * __flush_tlb_range(vma, start, end, stride, last_level)
+ * Invalidate the virtual-address range '[start, end)' on all
+ * CPUs for the user address space corresponding to 'vma->mm'.
+ * The invalidation operations are issued at a granularity
+ *	determined by 'stride', and only affect walk-cache entries
+ *	if 'last_level' is false.
*
- * flush_kern_tlb_page(kaddr)
*
- * Invalidate the TLB entry for the specified page. The address
- * will be in the kernels virtual memory space. Current uses
- * only require the D-TLB to be invalidated.
- * - kaddr - Kernel virtual memory address
+ * Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
+ * on top of these routines, since that is our interface to the mmu_gather
+ * API as used by munmap() and friends.
*/
static inline void local_flush_tlb_all(void)
{
@@ -149,25 +179,28 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
* This is meant to avoid soft lock-ups on large TLB flushing ranges and not
* necessarily a performance improvement.
*/
-#define MAX_TLB_RANGE (1024UL << PAGE_SHIFT)
+#define MAX_TLBI_OPS 1024UL
static inline void __flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end,
- bool last_level)
+ unsigned long stride, bool last_level)
{
unsigned long asid = ASID(vma->vm_mm);
unsigned long addr;
- if ((end - start) > MAX_TLB_RANGE) {
+ if ((end - start) > (MAX_TLBI_OPS * stride)) {
flush_tlb_mm(vma->vm_mm);
return;
}
+ /* Convert the stride into units of 4k */
+ stride >>= 12;
+
start = __TLBI_VADDR(start, asid);
end = __TLBI_VADDR(end, asid);
dsb(ishst);
- for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
+ for (addr = start; addr < end; addr += stride) {
if (last_level) {
__tlbi(vale1is, addr);
__tlbi_user(vale1is, addr);
@@ -182,14 +215,18 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- __flush_tlb_range(vma, start, end, false);
+ /*
+ * We cannot use leaf-only invalidation here, since we may be invalidating
+ * table entries as part of collapsing hugepages or moving page tables.
+ */
+ __flush_tlb_range(vma, start, end, PAGE_SIZE, false);
}
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
unsigned long addr;
- if ((end - start) > MAX_TLB_RANGE) {
+ if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
flush_tlb_all();
return;
}
@@ -199,7 +236,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
dsb(ishst);
for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
- __tlbi(vaae1is, addr);
+ __tlbi(vaale1is, addr);
dsb(ish);
isb();
}
@@ -208,20 +245,11 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
* Used to invalidate the TLB (walk caches) corresponding to intermediate page
* table levels (pgd/pud/pmd).
*/
-static inline void __flush_tlb_pgtable(struct mm_struct *mm,
- unsigned long uaddr)
-{
- unsigned long addr = __TLBI_VADDR(uaddr, ASID(mm));
-
- __tlbi(vae1is, addr);
- __tlbi_user(vae1is, addr);
- dsb(ish);
-}
-
static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
{
unsigned long addr = __TLBI_VADDR(kaddr, 0);
+ dsb(ishst);
__tlbi(vaae1is, addr);
dsb(ish);
}
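
The point of threading 'stride' through __flush_tlb_range() is that a huge-page range no longer degenerates into a full-ASID flush just because it spans many 4k pages. A sketch of the iteration arithmetic, counting the TLBI operations a range/stride pair would issue; the __TLBI_VADDR encoding is elided and plain byte arithmetic is used, whereas the kernel converts addresses and stride into 4k units first.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PMD_SIZE	(2UL * 1024 * 1024)
#define MAX_TLBI_OPS	1024UL

static unsigned long tlbi_ops(unsigned long start, unsigned long end,
			      unsigned long stride)
{
	/* past the threshold, one flush_tlb_mm() replaces the loop */
	if ((end - start) > (MAX_TLBI_OPS * stride))
		return 1;

	return (end - start) / stride;
}

int main(void)
{
	printf("%lu\n", tlbi_ops(0, 64 * PAGE_SIZE, PAGE_SIZE));   /* 64 */
	printf("%lu\n", tlbi_ops(0, 64 * PMD_SIZE, PMD_SIZE));     /* 64 */
	printf("%lu\n", tlbi_ops(0, 8192 * PAGE_SIZE, PAGE_SIZE)); /* 1 */
	return 0;
}
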
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index e66b0fca99c2..07c34087bd5e 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -32,7 +32,6 @@
#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
-#include <asm/compiler.h>
#include <asm/extable.h>
#define get_ds() (KERNEL_DS)
diff --git a/arch/arm64/include/asm/xen/events.h b/arch/arm64/include/asm/xen/events.h
index 4e22b7a8c038..2788e95d0ff0 100644
--- a/arch/arm64/include/asm/xen/events.h
+++ b/arch/arm64/include/asm/xen/events.h
@@ -14,7 +14,7 @@ enum ipi_vector {
static inline int xen_irqs_disabled(struct pt_regs *regs)
{
- return raw_irqs_disabled_flags((unsigned long) regs->pstate);
+ return !interrupts_enabled(regs);
}
#define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 17c65c8f33cb..2bcd6e4f3474 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -48,5 +48,6 @@
#define HWCAP_USCAT (1 << 25)
#define HWCAP_ILRCPC (1 << 26)
#define HWCAP_FLAGM (1 << 27)
+#define HWCAP_SSBS (1 << 28)
#endif /* _UAPI__ASM_HWCAP_H */
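
On arm64, userspace can probe the new bit through the auxiliary vector; a minimal sketch, with the bit value mirroring the define above:

#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP_SSBS	(1 << 28)

int main(void)
{
	unsigned long hwcaps = getauxval(AT_HWCAP);

	printf("SSBS %s\n", (hwcaps & HWCAP_SSBS) ? "present" : "absent");
	return 0;
}
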
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 98c4ce55d9c3..a36227fdb084 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -46,6 +46,7 @@
#define PSR_I_BIT 0x00000080
#define PSR_A_BIT 0x00000100
#define PSR_D_BIT 0x00000200
+#define PSR_SSBS_BIT 0x00001000
#define PSR_PAN_BIT 0x00400000
#define PSR_UAO_BIT 0x00800000
#define PSR_V_BIT 0x10000000
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 95ac7374d723..4c8b13bede80 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -54,6 +54,7 @@ arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \
arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+arm64-obj-$(CONFIG_CRASH_CORE) += crash_core.o
arm64-obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o
arm64-obj-$(CONFIG_ARM64_SSBD) += ssbd.o
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index dec10898d688..a509e35132d2 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -68,21 +68,43 @@ static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
int scope)
{
- u64 mask = CTR_CACHE_MINLINE_MASK;
-
- /* Skip matching the min line sizes for cache type check */
- if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
- mask ^= arm64_ftr_reg_ctrel0.strict_mask;
+ u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
+ u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
+ u64 ctr_raw, ctr_real;
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
- return (read_cpuid_cachetype() & mask) !=
- (arm64_ftr_reg_ctrel0.sys_val & mask);
+
+ /*
+ * We want to make sure that all the CPUs in the system expose
+	 * a consistent CTR_EL0 so that applications behave
+	 * correctly across migration.
+	 *
+	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
+	 *
+	 * 1) It is safe if the system doesn't support IDC, as the CPU
+	 *    reports IDC = 0 anyway, consistent with the rest.
+ *
+ * 2) If the system has IDC, it is still safe as we trap CTR_EL0
+ * access on this CPU via the ARM64_HAS_CACHE_IDC capability.
+ *
+ * So, we need to make sure either the raw CTR_EL0 or the effective
+ * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
+ */
+ ctr_raw = read_cpuid_cachetype() & mask;
+ ctr_real = read_cpuid_effective_cachetype() & mask;
+
+ return (ctr_real != sys) && (ctr_raw != sys);
}
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
- sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
+ u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
+
+ /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
+ if ((read_cpuid_cachetype() & mask) !=
+ (arm64_ftr_reg_ctrel0.sys_val & mask))
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
@@ -116,6 +138,15 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
static DEFINE_SPINLOCK(bp_lock);
int cpu, slot = -1;
+ /*
+ * enable_smccc_arch_workaround_1() passes NULL for the hyp_vecs
+ * start/end if we're a guest. Skip the hyp-vectors work.
+ */
+ if (!hyp_vecs_start) {
+ __this_cpu_write(bp_hardening_data.fn, fn);
+ return;
+ }
+
spin_lock(&bp_lock);
for_each_possible_cpu(cpu) {
if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
@@ -312,6 +343,14 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
void arm64_set_ssbd_mitigation(bool state)
{
+ if (this_cpu_has_cap(ARM64_SSBS)) {
+ if (state)
+ asm volatile(SET_PSTATE_SSBS(0));
+ else
+ asm volatile(SET_PSTATE_SSBS(1));
+ return;
+ }
+
switch (psci_ops.conduit) {
case PSCI_CONDUIT_HVC:
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
@@ -336,6 +375,11 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+ if (this_cpu_has_cap(ARM64_SSBS)) {
+ required = false;
+ goto out_printmsg;
+ }
+
if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
ssbd_state = ARM64_SSBD_UNKNOWN;
return false;
@@ -384,7 +428,6 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
switch (ssbd_state) {
case ARM64_SSBD_FORCE_DISABLE:
- pr_info_once("%s disabled from command-line\n", entry->desc);
arm64_set_ssbd_mitigation(false);
required = false;
break;
@@ -397,7 +440,6 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
break;
case ARM64_SSBD_FORCE_ENABLE:
- pr_info_once("%s forced from command-line\n", entry->desc);
arm64_set_ssbd_mitigation(true);
required = true;
break;
@@ -407,10 +449,27 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
break;
}
+out_printmsg:
+ switch (ssbd_state) {
+ case ARM64_SSBD_FORCE_DISABLE:
+ pr_info_once("%s disabled from command-line\n", entry->desc);
+ break;
+
+ case ARM64_SSBD_FORCE_ENABLE:
+ pr_info_once("%s forced from command-line\n", entry->desc);
+ break;
+ }
+
return required;
}
#endif /* CONFIG_ARM64_SSBD */
+static void __maybe_unused
+cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
+{
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
+}
+
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
.matches = is_affected_midr_range, \
.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
@@ -616,14 +675,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
},
#endif
{
- .desc = "Mismatched cache line size",
- .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
- .matches = has_mismatched_cache_type,
- .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
- .cpu_enable = cpu_enable_trap_ctr_access,
- },
- {
- .desc = "Mismatched cache type",
+ .desc = "Mismatched cache type (CTR_EL0)",
.capability = ARM64_MISMATCHED_CACHE_TYPE,
.matches = has_mismatched_cache_type,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
@@ -680,6 +732,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.matches = has_ssbd_mitigation,
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1188873
+ {
+ /* Cortex-A76 r0p0 to r2p0 */
+ .desc = "ARM erratum 1188873",
+ .capability = ARM64_WORKAROUND_1188873,
+ ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
+ },
+#endif
{
}
};
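
The relaxed mismatch test in has_mismatched_cache_type() above only flags a CPU when neither its raw nor its effective CTR_EL0 matches the system-wide sanitised copy. A sketch with all values as parameters; in the kernel they come from mrs reads and the sanitised-feature framework, and the sample numbers below are made up.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool mismatched_cache_type(uint64_t mask, uint64_t sys_val,
				  uint64_t ctr_raw, uint64_t ctr_real)
{
	uint64_t sys = sys_val & mask;

	return ((ctr_real & mask) != sys) && ((ctr_raw & mask) != sys);
}

int main(void)
{
	/* raw copy differs, but the effective value matches: no erratum */
	printf("%d\n", mismatched_cache_type(~0ULL, 0x84448004,
					     0x84448000, 0x84448004));
	return 0;
}
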
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index e238b7932096..af50064dea51 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -20,6 +20,7 @@
#include <linux/bsearch.h>
#include <linux/cpumask.h>
+#include <linux/crash_dump.h>
#include <linux/sort.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
@@ -117,6 +118,7 @@ EXPORT_SYMBOL(cpu_hwcap_keys);
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
+static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);
/*
* NOTE: Any changes to the visibility of features should be kept in
@@ -164,6 +166,11 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
ARM64_FTR_END,
};
+static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
+ ARM64_FTR_END,
+};
+
static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
@@ -371,7 +378,7 @@ static const struct __ftr_reg_entry {
/* Op1 = 0, CRn = 0, CRm = 4 */
ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
- ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz),
+ ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
/* Op1 = 0, CRn = 0, CRm = 5 */
@@ -657,7 +664,6 @@ void update_cpu_features(int cpu,
/*
* EL3 is not our concern.
- * ID_AA64PFR1 is currently RES0.
*/
taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
@@ -848,15 +854,55 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus
}
static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
- int __unused)
+ int scope)
{
- return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_IDC_SHIFT);
+ u64 ctr;
+
+ if (scope == SCOPE_SYSTEM)
+ ctr = arm64_ftr_reg_ctrel0.sys_val;
+ else
+ ctr = read_cpuid_effective_cachetype();
+
+ return ctr & BIT(CTR_IDC_SHIFT);
+}
+
+static void cpu_emulate_effective_ctr(const struct arm64_cpu_capabilities *__unused)
+{
+ /*
+ * If the CPU exposes raw CTR_EL0.IDC = 0, while effectively
+ * CTR_EL0.IDC = 1 (from CLIDR values), we need to trap accesses
+ * to the CTR_EL0 on this CPU and emulate it with the real/safe
+ * value.
+ */
+ if (!(read_cpuid_cachetype() & BIT(CTR_IDC_SHIFT)))
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
- int __unused)
+ int scope)
{
- return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_DIC_SHIFT);
+ u64 ctr;
+
+ if (scope == SCOPE_SYSTEM)
+ ctr = arm64_ftr_reg_ctrel0.sys_val;
+ else
+ ctr = read_cpuid_cachetype();
+
+ return ctr & BIT(CTR_DIC_SHIFT);
+}
+
+static bool __maybe_unused
+has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
+{
+ /*
+	 * Kdump isn't guaranteed to power-off all secondary CPUs, so with
+	 * CNP enabled we may share TLB entries with a CPU stuck in the
+	 * crashed kernel.
+ */
+ if (is_kdump_kernel())
+ return false;
+
+ return has_cpuid_feature(entry, scope);
}
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
@@ -1035,6 +1081,70 @@ static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
WARN_ON(val & (7 << 27 | 7 << 21));
}
+#ifdef CONFIG_ARM64_SSBD
+static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
+{
+ if (user_mode(regs))
+ return 1;
+
+ if (instr & BIT(PSTATE_Imm_shift))
+ regs->pstate |= PSR_SSBS_BIT;
+ else
+ regs->pstate &= ~PSR_SSBS_BIT;
+
+ arm64_skip_faulting_instruction(regs, 4);
+ return 0;
+}
+
+static struct undef_hook ssbs_emulation_hook = {
+ .instr_mask = ~(1U << PSTATE_Imm_shift),
+ .instr_val = 0xd500401f | PSTATE_SSBS,
+ .fn = ssbs_emulation_handler,
+};
+
+static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
+{
+ static bool undef_hook_registered = false;
+ static DEFINE_SPINLOCK(hook_lock);
+
+ spin_lock(&hook_lock);
+ if (!undef_hook_registered) {
+ register_undef_hook(&ssbs_emulation_hook);
+ undef_hook_registered = true;
+ }
+ spin_unlock(&hook_lock);
+
+ if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
+ arm64_set_ssbd_mitigation(false);
+ } else {
+ arm64_set_ssbd_mitigation(true);
+ }
+}
+#endif /* CONFIG_ARM64_SSBD */
+
+#ifdef CONFIG_ARM64_PAN
+static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
+{
+ /*
+ * We modify PSTATE. This won't work from irq context as the PSTATE
+ * is discarded once we return from the exception.
+ */
+ WARN_ON_ONCE(in_interrupt());
+
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
+ asm(SET_PSTATE_PAN(1));
+}
+#endif /* CONFIG_ARM64_PAN */
+
+#ifdef CONFIG_ARM64_RAS_EXTN
+static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
+{
+ /* Firmware may have left a deferred SError in this register. */
+ write_sysreg_s(0, SYS_DISR_EL1);
+}
+#endif /* CONFIG_ARM64_RAS_EXTN */
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -1184,6 +1294,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.capability = ARM64_HAS_CACHE_IDC,
.type = ARM64_CPUCAP_SYSTEM_FEATURE,
.matches = has_cache_idc,
+ .cpu_enable = cpu_emulate_effective_ctr,
},
{
.desc = "Instruction cache invalidation not required for I/D coherence",
@@ -1222,6 +1333,41 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.cpu_enable = cpu_enable_hw_dbm,
},
#endif
+#ifdef CONFIG_ARM64_SSBD
+ {
+ .desc = "CRC32 instructions",
+ .capability = ARM64_HAS_CRC32,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64ISAR0_EL1,
+ .field_pos = ID_AA64ISAR0_CRC32_SHIFT,
+ .min_field_value = 1,
+ },
+ {
+ .desc = "Speculative Store Bypassing Safe (SSBS)",
+ .capability = ARM64_SSBS,
+ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64PFR1_EL1,
+ .field_pos = ID_AA64PFR1_SSBS_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
+ .cpu_enable = cpu_enable_ssbs,
+ },
+#endif
+#ifdef CONFIG_ARM64_CNP
+ {
+ .desc = "Common not Private translations",
+ .capability = ARM64_HAS_CNP,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_useable_cnp,
+ .sys_reg = SYS_ID_AA64MMFR2_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64MMFR2_CNP_SHIFT,
+ .min_field_value = 1,
+ .cpu_enable = cpu_enable_cnp,
+ },
+#endif
{},
};
@@ -1267,6 +1413,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
#ifdef CONFIG_ARM64_SVE
HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
#endif
+ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
{},
};
@@ -1658,6 +1805,11 @@ cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
}
+static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
+{
+ cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+}
+
/*
* We emulate only the following system register space.
* Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
@@ -1719,27 +1871,32 @@ static int emulate_sys_reg(u32 id, u64 *valp)
return 0;
}
-static int emulate_mrs(struct pt_regs *regs, u32 insn)
+int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt)
{
int rc;
- u32 sys_reg, dst;
u64 val;
- /*
- * sys_reg values are defined as used in mrs/msr instruction.
- * shift the imm value to get the encoding.
- */
- sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
rc = emulate_sys_reg(sys_reg, &val);
if (!rc) {
- dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
- pt_regs_write_reg(regs, dst, val);
+ pt_regs_write_reg(regs, rt, val);
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
-
return rc;
}
+static int emulate_mrs(struct pt_regs *regs, u32 insn)
+{
+ u32 sys_reg, rt;
+
+ /*
+	 * sys_reg values are defined as used in the mrs/msr instructions.
+	 * Shift the imm value to get the encoding.
+ */
+ sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
+ rt = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
+ return do_emulate_mrs(regs, sys_reg, rt);
+}
+
static struct undef_hook mrs_hook = {
.instr_mask = 0xfff00000,
.instr_val = 0xd5300000,
@@ -1755,9 +1912,3 @@ static int __init enable_mrs_emulation(void)
}
core_initcall(enable_mrs_emulation);
-
-void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
-{
- /* Firmware may have left a deferred SError in this register. */
- write_sysreg_s(0, SYS_DISR_EL1);
-}
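
Note: since the hunk above also exposes SSBS through the ELF hwcaps, userspace can probe for it with getauxval(). A minimal sketch, assuming the uapi constant (the fallback value below is an assumption for older headers; check asm/hwcap.h):

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP_SSBS
    #define HWCAP_SSBS (1UL << 28) /* assumed value; see asm/hwcap.h */
    #endif

    int main(void)
    {
        unsigned long hwcaps = getauxval(AT_HWCAP);

        printf("ssbs: %s\n", (hwcaps & HWCAP_SSBS) ? "present" : "absent");
        return 0;
    }
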
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index e9ab7b3ed317..bcc2831399cb 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -81,6 +81,7 @@ static const char *const hwcap_str[] = {
"uscat",
"ilrcpc",
"flagm",
+ "ssbs",
NULL
};
@@ -324,7 +325,15 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
info->reg_cntfrq = arch_timer_get_cntfrq();
- info->reg_ctr = read_cpuid_cachetype();
+ /*
+	 * Use the effective value of CTR_EL0 rather than the raw value
+	 * exposed by the CPU. The CTR_EL0.IDC field must be interpreted
+	 * together with the CLIDR_EL1 fields to avoid triggering false
+	 * warnings when there is a mismatch across the CPUs. Keep track
+	 * of the effective value of CTR_EL0 in our internal records for
+	 * accurate sanity checks and feature enablement.
+ */
+ info->reg_ctr = read_cpuid_effective_cachetype();
info->reg_dczid = read_cpuid(DCZID_EL0);
info->reg_midr = read_cpuid_id();
info->reg_revidr = read_cpuid(REVIDR_EL1);
diff --git a/arch/arm64/kernel/crash_core.c b/arch/arm64/kernel/crash_core.c
new file mode 100644
index 000000000000..ca4c3e12d8c5
--- /dev/null
+++ b/arch/arm64/kernel/crash_core.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Linaro.
+ * Copyright (C) Huawei Futurewei Technologies.
+ */
+
+#include <linux/crash_core.h>
+#include <asm/memory.h>
+
+void arch_crash_save_vmcoreinfo(void)
+{
+ VMCOREINFO_NUMBER(VA_BITS);
+ /* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
+ vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n",
+ kimage_voffset);
+ vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
+ PHYS_OFFSET);
+ vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
+}
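
Note: the strings built above land in the vmcoreinfo ELF note that tools such as makedumpfile parse after a crash. On a typical configuration the note would contain lines shaped roughly like the following (all values illustrative, not taken from a real dump):

    NUMBER(VA_BITS)=48
    NUMBER(kimage_voffset)=0xffff800008000000
    NUMBER(PHYS_OFFSET)=0x80000000
    KERNELOFFSET=0
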
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 09dbea221a27..039144ecbcb2 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -589,7 +589,7 @@ el1_undef:
inherit_daif pstate=x23, tmp=x2
mov x0, sp
bl do_undefinstr
- ASM_BUG()
+ kernel_exit 1
el1_dbg:
/*
* Debug exception handling
@@ -665,6 +665,7 @@ el0_sync:
cmp x24, #ESR_ELx_EC_FP_EXC64 // FP/ASIMD exception
b.eq el0_fpsimd_exc
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
+ ccmp x24, #ESR_ELx_EC_WFx, #4, ne
b.eq el0_sys
cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
b.eq el0_sp_pc
@@ -697,9 +698,9 @@ el0_sync_compat:
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap
- b.eq el0_undef
+ b.eq el0_cp15
cmp x24, #ESR_ELx_EC_CP15_64 // CP15 MRRC/MCRR trap
- b.eq el0_undef
+ b.eq el0_cp15
cmp x24, #ESR_ELx_EC_CP14_MR // CP14 MRC/MCR trap
b.eq el0_undef
cmp x24, #ESR_ELx_EC_CP14_LS // CP14 LDC/STC trap
@@ -722,6 +723,17 @@ el0_irq_compat:
el0_error_compat:
kernel_entry 0, 32
b el0_error_naked
+
+el0_cp15:
+ /*
+ * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions
+ */
+ enable_daif
+ ct_user_exit
+ mov x0, x25
+ mov x1, sp
+ bl do_cp15instr
+ b ret_to_user
#endif
el0_da:
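
Note on the conditional compare introduced above: ccmp x24, #ESR_ELx_EC_WFx, #4, ne only performs the second comparison when the preceding cmp found inequality; otherwise it writes NZCV = #4 (Z set) directly, so the following b.eq el0_sys is taken for either exception class. A C-level sketch of the same predicate (constants are the architected EC encodings):

    #include <stdbool.h>

    /* Sketch: the cmp/ccmp/b.eq sequence is a two-way match on the
     * exception class, routing EL0 WFI traps through the SYS64 hooks.
     */
    static bool routes_to_el0_sys(unsigned int ec)
    {
        return ec == 0x18 /* ESR_ELx_EC_SYS64 */ ||
               ec == 0x01 /* ESR_ELx_EC_WFx */;
    }
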
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b0853069702f..4471f570a295 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -287,19 +287,21 @@ __create_page_tables:
mov x28, lr
/*
- * Invalidate the idmap and swapper page tables to avoid potential
- * dirty cache lines being evicted.
+ * Invalidate the init page tables to avoid potential dirty cache lines
+ * being evicted. Other page tables are allocated in rodata as part of
+ * the kernel image, and thus are clean to the PoC per the boot
+ * protocol.
*/
- adrp x0, idmap_pg_dir
- adrp x1, swapper_pg_end
+ adrp x0, init_pg_dir
+ adrp x1, init_pg_end
sub x1, x1, x0
bl __inval_dcache_area
/*
- * Clear the idmap and swapper page tables.
+ * Clear the init page tables.
*/
- adrp x0, idmap_pg_dir
- adrp x1, swapper_pg_end
+ adrp x0, init_pg_dir
+ adrp x1, init_pg_end
sub x1, x1, x0
1: stp xzr, xzr, [x0], #16
stp xzr, xzr, [x0], #16
@@ -373,7 +375,7 @@ __create_page_tables:
/*
* Map the kernel image (starting with PHYS_OFFSET).
*/
- adrp x0, swapper_pg_dir
+ adrp x0, init_pg_dir
mov_q x5, KIMAGE_VADDR + TEXT_OFFSET // compile time __va(_text)
add x5, x5, x23 // add KASLR displacement
mov x4, PTRS_PER_PGD
@@ -390,7 +392,7 @@ __create_page_tables:
* tables again to remove any speculatively loaded cache lines.
*/
adrp x0, idmap_pg_dir
- adrp x1, swapper_pg_end
+ adrp x1, init_pg_end
sub x1, x1, x0
dmb sy
bl __inval_dcache_area
@@ -706,6 +708,7 @@ secondary_startup:
* Common entry point for secondary CPUs.
*/
bl __cpu_setup // initialise processor
+ adrp x1, swapper_pg_dir
bl __enable_mmu
ldr x8, =__secondary_switched
br x8
@@ -748,6 +751,7 @@ ENDPROC(__secondary_switched)
* Enable the MMU.
*
* x0 = SCTLR_EL1 value for turning on the MMU.
+ * x1 = TTBR1_EL1 value
*
* Returns to the caller via x30/lr. This requires the caller to be covered
* by the .idmap.text section.
@@ -756,17 +760,16 @@ ENDPROC(__secondary_switched)
* If it isn't, park the CPU
*/
ENTRY(__enable_mmu)
- mrs x1, ID_AA64MMFR0_EL1
- ubfx x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
+ mrs x2, ID_AA64MMFR0_EL1
+ ubfx x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
b.ne __no_granule_support
- update_early_cpu_boot_status 0, x1, x2
- adrp x1, idmap_pg_dir
- adrp x2, swapper_pg_dir
- phys_to_ttbr x3, x1
- phys_to_ttbr x4, x2
- msr ttbr0_el1, x3 // load TTBR0
- msr ttbr1_el1, x4 // load TTBR1
+ update_early_cpu_boot_status 0, x2, x3
+ adrp x2, idmap_pg_dir
+ phys_to_ttbr x1, x1
+ phys_to_ttbr x2, x2
+ msr ttbr0_el1, x2 // load TTBR0
+ msr ttbr1_el1, x1 // load TTBR1
isb
msr sctlr_el1, x0
isb
@@ -823,6 +826,7 @@ __primary_switch:
mrs x20, sctlr_el1 // preserve old SCTLR_EL1 value
#endif
+ adrp x1, init_pg_dir
bl __enable_mmu
#ifdef CONFIG_RELOCATABLE
bl __relocate_kernel
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index f6a5c6bc1434..922add8adb74 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -358,14 +358,3 @@ void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
}
}
#endif /* CONFIG_HIBERNATION */
-
-void arch_crash_save_vmcoreinfo(void)
-{
- VMCOREINFO_NUMBER(VA_BITS);
- /* Please note VMCOREINFO_NUMBER() uses "%d", not "%x" */
- vmcoreinfo_append_str("NUMBER(kimage_voffset)=0x%llx\n",
- kimage_voffset);
- vmcoreinfo_append_str("NUMBER(PHYS_OFFSET)=0x%llx\n",
- PHYS_OFFSET);
- vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
-}
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 8e38d5267f22..e213f8e867f6 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -966,6 +966,12 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
return 0;
}
+static int armv8pmu_filter_match(struct perf_event *event)
+{
+ unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT;
+ return evtype != ARMV8_PMUV3_PERFCTR_CHAIN;
+}
+
static void armv8pmu_reset(void *info)
{
struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
@@ -1114,6 +1120,7 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->stop = armv8pmu_stop,
cpu_pmu->reset = armv8pmu_reset,
cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
+ cpu_pmu->filter_match = armv8pmu_filter_match;
return 0;
}
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index e78c3ef04d95..9b65132e789a 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -107,7 +107,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
if (!p->ainsn.api.insn)
return -ENOMEM;
break;
- };
+ }
/* prepare the instruction */
if (p->ainsn.api.insn)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 7f1628effe6d..ce99c58cd1f1 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -358,6 +358,10 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
if (IS_ENABLED(CONFIG_ARM64_UAO) &&
cpus_have_const_cap(ARM64_HAS_UAO))
childregs->pstate |= PSR_UAO_BIT;
+
+ if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
+ childregs->pstate |= PSR_SSBS_BIT;
+
p->thread.cpu_context.x19 = stack_start;
p->thread.cpu_context.x20 = stk_sz;
}
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index e8edbf13302a..8cdaf25e99cd 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -24,7 +24,6 @@
#include <uapi/linux/psci.h>
-#include <asm/compiler.h>
#include <asm/cpu_ops.h>
#include <asm/errno.h>
#include <asm/smp_plat.h>
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 5b4fac434c84..d0f62dd24c90 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -64,6 +64,9 @@
#include <asm/xen/hypervisor.h>
#include <asm/mmu_context.h>
+static int num_standard_resources;
+static struct resource *standard_resources;
+
phys_addr_t __fdt_pointer __initdata;
/*
@@ -206,14 +209,19 @@ static void __init request_standard_resources(void)
{
struct memblock_region *region;
struct resource *res;
+ unsigned long i = 0;
kernel_code.start = __pa_symbol(_text);
kernel_code.end = __pa_symbol(__init_begin - 1);
kernel_data.start = __pa_symbol(_sdata);
kernel_data.end = __pa_symbol(_end - 1);
+ num_standard_resources = memblock.memory.cnt;
+ standard_resources = alloc_bootmem_low(num_standard_resources *
+ sizeof(*standard_resources));
+
for_each_memblock(memory, region) {
- res = alloc_bootmem_low(sizeof(*res));
+ res = &standard_resources[i++];
if (memblock_is_nomap(region)) {
res->name = "reserved";
res->flags = IORESOURCE_MEM;
@@ -243,36 +251,26 @@ static void __init request_standard_resources(void)
static int __init reserve_memblock_reserved_regions(void)
{
- phys_addr_t start, end, roundup_end = 0;
- struct resource *mem, *res;
- u64 i;
-
- for_each_reserved_mem_region(i, &start, &end) {
- if (end <= roundup_end)
- continue; /* done already */
-
- start = __pfn_to_phys(PFN_DOWN(start));
- end = __pfn_to_phys(PFN_UP(end)) - 1;
- roundup_end = end;
-
- res = kzalloc(sizeof(*res), GFP_ATOMIC);
- if (WARN_ON(!res))
- return -ENOMEM;
- res->start = start;
- res->end = end;
- res->name = "reserved";
- res->flags = IORESOURCE_MEM;
-
- mem = request_resource_conflict(&iomem_resource, res);
- /*
- * We expected memblock_reserve() regions to conflict with
- * memory created by request_standard_resources().
- */
- if (WARN_ON_ONCE(!mem))
+ u64 i, j;
+
+ for (i = 0; i < num_standard_resources; ++i) {
+ struct resource *mem = &standard_resources[i];
+ phys_addr_t r_start, r_end, mem_size = resource_size(mem);
+
+ if (!memblock_is_region_reserved(mem->start, mem_size))
continue;
- kfree(res);
- reserve_region_with_split(mem, start, end, "reserved");
+ for_each_reserved_mem_region(j, &r_start, &r_end) {
+ resource_size_t start, end;
+
+ start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
+ end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
+
+ if (start > mem->end || end < mem->start)
+ continue;
+
+ reserve_region_with_split(mem, start, end, "reserved");
+ }
}
return 0;
@@ -351,12 +349,8 @@ void __init setup_arch(char **cmdline_p)
#endif
#ifdef CONFIG_VT
-#if defined(CONFIG_VGA_CONSOLE)
- conswitchp = &vga_con;
-#elif defined(CONFIG_DUMMY_CONSOLE)
conswitchp = &dummy_con;
#endif
-#endif
if (boot_args[1] || boot_args[2] || boot_args[3]) {
pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index bebec8ef9372..3e53ffa07994 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -101,6 +101,7 @@ ENTRY(cpu_resume)
bl el2_setup // if in EL2 drop to EL1 cleanly
bl __cpu_setup
/* enable the MMU early - so we can access sleep_save_stash by va */
+ adrp x1, swapper_pg_dir
bl __enable_mmu
ldr x8, =_cpu_resume
br x8
diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c
index 3432e5ef9f41..885f13e58708 100644
--- a/arch/arm64/kernel/ssbd.c
+++ b/arch/arm64/kernel/ssbd.c
@@ -3,17 +3,33 @@
* Copyright (C) 2018 ARM Ltd, All Rights Reserved.
*/
+#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <asm/cpufeature.h>
+static void ssbd_ssbs_enable(struct task_struct *task)
+{
+ u64 val = is_compat_thread(task_thread_info(task)) ?
+ PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
+
+ task_pt_regs(task)->pstate |= val;
+}
+
+static void ssbd_ssbs_disable(struct task_struct *task)
+{
+ u64 val = is_compat_thread(task_thread_info(task)) ?
+ PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
+
+ task_pt_regs(task)->pstate &= ~val;
+}
+
/*
* prctl interface for SSBD
- * FIXME: Drop the below ifdefery once merged in 4.18.
*/
-#ifdef PR_SPEC_STORE_BYPASS
static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
int state = arm64_get_ssbd_state();
@@ -46,12 +62,14 @@ static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
return -EPERM;
task_clear_spec_ssb_disable(task);
clear_tsk_thread_flag(task, TIF_SSBD);
+ ssbd_ssbs_enable(task);
break;
case PR_SPEC_DISABLE:
if (state == ARM64_SSBD_FORCE_DISABLE)
return -EPERM;
task_set_spec_ssb_disable(task);
set_tsk_thread_flag(task, TIF_SSBD);
+ ssbd_ssbs_disable(task);
break;
case PR_SPEC_FORCE_DISABLE:
if (state == ARM64_SSBD_FORCE_DISABLE)
@@ -59,6 +77,7 @@ static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
task_set_spec_ssb_disable(task);
task_set_spec_ssb_force_disable(task);
set_tsk_thread_flag(task, TIF_SSBD);
+ ssbd_ssbs_disable(task);
break;
default:
return -ERANGE;
@@ -107,4 +126,3 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
return -ENODEV;
}
}
-#endif /* PR_SPEC_STORE_BYPASS */
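
Note: with the ifdefery gone, the prctl interface above is always built in. A hedged userspace example (the PR_* constants are the generic Linux uapi values; the fallbacks below are only for older headers):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SPEC_STORE_BYPASS
    #define PR_SET_SPECULATION_CTRL 53
    #define PR_SPEC_STORE_BYPASS    0
    #define PR_SPEC_DISABLE         (1UL << 2)
    #endif

    int main(void)
    {
        /* Ask for the SSB mitigation for this task (sets TIF_SSBD and,
         * on SSBS-capable CPUs, clears the SSBS pstate bit as above).
         */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                  PR_SPEC_DISABLE, 0, 0))
            perror("prctl");
        return 0;
    }
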
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 70c283368b64..9405d1b7f4b0 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -48,6 +48,10 @@ void notrace __cpu_suspend_exit(void)
*/
cpu_uninstall_idmap();
+ /* Restore CnP bit in TTBR1_EL1 */
+ if (system_supports_cnp())
+ cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+
/*
* PSTATE was not saved over suspend/resume, re-enable any detected
* features that might not have been set correctly.
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 039e9ff379cc..4066da7f1e5e 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -310,10 +310,12 @@ static int call_undef_hook(struct pt_regs *regs)
int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
void __user *pc = (void __user *)instruction_pointer(regs);
- if (!user_mode(regs))
- return 1;
-
- if (compat_thumb_mode(regs)) {
+ if (!user_mode(regs)) {
+ __le32 instr_le;
+ if (probe_kernel_address((__force __le32 *)pc, instr_le))
+ goto exit;
+ instr = le32_to_cpu(instr_le);
+ } else if (compat_thumb_mode(regs)) {
/* 16-bit Thumb instruction */
__le16 instr_le;
if (get_user(instr_le, (__le16 __user *)pc))
@@ -352,6 +354,9 @@ void force_signal_inject(int signal, int code, unsigned long address)
const char *desc;
struct pt_regs *regs = current_pt_regs();
+ if (WARN_ON(!user_mode(regs)))
+ return;
+
clear_siginfo(&info);
switch (signal) {
@@ -406,14 +411,10 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
if (call_undef_hook(regs) == 0)
return;
+ BUG_ON(!user_mode(regs));
force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
}
-void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
-{
- sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
-}
-
#define __user_cache_maint(insn, address, res) \
if (address >= user_addr_max()) { \
res = -EFAULT; \
@@ -437,7 +438,7 @@ void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
unsigned long address;
- int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+ int rt = ESR_ELx_SYS64_ISS_RT(esr);
int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
int ret = 0;
@@ -472,7 +473,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
{
- int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+ int rt = ESR_ELx_SYS64_ISS_RT(esr);
unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);
pt_regs_write_reg(regs, rt, val);
@@ -482,7 +483,7 @@ static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
- int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+ int rt = ESR_ELx_SYS64_ISS_RT(esr);
pt_regs_write_reg(regs, rt, arch_counter_get_cntvct());
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
@@ -490,12 +491,28 @@ static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
- int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+ int rt = ESR_ELx_SYS64_ISS_RT(esr);
pt_regs_write_reg(regs, rt, arch_timer_get_rate());
arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
+static void mrs_handler(unsigned int esr, struct pt_regs *regs)
+{
+ u32 sysreg, rt;
+
+ rt = ESR_ELx_SYS64_ISS_RT(esr);
+ sysreg = esr_sys64_to_sysreg(esr);
+
+ if (do_emulate_mrs(regs, sysreg, rt) != 0)
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
+}
+
+static void wfi_handler(unsigned int esr, struct pt_regs *regs)
+{
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+}
+
struct sys64_hook {
unsigned int esr_mask;
unsigned int esr_val;
@@ -526,9 +543,176 @@ static struct sys64_hook sys64_hooks[] = {
.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
.handler = cntfrq_read_handler,
},
+ {
+ /* Trap read access to CPUID registers */
+ .esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
+ .esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
+ .handler = mrs_handler,
+ },
+ {
+ /* Trap WFI instructions executed in userspace */
+ .esr_mask = ESR_ELx_WFx_MASK,
+ .esr_val = ESR_ELx_WFx_WFI_VAL,
+ .handler = wfi_handler,
+ },
{},
};
+
+#ifdef CONFIG_COMPAT
+#define PSTATE_IT_1_0_SHIFT 25
+#define PSTATE_IT_1_0_MASK (0x3 << PSTATE_IT_1_0_SHIFT)
+#define PSTATE_IT_7_2_SHIFT 10
+#define PSTATE_IT_7_2_MASK (0x3f << PSTATE_IT_7_2_SHIFT)
+
+static u32 compat_get_it_state(struct pt_regs *regs)
+{
+ u32 it, pstate = regs->pstate;
+
+ it = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
+ it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;
+
+ return it;
+}
+
+static void compat_set_it_state(struct pt_regs *regs, u32 it)
+{
+ u32 pstate_it;
+
+ pstate_it = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
+ pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;
+
+ regs->pstate &= ~PSR_AA32_IT_MASK;
+ regs->pstate |= pstate_it;
+}
+
+static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
+{
+ int cond;
+
+ /* Only a T32 instruction can trap without CV being set */
+ if (!(esr & ESR_ELx_CV)) {
+ u32 it;
+
+ it = compat_get_it_state(regs);
+ if (!it)
+ return true;
+
+ cond = it >> 4;
+ } else {
+ cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
+ }
+
+ return aarch32_opcode_cond_checks[cond](regs->pstate);
+}
+
+static void advance_itstate(struct pt_regs *regs)
+{
+ u32 it;
+
+ /* ARM mode */
+ if (!(regs->pstate & PSR_AA32_T_BIT) ||
+ !(regs->pstate & PSR_AA32_IT_MASK))
+ return;
+
+ it = compat_get_it_state(regs);
+
+ /*
+ * If this is the last instruction of the block, wipe the IT
+ * state. Otherwise advance it.
+ */
+ if (!(it & 7))
+ it = 0;
+ else
+ it = (it & 0xe0) | ((it << 1) & 0x1f);
+
+ compat_set_it_state(regs, it);
+}
+
+static void arm64_compat_skip_faulting_instruction(struct pt_regs *regs,
+ unsigned int sz)
+{
+ advance_itstate(regs);
+ arm64_skip_faulting_instruction(regs, sz);
+}
+
+static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
+{
+ int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;
+
+ pt_regs_write_reg(regs, reg, arch_timer_get_rate());
+ arm64_compat_skip_faulting_instruction(regs, 4);
+}
+
+static struct sys64_hook cp15_32_hooks[] = {
+ {
+ .esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
+ .esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
+ .handler = compat_cntfrq_read_handler,
+ },
+ {},
+};
+
+static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
+{
+ int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
+ int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
+ u64 val = arch_counter_get_cntvct();
+
+ pt_regs_write_reg(regs, rt, lower_32_bits(val));
+ pt_regs_write_reg(regs, rt2, upper_32_bits(val));
+ arm64_compat_skip_faulting_instruction(regs, 4);
+}
+
+static struct sys64_hook cp15_64_hooks[] = {
+ {
+ .esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
+ .esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
+ .handler = compat_cntvct_read_handler,
+ },
+ {},
+};
+
+asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
+{
+ struct sys64_hook *hook, *hook_base;
+
+ if (!cp15_cond_valid(esr, regs)) {
+ /*
+ * There is no T16 variant of a CP access, so we
+ * always advance PC by 4 bytes.
+ */
+ arm64_compat_skip_faulting_instruction(regs, 4);
+ return;
+ }
+
+ switch (ESR_ELx_EC(esr)) {
+ case ESR_ELx_EC_CP15_32:
+ hook_base = cp15_32_hooks;
+ break;
+ case ESR_ELx_EC_CP15_64:
+ hook_base = cp15_64_hooks;
+ break;
+ default:
+ do_undefinstr(regs);
+ return;
+ }
+
+ for (hook = hook_base; hook->handler; hook++)
+ if ((hook->esr_mask & esr) == hook->esr_val) {
+ hook->handler(esr, regs);
+ return;
+ }
+
+ /*
+ * New cp15 instructions may previously have been undefined at
+ * EL0. Fall back to our usual undefined instruction handler
+ * so that we handle these consistently.
+ */
+ do_undefinstr(regs);
+}
+#endif
+
asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
{
struct sys64_hook *hook;
@@ -605,7 +789,6 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
handler[reason], smp_processor_id(), esr,
esr_get_class_string(esr));
- die("Oops - bad mode", regs, 0);
local_daif_mask();
panic("bad mode");
}
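
Note: the IT-state advance above follows the AArch32 ITAdvance() pseudocode: when the low three bits are clear the block is finished and the state is wiped, otherwise the base condition (top three bits) is kept and the five-bit mask shifts left. A standalone illustration (the starting encoding models a four-instruction ITTTT EQ block):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t it_advance(uint32_t it)
    {
        return (it & 7) ? ((it & 0xe0) | ((it << 1) & 0x1f)) : 0;
    }

    int main(void)
    {
        for (uint32_t it = 0x01; it; it = it_advance(it))
            printf("ITSTATE = 0x%02x\n", it); /* 0x01, 0x02, 0x04, 0x08 */
        return 0;
    }
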
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 605d1b60469c..ab29c06a7d4b 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -138,6 +138,23 @@ SECTIONS
EXCEPTION_TABLE(8) /* __init_begin will be marked RO NX */
NOTES
+ . = ALIGN(PAGE_SIZE);
+ idmap_pg_dir = .;
+ . += IDMAP_DIR_SIZE;
+
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ tramp_pg_dir = .;
+ . += PAGE_SIZE;
+#endif
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ reserved_ttbr0 = .;
+ . += RESERVED_TTBR0_SIZE;
+#endif
+ swapper_pg_dir = .;
+ . += PAGE_SIZE;
+ swapper_pg_end = .;
+
. = ALIGN(SEGMENT_ALIGN);
__init_begin = .;
__inittext_begin = .;
@@ -216,21 +233,9 @@ SECTIONS
BSS_SECTION(0, 0, 0)
. = ALIGN(PAGE_SIZE);
- idmap_pg_dir = .;
- . += IDMAP_DIR_SIZE;
-
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
- tramp_pg_dir = .;
- . += PAGE_SIZE;
-#endif
-
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
- reserved_ttbr0 = .;
- . += RESERVED_TTBR0_SIZE;
-#endif
- swapper_pg_dir = .;
- . += SWAPPER_DIR_SIZE;
- swapper_pg_end = .;
+ init_pg_dir = .;
+ . += INIT_DIR_SIZE;
+ init_pg_end = .;
__pecoff_data_size = ABSOLUTE(. - __initdata_begin);
_end = .;
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 07256b08226c..a6c9fbaeaefc 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -57,6 +57,45 @@ static u64 core_reg_offset_from_id(u64 id)
return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
+static int validate_core_offset(const struct kvm_one_reg *reg)
+{
+ u64 off = core_reg_offset_from_id(reg->id);
+ int size;
+
+ switch (off) {
+ case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+ KVM_REG_ARM_CORE_REG(regs.regs[30]):
+ case KVM_REG_ARM_CORE_REG(regs.sp):
+ case KVM_REG_ARM_CORE_REG(regs.pc):
+ case KVM_REG_ARM_CORE_REG(regs.pstate):
+ case KVM_REG_ARM_CORE_REG(sp_el1):
+ case KVM_REG_ARM_CORE_REG(elr_el1):
+ case KVM_REG_ARM_CORE_REG(spsr[0]) ...
+ KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
+ size = sizeof(__u64);
+ break;
+
+ case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+ KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+ size = sizeof(__uint128_t);
+ break;
+
+ case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+ case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+ size = sizeof(__u32);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (KVM_REG_SIZE(reg->id) == size &&
+ IS_ALIGNED(off, size / sizeof(__u32)))
+ return 0;
+
+ return -EINVAL;
+}
+
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
/*
@@ -76,6 +115,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
+ if (validate_core_offset(reg))
+ return -EINVAL;
+
if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
return -EFAULT;
@@ -98,6 +140,9 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
+ if (validate_core_offset(reg))
+ return -EINVAL;
+
if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
return -EINVAL;
@@ -107,17 +152,25 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
}
if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
- u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK;
+ u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
switch (mode) {
case PSR_AA32_MODE_USR:
+ if (!system_supports_32bit_el0())
+ return -EINVAL;
+ break;
case PSR_AA32_MODE_FIQ:
case PSR_AA32_MODE_IRQ:
case PSR_AA32_MODE_SVC:
case PSR_AA32_MODE_ABT:
case PSR_AA32_MODE_UND:
+ if (!vcpu_el1_is_32bit(vcpu))
+ return -EINVAL;
+ break;
case PSR_MODE_EL0t:
case PSR_MODE_EL1t:
case PSR_MODE_EL1h:
+ if (vcpu_el1_is_32bit(vcpu))
+ return -EINVAL;
break;
default:
err = -EINVAL;
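
Note: validate_core_offset() above cross-checks the size userspace encoded in reg->id against the real width of the addressed field. The size decoding comes from the generic KVM uapi; as a sketch (constants copied from <linux/kvm.h>, shown here only for illustration):

    #include <stdint.h>

    #define KVM_REG_SIZE_SHIFT 52
    #define KVM_REG_SIZE_MASK  0x00f0000000000000ULL

    /* KVM_REG_SIZE(): byte size encoded in a KVM_{GET,SET}_ONE_REG id. */
    static inline uint64_t kvm_reg_size(uint64_t id)
    {
        return 1ULL << ((id & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT);
    }

The IS_ALIGNED() check then works in 32-bit units, since core-register offsets are expressed as indices into an array of __u32.
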
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index ea9225160786..4576b86a5579 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -65,6 +65,9 @@ __do_hyp_init:
b.lo __kvm_handle_stub_hvc
phys_to_ttbr x4, x0
+alternative_if ARM64_HAS_CNP
+ orr x4, x4, #TTBR_CNP_BIT
+alternative_else_nop_endif
msr ttbr0_el2, x4
mrs x4, tcr_el1
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index d496ef579859..ca46153d7915 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -98,8 +98,10 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
val = read_sysreg(cpacr_el1);
val |= CPACR_EL1_TTA;
val &= ~CPACR_EL1_ZEN;
- if (!update_fp_enabled(vcpu))
+ if (!update_fp_enabled(vcpu)) {
val &= ~CPACR_EL1_FPEN;
+ __activate_traps_fpsimd32(vcpu);
+ }
write_sysreg(val, cpacr_el1);
@@ -114,8 +116,10 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
val = CPTR_EL2_DEFAULT;
val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
- if (!update_fp_enabled(vcpu))
+ if (!update_fp_enabled(vcpu)) {
val |= CPTR_EL2_TFP;
+ __activate_traps_fpsimd32(vcpu);
+ }
write_sysreg(val, cptr_el2);
}
@@ -129,7 +133,6 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
- __activate_traps_fpsimd32(vcpu);
if (has_vhe())
activate_traps_vhe(vcpu);
else
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 9ce223944983..76d016b446b2 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -288,3 +288,14 @@ void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
vcpu->arch.sysregs_loaded_on_cpu = false;
}
+
+void __hyp_text __kvm_enable_ssbs(void)
+{
+ u64 tmp;
+
+ asm volatile(
+ "mrs %0, sctlr_el2\n"
+ "orr %0, %0, %1\n"
+ "msr sctlr_el2, %0"
+ : "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
+}
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 68755fd70dcf..69ff9887f724 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -12,7 +12,7 @@ lib-y := clear_user.o delay.o copy_from_user.o \
# when supported by the CPU. Result and argument registers are handled
# correctly, based on the function prototype.
lib-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o
-CFLAGS_atomic_ll_sc.o := -fcall-used-x0 -ffixed-x1 -ffixed-x2 \
+CFLAGS_atomic_ll_sc.o := -ffixed-x1 -ffixed-x2 \
-ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6 \
-ffixed-x7 -fcall-saved-x8 -fcall-saved-x9 \
-fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12 \
@@ -25,3 +25,5 @@ KCOV_INSTRUMENT_atomic_ll_sc.o := n
UBSAN_SANITIZE_atomic_ll_sc.o := n
lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
+
+obj-$(CONFIG_CRC32) += crc32.o
diff --git a/arch/arm64/lib/crc32.S b/arch/arm64/lib/crc32.S
new file mode 100644
index 000000000000..5bc1e85b4e1c
--- /dev/null
+++ b/arch/arm64/lib/crc32.S
@@ -0,0 +1,60 @@
+/*
+ * Accelerated CRC32(C) using AArch64 CRC instructions
+ *
+ * Copyright (C) 2016 - 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/alternative.h>
+#include <asm/assembler.h>
+
+ .cpu generic+crc
+
+ .macro __crc32, c
+0: subs x2, x2, #16
+ b.mi 8f
+ ldp x3, x4, [x1], #16
+CPU_BE( rev x3, x3 )
+CPU_BE( rev x4, x4 )
+ crc32\c\()x w0, w0, x3
+ crc32\c\()x w0, w0, x4
+ b.ne 0b
+ ret
+
+8: tbz x2, #3, 4f
+ ldr x3, [x1], #8
+CPU_BE( rev x3, x3 )
+ crc32\c\()x w0, w0, x3
+4: tbz x2, #2, 2f
+ ldr w3, [x1], #4
+CPU_BE( rev w3, w3 )
+ crc32\c\()w w0, w0, w3
+2: tbz x2, #1, 1f
+ ldrh w3, [x1], #2
+CPU_BE( rev16 w3, w3 )
+ crc32\c\()h w0, w0, w3
+1: tbz x2, #0, 0f
+ ldrb w3, [x1]
+ crc32\c\()b w0, w0, w3
+0: ret
+ .endm
+
+ .align 5
+ENTRY(crc32_le)
+alternative_if_not ARM64_HAS_CRC32
+ b crc32_le_base
+alternative_else_nop_endif
+ __crc32
+ENDPROC(crc32_le)
+
+ .align 5
+ENTRY(__crc32c_le)
+alternative_if_not ARM64_HAS_CRC32
+ b __crc32c_le_base
+alternative_else_nop_endif
+ __crc32 c
+ENDPROC(__crc32c_le)
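
Note: a hedged C counterpart to the assembler above, using the ACLE intrinsics from <arm_acle.h> (build with a compiler targeting armv8-a+crc; a little-endian host is assumed, which is why the rev fixups from the CPU_BE() paths are omitted):

    #include <arm_acle.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    uint32_t crc32_le_sketch(uint32_t crc, const uint8_t *p, size_t len)
    {
        /* Bulk: one CRC32X per 8 bytes, mirroring the ldp/crc32x loop. */
        for (; len >= 8; len -= 8, p += 8) {
            uint64_t v;
            memcpy(&v, p, 8); /* unaligned-safe load */
            crc = __crc32d(crc, v);
        }
        /* Tail: a byte at a time, standing in for the tbz ladder. */
        while (len--)
            crc = __crc32b(crc, *p++);
        return crc;
    }
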
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index c127f94da8e2..1f0ea2facf24 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -88,7 +88,7 @@ void verify_cpu_asid_bits(void)
}
}
-static void flush_context(unsigned int cpu)
+static void flush_context(void)
{
int i;
u64 asid;
@@ -142,7 +142,7 @@ static bool check_update_reserved_asid(u64 asid, u64 newasid)
return hit;
}
-static u64 new_context(struct mm_struct *mm, unsigned int cpu)
+static u64 new_context(struct mm_struct *mm)
{
static u32 cur_idx = 1;
u64 asid = atomic64_read(&mm->context.id);
@@ -180,7 +180,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
/* We're out of ASIDs, so increment the global generation count */
generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
&asid_generation);
- flush_context(cpu);
+ flush_context();
/* We have more ASIDs than CPUs, so this will always succeed */
asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
@@ -196,6 +196,9 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
unsigned long flags;
u64 asid, old_active_asid;
+ if (system_supports_cnp())
+ cpu_set_reserved_ttbr0();
+
asid = atomic64_read(&mm->context.id);
/*
@@ -223,7 +226,7 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
/* Check that our ASID belongs to the current generation. */
asid = atomic64_read(&mm->context.id);
if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
- asid = new_context(mm, cpu);
+ asid = new_context(mm);
atomic64_set(&mm->context.id, asid);
}
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index 65dfc8571bf8..fcb1f2a6d7c6 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -36,8 +36,8 @@ static const struct addr_marker address_markers[] = {
#endif
{ MODULES_VADDR, "Modules start" },
{ MODULES_END, "Modules end" },
- { VMALLOC_START, "vmalloc() Area" },
- { VMALLOC_END, "vmalloc() End" },
+ { VMALLOC_START, "vmalloc() area" },
+ { VMALLOC_END, "vmalloc() end" },
{ FIXADDR_START, "Fixmap start" },
{ FIXADDR_TOP, "Fixmap end" },
{ PCI_IO_START, "PCI I/O start" },
@@ -46,7 +46,7 @@ static const struct addr_marker address_markers[] = {
{ VMEMMAP_START, "vmemmap start" },
{ VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" },
#endif
- { PAGE_OFFSET, "Linear Mapping" },
+ { PAGE_OFFSET, "Linear mapping" },
{ -1, NULL },
};
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 50b30ff30de4..d0e638ef3af6 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -37,6 +37,7 @@
#include <asm/cmpxchg.h>
#include <asm/cpufeature.h>
#include <asm/exception.h>
+#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/sysreg.h>
@@ -56,10 +57,16 @@ struct fault_info {
};
static const struct fault_info fault_info[];
+static struct fault_info debug_fault_info[];
static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
{
- return fault_info + (esr & 63);
+ return fault_info + (esr & ESR_ELx_FSC);
+}
+
+static inline const struct fault_info *esr_to_debug_fault_info(unsigned int esr)
+{
+ return debug_fault_info + DBG_ESR_EVT(esr);
}
#ifdef CONFIG_KPROBES
@@ -235,9 +242,8 @@ static bool is_el1_instruction_abort(unsigned int esr)
return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}
-static inline bool is_el1_permission_fault(unsigned int esr,
- struct pt_regs *regs,
- unsigned long addr)
+static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs)
{
unsigned int ec = ESR_ELx_EC(esr);
unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
@@ -283,7 +289,7 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
return;
- if (is_el1_permission_fault(esr, regs, addr)) {
+ if (is_el1_permission_fault(addr, esr, regs)) {
if (esr & ESR_ELx_WNR)
msg = "write to read-only memory";
else
@@ -454,7 +460,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
mm_flags |= FAULT_FLAG_WRITE;
}
- if (addr < TASK_SIZE && is_el1_permission_fault(esr, regs, addr)) {
+ if (addr < TASK_SIZE && is_el1_permission_fault(addr, esr, regs)) {
/* regs->orig_addr_limit may be 0 if we entered from EL0 */
if (regs->orig_addr_limit == KERNEL_DS)
die_kernel_fault("access to user memory with fs=KERNEL_DS",
@@ -771,7 +777,7 @@ asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
if (addr > TASK_SIZE)
arm64_apply_bp_hardening();
- local_irq_enable();
+ local_daif_restore(DAIF_PROCCTX);
do_mem_abort(addr, esr, regs);
}
@@ -785,7 +791,7 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
if (user_mode(regs)) {
if (instruction_pointer(regs) > TASK_SIZE)
arm64_apply_bp_hardening();
- local_irq_enable();
+ local_daif_restore(DAIF_PROCCTX);
}
clear_siginfo(&info);
@@ -831,7 +837,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
unsigned int esr,
struct pt_regs *regs)
{
- const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
+ const struct fault_info *inf = esr_to_debug_fault_info(esr);
int rv;
/*
@@ -864,17 +870,3 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
return rv;
}
NOKPROBE_SYMBOL(do_debug_exception);
-
-#ifdef CONFIG_ARM64_PAN
-void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
-{
- /*
- * We modify PSTATE. This won't work from irq context as the PSTATE
- * is discarded once we return from the exception.
- */
- WARN_ON_ONCE(in_interrupt());
-
- sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
- asm(SET_PSTATE_PAN(1));
-}
-#endif /* CONFIG_ARM64_PAN */
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 192b3ba07075..f58ea503ad01 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -117,11 +117,14 @@ static pte_t get_clear_flush(struct mm_struct *mm,
/*
* If HW_AFDBM is enabled, then the HW could turn on
- * the dirty bit for any page in the set, so check
- * them all. All hugetlb entries are already young.
+ * the dirty or accessed bit for any page in the set,
+ * so check them all.
*/
if (pte_dirty(pte))
orig_pte = pte_mkdirty(orig_pte);
+
+ if (pte_young(pte))
+ orig_pte = pte_mkyoung(orig_pte);
}
if (valid) {
@@ -320,11 +323,40 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
}
+/*
+ * huge_ptep_set_access_flags will update access flags (dirty, accessed)
+ * and write permission.
+ *
+ * For a contiguous huge pte range, we only need to check whether
+ * write permission has to change on the first pte in the set. Then,
+ * for all the contiguous ptes, we need to check whether the dirty
+ * or young state differs.
+ */
+static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
+{
+ int i;
+
+ if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
+ return 1;
+
+ for (i = 0; i < ncontig; i++) {
+ pte_t orig_pte = huge_ptep_get(ptep + i);
+
+ if (pte_dirty(pte) != pte_dirty(orig_pte))
+ return 1;
+
+ if (pte_young(pte) != pte_young(orig_pte))
+ return 1;
+ }
+
+ return 0;
+}
+
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t pte, int dirty)
{
- int ncontig, i, changed = 0;
+ int ncontig, i;
size_t pgsize = 0;
unsigned long pfn = pte_pfn(pte), dpfn;
pgprot_t hugeprot;
@@ -336,19 +368,23 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
dpfn = pgsize >> PAGE_SHIFT;
+ if (!__cont_access_flags_changed(ptep, pte, ncontig))
+ return 0;
+
orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
- if (!pte_same(orig_pte, pte))
- changed = 1;
- /* Make sure we don't lose the dirty state */
+ /* Make sure we don't lose the dirty or young state */
if (pte_dirty(orig_pte))
pte = pte_mkdirty(pte);
+ if (pte_young(orig_pte))
+ pte = pte_mkyoung(pte);
+
hugeprot = pte_pgprot(pte);
for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));
- return changed;
+ return 1;
}
void huge_ptep_set_wrprotect(struct mm_struct *mm,
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 787e27964ab9..3cf87341859f 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -284,7 +284,6 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
#endif /* CONFIG_NUMA */
-#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
phys_addr_t addr = pfn << PAGE_SHIFT;
@@ -294,7 +293,6 @@ int pfn_valid(unsigned long pfn)
return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
-#endif
#ifndef CONFIG_SPARSEMEM
static void __init arm64_memory_present(void)
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 12145874c02b..fccb1a6f8c6f 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -192,7 +192,7 @@ void __init kasan_init(void)
/*
* We are going to perform proper setup of shadow memory.
- * At first we should unmap early shadow (clear_pgds() call bellow).
+ * At first we should unmap early shadow (clear_pgds() call below).
* However, instrumented code couldn't execute without shadow memory.
* tmp_pg_dir used to keep early shadow mapped until full shadow
* setup will be finished.
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 8080c9f489c3..9498c15b847b 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -67,6 +67,24 @@ static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
+static DEFINE_SPINLOCK(swapper_pgdir_lock);
+
+void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+ pgd_t *fixmap_pgdp;
+
+ spin_lock(&swapper_pgdir_lock);
+ fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
+ WRITE_ONCE(*fixmap_pgdp, pgd);
+ /*
+ * We need dsb(ishst) here to ensure the page-table-walker sees
+ * our new entry before set_p?d() returns. The fixmap's
+ * flush_tlb_kernel_range() via clear_fixmap() does this for us.
+ */
+ pgd_clear_fixmap();
+ spin_unlock(&swapper_pgdir_lock);
+}
+
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
@@ -629,34 +647,18 @@ static void __init map_kernel(pgd_t *pgdp)
*/
void __init paging_init(void)
{
- phys_addr_t pgd_phys = early_pgtable_alloc();
- pgd_t *pgdp = pgd_set_fixmap(pgd_phys);
+ pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));
map_kernel(pgdp);
map_mem(pgdp);
- /*
- * We want to reuse the original swapper_pg_dir so we don't have to
- * communicate the new address to non-coherent secondaries in
- * secondary_entry, and so cpu_switch_mm can generate the address with
- * adrp+add rather than a load from some global variable.
- *
- * To do this we need to go via a temporary pgd.
- */
- cpu_replace_ttbr1(__va(pgd_phys));
- memcpy(swapper_pg_dir, pgdp, PGD_SIZE);
- cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
-
pgd_clear_fixmap();
- memblock_free(pgd_phys, PAGE_SIZE);
- /*
- * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
- * allocated with it.
- */
- memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
- __pa_symbol(swapper_pg_end) - __pa_symbol(swapper_pg_dir)
- - PAGE_SIZE);
+ cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+ init_mm.pgd = swapper_pg_dir;
+
+ memblock_free(__pa_symbol(init_pg_dir),
+ __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
}
/*
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 146c04ceaa51..d7b66fc5e1c5 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -391,7 +391,6 @@ static int __init numa_init(int (*init_func)(void))
nodes_clear(numa_nodes_parsed);
nodes_clear(node_possible_map);
nodes_clear(node_online_map);
- numa_free_distance();
ret = numa_alloc_distance();
if (ret < 0)
@@ -399,20 +398,24 @@ static int __init numa_init(int (*init_func)(void))
ret = init_func();
if (ret < 0)
- return ret;
+ goto out_free_distance;
if (nodes_empty(numa_nodes_parsed)) {
pr_info("No NUMA configuration found\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out_free_distance;
}
ret = numa_register_nodes();
if (ret < 0)
- return ret;
+ goto out_free_distance;
setup_node_to_cpumask_map();
return 0;
+out_free_distance:
+ numa_free_distance();
+ return ret;
}
/**
@@ -432,7 +435,7 @@ static int __init dummy_numa_init(void)
if (numa_off)
pr_info("NUMA disabled\n"); /* Forced off on command line. */
pr_info("Faking a node at [mem %#018Lx-%#018Lx]\n",
- 0LLU, PFN_PHYS(max_pfn) - 1);
+ memblock_start_of_DRAM(), memblock_end_of_DRAM() - 1);
for_each_memblock(memory, mblk) {
ret = numa_add_memblk(0, mblk->base, mblk->base + mblk->size);
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 03646e6a2ef4..2c75b0b903ae 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -160,6 +160,12 @@ ENTRY(cpu_do_switch_mm)
mrs x2, ttbr1_el1
mmid x1, x1 // get mm->context.id
phys_to_ttbr x3, x0
+
+alternative_if ARM64_HAS_CNP
+ cbz x1, 1f // skip CNP for reserved ASID
+ orr x3, x3, #TTBR_CNP_BIT
+1:
+alternative_else_nop_endif
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
bfi x3, x1, #48, #16 // set the ASID field in TTBR0
#endif
@@ -184,7 +190,7 @@ ENDPROC(cpu_do_switch_mm)
.endm
/*
- * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd)
+ * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
*
* This is the low-level counterpart to cpu_replace_ttbr1, and should not be
* called by anything else. It can only be executed from a TTBR0 mapping.
@@ -194,8 +200,7 @@ ENTRY(idmap_cpu_replace_ttbr1)
__idmap_cpu_set_reserved_ttbr1 x1, x3
- phys_to_ttbr x3, x0
- msr ttbr1_el1, x3
+ msr ttbr1_el1, x0
isb
restore_daif x2
diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
index 5e4a59b3ec1b..2691a1857d20 100644
--- a/arch/hexagon/include/asm/bitops.h
+++ b/arch/hexagon/include/asm/bitops.h
@@ -211,7 +211,7 @@ static inline long ffz(int x)
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static inline long fls(int x)
+static inline int fls(int x)
{
int r;
@@ -232,7 +232,7 @@ static inline long fls(int x)
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
-static inline long ffs(int x)
+static inline int ffs(int x)
{
int r;
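
Note: the return-type change matches the generic <asm-generic/bitops> prototypes; the semantics pinned by the comment (fls is 1-based, with fls(0) == 0) are unchanged. A quick reference model using a compiler builtin (a sketch, not the hexagon implementation):

    #include <assert.h>

    static int fls_ref(int x)
    {
        return x ? 32 - __builtin_clz((unsigned int)x) : 0;
    }

    int main(void)
    {
        assert(fls_ref(0) == 0);
        assert(fls_ref(1) == 1);
        assert(fls_ref(0x80000000) == 32);
        return 0;
    }
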
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index ffc4ae8e126f..706699374444 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -47,7 +47,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_addr,
panic("Can't create %s() memory pool!", __func__);
else
gen_pool_add(coherent_pool,
- pfn_to_virt(max_low_pfn),
+ (unsigned long)pfn_to_virt(max_low_pfn),
hexagon_coherent_pool_size, -1);
}
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index e9110b9b8bcd..38049357d6d3 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -73,7 +73,7 @@ static blk_qc_t nfhd_make_request(struct request_queue *queue, struct bio *bio)
len = bvec.bv_len;
len >>= 9;
nfhd_read_write(dev->id, 0, dir, sec >> shift, len >> shift,
- bvec_to_phys(&bvec));
+ page_to_phys(bvec.bv_page) + bvec.bv_offset);
sec += len;
}
bio_endio(bio);
diff --git a/arch/m68k/include/asm/atafd.h b/arch/m68k/include/asm/atafd.h
deleted file mode 100644
index ad7014cad633..000000000000
--- a/arch/m68k/include/asm/atafd.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_M68K_FD_H
-#define _ASM_M68K_FD_H
-
-/* Definitions for the Atari Floppy driver */
-
-struct atari_format_descr {
- int track; /* to be formatted */
- int head; /* "" "" */
- int sect_offset; /* offset of first sector */
-};
-
-#endif
diff --git a/arch/m68k/include/asm/atafdreg.h b/arch/m68k/include/asm/atafdreg.h
deleted file mode 100644
index c31b4919ed2d..000000000000
--- a/arch/m68k/include/asm/atafdreg.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_FDREG_H
-#define _LINUX_FDREG_H
-
-/*
-** WD1772 stuff
- */
-
-/* register codes */
-
-#define FDCSELREG_STP (0x80) /* command/status register */
-#define FDCSELREG_TRA (0x82) /* track register */
-#define FDCSELREG_SEC (0x84) /* sector register */
-#define FDCSELREG_DTA (0x86) /* data register */
-
-/* register names for FDC_READ/WRITE macros */
-
-#define FDCREG_CMD 0
-#define FDCREG_STATUS 0
-#define FDCREG_TRACK 2
-#define FDCREG_SECTOR 4
-#define FDCREG_DATA 6
-
-/* command opcodes */
-
-#define FDCCMD_RESTORE (0x00) /* - */
-#define FDCCMD_SEEK (0x10) /* | */
-#define FDCCMD_STEP (0x20) /* | TYP 1 Commands */
-#define FDCCMD_STIN (0x40) /* | */
-#define FDCCMD_STOT (0x60) /* - */
-#define FDCCMD_RDSEC (0x80) /* - TYP 2 Commands */
-#define FDCCMD_WRSEC (0xa0) /* - " */
-#define FDCCMD_RDADR (0xc0) /* - */
-#define FDCCMD_RDTRA (0xe0) /* | TYP 3 Commands */
-#define FDCCMD_WRTRA (0xf0) /* - */
-#define FDCCMD_FORCI (0xd0) /* - TYP 4 Command */
-
-/* command modifier bits */
-
-#define FDCCMDADD_SR6 (0x00) /* step rate settings */
-#define FDCCMDADD_SR12 (0x01)
-#define FDCCMDADD_SR2 (0x02)
-#define FDCCMDADD_SR3 (0x03)
-#define FDCCMDADD_V (0x04) /* verify */
-#define FDCCMDADD_H (0x08) /* wait for spin-up */
-#define FDCCMDADD_U (0x10) /* update track register */
-#define FDCCMDADD_M (0x10) /* multiple sector access */
-#define FDCCMDADD_E (0x04) /* head settling flag */
-#define FDCCMDADD_P (0x02) /* precompensation off */
-#define FDCCMDADD_A0 (0x01) /* DAM flag */
-
-/* status register bits */
-
-#define FDCSTAT_MOTORON (0x80) /* motor on */
-#define FDCSTAT_WPROT (0x40) /* write protected (FDCCMD_WR*) */
-#define FDCSTAT_SPINUP (0x20) /* motor speed stable (Type I) */
-#define FDCSTAT_DELDAM (0x20) /* sector has deleted DAM (Type II+III) */
-#define FDCSTAT_RECNF (0x10) /* record not found */
-#define FDCSTAT_CRC (0x08) /* CRC error */
-#define FDCSTAT_TR00 (0x04) /* Track 00 flag (Type I) */
-#define FDCSTAT_LOST (0x04) /* Lost Data (Type II+III) */
-#define FDCSTAT_IDX (0x02) /* Index status (Type I) */
-#define FDCSTAT_DRQ (0x02) /* DRQ status (Type II+III) */
-#define FDCSTAT_BUSY (0x01) /* FDC is busy */
-
-
-/* PSG Port A Bit Nr 0 .. Side Sel .. 0 -> Side 1 1 -> Side 2 */
-#define DSKSIDE (0x01)
-
-#define DSKDRVNONE (0x06)
-#define DSKDRV0 (0x02)
-#define DSKDRV1 (0x04)
-
-/* step rates */
-#define FDCSTEP_6 0x00
-#define FDCSTEP_12 0x01
-#define FDCSTEP_2 0x02
-#define FDCSTEP_3 0x03
-
-#endif
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index a9af1d2dcd69..2c1c53d12179 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -931,7 +931,6 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
bool write);
#define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
diff --git a/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
index 4901833498f7..8441b2698e64 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
@@ -40,6 +40,7 @@ struct ltq_dma_channel {
int desc; /* the current descriptor */
struct ltq_dma_desc *desc_base; /* the descriptor base */
int phys; /* physical addr */
+ struct device *dev;
};
enum {
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index b2fa62922d88..49d6046ca1d0 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -13,6 +13,7 @@
#include <linux/atomic.h>
#include <linux/cpumask.h>
+#include <linux/sizes.h>
#include <linux/threads.h>
#include <asm/cachectl.h>
@@ -80,11 +81,10 @@ extern unsigned int vced_count, vcei_count;
#endif
-/*
- * One page above the stack is used for branch delay slot "emulation".
- * See dsemul.c for details.
- */
-#define STACK_TOP ((TASK_SIZE & PAGE_MASK) - PAGE_SIZE)
+#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_256M)
+
+extern unsigned long mips_stack_top(void);
+#define STACK_TOP mips_stack_top()
/*
* This decides where the kernel will search for a free chunk of vm
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 8fc69891e117..d4f7fd4550e1 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -32,6 +32,7 @@
#include <linux/nmi.h>
#include <linux/cpu.h>
+#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
@@ -39,6 +40,7 @@
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/irq.h>
+#include <asm/mips-cps.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
@@ -645,6 +647,29 @@ out:
return pc;
}
+unsigned long mips_stack_top(void)
+{
+ unsigned long top = TASK_SIZE & PAGE_MASK;
+
+ /* One page for branch delay slot "emulation" */
+ top -= PAGE_SIZE;
+
+ /* Space for the VDSO, data page & GIC user page */
+ top -= PAGE_ALIGN(current->thread.abi->vdso->size);
+ top -= PAGE_SIZE;
+ top -= mips_gic_present() ? PAGE_SIZE : 0;
+
+ /* Space for cache colour alignment */
+ if (cpu_has_dc_aliases)
+ top -= shm_align_mask + 1;
+
+ /* Space to randomize the VDSO base */
+ if (current->flags & PF_RANDOMIZE)
+ top -= VDSO_RANDOMIZE_SIZE;
+
+ return top;
+}
+
/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABI and on a 16-byte boundary for the 64-bit ABI.
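
The new mips_stack_top() stacks several fixed reservations below the task size
limit before the randomization window is subtracted. A minimal userspace sketch
of the same arithmetic, with illustrative constants standing in for the
kernel's page size, VDSO size and randomization window:

	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

	static unsigned long sketch_stack_top(unsigned long task_size,
					      unsigned long vdso_size,
					      int gic_present,
					      unsigned long colour_mask,
					      unsigned long randomize_size)
	{
		unsigned long top = task_size & ~(PAGE_SIZE - 1);

		top -= PAGE_SIZE;		/* delay slot emulation page */
		top -= PAGE_ALIGN(vdso_size);	/* VDSO image */
		top -= PAGE_SIZE;		/* VDSO data page */
		if (gic_present)
			top -= PAGE_SIZE;	/* GIC user page */
		if (colour_mask)
			top -= colour_mask + 1;	/* cache colour alignment */
		top -= randomize_size;		/* VDSO randomization window */

		return top;
	}

	int main(void)
	{
		printf("%#lx\n", sketch_stack_top(0x80000000UL, 2 * PAGE_SIZE,
						  1, 0xffffUL, 1UL << 20));
		return 0;
	}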
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 6d840a44fa36..e64b9e8bb002 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -846,6 +846,34 @@ static void __init arch_mem_init(char **cmdline_p)
struct memblock_region *reg;
extern void plat_mem_setup(void);
+ /*
+ * Initialize boot_command_line to an innocuous but non-empty string in
+ * order to prevent early_init_dt_scan_chosen() from copying
+ * CONFIG_CMDLINE into it without our knowledge. We handle
+ * CONFIG_CMDLINE ourselves below & don't want to duplicate its
+ * content because repeating arguments can be problematic.
+ */
+ strlcpy(boot_command_line, " ", COMMAND_LINE_SIZE);
+
+ /* call board setup routine */
+ plat_mem_setup();
+
+ /*
+ * Make sure all kernel memory is in the maps. The "UP" and
+ * "DOWN" are opposite for initdata since if it crosses over
+ * into another memory section you don't want that to be
+ * freed when the initdata is freed.
+ */
+ arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
+ PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
+ BOOT_MEM_RAM);
+ arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
+ PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
+ BOOT_MEM_INIT_RAM);
+
+ pr_info("Determined physical RAM map:\n");
+ print_memory_map();
+
#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
@@ -873,26 +901,6 @@ static void __init arch_mem_init(char **cmdline_p)
}
#endif
#endif
-
- /* call board setup routine */
- plat_mem_setup();
-
- /*
- * Make sure all kernel memory is in the maps. The "UP" and
- * "DOWN" are opposite for initdata since if it crosses over
- * into another memory section you don't want that to be
- * freed when the initdata is freed.
- */
- arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
- PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
- BOOT_MEM_RAM);
- arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
- PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
- BOOT_MEM_INIT_RAM);
-
- pr_info("Determined physical RAM map:\n");
- print_memory_map();
-
strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
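
The " " sentinel written into boot_command_line works because the device-tree
code only falls back to a built-in command line when the buffer is empty. A
small sketch of that shape; apply_default_if_empty is a hypothetical stand-in,
not a kernel function:

	#include <stdio.h>

	#define CMDLINE_SIZE	256

	static void apply_default_if_empty(char *cmdline, const char *builtin)
	{
		if (cmdline[0] == '\0')	/* mimics the fallback condition */
			snprintf(cmdline, CMDLINE_SIZE, "%s", builtin);
	}

	int main(void)
	{
		char cmdline[CMDLINE_SIZE] = "";

		snprintf(cmdline, sizeof(cmdline), " ");  /* innocuous, non-empty */
		apply_default_if_empty(cmdline, "console=ttyS0 debug");
		printf("\"%s\"\n", cmdline);              /* prints " " */
		return 0;
	}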
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 8f845f6e5f42..48a9c6b90e07 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -15,6 +15,7 @@
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/random.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
@@ -97,6 +98,21 @@ void update_vsyscall_tz(void)
}
}
+static unsigned long vdso_base(void)
+{
+ unsigned long base;
+
+ /* Skip the delay slot emulation page */
+ base = STACK_TOP + PAGE_SIZE;
+
+ if (current->flags & PF_RANDOMIZE) {
+ base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1);
+ base = PAGE_ALIGN(base);
+ }
+
+ return base;
+}
+
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
struct mips_vdso_image *image = current->thread.abi->vdso;
@@ -137,7 +153,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
if (cpu_has_dc_aliases)
size += shm_align_mask + 1;
- base = get_unmapped_area(NULL, 0, size, 0, 0);
+ base = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
if (IS_ERR_VALUE(base)) {
ret = base;
goto out;
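
vdso_base() can use a plain AND because VDSO_RANDOMIZE_SIZE is a power of two,
so the mask keeps the random offset inside the window without modulo bias from
division. A userspace sketch of the same computation, with rand() standing in
for get_random_int() and an assumed window size:

	#include <stdio.h>
	#include <stdlib.h>
	#include <time.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
	#define WINDOW		(1UL << 20)	/* must be a power of two */

	static unsigned long pick_base(unsigned long stack_top)
	{
		unsigned long base = stack_top + PAGE_SIZE; /* skip emulation page */

		base += (unsigned long)rand() & (WINDOW - 1); /* mask, not modulo */
		return PAGE_ALIGN(base);
	}

	int main(void)
	{
		srand((unsigned int)time(NULL));
		printf("%#lx\n", pick_base(0x7f000000UL));
		return 0;
	}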
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index ee64db032793..d8dcdb350405 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -512,16 +512,6 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
return 1;
}
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
- unsigned long end = hva + PAGE_SIZE;
-
- handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
-
- kvm_mips_callbacks->flush_shadow_all(kvm);
- return 0;
-}
-
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index 4b9fbb6744ad..664f2f7f55c1 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -130,7 +130,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
unsigned long flags;
ch->desc = 0;
- ch->desc_base = dma_zalloc_coherent(NULL,
+ ch->desc_base = dma_zalloc_coherent(ch->dev,
LTQ_DESC_NUM * LTQ_DESC_SIZE,
&ch->phys, GFP_ATOMIC);
@@ -182,7 +182,7 @@ ltq_dma_free(struct ltq_dma_channel *ch)
if (!ch->desc_base)
return;
ltq_dma_close(ch);
- dma_free_coherent(NULL, LTQ_DESC_NUM * LTQ_DESC_SIZE,
+ dma_free_coherent(ch->dev, LTQ_DESC_NUM * LTQ_DESC_SIZE,
ch->desc_base, ch->phys);
}
EXPORT_SYMBOL_GPL(ltq_dma_free);
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 3a6f34ef5ffc..069acec3df9f 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -280,9 +280,11 @@
* unset_bytes = end_addr - current_addr + 1
* a2 = t1 - a0 + 1
*/
+ .set reorder
PTR_SUBU a2, t1, a0
+ PTR_ADDIU a2, 1
jr ra
- PTR_ADDIU a2, 1
+ .set noreorder
.endm
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index f329b466e68f..2d14f17838d2 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -426,7 +426,7 @@ void unwind_frame_init_task(struct unwind_frame_info *info,
r.gr[30] = get_parisc_stackpointer();
regs = &r;
}
- unwind_frame_init(info, task, &r);
+ unwind_frame_init(info, task, regs);
} else {
unwind_frame_init_from_blocked_task(info, task);
}
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 13a688fc8cd0..2a2486526d1f 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -114,7 +114,7 @@
*/
#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
_PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \
- _PAGE_SOFT_DIRTY)
+ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
/*
* user access blocked by key
*/
@@ -132,7 +132,7 @@
*/
#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
_PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \
- _PAGE_SOFT_DIRTY)
+ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
#define H_PTE_PKEY (H_PTE_PKEY_BIT0 | H_PTE_PKEY_BIT1 | H_PTE_PKEY_BIT2 | \
H_PTE_PKEY_BIT3 | H_PTE_PKEY_BIT4)
@@ -1051,7 +1051,6 @@ static inline void vmemmap_remove_mapping(unsigned long start,
return hash__vmemmap_remove_mapping(start, page_size);
}
#endif
-struct page *realmode_pfn_to_page(unsigned long pfn);
static inline pte_t pmd_pte(pmd_t pmd)
{
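
Adding _PAGE_DEVMAP to the *_CHG_MASK values means that pte_modify()-style
helpers now carry the devmap bit across protection changes instead of silently
dropping it. The general shape of such a helper, as a sketch (modify_prot and
the bit values are illustrative, not the kernel's):

	#include <stdio.h>

	/* Keep the bits in chg_mask from the old PTE; take everything
	 * else from the new protection value. */
	static unsigned long modify_prot(unsigned long pte, unsigned long newprot,
					 unsigned long chg_mask)
	{
		return (pte & chg_mask) | (newprot & ~chg_mask);
	}

	int main(void)
	{
		unsigned long devmap = 0x200, dirty = 0x80;	/* illustrative */
		unsigned long pte = devmap | dirty | 0x1;

		printf("%#lx\n", modify_prot(pte, 0x5, devmap | dirty));
		return 0;
	}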
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index ab3a4fba38e3..3d4b88cb8599 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -220,8 +220,6 @@ extern void iommu_del_device(struct device *dev);
extern int __init tce_iommu_bus_notifier_init(void);
extern long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
unsigned long *hpa, enum dma_data_direction *direction);
-extern long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
- unsigned long *hpa, enum dma_data_direction *direction);
#else
static inline void iommu_register_group(struct iommu_table_group *table_group,
int pci_domain_number,
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index b2f89b621b15..b694d6af1150 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -38,6 +38,7 @@ extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
unsigned long ua, unsigned int pageshift, unsigned long *hpa);
+extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 1a951b00465d..1fffbba8d6a5 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -9,6 +9,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
extern unsigned int rtas_data;
extern unsigned long long memory_limit;
+extern bool init_mem_is_free;
extern unsigned long klimit;
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index ea04dfb8c092..2d8fc8c9da7a 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1314,9 +1314,7 @@ EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
#ifdef CONFIG_PPC_DENORMALISATION
mfspr r10,SPRN_HSRR1
- mfspr r11,SPRN_HSRR0 /* save HSRR0 */
andis. r10,r10,(HSRR1_DENORM)@h /* denorm? */
- addi r11,r11,-4 /* HSRR0 is next instruction */
bne+ denorm_assist
#endif
@@ -1382,6 +1380,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
*/
XVCPSGNDP32(32)
denorm_done:
+ mfspr r11,SPRN_HSRR0
+ subi r11,r11,4
mtspr SPRN_HSRR0,r11
mtcrf 0x80,r9
ld r9,PACA_EXGEN+EX_R9(r13)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index af7a20dc6e09..19b4c628f3be 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1013,31 +1013,6 @@ long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
}
EXPORT_SYMBOL_GPL(iommu_tce_xchg);
-#ifdef CONFIG_PPC_BOOK3S_64
-long iommu_tce_xchg_rm(struct iommu_table *tbl, unsigned long entry,
- unsigned long *hpa, enum dma_data_direction *direction)
-{
- long ret;
-
- ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
-
- if (!ret && ((*direction == DMA_FROM_DEVICE) ||
- (*direction == DMA_BIDIRECTIONAL))) {
- struct page *pg = realmode_pfn_to_page(*hpa >> PAGE_SHIFT);
-
- if (likely(pg)) {
- SetPageDirty(pg);
- } else {
- tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
- ret = -EFAULT;
- }
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_tce_xchg_rm);
-#endif
-
int iommu_take_ownership(struct iommu_table *tbl)
{
unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 913c5725cdb2..bb6ac471a784 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1306,6 +1306,16 @@ void show_user_instructions(struct pt_regs *regs)
pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int));
+ /*
+ * Make sure the NIP points at userspace, not kernel text/data or
+ * elsewhere.
+ */
+ if (!__access_ok(pc, instructions_to_print * sizeof(int), USER_DS)) {
+ pr_info("%s[%d]: Bad NIP, not dumping instructions.\n",
+ current->comm, current->pid);
+ return;
+ }
+
pr_info("%s[%d]: code: ", current->comm, current->pid);
for (i = 0; i < instructions_to_print; i++) {
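
The added __access_ok() test has to be overflow-safe: the start of the dump
window plus its length must not wrap past the user/kernel split. A sketch of
that comparison, where USER_LIMIT is an illustrative constant standing in for
the USER_DS limit:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define USER_LIMIT 0x0000800000000000ULL	/* illustrative only */

	static bool user_range_ok(uint64_t start, uint64_t len)
	{
		return len <= USER_LIMIT && start <= USER_LIMIT - len;
	}

	int main(void)
	{
		printf("%d\n", user_range_ok(0x10000000ULL, 64));	/* 1 */
		printf("%d\n", user_range_ok(UINT64_MAX - 16, 64));	/* 0: wraps */
		return 0;
	}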
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 6bffbc5affe7..7716374786bd 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -176,13 +176,27 @@ _GLOBAL(tm_reclaim)
std r1, PACATMSCRATCH(r13)
ld r1, PACAR1(r13)
- /* Store the PPR in r11 and reset to decent value */
std r11, GPR11(r1) /* Temporary stash */
+ /*
+ * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
+ * clobbered by an exception once we turn on MSR_RI below.
+ */
+ ld r11, PACATMSCRATCH(r13)
+ std r11, GPR1(r1)
+
+ /*
+ * Store r13 away so we can free up the scratch SPR for the SLB fault
+ * handler (needed once we start accessing the thread_struct).
+ */
+ GET_SCRATCH0(r11)
+ std r11, GPR13(r1)
+
/* Reset MSR RI so we can take SLB faults again */
li r11, MSR_RI
mtmsrd r11, 1
+ /* Store the PPR in r11 and reset to decent value */
mfspr r11, SPRN_PPR
HMT_MEDIUM
@@ -207,11 +221,11 @@ _GLOBAL(tm_reclaim)
SAVE_GPR(8, r7) /* user r8 */
SAVE_GPR(9, r7) /* user r9 */
SAVE_GPR(10, r7) /* user r10 */
- ld r3, PACATMSCRATCH(r13) /* user r1 */
+ ld r3, GPR1(r1) /* user r1 */
ld r4, GPR7(r1) /* user r7 */
ld r5, GPR11(r1) /* user r11 */
ld r6, GPR12(r1) /* user r12 */
- GET_SCRATCH0(8) /* user r13 */
+ ld r8, GPR13(r1) /* user r13 */
std r3, GPR1(r7)
std r4, GPR7(r7)
std r5, GPR11(r7)
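
The reordering matters because once MSR_RI is set, an exception may fire and
reuse both PACATMSCRATCH and the SPR scratch register; anything still parked
there must be copied to stable storage (the kernel stack) first. A toy C
sketch of that ordering constraint, not the actual mechanism:

	#include <stdio.h>

	static unsigned long scratch;		/* shared with "exception" path */
	static unsigned long stack_save;	/* private, stable storage */

	static void exception(void)		/* may run any time after enable */
	{
		scratch = 0xdeadbeef;		/* handler reuses the scratch slot */
	}

	int main(void)
	{
		scratch = 0x1234;		/* "user r1" parked in scratch */
		stack_save = scratch;		/* copy out BEFORE re-enabling */
		exception();			/* simulated post-enable exception */
		printf("%#lx\n", stack_save);	/* still 0x1234 */
		return 0;
	}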
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 3c0e8fb2b773..68e14afecac8 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -358,7 +358,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
unsigned long pp, key;
unsigned long v, orig_v, gr;
__be64 *hptep;
- int index;
+ long int index;
int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
if (kvm_is_radix(vcpu->kvm))
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 0af1c0aea1fe..998f8d089ac7 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -525,8 +525,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned long ea, unsigned long dsisr)
{
struct kvm *kvm = vcpu->kvm;
- unsigned long mmu_seq, pte_size;
- unsigned long gpa, gfn, hva, pfn;
+ unsigned long mmu_seq;
+ unsigned long gpa, gfn, hva;
struct kvm_memory_slot *memslot;
struct page *page = NULL;
long ret;
@@ -623,9 +623,10 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
*/
hva = gfn_to_hva_memslot(memslot, gfn);
if (upgrade_p && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
- pfn = page_to_pfn(page);
upgrade_write = true;
} else {
+ unsigned long pfn;
+
/* Call KVM generic code to do the slow-path check */
pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
writing, upgrade_p);
@@ -639,63 +640,55 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
}
- /* See if we can insert a 1GB or 2MB large PTE here */
- level = 0;
- if (page && PageCompound(page)) {
- pte_size = PAGE_SIZE << compound_order(compound_head(page));
- if (pte_size >= PUD_SIZE &&
- (gpa & (PUD_SIZE - PAGE_SIZE)) ==
- (hva & (PUD_SIZE - PAGE_SIZE))) {
- level = 2;
- pfn &= ~((PUD_SIZE >> PAGE_SHIFT) - 1);
- } else if (pte_size >= PMD_SIZE &&
- (gpa & (PMD_SIZE - PAGE_SIZE)) ==
- (hva & (PMD_SIZE - PAGE_SIZE))) {
- level = 1;
- pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);
- }
- }
-
/*
- * Compute the PTE value that we need to insert.
+ * Read the PTE from the process' radix tree and use that
+ * so we get the shift and attribute bits.
*/
- if (page) {
- pgflags = _PAGE_READ | _PAGE_EXEC | _PAGE_PRESENT | _PAGE_PTE |
- _PAGE_ACCESSED;
- if (writing || upgrade_write)
- pgflags |= _PAGE_WRITE | _PAGE_DIRTY;
- pte = pfn_pte(pfn, __pgprot(pgflags));
- } else {
- /*
- * Read the PTE from the process' radix tree and use that
- * so we get the attribute bits.
- */
- local_irq_disable();
- ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
- pte = *ptep;
+ local_irq_disable();
+ ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+ /*
+ * If the PTE disappeared temporarily due to a THP
+ * collapse, just return and let the guest try again.
+ */
+ if (!ptep) {
local_irq_enable();
- if (shift == PUD_SHIFT &&
- (gpa & (PUD_SIZE - PAGE_SIZE)) ==
- (hva & (PUD_SIZE - PAGE_SIZE))) {
- level = 2;
- } else if (shift == PMD_SHIFT &&
- (gpa & (PMD_SIZE - PAGE_SIZE)) ==
- (hva & (PMD_SIZE - PAGE_SIZE))) {
- level = 1;
- } else if (shift && shift != PAGE_SHIFT) {
- /* Adjust PFN */
- unsigned long mask = (1ul << shift) - PAGE_SIZE;
- pte = __pte(pte_val(pte) | (hva & mask));
- }
- pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
- if (writing || upgrade_write) {
- if (pte_val(pte) & _PAGE_WRITE)
- pte = __pte(pte_val(pte) | _PAGE_DIRTY);
- } else {
- pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
+ if (page)
+ put_page(page);
+ return RESUME_GUEST;
+ }
+ pte = *ptep;
+ local_irq_enable();
+
+ /* Get pte level from shift/size */
+ if (shift == PUD_SHIFT &&
+ (gpa & (PUD_SIZE - PAGE_SIZE)) ==
+ (hva & (PUD_SIZE - PAGE_SIZE))) {
+ level = 2;
+ } else if (shift == PMD_SHIFT &&
+ (gpa & (PMD_SIZE - PAGE_SIZE)) ==
+ (hva & (PMD_SIZE - PAGE_SIZE))) {
+ level = 1;
+ } else {
+ level = 0;
+ if (shift > PAGE_SHIFT) {
+ /*
+ * If the pte maps more than one page, bring over
+ * bits from the virtual address to get the real
+ * address of the specific single page we want.
+ */
+ unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
+ pte = __pte(pte_val(pte) | (hva & rpnmask));
}
}
+ pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
+ if (writing || upgrade_write) {
+ if (pte_val(pte) & _PAGE_WRITE)
+ pte = __pte(pte_val(pte) | _PAGE_DIRTY);
+ } else {
+ pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
+ }
+
/* Allocate space in the tree and write the PTE */
ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
@@ -725,10 +718,10 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
gpa, shift);
kvmppc_radix_tlbie_page(kvm, gpa, shift);
if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
- unsigned long npages = 1;
+ unsigned long psize = PAGE_SIZE;
if (shift)
- npages = 1ul << (shift - PAGE_SHIFT);
- kvmppc_update_dirty_map(memslot, gfn, npages);
+ psize = 1ul << shift;
+ kvmppc_update_dirty_map(memslot, gfn, psize);
}
}
return 0;
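
The rewritten fault path derives the mapping level purely from the host PTE's
shift, and when an oversized PTE is used at base-page granularity it folds the
low virtual-address bits into the physical address so the exact page is named.
A sketch of that arithmetic using generic 4K/2M/1G shifts (the real radix
geometry may differ):

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PMD_SHIFT	21
	#define PUD_SHIFT	30
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)

	/* Return the mapping level (2 = 1G, 1 = 2M, 0 = base page); for an
	 * oversized host PTE used at level 0, fold the in-page offset bits
	 * of hva into *pa so it names the exact base page. */
	static int pick_level(unsigned long gpa, unsigned long hva,
			      unsigned int shift, unsigned long *pa)
	{
		if (shift == PUD_SHIFT &&
		    (gpa & ((1UL << PUD_SHIFT) - PAGE_SIZE)) ==
		    (hva & ((1UL << PUD_SHIFT) - PAGE_SIZE)))
			return 2;
		if (shift == PMD_SHIFT &&
		    (gpa & ((1UL << PMD_SHIFT) - PAGE_SIZE)) ==
		    (hva & ((1UL << PMD_SHIFT) - PAGE_SIZE)))
			return 1;
		if (shift > PAGE_SHIFT)
			*pa |= hva & ((1UL << shift) - PAGE_SIZE);
		return 0;
	}

	int main(void)
	{
		unsigned long pa = 0x40000000UL;
		int level = pick_level(0x80201000UL, 0x7f201000UL,
				       PMD_SHIFT, &pa);

		printf("level=%d pa=%#lx\n", level, pa);
		return 0;
	}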
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 506a4d400458..6821ead4b4eb 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -187,12 +187,35 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry)
+static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
+ unsigned long entry, unsigned long *hpa,
+ enum dma_data_direction *direction)
+{
+ long ret;
+
+ ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
+
+ if (!ret && ((*direction == DMA_FROM_DEVICE) ||
+ (*direction == DMA_BIDIRECTIONAL))) {
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
+ /*
+ * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
+	 * calling this, so the UA we read here is still valid.
+ */
+ if (pua && *pua)
+ mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
+ }
+
+ return ret;
+}
+
+static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
+ unsigned long entry)
{
unsigned long hpa = 0;
enum dma_data_direction dir = DMA_NONE;
- iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+ iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
}
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
@@ -224,7 +247,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
unsigned long hpa = 0;
long ret;
- if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir))
+ if (iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir))
/*
* real mode xchg can fail if struct page crosses
* a page boundary
@@ -236,7 +259,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
if (ret)
- iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+ iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
return ret;
}
@@ -282,7 +305,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
return H_CLOSED;
- ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+ ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
if (ret) {
mm_iommu_mapped_dec(mem);
/*
@@ -371,7 +394,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
return ret;
WARN_ON_ONCE_RM(1);
- kvmppc_rm_clear_tce(stit->tbl, entry);
+ kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
}
kvmppc_tce_put(stt, entry, tce);
@@ -520,7 +543,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
goto unlock_exit;
WARN_ON_ONCE_RM(1);
- kvmppc_rm_clear_tce(stit->tbl, entry);
+ kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
}
kvmppc_tce_put(stt, entry + i, tce);
@@ -571,7 +594,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
return ret;
WARN_ON_ONCE_RM(1);
- kvmppc_rm_clear_tce(stit->tbl, entry);
+ kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
}
}
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 886ed94b9c13..d05c8af4ac51 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -443,6 +443,9 @@ _GLOBAL(csum_ipv6_magic)
addc r0, r8, r9
ld r10, 0(r4)
ld r11, 8(r4)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ rotldi r5, r5, 8
+#endif
adde r0, r0, r10
add r5, r5, r7
adde r0, r0, r11
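
The conditional rotldi is needed because the operands enter the running sum as
native 64-bit loads, so on little-endian one field has to be byte-positioned
before it is added. The fold that eventually reduces such a one's-complement
sum to 16 bits looks like this sketch:

	#include <stdint.h>
	#include <stdio.h>

	/* Fold a wide one's-complement accumulator down to 16 bits. */
	static uint16_t csum_fold(uint64_t sum)
	{
		while (sum >> 16)
			sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}

	int main(void)
	{
		uint64_t sum = 0x1234 + 0x5678 + 0x9abc;

		printf("%#06x\n", csum_fold(sum));
		return 0;
	}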
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 850f3b8f4da5..5ffee298745f 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -142,7 +142,7 @@ static inline int unmap_patch_area(unsigned long addr)
return 0;
}
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(unsigned int *addr, unsigned int instr)
{
int err;
unsigned int *patch_addr = NULL;
@@ -182,12 +182,22 @@ out:
}
#else /* !CONFIG_STRICT_KERNEL_RWX */
-int patch_instruction(unsigned int *addr, unsigned int instr)
+static int do_patch_instruction(unsigned int *addr, unsigned int instr)
{
return raw_patch_instruction(addr, instr);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */
+
+int patch_instruction(unsigned int *addr, unsigned int instr)
+{
+ /* Make sure we aren't patching a freed init section */
+ if (init_mem_is_free && init_section_contains(addr, 4)) {
+ pr_debug("Skipping init section patching addr: 0x%px\n", addr);
+ return 0;
+ }
+ return do_patch_instruction(addr, instr);
+}
NOKPROBE_SYMBOL(patch_instruction);
int patch_branch(unsigned int *addr, unsigned long target, int flags)
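
patch_instruction() now refuses to touch an address inside an already-freed
init section, since stale patch sites can outlive the init text they point at.
A sketch of the guard pattern; the range test is an assumed stand-in for
init_section_contains():

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static bool init_mem_is_free;
	static uintptr_t init_start = 0x1000, init_end = 0x2000; /* illustrative */

	static bool in_init_section(const void *addr, size_t len)
	{
		uintptr_t a = (uintptr_t)addr;

		return a >= init_start && a + len <= init_end;
	}

	static int patch(void *addr, uint32_t insn)
	{
		if (init_mem_is_free && in_init_section(addr, sizeof(insn))) {
			printf("skipping freed init address %p\n", addr);
			return 0;
		}
		/* real patching would happen here */
		return 0;
	}

	int main(void)
	{
		init_mem_is_free = true;
		return patch((void *)0x1800, 0x60000000u);
	}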
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 51ce091914f9..7a9886f98b0c 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -308,55 +308,6 @@ void register_page_bootmem_memmap(unsigned long section_nr,
{
}
-/*
- * We do not have access to the sparsemem vmemmap, so we fallback to
- * walking the list of sparsemem blocks which we already maintain for
- * the sake of crashdump. In the long run, we might want to maintain
- * a tree if performance of that linear walk becomes a problem.
- *
- * realmode_pfn_to_page functions can fail due to:
- * 1) As real sparsemem blocks do not lay in RAM continously (they
- * are in virtual address space which is not available in the real mode),
- * the requested page struct can be split between blocks so get_page/put_page
- * may fail.
- * 2) When huge pages are used, the get_page/put_page API will fail
- * in real mode as the linked addresses in the page struct are virtual
- * too.
- */
-struct page *realmode_pfn_to_page(unsigned long pfn)
-{
- struct vmemmap_backing *vmem_back;
- struct page *page;
- unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
- unsigned long pg_va = (unsigned long) pfn_to_page(pfn);
-
- for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
- if (pg_va < vmem_back->virt_addr)
- continue;
-
- /* After vmemmap_list entry free is possible, need check all */
- if ((pg_va + sizeof(struct page)) <=
- (vmem_back->virt_addr + page_size)) {
- page = (struct page *) (vmem_back->phys + pg_va -
- vmem_back->virt_addr);
- return page;
- }
- }
-
- /* Probably that page struct is split between real pages */
- return NULL;
-}
-EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
-
-#else
-
-struct page *realmode_pfn_to_page(unsigned long pfn)
-{
- struct page *page = pfn_to_page(pfn);
- return page;
-}
-EXPORT_SYMBOL_GPL(realmode_pfn_to_page);
-
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
#ifdef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 5c8530d0c611..04ccb274a620 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -63,6 +63,7 @@
#endif
unsigned long long memory_limit;
+bool init_mem_is_free;
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
@@ -396,6 +397,7 @@ void free_initmem(void)
{
ppc_md.progress = ppc_printk_progress;
mark_initmem_nx();
+ init_mem_is_free = true;
free_initmem_default(POISON_FREE_INITMEM);
}
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index c9ee9e23845f..56c2234cc6ae 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -18,11 +18,15 @@
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
+#include <linux/sizes.h>
#include <asm/mmu_context.h>
#include <asm/pte-walk.h>
static DEFINE_MUTEX(mem_list_mutex);
+#define MM_IOMMU_TABLE_GROUP_PAGE_DIRTY 0x1
+#define MM_IOMMU_TABLE_GROUP_PAGE_MASK ~(SZ_4K - 1)
+
struct mm_iommu_table_group_mem_t {
struct list_head next;
struct rcu_head rcu;
@@ -263,6 +267,9 @@ static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
if (!page)
continue;
+ if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY)
+ SetPageDirty(page);
+
put_page(page);
mem->hpas[i] = 0;
}
@@ -360,7 +367,6 @@ struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
return ret;
}
-EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm);
struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
unsigned long ua, unsigned long entries)
@@ -390,7 +396,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
if (pageshift > mem->pageshift)
return -EFAULT;
- *hpa = *va | (ua & ~PAGE_MASK);
+ *hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);
return 0;
}
@@ -413,11 +419,31 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
if (!pa)
return -EFAULT;
- *hpa = *pa | (ua & ~PAGE_MASK);
+ *hpa = (*pa & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);
return 0;
}
-EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm);
+
+extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
+{
+ struct mm_iommu_table_group_mem_t *mem;
+ long entry;
+ void *va;
+ unsigned long *pa;
+
+ mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE);
+ if (!mem)
+ return;
+
+ entry = (ua - mem->ua) >> PAGE_SHIFT;
+ va = &mem->hpas[entry];
+
+ pa = (void *) vmalloc_to_phys(va);
+ if (!pa)
+ return;
+
+ *pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY;
+}
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
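
Because every entry in mem->hpas[] is page-aligned, its low bits are free to
carry metadata: MM_IOMMU_TABLE_GROUP_PAGE_DIRTY claims bit 0, and readers mask
it off with MM_IOMMU_TABLE_GROUP_PAGE_MASK. The trick in isolation, as a
sketch:

	#include <stdio.h>

	#define PAGE_MASK_4K	(~0xfffUL)
	#define DIRTY_BIT	0x1UL

	int main(void)
	{
		unsigned long hpa = 0x123456000UL;	/* page-aligned address */

		hpa |= DIRTY_BIT;			/* mark dirty in place */
		printf("addr=%#lx dirty=%lu\n",
		       hpa & PAGE_MASK_4K, hpa & DIRTY_BIT);
		return 0;
	}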
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 35ac5422903a..055b211b7126 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1204,7 +1204,9 @@ int find_and_online_cpu_nid(int cpu)
int new_nid;
/* Use associativity from first thread for all siblings */
- vphn_get_associativity(cpu, associativity);
+ if (vphn_get_associativity(cpu, associativity))
+ return cpu_to_node(cpu);
+
new_nid = associativity_to_nid(associativity);
if (new_nid < 0 || !node_possible(new_nid))
new_nid = first_online_node;
@@ -1215,9 +1217,10 @@ int find_and_online_cpu_nid(int cpu)
* Need to ensure that NODE_DATA is initialized for a node from
* available memory (see memblock_alloc_try_nid). If unable to
* init the node, then default to nearest node that has memory
- * installed.
+ * installed. Skip onlining a node if the subsystems are not
+ * yet initialized.
*/
- if (try_online_node(new_nid))
+ if (!topology_inited || try_online_node(new_nid))
new_nid = first_online_node;
#else
/*
@@ -1452,7 +1455,8 @@ static struct timer_list topology_timer;
static void reset_topology_timer(void)
{
- mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
+ if (vphn_enabled)
+ mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
}
#ifdef CONFIG_SMP
diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c
index 333b1f80c435..b271b283c785 100644
--- a/arch/powerpc/mm/pkeys.c
+++ b/arch/powerpc/mm/pkeys.c
@@ -45,7 +45,7 @@ static void scan_pkey_feature(void)
* Since any pkey can be used for data or execute, we will just treat
* all keys as equal and track them as one entity.
*/
- pkeys_total = be32_to_cpu(vals[0]);
+ pkeys_total = vals[0];
pkeys_devtree_defined = true;
}
diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
index 6c5db1acbe8d..fe9691040f54 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
@@ -276,7 +276,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
level_shift = entries_shift + 3;
level_shift = max_t(unsigned int, level_shift, PAGE_SHIFT);
- if ((level_shift - 3) * levels + page_shift >= 60)
+ if ((level_shift - 3) * levels + page_shift >= 55)
return -EINVAL;
/* Allocate TCE table */
diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
new file mode 100644
index 000000000000..c9fecd120d18
--- /dev/null
+++ b/arch/riscv/include/asm/asm-prototypes.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_PROTOTYPES_H
+
+#include <linux/ftrace.h>
+#include <asm-generic/asm-prototypes.h>
+
+#endif /* _ASM_RISCV_PROTOTYPES_H */
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index db20dc630e7e..b2d26d9d8489 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -85,15 +85,8 @@ atomic_t hart_lottery;
#ifdef CONFIG_BLK_DEV_INITRD
static void __init setup_initrd(void)
{
- extern char __initramfs_start[];
- extern unsigned long __initramfs_size;
unsigned long size;
- if (__initramfs_size > 0) {
- initrd_start = (unsigned long)(&__initramfs_start);
- initrd_end = initrd_start + __initramfs_size;
- }
-
if (initrd_start >= initrd_end) {
printk(KERN_INFO "initrd not found or empty");
goto disable;
@@ -193,7 +186,7 @@ static void __init setup_bootmem(void)
BUG_ON(mem_size == 0);
set_max_mapnr(PFN_DOWN(mem_size));
- max_low_pfn = pfn_base + PFN_DOWN(mem_size);
+ max_low_pfn = memblock_end_of_DRAM();
#ifdef CONFIG_BLK_DEV_INITRD
setup_initrd();
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index 80b27294c1de..ab9a0ebecc19 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -208,7 +208,7 @@ static int cbc_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
walk->dst.virt.addr, walk->src.virt.addr, n);
if (k)
ret = blkcipher_walk_done(desc, walk, nbytes - k);
- if (n < k) {
+ if (k < n) {
if (__cbc_paes_set_key(ctx) != 0)
return blkcipher_walk_done(desc, walk, -EIO);
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index f31a15044c24..a8418e1379eb 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -16,7 +16,13 @@ typedef struct {
unsigned long asce;
unsigned long asce_limit;
unsigned long vdso_base;
- /* The mmu context allocates 4K page tables. */
+ /*
+ * The following bitfields need a down_write on the mm
+ * semaphore when they are written to. As they are only
+ * written once, they can be read without a lock.
+ *
+ * The mmu context allocates 4K page tables.
+ */
unsigned int alloc_pgste:1;
/* The mmu context uses extended page tables. */
unsigned int has_pgste:1;
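
The new comment is needed because adjacent C bitfields share one storage word:
each assignment is a read-modify-write of the whole word, so two unlocked
writers can lose each other's updates even when they touch different flags. A
sketch of why:

	#include <stdio.h>

	struct flags {
		unsigned int a:1;
		unsigned int b:1;
	};

	int main(void)
	{
		struct flags f = { 0, 0 };

		/* Each statement compiles to load word, set bit, store word.
		 * Interleaved on two CPUs, one store can overwrite the other,
		 * which is why writers must take the mm semaphore. */
		f.a = 1;
		f.b = 1;
		printf("a=%u b=%u\n", f.a, f.b);
		return 0;
	}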
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 3cae9168f63c..e44a8d7959f5 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -108,7 +108,8 @@ int sclp_early_get_core_info(struct sclp_core_info *info);
void sclp_early_get_ipl_info(struct sclp_ipl_info *info);
void sclp_early_detect(void);
void sclp_early_printk(const char *s);
-void __sclp_early_printk(const char *s, unsigned int len);
+void sclp_early_printk_force(const char *s);
+void __sclp_early_printk(const char *s, unsigned int len, unsigned int force);
int _sclp_get_core_info(struct sclp_core_info *info);
int sclp_core_configure(u8 core);
diff --git a/arch/s390/kernel/early_printk.c b/arch/s390/kernel/early_printk.c
index 9431784d7796..40c1dfec944e 100644
--- a/arch/s390/kernel/early_printk.c
+++ b/arch/s390/kernel/early_printk.c
@@ -10,7 +10,7 @@
static void sclp_early_write(struct console *con, const char *s, unsigned int len)
{
- __sclp_early_printk(s, len);
+ __sclp_early_printk(s, len, 0);
}
static struct console sclp_early_console = {
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
index a049a7b9d6e8..c1a080b11ae9 100644
--- a/arch/s390/kernel/swsusp.S
+++ b/arch/s390/kernel/swsusp.S
@@ -198,12 +198,10 @@ pgm_check_entry:
/* Suspend CPU not available -> panic */
larl %r15,init_thread_union
- ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
+ aghi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
+ aghi %r15,-STACK_FRAME_OVERHEAD
larl %r2,.Lpanic_string
- lghi %r1,0
- sam31
- sigp %r1,%r0,SIGP_SET_ARCHITECTURE
- brasl %r14,sclp_early_printk
+ brasl %r14,sclp_early_printk_force
larl %r3,.Ldisabled_wait_31
lpsw 0(%r3)
4:
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 91ad4a9425c0..ac5da6b0b862 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -481,7 +481,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break;
case KVM_CAP_S390_HPAGE_1M:
r = 0;
- if (hpage)
+ if (hpage && !kvm_is_ucontrol(kvm))
r = 1;
break;
case KVM_CAP_S390_MEM_OP:
@@ -691,11 +691,13 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
mutex_lock(&kvm->lock);
if (kvm->created_vcpus)
r = -EBUSY;
- else if (!hpage || kvm->arch.use_cmma)
+ else if (!hpage || kvm->arch.use_cmma || kvm_is_ucontrol(kvm))
r = -EINVAL;
else {
r = 0;
+ down_write(&kvm->mm->mmap_sem);
kvm->mm->context.allow_gmap_hpage_1m = 1;
+ up_write(&kvm->mm->mmap_sem);
/*
* We might have to create fake 4k page
* tables. To avoid that the hardware works on
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index d68f10441a16..8679bd74d337 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -280,9 +280,11 @@ retry:
goto retry;
}
}
- if (rc)
- return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
up_read(&current->mm->mmap_sem);
+ if (rc == -EFAULT)
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ if (rc < 0)
+ return rc;
vcpu->run->s.regs.gprs[reg1] &= ~0xff;
vcpu->run->s.regs.gprs[reg1] |= key;
return 0;
@@ -324,9 +326,11 @@ retry:
goto retry;
}
}
- if (rc < 0)
- return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
up_read(&current->mm->mmap_sem);
+ if (rc == -EFAULT)
+ return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+ if (rc < 0)
+ return rc;
kvm_s390_set_psw_cc(vcpu, rc);
return 0;
}
@@ -390,12 +394,12 @@ static int handle_sske(struct kvm_vcpu *vcpu)
FAULT_FLAG_WRITE, &unlocked);
rc = !rc ? -EAGAIN : rc;
}
+ up_read(&current->mm->mmap_sem);
if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-
- up_read(&current->mm->mmap_sem);
- if (rc >= 0)
- start += PAGE_SIZE;
+ if (rc < 0)
+ return rc;
+ start += PAGE_SIZE;
}
if (m3 & (SSKE_MC | SSKE_MR)) {
@@ -1002,13 +1006,15 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
FAULT_FLAG_WRITE, &unlocked);
rc = !rc ? -EAGAIN : rc;
}
+ up_read(&current->mm->mmap_sem);
if (rc == -EFAULT)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
-
- up_read(&current->mm->mmap_sem);
- if (rc >= 0)
- start += PAGE_SIZE;
+ if (rc == -EAGAIN)
+ continue;
+ if (rc < 0)
+ return rc;
}
+ start += PAGE_SIZE;
}
if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 63844b95c22c..a2b28cd1e3fe 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -173,7 +173,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
return set_validity_icpt(scb_s, 0x0039U);
/* copy only the wrapping keys */
- if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56))
+ if (read_guest_real(vcpu, crycb_addr + 72,
+ vsie_page->crycb.dea_wrapping_key_mask, 56))
return set_validity_icpt(scb_s, 0x0035U);
scb_s->ecb3 |= ecb3_flags;
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index bb44990c8212..911c7ded35f1 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -708,11 +708,13 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
vmaddr |= gaddr & ~PMD_MASK;
/* Find vma in the parent mm */
vma = find_vma(gmap->mm, vmaddr);
+ if (!vma)
+ continue;
/*
* We do not discard pages that are backed by
* hugetlbfs, so we don't have to refault them.
*/
- if (vma && is_vm_hugetlb_page(vma))
+ if (is_vm_hugetlb_page(vma))
continue;
size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
zap_page_range(vma, vmaddr, size);
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index 666d6b5c0440..9c3fc03abe9a 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -28,7 +28,7 @@ typedef struct {
unsigned short sock_id; /* physical package */
unsigned short core_id;
unsigned short max_cache_id; /* groupings of highest shared cache */
- unsigned short proc_id; /* strand (aka HW thread) id */
+ signed short proc_id; /* strand (aka HW thread) id */
} cpuinfo_sparc;
DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index 09acf0ddec10..45b4bf1875e6 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -427,8 +427,9 @@
#define __NR_preadv2 358
#define __NR_pwritev2 359
#define __NR_statx 360
+#define __NR_io_pgetevents 361
-#define NR_syscalls 361
+#define NR_syscalls 362
/* Bitmask values returned from kern_features system call. */
#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
index 5868fc333ea8..639c8e54530a 100644
--- a/arch/sparc/kernel/kgdb_32.c
+++ b/arch/sparc/kernel/kgdb_32.c
@@ -122,7 +122,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
linux_regs->pc = addr;
linux_regs->npc = addr + 4;
}
- /* fallthru */
+ /* fall through */
case 'D':
case 'k':
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index d5f7dc6323d5..a68bbddbdba4 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -148,7 +148,7 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
linux_regs->tpc = addr;
linux_regs->tnpc = addr + 4;
}
- /* fallthru */
+ /* fall through */
case 'D':
case 'k':
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index d3149baaa33c..67b3e6b3ce5d 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -24,6 +24,7 @@
#include <asm/cpudata.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
+#include <linux/sched/clock.h>
#include <asm/nmi.h>
#include <asm/pcr.h>
#include <asm/cacheflush.h>
@@ -927,6 +928,8 @@ static void read_in_all_counters(struct cpu_hw_events *cpuc)
sparc_perf_event_update(cp, &cp->hw,
cpuc->current_idx[i]);
cpuc->current_idx[i] = PIC_NO_INDEX;
+ if (cp->hw.state & PERF_HES_STOPPED)
+ cp->hw.state |= PERF_HES_ARCH;
}
}
}
@@ -959,10 +962,12 @@ static void calculate_single_pcr(struct cpu_hw_events *cpuc)
enc = perf_event_get_enc(cpuc->events[i]);
cpuc->pcr[0] &= ~mask_for_index(idx);
- if (hwc->state & PERF_HES_STOPPED)
+ if (hwc->state & PERF_HES_ARCH) {
cpuc->pcr[0] |= nop_for_index(idx);
- else
+ } else {
cpuc->pcr[0] |= event_encoding(enc, idx);
+ hwc->state = 0;
+ }
}
out:
cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
@@ -988,6 +993,9 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
cpuc->current_idx[i] = idx;
+ if (cp->hw.state & PERF_HES_ARCH)
+ continue;
+
sparc_pmu_start(cp, PERF_EF_RELOAD);
}
out:
@@ -1079,6 +1087,8 @@ static void sparc_pmu_start(struct perf_event *event, int flags)
event->hw.state = 0;
sparc_pmu_enable_event(cpuc, &event->hw, idx);
+
+ perf_event_update_userpage(event);
}
static void sparc_pmu_stop(struct perf_event *event, int flags)
@@ -1371,9 +1381,9 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
cpuc->events[n0] = event->hw.event_base;
cpuc->current_idx[n0] = PIC_NO_INDEX;
- event->hw.state = PERF_HES_UPTODATE;
+ event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
if (!(ef_flags & PERF_EF_START))
- event->hw.state |= PERF_HES_STOPPED;
+ event->hw.state |= PERF_HES_ARCH;
/*
* If group events scheduling transaction was started,
@@ -1603,6 +1613,8 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct pt_regs *regs;
+ u64 finish_clock;
+ u64 start_clock;
int i;
if (!atomic_read(&active_events))
@@ -1616,6 +1628,8 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
return NOTIFY_DONE;
}
+ start_clock = sched_clock();
+
regs = args->regs;
cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -1654,6 +1668,10 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
sparc_pmu_stop(event, 0);
}
+ finish_clock = sched_clock();
+
+ perf_sample_event_took(finish_clock - start_clock);
+
return NOTIFY_STOP;
}
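
The sched_clock() pair brackets the whole NMI body so perf can throttle itself
when handlers run long. The same instrumentation pattern in userspace, as a
sketch:

	#include <stdio.h>
	#include <time.h>

	static unsigned long long now_ns(void)
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (unsigned long long)ts.tv_sec * 1000000000ULL +
		       ts.tv_nsec;
	}

	static void handler(void)
	{
		volatile unsigned long i;

		for (i = 0; i < 1000000; i++)	/* stand-in for real work */
			;
	}

	int main(void)
	{
		unsigned long long start = now_ns();

		handler();
		printf("handler took %llu ns\n", now_ns() - start);
		return 0;
	}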
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index f6528884a2c8..4073e2b87dd0 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -84,8 +84,9 @@ __handle_signal:
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
sethi %hi(0xf << 20), %l4
and %l1, %l4, %l4
+ andn %l1, %l4, %l1
ba,pt %xcc, __handle_preemption_continue
- andn %l1, %l4, %l1
+ srl %l4, 20, %l4
/* When returning from a NMI (%pil==15) interrupt we want to
* avoid running softirqs, doing IRQ tracing, preempting, etc.
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 12bee14b552c..621a363098ec 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -90,4 +90,4 @@ sys_call_table:
/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
/*355*/ .long sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
-/*360*/ .long sys_statx
+/*360*/ .long sys_statx, sys_io_pgetevents
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 387ef993880a..bb68c805b891 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -91,7 +91,7 @@ sys_call_table32:
.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
.word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range, compat_sys_preadv2, compat_sys_pwritev2
-/*360*/ .word sys_statx
+/*360*/ .word sys_statx, compat_sys_io_pgetevents
#endif /* CONFIG_COMPAT */
@@ -173,4 +173,4 @@ sys_call_table:
.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
.word sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
-/*360*/ .word sys_statx
+/*360*/ .word sys_statx, sys_io_pgetevents
diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c
index 635d67ffc9a3..7db5aabe9708 100644
--- a/arch/sparc/kernel/viohs.c
+++ b/arch/sparc/kernel/viohs.c
@@ -180,11 +180,17 @@ static int send_dreg(struct vio_driver_state *vio)
struct vio_dring_register pkt;
char all[sizeof(struct vio_dring_register) +
(sizeof(struct ldc_trans_cookie) *
- dr->ncookies)];
+ VIO_MAX_RING_COOKIES)];
} u;
+ size_t bytes = sizeof(struct vio_dring_register) +
+ (sizeof(struct ldc_trans_cookie) *
+ dr->ncookies);
int i;
- memset(&u, 0, sizeof(u));
+ if (WARN_ON(bytes > sizeof(u)))
+ return -EINVAL;
+
+ memset(&u, 0, bytes);
init_tag(&u.pkt.tag, VIO_TYPE_CTRL, VIO_SUBTYPE_INFO, VIO_DRING_REG);
u.pkt.dring_ident = 0;
u.pkt.num_descr = dr->num_entries;
@@ -206,7 +212,7 @@ static int send_dreg(struct vio_driver_state *vio)
(unsigned long long) u.pkt.cookies[i].cookie_size);
}
- return send_ctrl(vio, &u.pkt.tag, sizeof(u));
+ return send_ctrl(vio, &u.pkt.tag, bytes);
}
static int send_rdx(struct vio_driver_state *vio)
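
The send_dreg() fix sizes the on-stack buffer for the maximum cookie count and
then sends only the bytes actually used, instead of letting a variable-length
array scale with untrusted input. A reduced sketch of that shape, with
stand-in types and an assumed maximum:

	#include <stdio.h>
	#include <string.h>

	#define MAX_COOKIES	16	/* stand-in for VIO_MAX_RING_COOKIES */

	struct cookie { unsigned long long addr, size; };

	struct dreg_pkt {
		unsigned int ncookies;
		struct cookie cookies[MAX_COOKIES];
	};

	static int send_dreg(unsigned int ncookies)
	{
		struct dreg_pkt pkt;
		size_t bytes = sizeof(pkt.ncookies) +
			       ncookies * sizeof(struct cookie);

		if (ncookies > MAX_COOKIES)	/* was: unchecked overflow */
			return -1;

		memset(&pkt, 0, bytes);		/* clear only what is sent */
		pkt.ncookies = ncookies;
		printf("sending %zu bytes\n", bytes);
		return 0;
	}

	int main(void)
	{
		return send_dreg(4);
	}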
diff --git a/arch/sparc/vdso/Makefile b/arch/sparc/vdso/Makefile
index dd0b5a92ffd0..dc85570d8839 100644
--- a/arch/sparc/vdso/Makefile
+++ b/arch/sparc/vdso/Makefile
@@ -31,23 +31,21 @@ obj-y += $(vdso_img_objs)
targets += $(vdso_img_cfiles)
targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
-export CPPFLAGS_vdso.lds += -P -C
+CPPFLAGS_vdso.lds += -P -C
VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
-Wl,--no-undefined \
-Wl,-z,max-page-size=8192 -Wl,-z,common-page-size=8192 \
$(DISABLE_LTO)
-$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
+$(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
$(call if_changed,vdso)
HOST_EXTRACFLAGS += -I$(srctree)/tools/include
hostprogs-y += vdso2c
quiet_cmd_vdso2c = VDSO2C $@
-define cmd_vdso2c
- $(obj)/vdso2c $< $(<:%.dbg=%) $@
-endef
+ cmd_vdso2c = $(obj)/vdso2c $< $(<:%.dbg=%) $@
$(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
$(call if_changed,vdso2c)
diff --git a/arch/sparc/vdso/vclock_gettime.c b/arch/sparc/vdso/vclock_gettime.c
index 3feb3d960ca5..75dca9aab737 100644
--- a/arch/sparc/vdso/vclock_gettime.c
+++ b/arch/sparc/vdso/vclock_gettime.c
@@ -33,9 +33,19 @@
#define TICK_PRIV_BIT (1ULL << 63)
#endif
+#ifdef CONFIG_SPARC64
#define SYSCALL_STRING \
"ta 0x6d;" \
- "sub %%g0, %%o0, %%o0;" \
+ "bcs,a 1f;" \
+ " sub %%g0, %%o0, %%o0;" \
+ "1:"
+#else
+#define SYSCALL_STRING \
+ "ta 0x10;" \
+ "bcs,a 1f;" \
+ " sub %%g0, %%o0, %%o0;" \
+ "1:"
+#endif
#define SYSCALL_CLOBBERS \
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \
diff --git a/arch/sparc/vdso/vma.c b/arch/sparc/vdso/vma.c
index f51595f861b8..5eaff3c1aa0c 100644
--- a/arch/sparc/vdso/vma.c
+++ b/arch/sparc/vdso/vma.c
@@ -262,7 +262,9 @@ static __init int vdso_setup(char *s)
unsigned long val;
err = kstrtoul(s, 10, &val);
+ if (err)
+ return err;
vdso_enabled = val;
- return err;
+ return 0;
}
__setup("vdso=", vdso_setup);
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 83c470364dfb..74c002ddc0ce 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/ata.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
@@ -142,7 +143,6 @@ struct cow {
#define MAX_SG 64
struct ubd {
- struct list_head restart;
/* name (and fd, below) of the file opened for writing, either the
* backing or the cow file. */
char *file;
@@ -156,11 +156,8 @@ struct ubd {
struct cow cow;
struct platform_device pdev;
struct request_queue *queue;
+ struct blk_mq_tag_set tag_set;
spinlock_t lock;
- struct scatterlist sg[MAX_SG];
- struct request *request;
- int start_sg, end_sg;
- sector_t rq_pos;
};
#define DEFAULT_COW { \
@@ -182,10 +179,6 @@ struct ubd {
.shared = 0, \
.cow = DEFAULT_COW, \
.lock = __SPIN_LOCK_UNLOCKED(ubd_devs.lock), \
- .request = NULL, \
- .start_sg = 0, \
- .end_sg = 0, \
- .rq_pos = 0, \
}
/* Protected by ubd_lock */
@@ -196,6 +189,9 @@ static int fake_ide = 0;
static struct proc_dir_entry *proc_ide_root = NULL;
static struct proc_dir_entry *proc_ide = NULL;
+static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd);
+
static void make_proc_ide(void)
{
proc_ide_root = proc_mkdir("ide", NULL);
@@ -436,11 +432,8 @@ __uml_help(udb_setup,
" in the boot output.\n\n"
);
-static void do_ubd_request(struct request_queue * q);
-
/* Only changed by ubd_init, which is an initcall. */
static int thread_fd = -1;
-static LIST_HEAD(restart);
/* Function to read several request pointers at a time
* handling fractional reads if (and as) needed
@@ -498,9 +491,6 @@ static int bulk_req_safe_read(
/* Called without dev->lock held, and only in interrupt context. */
static void ubd_handler(void)
{
- struct ubd *ubd;
- struct list_head *list, *next_ele;
- unsigned long flags;
int n;
int count;
@@ -520,23 +510,17 @@ static void ubd_handler(void)
return;
}
for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
- blk_end_request(
- (*irq_req_buffer)[count]->req,
- BLK_STS_OK,
- (*irq_req_buffer)[count]->length
- );
- kfree((*irq_req_buffer)[count]);
+ struct io_thread_req *io_req = (*irq_req_buffer)[count];
+ int err = io_req->error ? BLK_STS_IOERR : BLK_STS_OK;
+
+ if (!blk_update_request(io_req->req, err, io_req->length))
+ __blk_mq_end_request(io_req->req, err);
+
+ kfree(io_req);
}
}
- reactivate_fd(thread_fd, UBD_IRQ);
- list_for_each_safe(list, next_ele, &restart){
- ubd = container_of(list, struct ubd, restart);
- list_del_init(&ubd->restart);
- spin_lock_irqsave(&ubd->lock, flags);
- do_ubd_request(ubd->queue);
- spin_unlock_irqrestore(&ubd->lock, flags);
- }
+ reactivate_fd(thread_fd, UBD_IRQ);
}
static irqreturn_t ubd_intr(int irq, void *dev)
@@ -857,6 +841,7 @@ static void ubd_device_release(struct device *dev)
struct ubd *ubd_dev = dev_get_drvdata(dev);
blk_cleanup_queue(ubd_dev->queue);
+ blk_mq_free_tag_set(&ubd_dev->tag_set);
*ubd_dev = ((struct ubd) DEFAULT_UBD);
}
@@ -891,7 +876,7 @@ static int ubd_disk_register(int major, u64 size, int unit,
disk->private_data = &ubd_devs[unit];
disk->queue = ubd_devs[unit].queue;
- device_add_disk(parent, disk);
+ device_add_disk(parent, disk, NULL);
*disk_out = disk;
return 0;
@@ -899,6 +884,10 @@ static int ubd_disk_register(int major, u64 size, int unit,
#define ROUND_BLOCK(n) ((n + ((1 << 9) - 1)) & (-1 << 9))
+static const struct blk_mq_ops ubd_mq_ops = {
+ .queue_rq = ubd_queue_rq,
+};
+
static int ubd_add(int n, char **error_out)
{
struct ubd *ubd_dev = &ubd_devs[n];
@@ -915,15 +904,23 @@ static int ubd_add(int n, char **error_out)
ubd_dev->size = ROUND_BLOCK(ubd_dev->size);
- INIT_LIST_HEAD(&ubd_dev->restart);
- sg_init_table(ubd_dev->sg, MAX_SG);
+ ubd_dev->tag_set.ops = &ubd_mq_ops;
+ ubd_dev->tag_set.queue_depth = 64;
+ ubd_dev->tag_set.numa_node = NUMA_NO_NODE;
+ ubd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
+ ubd_dev->tag_set.driver_data = ubd_dev;
+ ubd_dev->tag_set.nr_hw_queues = 1;
- err = -ENOMEM;
- ubd_dev->queue = blk_init_queue(do_ubd_request, &ubd_dev->lock);
- if (ubd_dev->queue == NULL) {
- *error_out = "Failed to initialize device queue";
+ err = blk_mq_alloc_tag_set(&ubd_dev->tag_set);
+ if (err)
goto out;
+
+ ubd_dev->queue = blk_mq_init_queue(&ubd_dev->tag_set);
+ if (IS_ERR(ubd_dev->queue)) {
+ err = PTR_ERR(ubd_dev->queue);
+ goto out_cleanup;
}
+
ubd_dev->queue->queuedata = ubd_dev;
blk_queue_write_cache(ubd_dev->queue, true, false);
@@ -931,7 +928,7 @@ static int ubd_add(int n, char **error_out)
err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
if(err){
*error_out = "Failed to register device";
- goto out_cleanup;
+ goto out_cleanup_tags;
}
if (fake_major != UBD_MAJOR)
@@ -949,6 +946,8 @@ static int ubd_add(int n, char **error_out)
out:
return err;
+out_cleanup_tags:
+ blk_mq_free_tag_set(&ubd_dev->tag_set);
out_cleanup:
blk_cleanup_queue(ubd_dev->queue);
goto out;
@@ -1290,123 +1289,82 @@ static void cowify_req(struct io_thread_req *req, unsigned long *bitmap,
req->bitmap_words, bitmap_len);
}
-/* Called with dev->lock held */
-static void prepare_request(struct request *req, struct io_thread_req *io_req,
- unsigned long long offset, int page_offset,
- int len, struct page *page)
+static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
+ u64 off, struct bio_vec *bvec)
{
- struct gendisk *disk = req->rq_disk;
- struct ubd *ubd_dev = disk->private_data;
-
- io_req->req = req;
- io_req->fds[0] = (ubd_dev->cow.file != NULL) ? ubd_dev->cow.fd :
- ubd_dev->fd;
- io_req->fds[1] = ubd_dev->fd;
- io_req->cow_offset = -1;
- io_req->offset = offset;
- io_req->length = len;
- io_req->error = 0;
- io_req->sector_mask = 0;
-
- io_req->op = (rq_data_dir(req) == READ) ? UBD_READ : UBD_WRITE;
- io_req->offsets[0] = 0;
- io_req->offsets[1] = ubd_dev->cow.data_offset;
- io_req->buffer = page_address(page) + page_offset;
- io_req->sectorsize = 1 << 9;
-
- if(ubd_dev->cow.file != NULL)
- cowify_req(io_req, ubd_dev->cow.bitmap,
- ubd_dev->cow.bitmap_offset, ubd_dev->cow.bitmap_len);
-
-}
+ struct ubd *dev = hctx->queue->queuedata;
+ struct io_thread_req *io_req;
+ int ret;
-/* Called with dev->lock held */
-static void prepare_flush_request(struct request *req,
- struct io_thread_req *io_req)
-{
- struct gendisk *disk = req->rq_disk;
- struct ubd *ubd_dev = disk->private_data;
+ io_req = kmalloc(sizeof(struct io_thread_req), GFP_ATOMIC);
+ if (!io_req)
+ return -ENOMEM;
io_req->req = req;
- io_req->fds[0] = (ubd_dev->cow.file != NULL) ? ubd_dev->cow.fd :
- ubd_dev->fd;
- io_req->op = UBD_FLUSH;
-}
+ if (dev->cow.file)
+ io_req->fds[0] = dev->cow.fd;
+ else
+ io_req->fds[0] = dev->fd;
-static bool submit_request(struct io_thread_req *io_req, struct ubd *dev)
-{
- int n = os_write_file(thread_fd, &io_req,
- sizeof(io_req));
- if (n != sizeof(io_req)) {
- if (n != -EAGAIN)
- printk("write to io thread failed, "
- "errno = %d\n", -n);
- else if (list_empty(&dev->restart))
- list_add(&dev->restart, &restart);
+ if (req_op(req) == REQ_OP_FLUSH) {
+ io_req->op = UBD_FLUSH;
+ } else {
+ io_req->fds[1] = dev->fd;
+ io_req->cow_offset = -1;
+ io_req->offset = off;
+ io_req->length = bvec->bv_len;
+ io_req->error = 0;
+ io_req->sector_mask = 0;
+
+ io_req->op = rq_data_dir(req) == READ ? UBD_READ : UBD_WRITE;
+ io_req->offsets[0] = 0;
+ io_req->offsets[1] = dev->cow.data_offset;
+ io_req->buffer = page_address(bvec->bv_page) + bvec->bv_offset;
+ io_req->sectorsize = 1 << 9;
+
+ if (dev->cow.file) {
+ cowify_req(io_req, dev->cow.bitmap,
+ dev->cow.bitmap_offset, dev->cow.bitmap_len);
+ }
+ }
+ ret = os_write_file(thread_fd, &io_req, sizeof(io_req));
+ if (ret != sizeof(io_req)) {
+ if (ret != -EAGAIN)
+ pr_err("write to io thread failed: %d\n", -ret);
kfree(io_req);
- return false;
}
- return true;
+
+ return ret;
}
-/* Called with dev->lock held */
-static void do_ubd_request(struct request_queue *q)
+static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- struct io_thread_req *io_req;
- struct request *req;
-
- while(1){
- struct ubd *dev = q->queuedata;
- if(dev->request == NULL){
- struct request *req = blk_fetch_request(q);
- if(req == NULL)
- return;
-
- dev->request = req;
- dev->rq_pos = blk_rq_pos(req);
- dev->start_sg = 0;
- dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
- }
-
- req = dev->request;
+ struct request *req = bd->rq;
+ int ret = 0;
- if (req_op(req) == REQ_OP_FLUSH) {
- io_req = kmalloc(sizeof(struct io_thread_req),
- GFP_ATOMIC);
- if (io_req == NULL) {
- if (list_empty(&dev->restart))
- list_add(&dev->restart, &restart);
- return;
- }
- prepare_flush_request(req, io_req);
- if (submit_request(io_req, dev) == false)
- return;
- }
+ blk_mq_start_request(req);
- while(dev->start_sg < dev->end_sg){
- struct scatterlist *sg = &dev->sg[dev->start_sg];
-
- io_req = kmalloc(sizeof(struct io_thread_req),
- GFP_ATOMIC);
- if(io_req == NULL){
- if(list_empty(&dev->restart))
- list_add(&dev->restart, &restart);
- return;
- }
- prepare_request(req, io_req,
- (unsigned long long)dev->rq_pos << 9,
- sg->offset, sg->length, sg_page(sg));
-
- if (submit_request(io_req, dev) == false)
- return;
-
- dev->rq_pos += sg->length >> 9;
- dev->start_sg++;
+ if (req_op(req) == REQ_OP_FLUSH) {
+ ret = ubd_queue_one_vec(hctx, req, 0, NULL);
+ } else {
+ struct req_iterator iter;
+ struct bio_vec bvec;
+ u64 off = (u64)blk_rq_pos(req) << 9;
+
+ rq_for_each_segment(bvec, req, iter) {
+ ret = ubd_queue_one_vec(hctx, req, off, &bvec);
+ if (ret < 0)
+ goto out;
+ off += bvec.bv_len;
}
- dev->end_sg = 0;
- dev->request = NULL;
}
+out:
+	if (ret < 0)
+		blk_mq_requeue_request(req, true);
+ return BLK_STS_OK;
}
static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
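
The hunk above converts the UML ubd driver from the legacy request-function loop to a blk-mq ->queue_rq handler: each request is started, its segments are walked with rq_for_each_segment(), and each segment is handed to the I/O thread. A minimal sketch of that shape, assuming a hypothetical per-segment submit helper in place of ubd_queue_one_vec():

	static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
					     const struct blk_mq_queue_data *bd)
	{
		struct request *req = bd->rq;
		struct req_iterator iter;
		struct bio_vec bvec;
		u64 off = (u64)blk_rq_pos(req) << 9;	/* sectors to bytes */

		blk_mq_start_request(req);
		rq_for_each_segment(bvec, req, iter) {
			/* submit_one_vec() stands in for ubd_queue_one_vec() */
			if (submit_one_vec(req, off, &bvec) < 0) {
				blk_mq_requeue_request(req, true);
				break;
			}
			off += bvec.bv_len;
		}
		return BLK_STS_OK;
	}
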
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 28764dacf018..466f66c8a7f8 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -37,6 +37,7 @@ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
+KBUILD_CFLAGS += -Wno-pointer-sign
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
index eaa843a52907..a480356e0ed8 100644
--- a/arch/x86/boot/compressed/mem_encrypt.S
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -25,20 +25,6 @@ ENTRY(get_sev_encryption_bit)
push %ebx
push %ecx
push %edx
- push %edi
-
- /*
- * RIP-relative addressing is needed to access the encryption bit
- * variable. Since we are running in 32-bit mode we need this call/pop
- * sequence to get the proper relative addressing.
- */
- call 1f
-1: popl %edi
- subl $1b, %edi
-
- movl enc_bit(%edi), %eax
- cmpl $0, %eax
- jge .Lsev_exit
/* Check if running under a hypervisor */
movl $1, %eax
@@ -69,15 +55,12 @@ ENTRY(get_sev_encryption_bit)
movl %ebx, %eax
andl $0x3f, %eax /* Return the encryption bit location */
- movl %eax, enc_bit(%edi)
jmp .Lsev_exit
.Lno_sev:
xor %eax, %eax
- movl %eax, enc_bit(%edi)
.Lsev_exit:
- pop %edi
pop %edx
pop %ecx
pop %ebx
@@ -113,8 +96,6 @@ ENTRY(set_sev_encryption_mask)
ENDPROC(set_sev_encryption_mask)
.data
-enc_bit:
- .int 0xffffffff
#ifdef CONFIG_AMD_MEM_ENCRYPT
.balign 8
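
With the enc_bit cache gone, get_sev_encryption_bit() re-derives the C-bit position on every call instead of relying on RIP-relative addressing to a .data variable. The bit position itself comes from CPUID leaf 0x8000001F, EBX bits 5:0, which can be verified from userspace as a quick sanity check:

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* leaf 0x8000001F: AMD memory-encryption capabilities */
		if (__get_cpuid(0x8000001f, &eax, &ebx, &ecx, &edx))
			printf("SEV C-bit position: %u\n", ebx & 0x3f);
		return 0;
	}
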
diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index acd11b3bf639..2a356b948720 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -379,7 +379,6 @@ static int __init crypto_aegis128_aesni_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
!boot_cpu_has(X86_FEATURE_AES) ||
- !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
diff --git a/arch/x86/crypto/aegis128l-aesni-glue.c b/arch/x86/crypto/aegis128l-aesni-glue.c
index 2071c3d1ae07..dbe8bb980da1 100644
--- a/arch/x86/crypto/aegis128l-aesni-glue.c
+++ b/arch/x86/crypto/aegis128l-aesni-glue.c
@@ -379,7 +379,6 @@ static int __init crypto_aegis128l_aesni_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
!boot_cpu_has(X86_FEATURE_AES) ||
- !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
diff --git a/arch/x86/crypto/aegis256-aesni-glue.c b/arch/x86/crypto/aegis256-aesni-glue.c
index b5f2a8fd5a71..8bebda2de92f 100644
--- a/arch/x86/crypto/aegis256-aesni-glue.c
+++ b/arch/x86/crypto/aegis256-aesni-glue.c
@@ -379,7 +379,6 @@ static int __init crypto_aegis256_aesni_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
!boot_cpu_has(X86_FEATURE_AES) ||
- !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
diff --git a/arch/x86/crypto/morus1280-sse2-glue.c b/arch/x86/crypto/morus1280-sse2-glue.c
index 95cf857d2cbb..f40244eaf14d 100644
--- a/arch/x86/crypto/morus1280-sse2-glue.c
+++ b/arch/x86/crypto/morus1280-sse2-glue.c
@@ -40,7 +40,6 @@ MORUS1280_DECLARE_ALGS(sse2, "morus1280-sse2", 350);
static int __init crypto_morus1280_sse2_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
- !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
diff --git a/arch/x86/crypto/morus640-sse2-glue.c b/arch/x86/crypto/morus640-sse2-glue.c
index 615fb7bc9a32..9afaf8f8565a 100644
--- a/arch/x86/crypto/morus640-sse2-glue.c
+++ b/arch/x86/crypto/morus640-sse2-glue.c
@@ -40,7 +40,6 @@ MORUS640_DECLARE_ALGS(sse2, "morus640-sse2", 400);
static int __init crypto_morus640_sse2_module_init(void)
{
if (!boot_cpu_has(X86_FEATURE_XMM2) ||
- !boot_cpu_has(X86_FEATURE_OSXSAVE) ||
!cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
return -ENODEV;
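
All five removals make the same point: these glue modules emit only SSE2 instructions, whose register state the kernel manages regardless of XSAVE, so requiring OSXSAVE (OS-enabled XSAVE) rejected CPUs that could run the code fine. The remaining checks are sufficient; a minimal init test for an SSE2-only algorithm reduces to the sketch below (the real modules also register their algorithms):

	static int __init sse2_algo_init(void)
	{
		if (!boot_cpu_has(X86_FEATURE_XMM2) ||
		    !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL))
			return -ENODEV;
		return 0;
	}
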
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 2767c625a52c..fbbf1ba57ec6 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -389,6 +389,13 @@
* that register for the time this macro runs
*/
+ /*
+ * The high bits of the CS dword (__csh) are used for
+ * CS_FROM_ENTRY_STACK and CS_FROM_USER_CR3. Clear them in case
+ * hardware didn't do this for us.
+ */
+ andl $(0x0000ffff), PT_CS(%esp)
+
/* Are we on the entry stack? Bail out if not! */
movl PER_CPU_VAR(cpu_entry_area), %ecx
addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
@@ -407,12 +414,6 @@
/* Load top of task-stack into %edi */
movl TSS_entry2task_stack(%edi), %edi
- /*
- * Clear unused upper bits of the dword containing the word-sized CS
- * slot in pt_regs in case hardware didn't clear it for us.
- */
- andl $(0x0000ffff), PT_CS(%esp)
-
/* Special case - entry from kernel mode via entry stack */
#ifdef CONFIG_VM86
movl PT_EFLAGS(%esp), %ecx # mix EFLAGS and CS
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 957dfb693ecc..f95dcb209fdf 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1187,6 +1187,16 @@ ENTRY(paranoid_entry)
xorl %ebx, %ebx
1:
+ /*
+ * Always stash CR3 in %r14. This value will be restored,
+ * verbatim, at exit. Needed if paranoid_entry interrupted
+ * another entry that already switched to the user CR3 value
+ * but has not yet returned to userspace.
+ *
+ * This is also why CS (stashed in the "iret frame" by the
+	 * hardware at entry) cannot be used: this may be a return
+ * to kernel code, but with a user CR3 value.
+ */
SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
ret
@@ -1211,11 +1221,13 @@ ENTRY(paranoid_exit)
testl %ebx, %ebx /* swapgs needed? */
jnz .Lparanoid_exit_no_swapgs
TRACE_IRQS_IRETQ
+ /* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
SWAPGS_UNSAFE_STACK
jmp .Lparanoid_exit_restore
.Lparanoid_exit_no_swapgs:
TRACE_IRQS_IRETQ_DEBUG
+ /* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%rbx save_reg=%r14
.Lparanoid_exit_restore:
jmp restore_regs_and_return_to_kernel
@@ -1626,6 +1638,7 @@ end_repeat_nmi:
movq $-1, %rsi
call do_nmi
+ /* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
testl %ebx, %ebx /* swapgs needed? */
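
The three added comments describe a single invariant: the CR3 value found at entry is stashed in %r14 and written back verbatim at exit, because a paranoid entry (NMI, #MC) can interrupt another entry path that has already switched to the user CR3 but not yet returned. A conceptual C model of the pairing follows; the bit-12 mask is an illustrative assumption (PTI's user/kernel page-table selector), and the real logic lives in the SAVE_AND_SWITCH_TO_KERNEL_CR3 and RESTORE_CR3 asm macros:

	static unsigned long saved_cr3;		/* %r14 in the real asm */

	static void paranoid_entry_model(void)
	{
		saved_cr3 = __read_cr3();
		native_write_cr3(saved_cr3 & ~(1UL << 12));	/* kernel CR3 */
	}

	static void paranoid_exit_model(void)
	{
		native_write_cr3(saved_cr3);	/* restore verbatim, never recompute */
	}
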
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
index fa3f439f0a92..141d415a8c80 100644
--- a/arch/x86/entry/vdso/Makefile
+++ b/arch/x86/entry/vdso/Makefile
@@ -68,7 +68,13 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
$(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
-fno-omit-frame-pointer -foptimize-sibling-calls \
- -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
+ -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
+
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_VDSO_CFLAGS),)
+ CFL += $(RETPOLINE_VDSO_CFLAGS)
+endif
+endif
$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
@@ -138,7 +144,13 @@ KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+
+ifdef CONFIG_RETPOLINE
+ifneq ($(RETPOLINE_VDSO_CFLAGS),)
+ KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
+endif
+endif
+
$(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
$(obj)/vdso32.so.dbg: FORCE \
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index f19856d95c60..e48ca3afa091 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -43,8 +43,9 @@ extern u8 hvclock_page
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
long ret;
- asm("syscall" : "=a" (ret) :
- "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+ asm ("syscall" : "=a" (ret), "=m" (*ts) :
+ "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
+ "memory", "rcx", "r11");
return ret;
}
@@ -52,8 +53,9 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
long ret;
- asm("syscall" : "=a" (ret) :
- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+ asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
+ "memory", "rcx", "r11");
return ret;
}
@@ -64,13 +66,13 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
long ret;
- asm(
+ asm (
"mov %%ebx, %%edx \n"
- "mov %2, %%ebx \n"
+ "mov %[clock], %%ebx \n"
"call __kernel_vsyscall \n"
"mov %%edx, %%ebx \n"
- : "=a" (ret)
- : "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
+ : "=a" (ret), "=m" (*ts)
+ : "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
: "memory", "edx");
return ret;
}
@@ -79,13 +81,13 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
long ret;
- asm(
+ asm (
"mov %%ebx, %%edx \n"
- "mov %2, %%ebx \n"
+ "mov %[tv], %%ebx \n"
"call __kernel_vsyscall \n"
"mov %%edx, %%ebx \n"
- : "=a" (ret)
- : "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
+ : "=a" (ret), "=m" (*tv), "=m" (*tz)
+ : "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
: "memory", "edx");
return ret;
}
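
All four fallbacks had under-declared constraints: the asm never told GCC that the syscall instruction destroys %rcx and %r11, nor that the kernel writes through the ts/tv/tz pointers. Listing the buffers as "=m" outputs and naming the register clobbers makes the contract explicit, so the compiler can neither cache the result nor reuse the clobbered registers. A standalone 64-bit sketch with the corrected constraint set (hand-rolled, for illustration only):

	#include <time.h>

	static long raw_clock_gettime(long clock, struct timespec *ts)
	{
		long ret;

		/* 228 == __NR_clock_gettime on x86-64; rcx/r11 die in syscall */
		asm volatile ("syscall"
			      : "=a" (ret), "=m" (*ts)
			      : "0" (228L), "D" (clock), "S" (ts)
			      : "memory", "rcx", "r11");
		return ret;
	}
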
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 981ba5e8241b..8671de126eac 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -36,6 +36,7 @@
static int num_counters_llc;
static int num_counters_nb;
+static bool l3_mask;
static HLIST_HEAD(uncore_unused_list);
@@ -209,6 +210,13 @@ static int amd_uncore_event_init(struct perf_event *event)
hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
hwc->idx = -1;
+ /*
+ * SliceMask and ThreadMask need to be set for certain L3 events in
+ * Family 17h. For other events, the two fields do not affect the count.
+ */
+ if (l3_mask)
+ hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);
+
if (event->cpu < 0)
return -EINVAL;
@@ -525,6 +533,7 @@ static int __init amd_uncore_init(void)
amd_llc_pmu.name = "amd_l3";
format_attr_event_df.show = &event_show_df;
format_attr_event_l3.show = &event_show_l3;
+ l3_mask = true;
} else {
num_counters_nb = NUM_COUNTERS_NB;
num_counters_llc = NUM_COUNTERS_L2;
@@ -532,6 +541,7 @@ static int __init amd_uncore_init(void)
amd_llc_pmu.name = "amd_l2";
format_attr_event_df = format_attr_event;
format_attr_event_l3 = format_attr_event;
+ l3_mask = false;
}
amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df;
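
On Family 17h, L3 PMC events count only for the slices and threads selected by the event's SliceMask and ThreadMask fields, so the driver now sets both masks by default to preserve the historical all-slices/all-threads semantics. Using the constants this patch adds to perf_event.h (raw_event_sel is a stand-in for a user-supplied event encoding):

	u64 config = raw_event_sel & AMD64_RAW_EVENT_MASK_NB;

	/* count on every L3 slice and for every thread, the new default */
	config |= AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK;
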
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index f3e006bed9a7..c88ed39582a1 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -1272,4 +1272,8 @@ void intel_pmu_lbr_init_knl(void)
x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
x86_pmu.lbr_sel_map = snb_lbr_sel_map;
+
+	/* Knights Landing does have the MISPREDICT bit */
+ if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
+ x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
}
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 51d7c117e3c7..c07bee31abe8 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -3061,7 +3061,7 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {
void bdx_uncore_cpu_init(void)
{
- int pkg = topology_phys_to_logical_pkg(0);
+ int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);
if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
@@ -3931,16 +3931,16 @@ static const struct pci_device_id skx_uncore_pci_ids[] = {
.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
},
{ /* M3UPI0 Link 0 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
- .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0),
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
},
{ /* M3UPI0 Link 1 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
- .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1),
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
},
{ /* M3UPI1 Link 2 */
- PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
- .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2),
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
+ .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
},
{ /* end: all zeroes */ }
};
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index 5b0f613428c2..2c43e3055948 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -95,8 +95,8 @@ static void hv_apic_eoi_write(u32 reg, u32 val)
*/
static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
{
- struct ipi_arg_ex **arg;
- struct ipi_arg_ex *ipi_arg;
+ struct hv_send_ipi_ex **arg;
+ struct hv_send_ipi_ex *ipi_arg;
unsigned long flags;
int nr_bank = 0;
int ret = 1;
@@ -105,7 +105,7 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
return false;
local_irq_save(flags);
- arg = (struct ipi_arg_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
+ arg = (struct hv_send_ipi_ex **)this_cpu_ptr(hyperv_pcpu_input_arg);
ipi_arg = *arg;
if (unlikely(!ipi_arg))
@@ -135,7 +135,7 @@ ipi_mask_ex_done:
static bool __send_ipi_mask(const struct cpumask *mask, int vector)
{
int cur_cpu, vcpu;
- struct ipi_arg_non_ex ipi_arg;
+ struct hv_send_ipi ipi_arg;
int ret = 1;
trace_hyperv_send_ipi_mask(mask, vector);
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index b143717b92b3..ce84388e540c 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -80,11 +80,11 @@ static __always_inline void arch_atomic_sub(int i, atomic_t *v)
* true if the result is zero, or false for all
* other cases.
*/
-#define arch_atomic_sub_and_test arch_atomic_sub_and_test
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
}
+#define arch_atomic_sub_and_test arch_atomic_sub_and_test
/**
* arch_atomic_inc - increment atomic variable
@@ -92,12 +92,12 @@ static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
*
* Atomically increments @v by 1.
*/
-#define arch_atomic_inc arch_atomic_inc
static __always_inline void arch_atomic_inc(atomic_t *v)
{
asm volatile(LOCK_PREFIX "incl %0"
: "+m" (v->counter));
}
+#define arch_atomic_inc arch_atomic_inc
/**
* arch_atomic_dec - decrement atomic variable
@@ -105,12 +105,12 @@ static __always_inline void arch_atomic_inc(atomic_t *v)
*
* Atomically decrements @v by 1.
*/
-#define arch_atomic_dec arch_atomic_dec
static __always_inline void arch_atomic_dec(atomic_t *v)
{
asm volatile(LOCK_PREFIX "decl %0"
: "+m" (v->counter));
}
+#define arch_atomic_dec arch_atomic_dec
/**
* arch_atomic_dec_and_test - decrement and test
@@ -120,11 +120,11 @@ static __always_inline void arch_atomic_dec(atomic_t *v)
* returns true if the result is 0, or false for all other
* cases.
*/
-#define arch_atomic_dec_and_test arch_atomic_dec_and_test
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
}
+#define arch_atomic_dec_and_test arch_atomic_dec_and_test
/**
* arch_atomic_inc_and_test - increment and test
@@ -134,11 +134,11 @@ static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
* and returns true if the result is zero, or false for all
* other cases.
*/
-#define arch_atomic_inc_and_test arch_atomic_inc_and_test
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
}
+#define arch_atomic_inc_and_test arch_atomic_inc_and_test
/**
* arch_atomic_add_negative - add and test if negative
@@ -149,11 +149,11 @@ static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-#define arch_atomic_add_negative arch_atomic_add_negative
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
}
+#define arch_atomic_add_negative arch_atomic_add_negative
/**
* arch_atomic_add_return - add integer and return
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index ef959f02d070..6a5b0ec460da 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -205,12 +205,12 @@ static inline long long arch_atomic64_sub(long long i, atomic64_t *v)
*
* Atomically increments @v by 1.
*/
-#define arch_atomic64_inc arch_atomic64_inc
static inline void arch_atomic64_inc(atomic64_t *v)
{
__alternative_atomic64(inc, inc_return, /* no output */,
"S" (v) : "memory", "eax", "ecx", "edx");
}
+#define arch_atomic64_inc arch_atomic64_inc
/**
* arch_atomic64_dec - decrement atomic64 variable
@@ -218,12 +218,12 @@ static inline void arch_atomic64_inc(atomic64_t *v)
*
* Atomically decrements @v by 1.
*/
-#define arch_atomic64_dec arch_atomic64_dec
static inline void arch_atomic64_dec(atomic64_t *v)
{
__alternative_atomic64(dec, dec_return, /* no output */,
"S" (v) : "memory", "eax", "ecx", "edx");
}
+#define arch_atomic64_dec arch_atomic64_dec
/**
* arch_atomic64_add_unless - add unless the number is a given value
@@ -245,7 +245,6 @@ static inline int arch_atomic64_add_unless(atomic64_t *v, long long a,
return (int)a;
}
-#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
int r;
@@ -253,8 +252,8 @@ static inline int arch_atomic64_inc_not_zero(atomic64_t *v)
"S" (v) : "ecx", "edx", "memory");
return r;
}
+#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero
-#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
{
long long r;
@@ -262,6 +261,7 @@ static inline long long arch_atomic64_dec_if_positive(atomic64_t *v)
"S" (v) : "ecx", "memory");
return r;
}
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#undef alternative_atomic64
#undef __alternative_atomic64
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 4343d9b4f30e..5f851d92eecd 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -71,11 +71,11 @@ static inline void arch_atomic64_sub(long i, atomic64_t *v)
* true if the result is zero, or false for all
* other cases.
*/
-#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
}
+#define arch_atomic64_sub_and_test arch_atomic64_sub_and_test
/**
* arch_atomic64_inc - increment atomic64 variable
@@ -83,13 +83,13 @@ static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
*
* Atomically increments @v by 1.
*/
-#define arch_atomic64_inc arch_atomic64_inc
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
asm volatile(LOCK_PREFIX "incq %0"
: "=m" (v->counter)
: "m" (v->counter));
}
+#define arch_atomic64_inc arch_atomic64_inc
/**
* arch_atomic64_dec - decrement atomic64 variable
@@ -97,13 +97,13 @@ static __always_inline void arch_atomic64_inc(atomic64_t *v)
*
* Atomically decrements @v by 1.
*/
-#define arch_atomic64_dec arch_atomic64_dec
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
asm volatile(LOCK_PREFIX "decq %0"
: "=m" (v->counter)
: "m" (v->counter));
}
+#define arch_atomic64_dec arch_atomic64_dec
/**
* arch_atomic64_dec_and_test - decrement and test
@@ -113,11 +113,11 @@ static __always_inline void arch_atomic64_dec(atomic64_t *v)
* returns true if the result is 0, or false for all other
* cases.
*/
-#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
}
+#define arch_atomic64_dec_and_test arch_atomic64_dec_and_test
/**
* arch_atomic64_inc_and_test - increment and test
@@ -127,11 +127,11 @@ static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
* and returns true if the result is zero, or false for all
* other cases.
*/
-#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
}
+#define arch_atomic64_inc_and_test arch_atomic64_inc_and_test
/**
* arch_atomic64_add_negative - add and test if negative
@@ -142,11 +142,11 @@ static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-#define arch_atomic64_add_negative arch_atomic64_add_negative
static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
}
+#define arch_atomic64_add_negative arch_atomic64_add_negative
/**
* arch_atomic64_add_return - add and return
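
The reordering in these three headers is purely mechanical: each '#define arch_atomic*_op arch_atomic*_op' marker now follows the function it names. The marker is the standard detection idiom, letting the generic atomic headers probe with #ifdef whether the architecture supplies an operation before emitting a fallback, roughly:

	/* illustrative fallback pattern used by generic atomic headers */
	#ifndef arch_atomic_inc
	static __always_inline void arch_atomic_inc(atomic_t *v)
	{
		arch_atomic_add(1, v);
	}
	#define arch_atomic_inc arch_atomic_inc
	#endif
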
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index e203169931c7..6390bd8c141b 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -14,6 +14,16 @@
#ifndef _ASM_X86_FIXMAP_H
#define _ASM_X86_FIXMAP_H
+/*
+ * Exposed to assembly code for setting up initial page tables. Cannot be
+ * calculated in assembly code (fixmap entries are an enum), but is sanity
+ * checked in the actual fixmap C code to make sure that the fixmap is
+ * covered fully.
+ */
+#define FIXMAP_PMD_NUM 2
+/* fixmap starts downwards from the 507th entry in level2_fixmap_pgt */
+#define FIXMAP_PMD_TOP 507
+
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#include <asm/acpi.h>
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index a38bf5a1e37a..69dcdf195b61 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -528,7 +528,7 @@ static inline void fpregs_activate(struct fpu *fpu)
static inline void
switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
- if (old_fpu->initialized) {
+ if (static_cpu_has(X86_FEATURE_FPU) && old_fpu->initialized) {
if (!copy_fpregs_to_fpstate(old_fpu))
old_fpu->last_cpu = -1;
else
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index e977b6b3a538..00e01d215f74 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -726,19 +726,21 @@ struct hv_enlightened_vmcs {
#define HV_STIMER_AUTOENABLE (1ULL << 3)
#define HV_STIMER_SINT(config) (__u8)(((config) >> 16) & 0x0F)
-struct ipi_arg_non_ex {
- u32 vector;
- u32 reserved;
- u64 cpu_mask;
-};
-
struct hv_vpset {
u64 format;
u64 valid_bank_mask;
u64 bank_contents[];
};
-struct ipi_arg_ex {
+/* HvCallSendSyntheticClusterIpi hypercall */
+struct hv_send_ipi {
+ u32 vector;
+ u32 reserved;
+ u64 cpu_mask;
+};
+
+/* HvCallSendSyntheticClusterIpiEx hypercall */
+struct hv_send_ipi_ex {
u32 vector;
u32 reserved;
struct hv_vpset vp_set;
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 6de64840dd22..9a92a3ac2ac5 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -369,18 +369,6 @@ extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size);
extern bool is_early_ioremap_ptep(pte_t *ptep);
-#ifdef CONFIG_XEN
-#include <xen/xen.h>
-struct bio_vec;
-
-extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
- const struct bio_vec *vec2);
-
-#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
- (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
- (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
-#endif /* CONFIG_XEN */
-
#define IO_SPACE_LIMIT 0xffff
#include <asm-generic/io.h>
diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
index 395c9631e000..75f1e35e7c15 100644
--- a/arch/x86/include/asm/kdebug.h
+++ b/arch/x86/include/asm/kdebug.h
@@ -22,10 +22,20 @@ enum die_val {
DIE_NMIUNKNOWN,
};
+enum show_regs_mode {
+ SHOW_REGS_SHORT,
+ /*
+ * For when userspace crashed, but we don't think it's our fault, and
+ * therefore don't print kernel registers.
+ */
+ SHOW_REGS_USER,
+ SHOW_REGS_ALL
+};
+
extern void die(const char *, struct pt_regs *,long);
extern int __must_check __die(const char *, struct pt_regs *, long);
extern void show_stack_regs(struct pt_regs *regs);
-extern void __show_regs(struct pt_regs *regs, int all);
+extern void __show_regs(struct pt_regs *regs, enum show_regs_mode);
extern void show_iret_regs(struct pt_regs *regs);
extern unsigned long oops_begin(void);
extern void oops_end(unsigned long, struct pt_regs *, int signr);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 00ddb0c9e612..09b2e3e2cf1b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -869,6 +869,8 @@ struct kvm_arch {
bool x2apic_format;
bool x2apic_broadcast_quirk_disabled;
+
+ bool guest_can_read_msr_platform_info;
};
struct kvm_vm_stat {
@@ -1022,6 +1024,7 @@ struct kvm_x86_ops {
void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
+ bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
@@ -1055,6 +1058,7 @@ struct kvm_x86_ops {
bool (*umip_emulated)(void);
int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
+ void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
@@ -1237,19 +1241,12 @@ enum emulation_result {
#define EMULTYPE_NO_DECODE (1 << 0)
#define EMULTYPE_TRAP_UD (1 << 1)
#define EMULTYPE_SKIP (1 << 2)
-#define EMULTYPE_RETRY (1 << 3)
-#define EMULTYPE_NO_REEXECUTE (1 << 4)
-#define EMULTYPE_NO_UD_ON_FAIL (1 << 5)
-#define EMULTYPE_VMWARE (1 << 6)
-int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
- int emulation_type, void *insn, int insn_len);
-
-static inline int emulate_instruction(struct kvm_vcpu *vcpu,
- int emulation_type)
-{
- return x86_emulate_instruction(vcpu, 0,
- emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);
-}
+#define EMULTYPE_ALLOW_RETRY (1 << 3)
+#define EMULTYPE_NO_UD_ON_FAIL (1 << 4)
+#define EMULTYPE_VMWARE (1 << 5)
+int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
+int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
+ void *insn, int insn_len);
void kvm_enable_efer_bits(u64);
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
@@ -1450,7 +1447,6 @@ asmlinkage void kvm_spurious_fault(void);
____kvm_handle_fault_on_reboot(insn, "")
#define KVM_ARCH_WANT_MMU_NOTIFIER
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
@@ -1463,7 +1459,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
- unsigned long ipi_bitmap_high, int min,
+ unsigned long ipi_bitmap_high, u32 min,
unsigned long icr, int op_64_bit);
u64 kvm_get_arch_capabilities(void);
@@ -1490,6 +1486,7 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
+void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);
int kvm_is_in_guest(void);
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index c0643831706e..616f8e637bc3 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -48,10 +48,13 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void);
+void __init mem_encrypt_free_decrypted_mem(void);
bool sme_active(void);
bool sev_active(void);
+#define __bss_decrypted __attribute__((__section__(".bss..decrypted")))
+
#else /* !CONFIG_AMD_MEM_ENCRYPT */
#define sme_me_mask 0ULL
@@ -77,6 +80,8 @@ early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0;
static inline int __init
early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
+#define __bss_decrypted
+
#endif /* CONFIG_AMD_MEM_ENCRYPT */
/*
@@ -88,6 +93,8 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0;
#define __sme_pa(x) (__pa(x) | sme_me_mask)
#define __sme_pa_nodebug(x) (__pa_nodebug(x) | sme_me_mask)
+extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
+
#endif /* __ASSEMBLY__ */
#endif /* __X86_MEM_ENCRYPT_H__ */
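
Under SEV the guest's memory is encrypted with a key the hypervisor does not hold, so any data the hypervisor must read or write has to sit in pages mapped with the encryption bit cleared. The new attribute routes such objects into the .bss..decrypted section, whose mappings __startup_64() strips the C-bit from later in this patch. Usage mirrors the kvmclock hunk below:

	/* hypervisor-shared data: must be mapped unencrypted under SEV */
	static struct pvclock_wall_clock wall_clock __bss_decrypted;
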
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index e9202a0de8f0..1a19d11cfbbd 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -185,22 +185,22 @@ do { \
typeof(var) pfo_ret__; \
switch (sizeof(var)) { \
case 1: \
- asm(op "b "__percpu_arg(1)",%0" \
+ asm volatile(op "b "__percpu_arg(1)",%0"\
: "=q" (pfo_ret__) \
: "m" (var)); \
break; \
case 2: \
- asm(op "w "__percpu_arg(1)",%0" \
+ asm volatile(op "w "__percpu_arg(1)",%0"\
: "=r" (pfo_ret__) \
: "m" (var)); \
break; \
case 4: \
- asm(op "l "__percpu_arg(1)",%0" \
+ asm volatile(op "l "__percpu_arg(1)",%0"\
: "=r" (pfo_ret__) \
: "m" (var)); \
break; \
case 8: \
- asm(op "q "__percpu_arg(1)",%0" \
+ asm volatile(op "q "__percpu_arg(1)",%0"\
: "=r" (pfo_ret__) \
: "m" (var)); \
break; \
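
Plain asm() statements with outputs may be CSE'd, hoisted, or dropped by GCC; asm volatile forces each this_cpu_read() to perform a fresh load, which matters when the value can change underneath the compiler (preemption, migration, interrupts). The observable difference, sketched with a hypothetical per-CPU counter:

	DEFINE_PER_CPU(int, demo_count);

	static int demo(void)
	{
		int a = this_cpu_read(demo_count);
		int b = this_cpu_read(demo_count);

		/* without volatile the two reads could be folded into one;
		 * with it, b reflects any change made between the loads */
		return b - a;
	}
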
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 12f54082f4c8..78241b736f2a 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -46,6 +46,14 @@
#define INTEL_ARCH_EVENT_MASK \
(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)
+#define AMD64_L3_SLICE_SHIFT 48
+#define AMD64_L3_SLICE_MASK \
+ ((0xFULL) << AMD64_L3_SLICE_SHIFT)
+
+#define AMD64_L3_THREAD_SHIFT 56
+#define AMD64_L3_THREAD_MASK \
+ ((0xFFULL) << AMD64_L3_THREAD_SHIFT)
+
#define X86_RAW_EVENT_MASK \
(ARCH_PERFMON_EVENTSEL_EVENT | \
ARCH_PERFMON_EVENTSEL_UMASK | \
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index 24c6cf5f16b7..60d0f9015317 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -19,9 +19,6 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
- pmd.pud.p4d.pgd = pti_set_user_pgtbl(&pmdp->pud.p4d.pgd, pmd.pud.p4d.pgd);
-#endif
*pmdp = pmd;
}
@@ -61,9 +58,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
#ifdef CONFIG_SMP
static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
{
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
- pti_set_user_pgtbl(&xp->pud.p4d.pgd, __pgd(0));
-#endif
return __pmd(xchg((pmdval_t *)xp, 0));
}
#else
@@ -73,9 +67,6 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
#ifdef CONFIG_SMP
static inline pud_t native_pudp_get_and_clear(pud_t *xp)
{
-#ifdef CONFIG_PAGE_TABLE_ISOLATION
- pti_set_user_pgtbl(&xp->p4d.pgd, __pgd(0));
-#endif
return __pud(xchg((pudval_t *)xp, 0));
}
#else
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index e4ffa565a69f..690c0307afed 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -1195,7 +1195,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
return xchg(pmdp, pmd);
} else {
pmd_t old = *pmdp;
- *pmdp = pmd;
+ WRITE_ONCE(*pmdp, pmd);
return old;
}
}
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index f773d5e6c8cc..9c85b54bf03c 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -14,6 +14,7 @@
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>
+#include <asm/fixmap.h>
extern p4d_t level4_kernel_pgt[512];
extern p4d_t level4_ident_pgt[512];
@@ -22,7 +23,7 @@ extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
-extern pte_t level1_fixmap_pgt[512];
+extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM];
extern pgd_t init_top_pgt[];
#define swapper_pg_dir init_top_pgt
@@ -55,15 +56,15 @@ struct mm_struct;
void set_pte_vaddr_p4d(p4d_t *p4d_page, unsigned long vaddr, pte_t new_pte);
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
-static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
+static inline void native_set_pte(pte_t *ptep, pte_t pte)
{
- *ptep = native_make_pte(0);
+ WRITE_ONCE(*ptep, pte);
}
-static inline void native_set_pte(pte_t *ptep, pte_t pte)
+static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
{
- *ptep = pte;
+ native_set_pte(ptep, native_make_pte(0));
}
static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
@@ -73,7 +74,7 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
- *pmdp = pmd;
+ WRITE_ONCE(*pmdp, pmd);
}
static inline void native_pmd_clear(pmd_t *pmd)
@@ -109,7 +110,7 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
static inline void native_set_pud(pud_t *pudp, pud_t pud)
{
- *pudp = pud;
+ WRITE_ONCE(*pudp, pud);
}
static inline void native_pud_clear(pud_t *pud)
@@ -137,13 +138,13 @@ static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
pgd_t pgd;
if (pgtable_l5_enabled() || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) {
- *p4dp = p4d;
+ WRITE_ONCE(*p4dp, p4d);
return;
}
pgd = native_make_pgd(native_p4d_val(p4d));
pgd = pti_set_user_pgtbl((pgd_t *)p4dp, pgd);
- *p4dp = native_make_p4d(native_pgd_val(pgd));
+ WRITE_ONCE(*p4dp, native_make_p4d(native_pgd_val(pgd)));
}
static inline void native_p4d_clear(p4d_t *p4d)
@@ -153,7 +154,7 @@ static inline void native_p4d_clear(p4d_t *p4d)
static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
- *pgdp = pti_set_user_pgtbl(pgdp, pgd);
+ WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
}
static inline void native_pgd_clear(pgd_t *pgd)
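
Every conversion in this hunk serves the same rule: page-table entries are read locklessly (fast GUP and friends use READ_ONCE()), so writers must publish them with a single non-torn store, which WRITE_ONCE() guarantees and a plain assignment does not. The required pairing, in miniature:

	static void publish_pte(pte_t *ptep, pte_t new_pte)
	{
		WRITE_ONCE(*ptep, new_pte);	/* one store, never split */
	}

	static pte_t snapshot_pte(pte_t *ptep)
	{
		return READ_ONCE(*ptep);	/* one load, never split */
	}
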
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index b64acb08a62b..106b7d0e2dae 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -124,7 +124,7 @@
*/
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
- _PAGE_SOFT_DIRTY)
+ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
/*
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index a80c0673798f..e60c45fd3679 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -10,8 +10,13 @@ struct cpumask;
struct mm_struct;
#ifdef CONFIG_X86_UV
+#include <linux/efi.h>
extern enum uv_system_type get_uv_system_type(void);
+static inline bool is_early_uv_system(void)
+{
+	return efi.uv_systab && efi.uv_systab != EFI_INVALID_TABLE_ADDR;
+}
extern int is_uv_system(void);
extern int is_uv_hubless(void);
extern void uv_cpu_init(void);
@@ -23,6 +28,7 @@ extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
#else /* X86_UV */
static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
+static inline bool is_early_uv_system(void)	{ return false; }
static inline int is_uv_system(void) { return 0; }
static inline int is_uv_hubless(void) { return 0; }
static inline void uv_cpu_init(void) { }
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index d383140e1dc8..068d9b067c83 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -2,6 +2,8 @@
#ifndef _ASM_X86_XEN_EVENTS_H
#define _ASM_X86_XEN_EVENTS_H
+#include <xen/xen.h>
+
enum ipi_vector {
XEN_RESCHEDULE_VECTOR,
XEN_CALL_FUNCTION_VECTOR,
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 86299efa804a..fd23d5778ea1 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -377,6 +377,7 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
+#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 9f148e3d45b4..7654febd5102 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -413,7 +413,7 @@ static int activate_managed(struct irq_data *irqd)
if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
/* Something in the core code broke! Survive gracefully */
pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
- return EINVAL;
+ return -EINVAL;
}
ret = assign_managed_vector(irqd, vector_searchmask);
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index ec00d1ff5098..f7151cd03cb0 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -1640,6 +1640,7 @@ static int do_open(struct inode *inode, struct file *filp)
return 0;
}
+#ifdef CONFIG_PROC_FS
static int proc_apm_show(struct seq_file *m, void *v)
{
unsigned short bx;
@@ -1719,6 +1720,7 @@ static int proc_apm_show(struct seq_file *m, void *v)
units);
return 0;
}
+#endif
static int apm(void *unused)
{
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 22ab408177b2..eeea634bee0a 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -922,7 +922,7 @@ static void init_amd(struct cpuinfo_x86 *c)
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
/* AMD errata T13 (order #21922) */
- if ((c->x86 == 6)) {
+ if (c->x86 == 6) {
/* Duron Rev A0 */
if (c->x86_model == 3 && c->x86_stepping == 0)
size = 64;
diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h
index 4e588f36228f..3736f6dc9545 100644
--- a/arch/x86/kernel/cpu/intel_rdt.h
+++ b/arch/x86/kernel/cpu/intel_rdt.h
@@ -382,6 +382,11 @@ static inline bool is_mbm_event(int e)
e <= QOS_L3_MBM_LOCAL_EVENT_ID);
}
+struct rdt_parse_data {
+ struct rdtgroup *rdtgrp;
+ char *buf;
+};
+
/**
* struct rdt_resource - attributes of an RDT resource
* @rid: The index of the resource
@@ -423,16 +428,19 @@ struct rdt_resource {
struct rdt_cache cache;
struct rdt_membw membw;
const char *format_str;
- int (*parse_ctrlval) (void *data, struct rdt_resource *r,
- struct rdt_domain *d);
+ int (*parse_ctrlval)(struct rdt_parse_data *data,
+ struct rdt_resource *r,
+ struct rdt_domain *d);
struct list_head evt_list;
int num_rmid;
unsigned int mon_scale;
unsigned long fflags;
};
-int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d);
-int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d);
+int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
+ struct rdt_domain *d);
+int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
+ struct rdt_domain *d);
extern struct mutex rdtgroup_mutex;
@@ -521,14 +529,14 @@ ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
int rdtgroup_schemata_show(struct kernfs_open_file *of,
struct seq_file *s, void *v);
bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
- u32 _cbm, int closid, bool exclusive);
+ unsigned long cbm, int closid, bool exclusive);
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d,
- u32 cbm);
+ unsigned long cbm);
enum rdtgrp_mode rdtgroup_mode_by_closid(int closid);
int rdtgroup_tasks_assigned(struct rdtgroup *r);
int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp);
int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp);
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm);
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm);
bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d);
int rdt_pseudo_lock_init(void);
void rdt_pseudo_lock_release(void);
@@ -536,6 +544,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp);
void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp);
struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r);
int update_domains(struct rdt_resource *r, int closid);
+int closids_supported(void);
void closid_free(int closid);
int alloc_rmid(void);
void free_rmid(u32 rmid);
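
Promoting the parse callback's 'void *data' to a typed struct rdt_parse_data lets the compiler check every parse_cbm()/parse_bw() implementation instead of trusting opaque casts. On the caller side (parse_line(), reworked later in this patch) the argument is built roughly like:

	struct rdt_parse_data data = {
		.rdtgrp	= rdtgrp,	/* group whose schemata is written */
		.buf	= dom,		/* per-domain value token */
	};

	if (r->parse_ctrlval(&data, r, d))
		return -EINVAL;
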
diff --git a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
index af358ca05160..0f53049719cd 100644
--- a/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
+++ b/arch/x86/kernel/cpu/intel_rdt_ctrlmondata.c
@@ -64,19 +64,19 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
return true;
}
-int parse_bw(void *_buf, struct rdt_resource *r, struct rdt_domain *d)
+int parse_bw(struct rdt_parse_data *data, struct rdt_resource *r,
+ struct rdt_domain *d)
{
- unsigned long data;
- char *buf = _buf;
+ unsigned long bw_val;
if (d->have_new_ctrl) {
rdt_last_cmd_printf("duplicate domain %d\n", d->id);
return -EINVAL;
}
- if (!bw_validate(buf, &data, r))
+ if (!bw_validate(data->buf, &bw_val, r))
return -EINVAL;
- d->new_ctrl = data;
+ d->new_ctrl = bw_val;
d->have_new_ctrl = true;
return 0;
@@ -123,18 +123,13 @@ static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
return true;
}
-struct rdt_cbm_parse_data {
- struct rdtgroup *rdtgrp;
- char *buf;
-};
-
/*
* Read one cache bit mask (hex). Check that it is valid for the current
* resource type.
*/
-int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d)
+int parse_cbm(struct rdt_parse_data *data, struct rdt_resource *r,
+ struct rdt_domain *d)
{
- struct rdt_cbm_parse_data *data = _data;
struct rdtgroup *rdtgrp = data->rdtgrp;
u32 cbm_val;
@@ -195,11 +190,17 @@ int parse_cbm(void *_data, struct rdt_resource *r, struct rdt_domain *d)
static int parse_line(char *line, struct rdt_resource *r,
struct rdtgroup *rdtgrp)
{
- struct rdt_cbm_parse_data data;
+ struct rdt_parse_data data;
char *dom = NULL, *id;
struct rdt_domain *d;
unsigned long dom_id;
+ if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
+ r->rid == RDT_RESOURCE_MBA) {
+ rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");
+ return -EINVAL;
+ }
+
next:
if (!line || line[0] == '\0')
return 0;
diff --git a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
index 40f3903ae5d9..f8c260d522ca 100644
--- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
+++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c
@@ -797,25 +797,27 @@ int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp)
/**
* rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked
* @d: RDT domain
- * @_cbm: CBM to test
+ * @cbm: CBM to test
*
- * @d represents a cache instance and @_cbm a capacity bitmask that is
- * considered for it. Determine if @_cbm overlaps with any existing
+ * @d represents a cache instance and @cbm a capacity bitmask that is
+ * considered for it. Determine if @cbm overlaps with any existing
* pseudo-locked region on @d.
*
- * Return: true if @_cbm overlaps with pseudo-locked region on @d, false
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
+ * Return: true if @cbm overlaps with pseudo-locked region on @d, false
* otherwise.
*/
-bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, u32 _cbm)
+bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm)
{
- unsigned long *cbm = (unsigned long *)&_cbm;
- unsigned long *cbm_b;
unsigned int cbm_len;
+ unsigned long cbm_b;
if (d->plr) {
cbm_len = d->plr->r->cache.cbm_len;
- cbm_b = (unsigned long *)&d->plr->cbm;
- if (bitmap_intersects(cbm, cbm_b, cbm_len))
+ cbm_b = d->plr->cbm;
+ if (bitmap_intersects(&cbm, &cbm_b, cbm_len))
return true;
}
return false;
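
The underlying bug in all of these u32-to-unsigned-long conversions: the bitmap helpers operate on unsigned long words, so casting a u32's address to 'unsigned long *' makes them read four bytes past the variable on 64-bit. Widening by assignment keeps the access in bounds, which is exactly what the new cbm_b local here and the callers' tmp_cbm do. The pattern (cbm_a, cbm_b and cbm_len stand in for the u32 register values and bit length):

	unsigned long a = cbm_a;	/* widen by value, never cast &u32 */
	unsigned long b = cbm_b;
	bool overlap = bitmap_intersects(&a, &b, cbm_len);
	int weight = bitmap_weight(&a, cbm_len);
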
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
index b799c00bef09..b140c68bc14b 100644
--- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
+++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c
@@ -97,6 +97,12 @@ void rdt_last_cmd_printf(const char *fmt, ...)
* limited as the number of resources grows.
*/
static int closid_free_map;
+static int closid_free_map_len;
+
+int closids_supported(void)
+{
+ return closid_free_map_len;
+}
static void closid_init(void)
{
@@ -111,6 +117,7 @@ static void closid_init(void)
/* CLOSID 0 is always reserved for the default group */
closid_free_map &= ~1;
+ closid_free_map_len = rdt_min_closid;
}
static int closid_alloc(void)
@@ -802,7 +809,7 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
sw_shareable = 0;
exclusive = 0;
seq_printf(seq, "%d=", dom->id);
- for (i = 0; i < r->num_closid; i++, ctrl++) {
+ for (i = 0; i < closids_supported(); i++, ctrl++) {
if (!closid_allocated(i))
continue;
mode = rdtgroup_mode_by_closid(i);
@@ -968,33 +975,34 @@ static int rdtgroup_mode_show(struct kernfs_open_file *of,
* is false then overlaps with any resource group or hardware entities
* will be considered.
*
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
* Return: false if CBM does not overlap, true if it does.
*/
bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
- u32 _cbm, int closid, bool exclusive)
+ unsigned long cbm, int closid, bool exclusive)
{
- unsigned long *cbm = (unsigned long *)&_cbm;
- unsigned long *ctrl_b;
enum rdtgrp_mode mode;
+ unsigned long ctrl_b;
u32 *ctrl;
int i;
/* Check for any overlap with regions used by hardware directly */
if (!exclusive) {
- if (bitmap_intersects(cbm,
- (unsigned long *)&r->cache.shareable_bits,
- r->cache.cbm_len))
+ ctrl_b = r->cache.shareable_bits;
+ if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
return true;
}
/* Check for overlap with other resource groups */
ctrl = d->ctrl_val;
- for (i = 0; i < r->num_closid; i++, ctrl++) {
- ctrl_b = (unsigned long *)ctrl;
+ for (i = 0; i < closids_supported(); i++, ctrl++) {
+ ctrl_b = *ctrl;
mode = rdtgroup_mode_by_closid(i);
if (closid_allocated(i) && i != closid &&
mode != RDT_MODE_PSEUDO_LOCKSETUP) {
- if (bitmap_intersects(cbm, ctrl_b, r->cache.cbm_len)) {
+ if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
if (exclusive) {
if (mode == RDT_MODE_EXCLUSIVE)
return true;
@@ -1024,16 +1032,27 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
{
int closid = rdtgrp->closid;
struct rdt_resource *r;
+ bool has_cache = false;
struct rdt_domain *d;
for_each_alloc_enabled_rdt_resource(r) {
+ if (r->rid == RDT_RESOURCE_MBA)
+ continue;
+ has_cache = true;
list_for_each_entry(d, &r->domains, list) {
if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
- rdtgrp->closid, false))
+ rdtgrp->closid, false)) {
+ rdt_last_cmd_puts("schemata overlaps\n");
return false;
+ }
}
}
+ if (!has_cache) {
+ rdt_last_cmd_puts("cannot be exclusive without CAT/CDP\n");
+ return false;
+ }
+
return true;
}
@@ -1085,7 +1104,6 @@ static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
rdtgrp->mode = RDT_MODE_SHAREABLE;
} else if (!strcmp(buf, "exclusive")) {
if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
- rdt_last_cmd_printf("schemata overlaps\n");
ret = -EINVAL;
goto out;
}
@@ -1121,15 +1139,18 @@ out:
* computed by first dividing the total cache size by the CBM length to
* determine how many bytes each bit in the bitmask represents. The result
* is multiplied with the number of bits set in the bitmask.
+ *
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
*/
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
- struct rdt_domain *d, u32 cbm)
+ struct rdt_domain *d, unsigned long cbm)
{
struct cpu_cacheinfo *ci;
unsigned int size = 0;
int num_b, i;
- num_b = bitmap_weight((unsigned long *)&cbm, r->cache.cbm_len);
+ num_b = bitmap_weight(&cbm, r->cache.cbm_len);
ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
for (i = 0; i < ci->num_leaves; i++) {
if (ci->info_list[i].level == r->cache_level) {
@@ -1155,8 +1176,8 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
struct rdt_resource *r;
struct rdt_domain *d;
unsigned int size;
- bool sep = false;
- u32 cbm;
+ bool sep;
+ u32 ctrl;
rdtgrp = rdtgroup_kn_lock_live(of->kn);
if (!rdtgrp) {
@@ -1174,6 +1195,7 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
}
for_each_alloc_enabled_rdt_resource(r) {
+ sep = false;
seq_printf(s, "%*s:", max_name_width, r->name);
list_for_each_entry(d, &r->domains, list) {
if (sep)
@@ -1181,8 +1203,13 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
size = 0;
} else {
- cbm = d->ctrl_val[rdtgrp->closid];
- size = rdtgroup_cbm_to_size(r, d, cbm);
+ ctrl = (!is_mba_sc(r) ?
+ d->ctrl_val[rdtgrp->closid] :
+ d->mbps_val[rdtgrp->closid]);
+ if (r->rid == RDT_RESOURCE_MBA)
+ size = ctrl;
+ else
+ size = rdtgroup_cbm_to_size(r, d, ctrl);
}
seq_printf(s, "%d=%u", d->id, size);
sep = true;
@@ -2330,18 +2357,25 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
u32 used_b = 0, unused_b = 0;
u32 closid = rdtgrp->closid;
struct rdt_resource *r;
+ unsigned long tmp_cbm;
enum rdtgrp_mode mode;
struct rdt_domain *d;
int i, ret;
u32 *ctrl;
for_each_alloc_enabled_rdt_resource(r) {
+ /*
+ * Only initialize default allocations for CBM cache
+ * resources
+ */
+ if (r->rid == RDT_RESOURCE_MBA)
+ continue;
list_for_each_entry(d, &r->domains, list) {
d->have_new_ctrl = false;
d->new_ctrl = r->cache.shareable_bits;
used_b = r->cache.shareable_bits;
ctrl = d->ctrl_val;
- for (i = 0; i < r->num_closid; i++, ctrl++) {
+ for (i = 0; i < closids_supported(); i++, ctrl++) {
if (closid_allocated(i) && i != closid) {
mode = rdtgroup_mode_by_closid(i);
if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
@@ -2361,9 +2395,14 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
* modify the CBM based on system availability.
*/
cbm_ensure_valid(&d->new_ctrl, r);
- if (bitmap_weight((unsigned long *) &d->new_ctrl,
- r->cache.cbm_len) <
- r->cache.min_cbm_bits) {
+ /*
+ * Assign the u32 CBM to an unsigned long to ensure
+ * that bitmap_weight() does not access out-of-bound
+ * memory.
+ */
+ tmp_cbm = d->new_ctrl;
+ if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) <
+ r->cache.min_cbm_bits) {
rdt_last_cmd_printf("no space on %s:%d\n",
r->name, d->id);
return -ENOSPC;
@@ -2373,6 +2412,12 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
}
for_each_alloc_enabled_rdt_resource(r) {
+ /*
+ * Only initialize default allocations for CBM cache
+ * resources
+ */
+ if (r->rid == RDT_RESOURCE_MBA)
+ continue;
ret = update_domains(r, rdtgrp->closid);
if (ret < 0) {
rdt_last_cmd_puts("failed to initialize allocations\n");
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 0624957aa068..07b5fc00b188 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -504,6 +504,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
struct microcode_amd *mc_amd;
struct ucode_cpu_info *uci;
struct ucode_patch *p;
+ enum ucode_state ret;
u32 rev, dummy;
BUG_ON(raw_smp_processor_id() != cpu);
@@ -521,9 +522,8 @@ static enum ucode_state apply_microcode_amd(int cpu)
/* need to apply patch? */
if (rev >= mc_amd->hdr.patch_id) {
- c->microcode = rev;
- uci->cpu_sig.rev = rev;
- return UCODE_OK;
+ ret = UCODE_OK;
+ goto out;
}
if (__apply_microcode_amd(mc_amd)) {
@@ -531,13 +531,21 @@ static enum ucode_state apply_microcode_amd(int cpu)
cpu, mc_amd->hdr.patch_id);
return UCODE_ERROR;
}
- pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
- mc_amd->hdr.patch_id);
- uci->cpu_sig.rev = mc_amd->hdr.patch_id;
- c->microcode = mc_amd->hdr.patch_id;
+ rev = mc_amd->hdr.patch_id;
+ ret = UCODE_UPDATED;
+
+ pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
- return UCODE_UPDATED;
+out:
+ uci->cpu_sig.rev = rev;
+ c->microcode = rev;
+
+ /* Update boot_cpu_data's revision too, if we're on the BSP: */
+ if (c->cpu_index == boot_cpu_data.cpu_index)
+ boot_cpu_data.microcode = rev;
+
+ return ret;
}
static int install_equiv_cpu_table(const u8 *buf)
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 97ccf4c3b45b..16936a24795c 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -795,6 +795,7 @@ static enum ucode_state apply_microcode_intel(int cpu)
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
struct cpuinfo_x86 *c = &cpu_data(cpu);
struct microcode_intel *mc;
+ enum ucode_state ret;
static int prev_rev;
u32 rev;
@@ -817,9 +818,8 @@ static enum ucode_state apply_microcode_intel(int cpu)
*/
rev = intel_get_microcode_revision();
if (rev >= mc->hdr.rev) {
- uci->cpu_sig.rev = rev;
- c->microcode = rev;
- return UCODE_OK;
+ ret = UCODE_OK;
+ goto out;
}
/*
@@ -848,10 +848,17 @@ static enum ucode_state apply_microcode_intel(int cpu)
prev_rev = rev;
}
+ ret = UCODE_UPDATED;
+
+out:
uci->cpu_sig.rev = rev;
- c->microcode = rev;
+ c->microcode = rev;
+
+ /* Update boot_cpu_data's revision too, if we're on the BSP: */
+ if (c->cpu_index == boot_cpu_data.cpu_index)
+ boot_cpu_data.microcode = rev;
- return UCODE_UPDATED;
+ return ret;
}
static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index f56895106ccf..2b5886401e5f 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -146,7 +146,7 @@ static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
* they can be printed in the right context.
*/
if (!partial && on_stack(info, regs, sizeof(*regs))) {
- __show_regs(regs, 0);
+ __show_regs(regs, SHOW_REGS_SHORT);
} else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
IRET_FRAME_SIZE)) {
@@ -344,7 +344,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
oops_exit();
/* Executive summary in case the oops scrolled away */
- __show_regs(&exec_summary_regs, true);
+ __show_regs(&exec_summary_regs, SHOW_REGS_ALL);
if (!signr)
return;
@@ -407,14 +407,9 @@ void die(const char *str, struct pt_regs *regs, long err)
void show_regs(struct pt_regs *regs)
{
- bool all = true;
-
show_regs_print_info(KERN_DEFAULT);
- if (IS_ENABLED(CONFIG_X86_32))
- all = !user_mode(regs);
-
- __show_regs(regs, all);
+ __show_regs(regs, user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL);
/*
* When in-kernel, we also print out the stack at the time of the fault..
diff --git a/arch/x86/kernel/eisa.c b/arch/x86/kernel/eisa.c
index f260e452e4f8..e8c8c5d78dbd 100644
--- a/arch/x86/kernel/eisa.c
+++ b/arch/x86/kernel/eisa.c
@@ -7,11 +7,17 @@
#include <linux/eisa.h>
#include <linux/io.h>
+#include <xen/xen.h>
+
static __init int eisa_bus_probe(void)
{
- void __iomem *p = ioremap(0x0FFFD9, 4);
+ void __iomem *p;
+
+ if (xen_pv_domain() && !xen_initial_domain())
+ return 0;
- if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
+ p = ioremap(0x0FFFD9, 4);
+ if (p && readl(p) == 'E' + ('I' << 8) + ('S' << 16) + ('A' << 24))
EISA_bus = 1;
iounmap(p);
return 0;
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 23f1691670b6..61a949d84dfa 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -314,7 +314,6 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
* thread's fpu state, reconstruct fxstate from the fsave
* header. Validate and sanitize the copied state.
*/
- struct fpu *fpu = &tsk->thread.fpu;
struct user_i387_ia32_struct env;
int err = 0;
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 8047379e575a..ddee1f0870c4 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -35,6 +35,7 @@
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>
+#include <asm/fixmap.h>
/*
* Manage page tables very early on.
@@ -112,6 +113,7 @@ static bool __head check_la57_support(unsigned long physaddr)
unsigned long __head __startup_64(unsigned long physaddr,
struct boot_params *bp)
{
+ unsigned long vaddr, vaddr_end;
unsigned long load_delta, *p;
unsigned long pgtable_flags;
pgdval_t *pgd;
@@ -165,7 +167,8 @@ unsigned long __head __startup_64(unsigned long physaddr,
pud[511] += load_delta;
pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
- pmd[506] += load_delta;
+ for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
+ pmd[i] += load_delta;
/*
* Set up the identity mapping for the switchover. These
@@ -235,6 +238,21 @@ unsigned long __head __startup_64(unsigned long physaddr,
sme_encrypt_kernel(bp);
/*
+ * Clear the memory encryption mask from the .bss..decrypted section.
+ * The bss section will be memset to zero later in the initialization, so
+ * there is no need to zero it after changing the memory encryption
+ * attribute.
+ */
+ if (mem_encrypt_active()) {
+ vaddr = (unsigned long)__start_bss_decrypted;
+ vaddr_end = (unsigned long)__end_bss_decrypted;
+ for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
+ i = pmd_index(vaddr);
+ pmd[i] -= sme_get_me_mask();
+ }
+ }
+
+ /*
* Return the SME encryption mask (if SME is active) to be used as a
* modifier for the initial pgdir entry programmed into CR3.
*/
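
The new loop in __startup_64() walks the .bss..decrypted virtual range in PMD_SIZE steps and subtracts the SME mask from each covering PMD entry. A toy model of that walk, with the page-table layout and constants invented for illustration:

    /* Toy model of the PMD walk above; only the loop structure matches
     * the hunk, the "page table" and C-bit position are made up. */
    #include <stdint.h>
    #include <stdio.h>

    #define PMD_SHIFT       21
    #define PMD_SIZE        (1UL << PMD_SHIFT)  /* 2 MB per PMD entry */
    #define PTRS_PER_PMD    512
    #define SME_MASK        (1UL << 47)         /* pretend C-bit */

    static unsigned long pmd_index(unsigned long vaddr)
    {
        return (vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
    }

    int main(void)
    {
        static unsigned long pmd[PTRS_PER_PMD];
        unsigned long vaddr = 0x200000, vaddr_end = 0x800000;

        for (unsigned long i = 0; i < PTRS_PER_PMD; i++)
            pmd[i] = (i << PMD_SHIFT) | SME_MASK;   /* encrypted */

        /* Strip the encryption bit from every PMD covering the range. */
        for (; vaddr < vaddr_end; vaddr += PMD_SIZE)
            pmd[pmd_index(vaddr)] -= SME_MASK;

        printf("%#lx\n", pmd[1]);       /* 0x200000: bit cleared */
        return 0;
    }
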
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 15ebc2fc166e..a3618cf04cf6 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -24,6 +24,7 @@
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>
+#include <asm/fixmap.h>
#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
@@ -445,13 +446,20 @@ NEXT_PAGE(level2_kernel_pgt)
KERNEL_IMAGE_SIZE/PMD_SIZE)
NEXT_PAGE(level2_fixmap_pgt)
- .fill 506,8,0
- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
- .fill 5,8,0
+ .fill (512 - 4 - FIXMAP_PMD_NUM),8,0
+ pgtno = 0
+ .rept (FIXMAP_PMD_NUM)
+ .quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
+ + _PAGE_TABLE_NOENC;
+ pgtno = pgtno + 1
+ .endr
+ /* 6 MB reserved space + a 2MB hole */
+ .fill 4,8,0
NEXT_PAGE(level1_fixmap_pgt)
+ .rept (FIXMAP_PMD_NUM)
.fill 512,8,0
+ .endr
#undef PMDS
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 1e6764648af3..013fe3d21dbb 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -28,6 +28,7 @@
#include <linux/sched/clock.h>
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/set_memory.h>
#include <asm/hypervisor.h>
#include <asm/mem_encrypt.h>
@@ -61,9 +62,10 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
(PAGE_SIZE / sizeof(struct pvclock_vsyscall_time_info))
static struct pvclock_vsyscall_time_info
- hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __aligned(PAGE_SIZE);
-static struct pvclock_wall_clock wall_clock;
+ hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
+static struct pvclock_wall_clock wall_clock __bss_decrypted;
static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+static struct pvclock_vsyscall_time_info *hvclock_mem;
static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
{
@@ -236,6 +238,45 @@ static void kvm_shutdown(void)
native_machine_shutdown();
}
+static void __init kvmclock_init_mem(void)
+{
+ unsigned long ncpus;
+ unsigned int order;
+ struct page *p;
+ int r;
+
+ if (HVC_BOOT_ARRAY_SIZE >= num_possible_cpus())
+ return;
+
+ ncpus = num_possible_cpus() - HVC_BOOT_ARRAY_SIZE;
+ order = get_order(ncpus * sizeof(*hvclock_mem));
+
+ p = alloc_pages(GFP_KERNEL, order);
+ if (!p) {
+ pr_warn("%s: failed to alloc %d pages", __func__, (1U << order));
+ return;
+ }
+
+ hvclock_mem = page_address(p);
+
+ /*
+ * hvclock is shared between the guest and the hypervisor, and must
+ * be mapped decrypted.
+ */
+ if (sev_active()) {
+ r = set_memory_decrypted((unsigned long) hvclock_mem,
+ 1UL << order);
+ if (r) {
+ __free_pages(p, order);
+ hvclock_mem = NULL;
+ pr_warn("kvmclock: set_memory_decrypted() failed. Disabling\n");
+ return;
+ }
+ }
+
+ memset(hvclock_mem, 0, PAGE_SIZE << order);
+}
+
static int __init kvm_setup_vsyscall_timeinfo(void)
{
#ifdef CONFIG_X86_64
@@ -250,6 +291,9 @@ static int __init kvm_setup_vsyscall_timeinfo(void)
kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
#endif
+
+ kvmclock_init_mem();
+
return 0;
}
early_initcall(kvm_setup_vsyscall_timeinfo);
@@ -269,8 +313,10 @@ static int kvmclock_setup_percpu(unsigned int cpu)
/* Use the static page for the first CPUs, allocate otherwise */
if (cpu < HVC_BOOT_ARRAY_SIZE)
p = &hv_clock_boot[cpu];
+ else if (hvclock_mem)
+ p = hvclock_mem + cpu - HVC_BOOT_ARRAY_SIZE;
else
- p = kzalloc(sizeof(*p), GFP_KERNEL);
+ return -ENOMEM;
per_cpu(hv_clock_per_cpu, cpu) = p;
return p ? 0 : -ENOMEM;
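
kvmclock_init_mem() only kicks in when there are more possible CPUs than the page-sized static array can describe, and it sizes the overflow allocation with get_order(). The arithmetic, with a userspace stand-in for get_order() and invented sizes:

    /* Sizing sketch for the hvclock overflow allocation; the array size
     * and the 64-byte struct size are invented stand-ins. */
    #include <stdio.h>

    #define PAGE_SHIFT          12
    #define PAGE_SIZE           (1UL << PAGE_SHIFT)
    #define HVC_BOOT_ARRAY_SIZE (PAGE_SIZE / 64)    /* 64 per entry */

    static unsigned int get_order(unsigned long size) /* pages, rounded up */
    {
        unsigned int order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
            order++;
            size >>= 1;
        }
        return order;
    }

    int main(void)
    {
        unsigned long ncpus = 200;  /* num_possible_cpus() stand-in */

        if (HVC_BOOT_ARRAY_SIZE >= ncpus) {
            puts("static array suffices");
            return 0;
        }
        ncpus -= HVC_BOOT_ARRAY_SIZE;
        printf("order %u (%lu bytes)\n", get_order(ncpus * 64),
               PAGE_SIZE << get_order(ncpus * 64));
        return 0;
    }
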
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index afdb303285f8..8dc69d82567e 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -91,7 +91,7 @@ unsigned paravirt_patch_call(void *insnbuf,
if (len < 5) {
#ifdef CONFIG_RETPOLINE
- WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr);
+ WARN_ONCE(1, "Failing to patch indirect CALL in %ps\n", (void *)addr);
#endif
return len; /* call too long for patch site */
}
@@ -111,7 +111,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
if (len < 5) {
#ifdef CONFIG_RETPOLINE
- WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr);
+ WARN_ONCE(1, "Failing to patch indirect JMP in %ps\n", (void *)addr);
#endif
return len; /* call too long for patch site */
}
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 661583662430..71c0b01d93b1 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -42,10 +42,8 @@ IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
int __init pci_swiotlb_detect_4gb(void)
{
/* don't initialize swiotlb if iommu=off (no_iommu=1) */
-#ifdef CONFIG_X86_64
if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
swiotlb = 1;
-#endif
/*
* If SME is active then swiotlb will be set to 1 so that bounce
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 2924fd447e61..5046a3c9dec2 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -59,7 +59,7 @@
#include <asm/intel_rdt_sched.h>
#include <asm/proto.h>
-void __show_regs(struct pt_regs *regs, int all)
+void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
{
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
unsigned long d0, d1, d2, d3, d6, d7;
@@ -85,7 +85,7 @@ void __show_regs(struct pt_regs *regs, int all)
printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n",
(u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, regs->flags);
- if (!all)
+ if (mode != SHOW_REGS_ALL)
return;
cr0 = read_cr0();
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index a451bc374b9b..ea5ea850348d 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -62,7 +62,7 @@
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
/* Prints also some state that isn't saved in the pt_regs */
-void __show_regs(struct pt_regs *regs, int all)
+void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
{
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
unsigned long d0, d1, d2, d3, d6, d7;
@@ -87,9 +87,17 @@ void __show_regs(struct pt_regs *regs, int all)
printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
regs->r13, regs->r14, regs->r15);
- if (!all)
+ if (mode == SHOW_REGS_SHORT)
return;
+ if (mode == SHOW_REGS_USER) {
+ rdmsrl(MSR_FS_BASE, fs);
+ rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
+ printk(KERN_DEFAULT "FS: %016lx GS: %016lx\n",
+ fs, shadowgs);
+ return;
+ }
+
asm("movl %%ds,%0" : "=r" (ds));
asm("movl %%cs,%0" : "=r" (cs));
asm("movl %%es,%0" : "=r" (es));
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index be01328eb755..fddaefc51fb6 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -25,7 +25,7 @@
#include <asm/time.h>
#ifdef CONFIG_X86_64
-__visible volatile unsigned long jiffies __cacheline_aligned = INITIAL_JIFFIES;
+__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES;
#endif
unsigned long profile_pc(struct pt_regs *regs)
diff --git a/arch/x86/kernel/topology.c b/arch/x86/kernel/topology.c
index 12cbe2b88c0f..738bf42b0218 100644
--- a/arch/x86/kernel/topology.c
+++ b/arch/x86/kernel/topology.c
@@ -111,8 +111,10 @@ int arch_register_cpu(int num)
/*
* Currently CPU0 is only hotpluggable on Intel platforms. Other
* vendors can add hotplug support later.
+ * Xen PV guests don't support CPU0 hotplug at all.
*/
- if (c->x86_vendor != X86_VENDOR_INTEL)
+ if (c->x86_vendor != X86_VENDOR_INTEL ||
+ boot_cpu_has(X86_FEATURE_XENPV))
cpu0_hotpluggable = 0;
/*
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 1463468ba9a0..6d5dc5dabfd7 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -26,6 +26,7 @@
#include <asm/apic.h>
#include <asm/intel-family.h>
#include <asm/i8259.h>
+#include <asm/uv/uv.h>
unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
@@ -57,7 +58,7 @@ struct cyc2ns {
static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
-void cyc2ns_read_begin(struct cyc2ns_data *data)
+void __always_inline cyc2ns_read_begin(struct cyc2ns_data *data)
{
int seq, idx;
@@ -74,7 +75,7 @@ void cyc2ns_read_begin(struct cyc2ns_data *data)
} while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence)));
}
-void cyc2ns_read_end(void)
+void __always_inline cyc2ns_read_end(void)
{
preempt_enable_notrace();
}
@@ -103,7 +104,7 @@ void cyc2ns_read_end(void)
* -johnstul@us.ibm.com "math is hard, lets go shopping!"
*/
-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
+static __always_inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
struct cyc2ns_data data;
unsigned long long ns;
@@ -1415,7 +1416,7 @@ static bool __init determine_cpu_tsc_frequencies(bool early)
static unsigned long __init get_loops_per_jiffy(void)
{
- unsigned long lpj = tsc_khz * KHZ;
+ u64 lpj = (u64)tsc_khz * KHZ;
do_div(lpj, HZ);
return lpj;
@@ -1433,6 +1434,9 @@ void __init tsc_early_init(void)
{
if (!boot_cpu_has(X86_FEATURE_TSC))
return;
+ /* Don't change UV TSC multi-chassis synchronization */
+ if (is_early_uv_system())
+ return;
if (!determine_cpu_tsc_frequencies(true))
return;
loops_per_jiffy = get_loops_per_jiffy();
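
The lpj fix matters on 32-bit kernels: tsc_khz * KHZ no longer fits in a 32-bit unsigned long once the TSC runs faster than about 4.29 GHz, so the product must be computed in 64 bits before the division. A demonstration of the wraparound:

    /* Why the hunk widens lpj to u64: the 32-bit product wraps for a
     * TSC faster than ~4.29 GHz. */
    #include <stdint.h>
    #include <stdio.h>

    #define KHZ 1000
    #define HZ  250

    int main(void)
    {
        uint32_t tsc_khz = 4500000;     /* a 4.5 GHz part */

        uint32_t bad  = tsc_khz * KHZ;  /* wraps modulo 2^32 */
        uint64_t good = (uint64_t)tsc_khz * KHZ;

        printf("32-bit lpj: %u\n", bad / HZ);   /* nonsense value */
        printf("64-bit lpj: %llu\n",
               (unsigned long long)(good / HZ)); /* 18000000 */
        return 0;
    }
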
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 8bde0a419f86..5dd3317d761f 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -65,6 +65,23 @@ jiffies_64 = jiffies;
#define ALIGN_ENTRY_TEXT_BEGIN . = ALIGN(PMD_SIZE);
#define ALIGN_ENTRY_TEXT_END . = ALIGN(PMD_SIZE);
+/*
+ * This section contains data which will be mapped as decrypted. Memory
+ * encryption operates on a page basis. Make this section PMD-aligned
+ * to avoid splitting the pages while mapping the section early.
+ *
+ * Note: We use a separate section so that only this section gets
+ * decrypted to avoid exposing more than we wish.
+ */
+#define BSS_DECRYPTED \
+ . = ALIGN(PMD_SIZE); \
+ __start_bss_decrypted = .; \
+ *(.bss..decrypted); \
+ . = ALIGN(PAGE_SIZE); \
+ __start_bss_decrypted_unused = .; \
+ . = ALIGN(PMD_SIZE); \
+ __end_bss_decrypted = .; \
+
#else
#define X86_ALIGN_RODATA_BEGIN
@@ -74,6 +91,7 @@ jiffies_64 = jiffies;
#define ALIGN_ENTRY_TEXT_BEGIN
#define ALIGN_ENTRY_TEXT_END
+#define BSS_DECRYPTED
#endif
@@ -355,6 +373,7 @@ SECTIONS
__bss_start = .;
*(.bss..page_aligned)
*(.bss)
+ BSS_DECRYPTED
. = ALIGN(PAGE_SIZE);
__bss_stop = .;
}
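
In C code, variables are placed into this section with a __bss_decrypted marker, as the kvmclock hunk below shows. A single-file sketch of how such a marker is typically defined; the macro body here is an assumption for illustration, and only the section name comes from the script above:

    /* Sketch of a __bss_decrypted-style marker. The attribute form is
     * an assumption; only the ".bss..decrypted" name matches the hunk. */
    #define __bss_decrypted __attribute__((section(".bss..decrypted")))

    /* Data shared with the hypervisor; mapped decrypted under SME/SEV. */
    static unsigned long pvclock_page[512] __bss_decrypted;

    int main(void)
    {
        pvclock_page[0] = 1;    /* keep the object alive */
        return (int)pvclock_page[0] - 1;
    }
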
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 0cefba28c864..fbb0e6df121b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -548,7 +548,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
}
int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
- unsigned long ipi_bitmap_high, int min,
+ unsigned long ipi_bitmap_high, u32 min,
unsigned long icr, int op_64_bit)
{
int i;
@@ -571,18 +571,31 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
rcu_read_lock();
map = rcu_dereference(kvm->arch.apic_map);
+ if (min > map->max_apic_id)
+ goto out;
/* Bits above cluster_size are masked in the caller. */
- for_each_set_bit(i, &ipi_bitmap_low, BITS_PER_LONG) {
- vcpu = map->phys_map[min + i]->vcpu;
- count += kvm_apic_set_irq(vcpu, &irq, NULL);
+ for_each_set_bit(i, &ipi_bitmap_low,
+ min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
+ if (map->phys_map[min + i]) {
+ vcpu = map->phys_map[min + i]->vcpu;
+ count += kvm_apic_set_irq(vcpu, &irq, NULL);
+ }
}
min += cluster_size;
- for_each_set_bit(i, &ipi_bitmap_high, BITS_PER_LONG) {
- vcpu = map->phys_map[min + i]->vcpu;
- count += kvm_apic_set_irq(vcpu, &irq, NULL);
+
+ if (min > map->max_apic_id)
+ goto out;
+
+ for_each_set_bit(i, &ipi_bitmap_high,
+ min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
+ if (map->phys_map[min + i]) {
+ vcpu = map->phys_map[min + i]->vcpu;
+ count += kvm_apic_set_irq(vcpu, &irq, NULL);
+ }
}
+out:
rcu_read_unlock();
return count;
}
@@ -1331,9 +1344,8 @@ EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);
static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
- return kvm_apic_hw_enabled(apic) &&
- addr >= apic->base_address &&
- addr < apic->base_address + LAPIC_MMIO_LENGTH;
+ return addr >= apic->base_address &&
+ addr < apic->base_address + LAPIC_MMIO_LENGTH;
}
static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
@@ -1345,6 +1357,15 @@ static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
if (!apic_mmio_in_range(apic, address))
return -EOPNOTSUPP;
+ if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
+ if (!kvm_check_has_quirk(vcpu->kvm,
+ KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
+ return -EOPNOTSUPP;
+
+ memset(data, 0xff, len);
+ return 0;
+ }
+
kvm_lapic_reg_read(apic, offset, len, data);
return 0;
@@ -1904,6 +1925,14 @@ static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
if (!apic_mmio_in_range(apic, address))
return -EOPNOTSUPP;
+ if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
+ if (!kvm_check_has_quirk(vcpu->kvm,
+ KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
+ return -EOPNOTSUPP;
+
+ return 0;
+ }
+
/*
* APIC register must be aligned on 128-bits boundary.
* 32/64/128 bits registers must be accessed thru 32 bits.
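
The kvm_pv_send_ipi() fix clamps the bitmap scan to map->max_apic_id and skips unpopulated phys_map slots, so a guest-supplied bitmap can no longer index past the APIC map. A userspace model of the clamped walk, with the map contents and sizes invented:

    /* Model of the clamped bitmap walk; mirrors the hunk's min() bound
     * and NULL-check. Assumes a 64-bit build. */
    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_LONG 64
    #define MIN(a, b)     ((a) < (b) ? (a) : (b))

    struct vcpu { int id; };

    int main(void)
    {
        struct vcpu v5 = { 5 };
        struct vcpu *phys_map[8] = { [5] = &v5 };       /* sparse map */
        uint32_t max_apic_id = 7;
        uint64_t ipi_bitmap = (1ULL << 5) | (1ULL << 40); /* bit 40 bogus */
        uint32_t min = 0;
        int count = 0;

        if (min > max_apic_id)
            return 0;

        /* Never scan past the last valid APIC ID. */
        uint32_t nbits = MIN((uint32_t)BITS_PER_LONG,
                             max_apic_id - min + 1);

        for (uint32_t i = 0; i < nbits; i++) {
            if ((ipi_bitmap & (1ULL << i)) && phys_map[min + i])
                count++;        /* kvm_apic_set_irq() stand-in */
        }
        printf("delivered %d IPI(s)\n", count); /* 1; bit 40 ignored */
        return 0;
    }
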
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a282321329b5..51b953ad9d4e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -249,6 +249,17 @@ static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
*/
static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
+/*
+ * In some cases, we need to preserve the GFN of a non-present or reserved
+ * SPTE when we usurp the upper five bits of the physical address space to
+ * defend against L1TF, e.g. for MMIO SPTEs. To preserve the GFN, we'll
+ * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
+ * left into the reserved bits, i.e. the GFN in the SPTE will be split into
+ * high and low parts. This mask covers the lower bits of the GFN.
+ */
+static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
+
+
static void mmu_spte_set(u64 *sptep, u64 spte);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
@@ -357,9 +368,7 @@ static bool is_mmio_spte(u64 spte)
static gfn_t get_mmio_spte_gfn(u64 spte)
{
- u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
- shadow_nonpresent_or_rsvd_mask;
- u64 gpa = spte & ~mask;
+ u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
& shadow_nonpresent_or_rsvd_mask;
@@ -423,6 +432,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
static void kvm_mmu_reset_all_pte_masks(void)
{
+ u8 low_phys_bits;
+
shadow_user_mask = 0;
shadow_accessed_mask = 0;
shadow_dirty_mask = 0;
@@ -437,12 +448,17 @@ static void kvm_mmu_reset_all_pte_masks(void)
* appropriate mask to guard against L1TF attacks. Otherwise, it is
* assumed that the CPU is not vulnerable to L1TF.
*/
+ low_phys_bits = boot_cpu_data.x86_phys_bits;
if (boot_cpu_data.x86_phys_bits <
- 52 - shadow_nonpresent_or_rsvd_mask_len)
+ 52 - shadow_nonpresent_or_rsvd_mask_len) {
shadow_nonpresent_or_rsvd_mask =
rsvd_bits(boot_cpu_data.x86_phys_bits -
shadow_nonpresent_or_rsvd_mask_len,
boot_cpu_data.x86_phys_bits - 1);
+ low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
+ }
+ shadow_nonpresent_or_rsvd_lower_gfn_mask =
+ GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
}
static int is_cpuid_PSE36(void)
@@ -899,7 +915,7 @@ static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
/*
* Make sure the write to vcpu->mode is not reordered in front of
- * reads to sptes. If it does, kvm_commit_zap_page() can see us
+ * reads to sptes. If it does, kvm_mmu_commit_zap_page() can see us
* OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
*/
smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
@@ -1853,11 +1869,6 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
}
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
- return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
-}
-
int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
@@ -5217,7 +5228,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
void *insn, int insn_len)
{
- int r, emulation_type = EMULTYPE_RETRY;
+ int r, emulation_type = 0;
enum emulation_result er;
bool direct = vcpu->arch.mmu.direct_map;
@@ -5230,10 +5241,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
r = RET_PF_INVALID;
if (unlikely(error_code & PFERR_RSVD_MASK)) {
r = handle_mmio_page_fault(vcpu, cr2, direct);
- if (r == RET_PF_EMULATE) {
- emulation_type = 0;
+ if (r == RET_PF_EMULATE)
goto emulate;
- }
}
if (r == RET_PF_INVALID) {
@@ -5260,8 +5269,19 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
return 1;
}
- if (mmio_info_in_cache(vcpu, cr2, direct))
- emulation_type = 0;
+ /*
+ * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
+ * optimistically try to just unprotect the page and let the processor
+ * re-execute the instruction that caused the page fault. Do not allow
+ * retrying MMIO emulation, as it's not only pointless but could also
+ * cause us to enter an infinite loop because the processor will keep
+ * faulting on the non-existent MMIO address. Retrying an instruction
+ * from a nested guest is also pointless and dangerous as we are only
+ * explicitly shadowing L1's page tables, i.e. unprotecting something
+ * for L1 isn't going to magically fix whatever issue caused L2 to fail.
+ */
+ if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu))
+ emulation_type = EMULTYPE_ALLOW_RETRY;
emulate:
/*
* On AMD platforms, under certain conditions insn_len may be zero on #NPF.
@@ -5413,7 +5433,12 @@ void kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
- kvm_init_mmu(vcpu, true);
+ /*
+ * kvm_mmu_setup() is called only on vCPU initialization.
+ * Therefore, no need to reset mmu roots as they are not yet
+ * initialized.
+ */
+ kvm_init_mmu(vcpu, false);
}
static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
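
Because the L1TF mitigation stores the colliding high GFN bits shifted left by shadow_nonpresent_or_rsvd_mask_len, get_mmio_spte_gfn() has to reassemble the GPA from the low mask plus the shifted-back high bits. A self-contained round-trip model of that split, using an invented 36-bit physical address width:

    /* Round-trip model of the split-GFN scheme; PHYS_BITS and the
     * example GPA are made up, the masks follow the hunk's formulas. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define LEN         5   /* shadow_nonpresent_or_rsvd_mask_len */
    #define PHYS_BITS   36

    #define GENMASK_ULL(h, l) \
        (((~0ULL) >> (63 - (h))) & ~((1ULL << (l)) - 1))

    int main(void)
    {
        /* Bits [PHYS_BITS-LEN, PHYS_BITS-1] are usurped for L1TF. */
        uint64_t rsvd_mask = GENMASK_ULL(PHYS_BITS - 1, PHYS_BITS - LEN);
        uint64_t lower_gfn_mask = GENMASK_ULL(PHYS_BITS - LEN - 1,
                                              PAGE_SHIFT);

        uint64_t gpa = 0xF12345000ULL;  /* GPA with high GFN bits set */

        /* Encode: colliding bits move left by LEN into reserved bits. */
        uint64_t spte = (gpa & lower_gfn_mask) | ((gpa & rsvd_mask) << LEN);

        /* Decode, exactly as get_mmio_spte_gfn() now does. */
        uint64_t out = (spte & lower_gfn_mask) |
                       ((spte >> LEN) & rsvd_mask);

        printf("gpa %#llx -> spte %#llx -> gpa %#llx\n",
               (unsigned long long)gpa, (unsigned long long)spte,
               (unsigned long long)out);   /* round-trips intact */
        return 0;
    }
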
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 6276140044d0..61ccfb13899e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -436,14 +436,18 @@ static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
static inline bool svm_sev_enabled(void)
{
- return max_sev_asid;
+ return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
}
static inline bool sev_guest(struct kvm *kvm)
{
+#ifdef CONFIG_KVM_AMD_SEV
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
return sev->active;
+#else
+ return false;
+#endif
}
static inline int sev_get_asid(struct kvm *kvm)
@@ -776,7 +780,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
}
if (!svm->next_rip) {
- if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
+ if (kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) !=
EMULATE_DONE)
printk(KERN_DEBUG "%s: NOP\n", __func__);
return;
@@ -1226,8 +1230,7 @@ static __init int sev_hardware_setup(void)
min_sev_asid = cpuid_edx(0x8000001F);
/* Initialize SEV ASID bitmap */
- sev_asid_bitmap = kcalloc(BITS_TO_LONGS(max_sev_asid),
- sizeof(unsigned long), GFP_KERNEL);
+ sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
if (!sev_asid_bitmap)
return 1;
@@ -1405,7 +1408,7 @@ static __exit void svm_hardware_unsetup(void)
int cpu;
if (svm_sev_enabled())
- kfree(sev_asid_bitmap);
+ bitmap_free(sev_asid_bitmap);
for_each_possible_cpu(cpu)
svm_cpu_uninit(cpu);
@@ -2715,7 +2718,7 @@ static int gp_interception(struct vcpu_svm *svm)
WARN_ON_ONCE(!enable_vmware_backdoor);
- er = emulate_instruction(vcpu,
+ er = kvm_emulate_instruction(vcpu,
EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
if (er == EMULATE_USER_EXIT)
return 0;
@@ -2819,7 +2822,7 @@ static int io_interception(struct vcpu_svm *svm)
string = (io_info & SVM_IOIO_STR_MASK) != 0;
in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
if (string)
- return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+ return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
port = io_info >> 16;
size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
@@ -3861,7 +3864,7 @@ static int iret_interception(struct vcpu_svm *svm)
static int invlpg_interception(struct vcpu_svm *svm)
{
if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
- return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+ return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
return kvm_skip_emulated_instruction(&svm->vcpu);
@@ -3869,13 +3872,13 @@ static int invlpg_interception(struct vcpu_svm *svm)
static int emulate_on_interception(struct vcpu_svm *svm)
{
- return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+ return kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
}
static int rsm_interception(struct vcpu_svm *svm)
{
- return x86_emulate_instruction(&svm->vcpu, 0, 0,
- rsm_ins_bytes, 2) == EMULATE_DONE;
+ return kvm_emulate_instruction_from_buffer(&svm->vcpu,
+ rsm_ins_bytes, 2) == EMULATE_DONE;
}
static int rdpmc_interception(struct vcpu_svm *svm)
@@ -4700,7 +4703,7 @@ static int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
ret = avic_unaccel_trap_write(svm);
} else {
/* Handling Fault */
- ret = (emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
+ ret = (kvm_emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
}
return ret;
@@ -6747,7 +6750,7 @@ e_free:
static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
{
unsigned long vaddr, vaddr_end, next_vaddr;
- unsigned long dst_vaddr, dst_vaddr_end;
+ unsigned long dst_vaddr;
struct page **src_p, **dst_p;
struct kvm_sev_dbg debug;
unsigned long n;
@@ -6763,7 +6766,6 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
size = debug.len;
vaddr_end = vaddr + size;
dst_vaddr = debug.dst_uaddr;
- dst_vaddr_end = dst_vaddr + size;
for (; vaddr < vaddr_end; vaddr = next_vaddr) {
int len, s_off, d_off;
@@ -7150,6 +7152,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.check_intercept = svm_check_intercept,
.handle_external_intr = svm_handle_external_intr,
+ .request_immediate_exit = __kvm_request_immediate_exit,
+
.sched_in = svm_sched_in,
.pmu_ops = &amd_pmu_ops,
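
The ASID-bitmap conversion trades the open-coded kcalloc(BITS_TO_LONGS(...), ...) for the bitmap_zalloc()/bitmap_free() pair. What that pair abstracts, re-implemented in userspace for illustration (not the kernel's definitions):

    /* Userspace re-implementation of the bitmap_zalloc()/bitmap_free()
     * sizing, for illustration only. */
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define BITS_PER_LONG    (CHAR_BIT * sizeof(unsigned long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    static unsigned long *bitmap_zalloc(unsigned int nbits)
    {
        /* One allocation sized in longs, zero-initialized. */
        return calloc(BITS_TO_LONGS(nbits), sizeof(unsigned long));
    }

    static void bitmap_free(unsigned long *bitmap)
    {
        free(bitmap);
    }

    int main(void)
    {
        unsigned int max_sev_asid = 509;    /* invented example value */
        unsigned long *sev_asid_bitmap = bitmap_zalloc(max_sev_asid);

        if (!sev_asid_bitmap)
            return 1;
        printf("%zu longs\n", (size_t)BITS_TO_LONGS(max_sev_asid));
        bitmap_free(sev_asid_bitmap);
        return 0;
    }
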
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1d26f3c4985b..e665aa7167cf 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -121,7 +121,6 @@ module_param_named(pml, enable_pml, bool, S_IRUGO);
#define MSR_BITMAP_MODE_X2APIC 1
#define MSR_BITMAP_MODE_X2APIC_APICV 2
-#define MSR_BITMAP_MODE_LM 4
#define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL
@@ -397,6 +396,7 @@ struct loaded_vmcs {
int cpu;
bool launched;
bool nmi_known_unmasked;
+ bool hv_timer_armed;
/* Support for vnmi-less CPUs */
int soft_vnmi_blocked;
ktime_t entry_time;
@@ -856,6 +856,7 @@ struct nested_vmx {
/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
u64 vmcs01_debugctl;
+ u64 vmcs01_guest_bndcfgs;
u16 vpid02;
u16 last_vpid;
@@ -1019,6 +1020,8 @@ struct vcpu_vmx {
int ple_window;
bool ple_window_dirty;
+ bool req_immediate_exit;
+
/* Support for PML */
#define PML_ENTITY_NUM 512
struct page *pml_pg;
@@ -1569,8 +1572,12 @@ static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
goto out;
}
+ /*
+ * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs the address of the
+ * base of the EPT PML4 table; strip off the EPT configuration information.
+ */
ret = hyperv_flush_guest_mapping(
- to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer);
+ to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer & PAGE_MASK);
out:
spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
@@ -2864,6 +2871,8 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
u16 fs_sel, gs_sel;
int i;
+ vmx->req_immediate_exit = false;
+
if (vmx->loaded_cpu_state)
return;
@@ -2894,8 +2903,7 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
}
- if (is_long_mode(&vmx->vcpu))
- wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+ wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#else
savesegment(fs, fs_sel);
savesegment(gs, gs_sel);
@@ -2946,8 +2954,7 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
vmx->loaded_cpu_state = NULL;
#ifdef CONFIG_X86_64
- if (is_long_mode(&vmx->vcpu))
- rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+ rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
kvm_load_ldt(host_state->ldt_sel);
@@ -2975,24 +2982,19 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
#ifdef CONFIG_X86_64
static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
{
- if (is_long_mode(&vmx->vcpu)) {
- preempt_disable();
- if (vmx->loaded_cpu_state)
- rdmsrl(MSR_KERNEL_GS_BASE,
- vmx->msr_guest_kernel_gs_base);
- preempt_enable();
- }
+ preempt_disable();
+ if (vmx->loaded_cpu_state)
+ rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+ preempt_enable();
return vmx->msr_guest_kernel_gs_base;
}
static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
{
- if (is_long_mode(&vmx->vcpu)) {
- preempt_disable();
- if (vmx->loaded_cpu_state)
- wrmsrl(MSR_KERNEL_GS_BASE, data);
- preempt_enable();
- }
+ preempt_disable();
+ if (vmx->loaded_cpu_state)
+ wrmsrl(MSR_KERNEL_GS_BASE, data);
+ preempt_enable();
vmx->msr_guest_kernel_gs_base = data;
}
#endif
@@ -3528,9 +3530,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
- if (kvm_mpx_supported())
- msrs->exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
-
/* We support free control of debug control saving. */
msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
@@ -3547,8 +3546,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
VM_ENTRY_LOAD_IA32_PAT;
msrs->entry_ctls_high |=
(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
- if (kvm_mpx_supported())
- msrs->entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
/* We support free control of debug control loading. */
msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
@@ -3596,12 +3593,12 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
msrs->secondary_ctls_high);
msrs->secondary_ctls_low = 0;
msrs->secondary_ctls_high &=
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_DESC |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_WBINVD_EXITING;
+
/*
* We can emulate "VMCS shadowing," even if the hardware
* doesn't support it.
@@ -3658,6 +3655,10 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
msrs->secondary_ctls_high |=
SECONDARY_EXEC_UNRESTRICTED_GUEST;
+ if (flexpriority_enabled)
+ msrs->secondary_ctls_high |=
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+
/* miscellaneous data */
rdmsr(MSR_IA32_VMX_MISC,
msrs->misc_low,
@@ -5068,19 +5069,6 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
if (!msr)
return;
- /*
- * MSR_KERNEL_GS_BASE is not intercepted when the guest is in
- * 64-bit mode as a 64-bit kernel may frequently access the
- * MSR. This means we need to manually save/restore the MSR
- * when switching between guest and host state, but only if
- * the guest is in 64-bit mode. Sync our cached value if the
- * guest is transitioning to 32-bit mode and the CPU contains
- * guest state, i.e. the cache is stale.
- */
-#ifdef CONFIG_X86_64
- if (!(efer & EFER_LMA))
- (void)vmx_read_guest_kernel_gs_base(vmx);
-#endif
vcpu->arch.efer = efer;
if (efer & EFER_LMA) {
vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
@@ -5393,9 +5381,10 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
* To use VMXON (and later other VMX instructions), a guest
* must first be able to turn on cr4.VMXE (see handle_vmon()).
* So basically the check on whether to allow nested VMX
- * is here.
+ * is here. We operate under the default treatment of SMM,
+ * so VMX cannot be enabled under SMM.
*/
- if (!nested_vmx_allowed(vcpu))
+ if (!nested_vmx_allowed(vcpu) || is_smm(vcpu))
return 1;
}
@@ -6072,9 +6061,6 @@ static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
mode |= MSR_BITMAP_MODE_X2APIC_APICV;
}
- if (is_long_mode(vcpu))
- mode |= MSR_BITMAP_MODE_LM;
-
return mode;
}
@@ -6115,9 +6101,6 @@ static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
if (!changed)
return;
- vmx_set_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW,
- !(mode & MSR_BITMAP_MODE_LM));
-
if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
vmx_update_msr_bitmap_x2apic(msr_bitmap, mode);
@@ -6183,6 +6166,32 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
nested_mark_vmcs12_pages_dirty(vcpu);
}
+static u8 vmx_get_rvi(void)
+{
+ return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
+}
+
+static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ void *vapic_page;
+ u32 vppr;
+ int rvi;
+
+ if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
+ !nested_cpu_has_vid(get_vmcs12(vcpu)) ||
+ WARN_ON_ONCE(!vmx->nested.virtual_apic_page))
+ return false;
+
+ rvi = vmx_get_rvi();
+
+ vapic_page = kmap(vmx->nested.virtual_apic_page);
+ vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
+ kunmap(vmx->nested.virtual_apic_page);
+
+ return ((rvi & 0xf0) > (vppr & 0xf0));
+}
+
static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
bool nested)
{
@@ -6983,7 +6992,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
* Cause the #SS fault with 0 error code in VM86 mode.
*/
if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
- if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
+ if (kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE) {
if (vcpu->arch.halt_request) {
vcpu->arch.halt_request = 0;
return kvm_vcpu_halt(vcpu);
@@ -7054,7 +7063,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) {
WARN_ON_ONCE(!enable_vmware_backdoor);
- er = emulate_instruction(vcpu,
+ er = kvm_emulate_instruction(vcpu,
EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
if (er == EMULATE_USER_EXIT)
return 0;
@@ -7157,7 +7166,7 @@ static int handle_io(struct kvm_vcpu *vcpu)
++vcpu->stat.io_exits;
if (string)
- return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+ return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
port = exit_qualification >> 16;
size = (exit_qualification & 7) + 1;
@@ -7231,7 +7240,7 @@ static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
static int handle_desc(struct kvm_vcpu *vcpu)
{
WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP));
- return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+ return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
}
static int handle_cr(struct kvm_vcpu *vcpu)
@@ -7480,7 +7489,7 @@ static int handle_vmcall(struct kvm_vcpu *vcpu)
static int handle_invd(struct kvm_vcpu *vcpu)
{
- return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+ return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
}
static int handle_invlpg(struct kvm_vcpu *vcpu)
@@ -7547,7 +7556,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu)
return kvm_skip_emulated_instruction(vcpu);
}
}
- return emulate_instruction(vcpu, 0) == EMULATE_DONE;
+ return kvm_emulate_instruction(vcpu, 0) == EMULATE_DONE;
}
static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
@@ -7704,8 +7713,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
return kvm_skip_emulated_instruction(vcpu);
else
- return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP,
- NULL, 0) == EMULATE_DONE;
+ return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP) ==
+ EMULATE_DONE;
}
return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
@@ -7748,7 +7757,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
if (kvm_test_request(KVM_REQ_EVENT, vcpu))
return 1;
- err = emulate_instruction(vcpu, 0);
+ err = kvm_emulate_instruction(vcpu, 0);
if (err == EMULATE_USER_EXIT) {
++vcpu->stat.mmio_exits;
@@ -7966,6 +7975,9 @@ static __init int hardware_setup(void)
kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
}
+ if (!cpu_has_vmx_preemption_timer())
+ kvm_x86_ops->request_immediate_exit = __kvm_request_immediate_exit;
+
if (cpu_has_vmx_preemption_timer() && enable_preemption_timer) {
u64 vmx_msr;
@@ -9208,7 +9220,8 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
static int handle_preemption_timer(struct kvm_vcpu *vcpu)
{
- kvm_lapic_expired_hv_timer(vcpu);
+ if (!to_vmx(vcpu)->req_immediate_exit)
+ kvm_lapic_expired_hv_timer(vcpu);
return 1;
}
@@ -10214,15 +10227,16 @@ static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
if (!lapic_in_kernel(vcpu))
return;
+ if (!flexpriority_enabled &&
+ !cpu_has_vmx_virtualize_x2apic_mode())
+ return;
+
/* Postpone execution until vmcs01 is the current VMCS. */
if (is_guest_mode(vcpu)) {
to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true;
return;
}
- if (!cpu_need_tpr_shadow(vcpu))
- return;
-
sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
@@ -10344,6 +10358,14 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
return max_irr;
}
+static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
+{
+ u8 rvi = vmx_get_rvi();
+ u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
+
+ return ((rvi & 0xf0) > (vppr & 0xf0));
+}
+
static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
{
if (!kvm_vcpu_apicv_active(vcpu))
@@ -10595,24 +10617,43 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
msrs[i].host, false);
}
-static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
+static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val)
+{
+ vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, val);
+ if (!vmx->loaded_vmcs->hv_timer_armed)
+ vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
+ PIN_BASED_VMX_PREEMPTION_TIMER);
+ vmx->loaded_vmcs->hv_timer_armed = true;
+}
+
+static void vmx_update_hv_timer(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u64 tscl;
u32 delta_tsc;
- if (vmx->hv_deadline_tsc == -1)
+ if (vmx->req_immediate_exit) {
+ vmx_arm_hv_timer(vmx, 0);
return;
+ }
- tscl = rdtsc();
- if (vmx->hv_deadline_tsc > tscl)
- /* sure to be 32 bit only because checked on set_hv_timer */
- delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
- cpu_preemption_timer_multi);
- else
- delta_tsc = 0;
+ if (vmx->hv_deadline_tsc != -1) {
+ tscl = rdtsc();
+ if (vmx->hv_deadline_tsc > tscl)
+ /* set_hv_timer ensures the delta fits in 32-bits */
+ delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >>
+ cpu_preemption_timer_multi);
+ else
+ delta_tsc = 0;
- vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
+ vmx_arm_hv_timer(vmx, delta_tsc);
+ return;
+ }
+
+ if (vmx->loaded_vmcs->hv_timer_armed)
+ vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
+ PIN_BASED_VMX_PREEMPTION_TIMER);
+ vmx->loaded_vmcs->hv_timer_armed = false;
}
static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
@@ -10672,7 +10713,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
atomic_switch_perf_msrs(vmx);
- vmx_arm_hv_timer(vcpu);
+ vmx_update_hv_timer(vcpu);
/*
* If this vCPU has touched SPEC_CTRL, restore the guest's value if
@@ -11214,6 +11255,23 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
#undef cr4_fixed1_update
}
+static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (kvm_mpx_supported()) {
+ bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX);
+
+ if (mpx_enabled) {
+ vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
+ vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
+ } else {
+ vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS;
+ vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS;
+ }
+ }
+}
+
static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -11230,8 +11288,10 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
- if (nested_vmx_allowed(vcpu))
+ if (nested_vmx_allowed(vcpu)) {
nested_vmx_cr_fixed1_bits_update(vcpu);
+ nested_vmx_entry_exit_ctls_update(vcpu);
+ }
}
static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
@@ -11427,16 +11487,18 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (vcpu->arch.virtual_tsc_khz == 0)
- return;
-
- /* Make sure short timeouts reliably trigger an immediate vmexit.
- * hrtimer_start does not guarantee this. */
- if (preemption_timeout <= 1) {
+ /*
+ * A timer value of zero is architecturally guaranteed to cause
+ * a VMExit prior to executing any instructions in the guest.
+ */
+ if (preemption_timeout == 0) {
vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
return;
}
+ if (vcpu->arch.virtual_tsc_khz == 0)
+ return;
+
preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
preemption_timeout *= 1000000;
do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
@@ -11646,11 +11708,15 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
* bits 15:8 should be zero in posted_intr_nv,
* the descriptor address has already been checked
* in nested_get_vmcs12_pages.
+ *
+ * bits 5:0 of posted_intr_desc_addr should be zero.
*/
if (nested_cpu_has_posted_intr(vmcs12) &&
(!nested_cpu_has_vid(vmcs12) ||
!nested_exit_intr_ack_set(vcpu) ||
- vmcs12->posted_intr_nv & 0xff00))
+ (vmcs12->posted_intr_nv & 0xff00) ||
+ (vmcs12->posted_intr_desc_addr & 0x3f) ||
+ (!page_address_valid(vcpu, vmcs12->posted_intr_desc_addr))))
return -EINVAL;
/* tpr shadow is needed by all apicv features. */
@@ -11993,8 +12059,13 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
set_cr4_guest_host_mask(vmx);
- if (vmx_mpx_supported())
- vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+ if (kvm_mpx_supported()) {
+ if (vmx->nested.nested_run_pending &&
+ (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+ vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+ else
+ vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
+ }
if (enable_vpid) {
if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
@@ -12076,11 +12147,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
exec_control = vmcs12->pin_based_vm_exec_control;
- /* Preemption timer setting is only taken from vmcs01. */
- exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+ /* Preemption timer setting is computed directly in vmx_vcpu_run. */
exec_control |= vmcs_config.pin_based_exec_ctrl;
- if (vmx->hv_deadline_tsc == -1)
- exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+ exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
+ vmx->loaded_vmcs->hv_timer_armed = false;
/* Posted interrupts setting is only taken from vmcs12. */
if (nested_cpu_has_posted_intr(vmcs12)) {
@@ -12318,6 +12388,9 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+ if (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
if (nested_vmx_check_io_bitmap_controls(vcpu, vmcs12))
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
@@ -12537,12 +12610,21 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
bool from_vmentry = !!exit_qual;
u32 dummy_exit_qual;
+ bool evaluate_pending_interrupts;
int r = 0;
+ evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
+ (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
+ if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
+ evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
+
enter_guest_mode(vcpu);
if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
+ if (kvm_mpx_supported() &&
+ !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+ vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
vmx_segment_cache_clear(vmx);
@@ -12575,6 +12657,23 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
}
/*
+ * If L1 had a pending IRQ/NMI until it executed
+ * VMLAUNCH/VMRESUME which wasn't delivered because it was
+ * disallowed (e.g. interrupts disabled), L0 needs to
+ * evaluate whether this pending event should cause an exit from L2
+ * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't
+ * intercept EXTERNAL_INTERRUPT).
+ *
+ * Usually this would be handled by the processor noticing an
+ * IRQ/NMI window request, or checking RVI during evaluation of
+ * pending virtual interrupts. However, this setting was done
+ * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
+ * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
+ */
+ if (unlikely(evaluate_pending_interrupts))
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+ /*
* Note no nested_vmx_succeed or nested_vmx_fail here. At this point
* we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
* returned as far as L1 is concerned. It will only return (and set
@@ -12841,6 +12940,11 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
return 0;
}
+static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
+{
+ to_vmx(vcpu)->req_immediate_exit = true;
+}
+
static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
{
ktime_t remaining =
@@ -13231,12 +13335,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
- if (vmx->hv_deadline_tsc == -1)
- vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
- PIN_BASED_VMX_PREEMPTION_TIMER);
- else
- vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
- PIN_BASED_VMX_PREEMPTION_TIMER);
+
if (kvm_has_tsc_control)
decache_tsc_multiplier(vmx);
@@ -13440,18 +13539,12 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc)
return -ERANGE;
vmx->hv_deadline_tsc = tscl + delta_tsc;
- vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
- PIN_BASED_VMX_PREEMPTION_TIMER);
-
return delta_tsc == 0;
}
static void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- vmx->hv_deadline_tsc = -1;
- vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
- PIN_BASED_VMX_PREEMPTION_TIMER);
+ to_vmx(vcpu)->hv_deadline_tsc = -1;
}
#endif
@@ -13932,6 +14025,14 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
return -EINVAL;
+ /*
+ * SMM temporarily disables VMX, so we cannot be in guest mode,
+ * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
+ * must be zero.
+ */
+ if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags)
+ return -EINVAL;
+
if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
!(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
return -EINVAL;
@@ -13988,9 +14089,6 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
return -EINVAL;
- if (kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING)
- vmx->nested.nested_run_pending = 1;
-
vmx->nested.dirty_vmcs12 = true;
ret = enter_vmx_non_root_mode(vcpu, NULL);
if (ret)
@@ -14078,6 +14176,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.apicv_post_state_restore = vmx_apicv_post_state_restore,
.hwapic_irr_update = vmx_hwapic_irr_update,
.hwapic_isr_update = vmx_hwapic_isr_update,
+ .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
.sync_pir_to_irr = vmx_sync_pir_to_irr,
.deliver_posted_interrupt = vmx_deliver_posted_interrupt,
@@ -14111,6 +14210,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.umip_emulated = vmx_umip_emulated,
.check_nested_events = vmx_check_nested_events,
+ .request_immediate_exit = vmx_request_immediate_exit,
.sched_in = vmx_sched_in,
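
Both vmx_guest_apic_has_interrupt() and vmx_has_apicv_interrupt() above boil down to the same APICv test: an interrupt is deliverable only when the priority class (bits 7:4) of the highest requested vector, RVI, exceeds that of the processor priority, PPR. The comparison in isolation:

    /* The priority-class test shared by the two new helpers. */
    #include <stdint.h>
    #include <stdio.h>

    static int apicv_has_interrupt(uint8_t rvi, uint8_t vppr)
    {
        return (rvi & 0xf0) > (vppr & 0xf0);
    }

    int main(void)
    {
        printf("%d\n", apicv_has_interrupt(0x51, 0x40)); /* 1: class 5 > 4 */
        printf("%d\n", apicv_has_interrupt(0x4f, 0x40)); /* 0: same class */
        return 0;
    }
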
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 506bd2b4b8bb..ca717737347e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -628,7 +628,7 @@ bool pdptrs_changed(struct kvm_vcpu *vcpu)
gfn_t gfn;
int r;
- if (is_long_mode(vcpu) || !is_pae(vcpu))
+ if (is_long_mode(vcpu) || !is_pae(vcpu) || !is_paging(vcpu))
return false;
if (!test_bit(VCPU_EXREG_PDPTR,
@@ -2537,7 +2537,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_PLATFORM_INFO:
if (!msr_info->host_initiated ||
- data & ~MSR_PLATFORM_INFO_CPUID_FAULT ||
(!(data & MSR_PLATFORM_INFO_CPUID_FAULT) &&
cpuid_fault_enabled(vcpu)))
return 1;
@@ -2780,6 +2779,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vcpu->arch.osvw.status;
break;
case MSR_PLATFORM_INFO:
+ if (!msr_info->host_initiated &&
+ !vcpu->kvm->arch.guest_can_read_msr_platform_info)
+ return 1;
msr_info->data = vcpu->arch.msr_platform_info;
break;
case MSR_MISC_FEATURES_ENABLES:
@@ -2927,6 +2929,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_SPLIT_IRQCHIP:
case KVM_CAP_IMMEDIATE_EXIT:
case KVM_CAP_GET_MSR_FEATURES:
+ case KVM_CAP_MSR_PLATFORM_INFO:
r = 1;
break;
case KVM_CAP_SYNC_REGS:
@@ -4007,19 +4010,23 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size));
+ r = -EFAULT;
if (get_user(user_data_size, &user_kvm_nested_state->size))
- return -EFAULT;
+ break;
r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state,
user_data_size);
if (r < 0)
- return r;
+ break;
if (r > user_data_size) {
if (put_user(r, &user_kvm_nested_state->size))
- return -EFAULT;
- return -E2BIG;
+ r = -EFAULT;
+ else
+ r = -E2BIG;
+ break;
}
+
r = 0;
break;
}
@@ -4031,19 +4038,21 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
if (!kvm_x86_ops->set_nested_state)
break;
+ r = -EFAULT;
if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state)))
- return -EFAULT;
+ break;
+ r = -EINVAL;
if (kvm_state.size < sizeof(kvm_state))
- return -EINVAL;
+ break;
if (kvm_state.flags &
~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE))
- return -EINVAL;
+ break;
/* nested_run_pending implies guest_mode. */
if (kvm_state.flags == KVM_STATE_NESTED_RUN_PENDING)
- return -EINVAL;
+ break;
r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
break;
@@ -4350,6 +4359,10 @@ split_irqchip_unlock:
kvm->arch.pause_in_guest = true;
r = 0;
break;
+ case KVM_CAP_MSR_PLATFORM_INFO:
+ kvm->arch.guest_can_read_msr_platform_info = cap->args[0];
+ r = 0;
+ break;
default:
r = -EINVAL;
break;
@@ -4685,7 +4698,7 @@ static void kvm_init_msr_list(void)
*/
switch (msrs_to_save[i]) {
case MSR_IA32_BNDCFGS:
- if (!kvm_x86_ops->mpx_supported())
+ if (!kvm_mpx_supported())
continue;
break;
case MSR_TSC_AUX:
@@ -4987,7 +5000,7 @@ int handle_ud(struct kvm_vcpu *vcpu)
emul_type = 0;
}
- er = emulate_instruction(vcpu, emul_type);
+ er = kvm_emulate_instruction(vcpu, emul_type);
if (er == EMULATE_USER_EXIT)
return 0;
if (er != EMULATE_DONE)
@@ -5870,7 +5883,10 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
gpa_t gpa = cr2;
kvm_pfn_t pfn;
- if (emulation_type & EMULTYPE_NO_REEXECUTE)
+ if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
+ return false;
+
+ if (WARN_ON_ONCE(is_guest_mode(vcpu)))
return false;
if (!vcpu->arch.mmu.direct_map) {
@@ -5958,7 +5974,10 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
*/
vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
- if (!(emulation_type & EMULTYPE_RETRY))
+ if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
+ return false;
+
+ if (WARN_ON_ONCE(is_guest_mode(vcpu)))
return false;
if (x86_page_table_writing_insn(ctxt))
@@ -6276,7 +6295,19 @@ restart:
return r;
}
-EXPORT_SYMBOL_GPL(x86_emulate_instruction);
+
+int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
+{
+ return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_instruction);
+
+int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
+ void *insn, int insn_len)
+{
+ return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
+}
+EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
unsigned short port)
@@ -7343,6 +7374,12 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_vcpu_reload_apic_access_page);
+void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
+{
+ smp_send_reschedule(vcpu->cpu);
+}
+EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);
+
/*
* Returns 1 to let vcpu_run() continue the guest execution loop without
* exiting to the userspace. Otherwise, the value will be returned to the
@@ -7547,7 +7584,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (req_immediate_exit) {
kvm_make_request(KVM_REQ_EVENT, vcpu);
- smp_send_reschedule(vcpu->cpu);
+ kvm_x86_ops->request_immediate_exit(vcpu);
}
trace_kvm_entry(vcpu->vcpu_id);
@@ -7734,7 +7771,7 @@ static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
{
int r;
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
- r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
+ r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
if (r != EMULATE_DONE)
return 0;
@@ -7811,6 +7848,29 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
return 0;
}
+/* Swap (qemu) user FPU context for the guest FPU context. */
+static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
+ /* PKRU is separately restored in kvm_x86_ops->run. */
+ __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
+ ~XFEATURE_MASK_PKRU);
+ preempt_enable();
+ trace_kvm_fpu(1);
+}
+
+/* When vcpu_run ends, restore user space FPU context. */
+static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
+{
+ preempt_disable();
+ copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
+ copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
+ preempt_enable();
+ ++vcpu->stat.fpu_reload;
+ trace_kvm_fpu(0);
+}
+
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
int r;
@@ -8159,7 +8219,7 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
kvm_update_cpuid(vcpu);
idx = srcu_read_lock(&vcpu->kvm->srcu);
- if (!is_long_mode(vcpu) && is_pae(vcpu)) {
+ if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu)) {
load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
mmu_reset_needed = 1;
}
@@ -8388,29 +8448,6 @@ static void fx_init(struct kvm_vcpu *vcpu)
vcpu->arch.cr0 |= X86_CR0_ET;
}
-/* Swap (qemu) user FPU context for the guest FPU context. */
-void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
-{
- preempt_disable();
- copy_fpregs_to_fpstate(&vcpu->arch.user_fpu);
- /* PKRU is separately restored in kvm_x86_ops->run. */
- __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state,
- ~XFEATURE_MASK_PKRU);
- preempt_enable();
- trace_kvm_fpu(1);
-}
-
-/* When vcpu_run ends, restore user space FPU context. */
-void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
-{
- preempt_disable();
- copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
- copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state);
- preempt_enable();
- ++vcpu->stat.fpu_reload;
- trace_kvm_fpu(0);
-}
-
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
@@ -8834,6 +8871,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm->arch.kvmclock_offset = -ktime_get_boot_ns();
pvclock_update_vm_gtod_copy(kvm);
+ kvm->arch.guest_can_read_msr_platform_info = true;
+
INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
@@ -9182,6 +9221,13 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
kvm_page_track_flush_slot(kvm, slot);
}
+static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
+{
+ return (is_guest_mode(vcpu) &&
+ kvm_x86_ops->guest_apic_has_interrupt &&
+ kvm_x86_ops->guest_apic_has_interrupt(vcpu));
+}
+
static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
{
if (!list_empty_careful(&vcpu->async_pf.done))
@@ -9206,7 +9252,8 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
return true;
if (kvm_arch_interrupt_allowed(vcpu) &&
- kvm_cpu_has_interrupt(vcpu))
+ (kvm_cpu_has_interrupt(vcpu) ||
+ kvm_guest_apic_has_interrupt(vcpu)))
return true;
if (kvm_hv_has_stimer_pending(vcpu))
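
The emulation changes flip retry from opt-out (EMULTYPE_NO_REEXECUTE) to opt-in (EMULTYPE_ALLOW_RETRY) and refuse it outright for nested guests. A sketch of the resulting gating, with an invented flag value:

    /* Gating sketch for the opt-in retry flag; the bit value is
     * hypothetical, the checks mirror reexecute_instruction() and
     * retry_instruction(). */
    #include <stdbool.h>
    #include <stdio.h>

    #define EMULTYPE_ALLOW_RETRY (1 << 0)   /* hypothetical bit */

    static bool may_retry(int emulation_type, bool guest_mode)
    {
        if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
            return false;   /* retries are now opt-in */

        if (guest_mode)
            return false;   /* never unprotect on behalf of L2 */

        return true;
    }

    int main(void)
    {
        printf("%d\n", may_retry(EMULTYPE_ALLOW_RETRY, false)); /* 1 */
        printf("%d\n", may_retry(0, false));                    /* 0 */
        printf("%d\n", may_retry(EMULTYPE_ALLOW_RETRY, true));  /* 0 */
        return 0;
    }
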
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 257f27620bc2..67b9568613f3 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -274,6 +274,8 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
int page_num);
bool kvm_vector_hashing_enabled(void);
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
+ int emulation_type, void *insn, int insn_len);
#define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 7a8fc26c1115..faca978ebf9d 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -815,10 +815,14 @@ void free_kernel_image_pages(void *begin, void *end)
set_memory_np_noalias(begin_ul, len_pages);
}
+void __weak mem_encrypt_free_decrypted_mem(void) { }
+
void __ref free_initmem(void)
{
e820__reallocate_tables();
+ mem_encrypt_free_decrypted_mem();
+
free_kernel_image_pages(&__init_begin, &__init_end);
}
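
free_initmem() reaches mem_encrypt_free_decrypted_mem() through a __weak no-op default, which the mem_encrypt.c hunk below overrides when SME support is built in. The weak-symbol pattern in miniature; the strong override is sketched only in a comment, since it would live in a second object file:

    /* The __weak hook pattern used above: generic code supplies an
     * empty default, and an arch file defining the same symbol without
     * the weak attribute wins at link time. */
    #include <stdio.h>

    #define __weak __attribute__((weak))

    void __weak mem_encrypt_free_decrypted_mem(void) { }  /* no-op default */

    /* In another object file, a strong definition would replace it:
     *   void mem_encrypt_free_decrypted_mem(void) { ... re-encrypt ... }
     */

    int main(void)
    {
        mem_encrypt_free_decrypted_mem();   /* whichever got linked in */
        puts("freed init memory");
        return 0;
    }
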
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index b2de398d1fd3..006f373f54ab 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -348,6 +348,30 @@ bool sev_active(void)
EXPORT_SYMBOL(sev_active);
/* Architecture __weak replacement functions */
+void __init mem_encrypt_free_decrypted_mem(void)
+{
+ unsigned long vaddr, vaddr_end, npages;
+ int r;
+
+ vaddr = (unsigned long)__start_bss_decrypted_unused;
+ vaddr_end = (unsigned long)__end_bss_decrypted;
+ npages = (vaddr_end - vaddr) >> PAGE_SHIFT;
+
+ /*
+ * The unused memory range was mapped decrypted; change the encryption
+ * attribute from decrypted to encrypted before freeing it.
+ */
+ if (mem_encrypt_active()) {
+ r = set_memory_encrypted(vaddr, npages);
+ if (r) {
+ pr_warn("failed to free unused decrypted pages\n");
+ return;
+ }
+ }
+
+ free_init_pages("unused decrypted", vaddr, vaddr_end);
+}
+
void __init mem_encrypt_init(void)
{
if (!sme_me_mask)
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index e848a4811785..59274e2c1ac4 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -115,6 +115,8 @@ static inline void pgd_list_del(pgd_t *pgd)
#define UNSHARED_PTRS_PER_PGD \
(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
+#define MAX_UNSHARED_PTRS_PER_PGD \
+ max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD)
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
@@ -181,6 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
* and initialize the kernel pmds here.
*/
#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
+#define MAX_PREALLOCATED_PMDS MAX_UNSHARED_PTRS_PER_PGD
/*
* We allocate separate PMDs for the kernel part of the user page-table
@@ -189,6 +192,7 @@ static void pgd_dtor(pgd_t *pgd)
*/
#define PREALLOCATED_USER_PMDS (static_cpu_has(X86_FEATURE_PTI) ? \
KERNEL_PGD_PTRS : 0)
+#define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
@@ -210,7 +214,9 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS 0
+#define MAX_PREALLOCATED_PMDS 0
#define PREALLOCATED_USER_PMDS 0
+#define MAX_PREALLOCATED_USER_PMDS 0
#endif /* CONFIG_X86_PAE */
static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
@@ -269,7 +275,7 @@ static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
if (pgd_val(pgd) != 0) {
pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
- *pgdp = native_make_pgd(0);
+ pgd_clear(pgdp);
paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
pmd_free(mm, pmd);
@@ -428,8 +434,8 @@ static inline void _pgd_free(pgd_t *pgd)
pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
- pmd_t *u_pmds[PREALLOCATED_USER_PMDS];
- pmd_t *pmds[PREALLOCATED_PMDS];
+ pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
+ pmd_t *pmds[MAX_PREALLOCATED_PMDS];
pgd = _pgd_alloc();
@@ -494,7 +500,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
int changed = !pte_same(*ptep, entry);
if (changed && dirty)
- *ptep = entry;
+ set_pte(ptep, entry);
return changed;
}
@@ -509,7 +515,7 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
if (changed && dirty) {
- *pmdp = entry;
+ set_pmd(pmdp, entry);
/*
* We had a write-protection fault here and changed the pmd
* to be more permissive. No need to flush the TLB for that,
@@ -529,7 +535,7 @@ int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
VM_BUG_ON(address & ~HPAGE_PUD_MASK);
if (changed && dirty) {
- *pudp = entry;
+ set_pud(pudp, entry);
/*
* We had a write-protection fault here and changed the pud
* to be more permissive. No need to flush the TLB for that,
@@ -637,6 +643,15 @@ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
unsigned long address = __fix_to_virt(idx);
+#ifdef CONFIG_X86_64
+ /*
+ * Ensure that the static initial page tables are covering the
+ * fixmap completely.
+ */
+ BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
+ (FIXMAP_PMD_NUM * PTRS_PER_PTE));
+#endif
+
if (idx >= __end_of_fixed_addresses) {
BUG();
return;
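The new MAX_PREALLOCATED_* constants exist because PREALLOCATED_PMDS and PREALLOCATED_USER_PMDS expand to runtime-dependent expressions (they test static_cpu_has(X86_FEATURE_PTI)), which would make the on-stack arrays in pgd_alloc() variable-length arrays. A small sketch of the resulting bound-at-compile-time, use-a-prefix-at-runtime pattern (hypothetical sizes, not the kernel's):

#include <stdio.h>

#define MAX_ITEMS 512			/* compile-time upper bound */

static int runtime_items(int pti_enabled)
{
	return pti_enabled ? MAX_ITEMS : 0;	/* runtime-dependent count */
}

static void demo(int pti_enabled)
{
	void *slots[MAX_ITEMS];		/* fixed size, never a VLA */
	int i, n = runtime_items(pti_enabled);

	for (i = 0; i < n; i++)		/* only the first n entries are used */
		slots[i] = NULL;
	printf("using %d of %d slots%s\n", n, MAX_ITEMS,
	       n && !slots[0] ? " (cleared)" : "");
}

int main(void)
{
	demo(0);
	demo(1);
	return 0;
}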
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 05ca14222463..9959657127f4 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -85,10 +85,9 @@ pgd_t * __init efi_call_phys_prolog(void)
void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
+ load_fixmap_gdt(0);
load_cr3(save_pgd);
__flush_tlb_all();
-
- load_fixmap_gdt(0);
}
void __init efi_runtime_update_mappings(void)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2eeddd814653..0ca46e03b830 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -5,6 +5,7 @@
#include <linux/kexec.h>
#include <linux/slab.h>
+#include <xen/xen.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/interface/memory.h>
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index c85d1a88f476..2a9025343534 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -11,6 +11,7 @@
#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>
+#include <xen/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/start_info.h>
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 2fe5c9b1816b..dd461c0167ef 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1907,7 +1907,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
/* L3_k[511] -> level2_fixmap_pgt */
convert_pfn_mfn(level3_kernel_pgt);
- /* L3_k[511][506] -> level1_fixmap_pgt */
+ /* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */
convert_pfn_mfn(level2_fixmap_pgt);
/* We get [511][511] and have Xen's version of level2_kernel_pgt */
@@ -1952,7 +1952,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
- set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
+
+ for (i = 0; i < FIXMAP_PMD_NUM; i++) {
+ set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
+ PAGE_KERNEL_RO);
+ }
/* Pin down new L4 */
pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
index 33a783c77d96..b99585034dd2 100644
--- a/arch/x86/xen/platform-pci-unplug.c
+++ b/arch/x86/xen/platform-pci-unplug.c
@@ -23,6 +23,7 @@
#include <linux/io.h>
#include <linux/export.h>
+#include <xen/xen.h>
#include <xen/platform_pci.h>
#include "xen-ops.h"
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index 7d00d4ad44d4..0972184f3f19 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -3,6 +3,7 @@
#include <linux/interrupt.h>
#include <asm/xen/hypercall.h>
+#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
@@ -478,7 +479,7 @@ static void xen_convert_regs(const struct xen_pmu_regs *xen_regs,
irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
{
int err, ret = IRQ_NONE;
- struct pt_regs regs;
+ struct pt_regs regs = {0};
const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
uint8_t xenpmu_flags = get_xenpmu_flags();
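The "= {0}" above guards against leaking uninitialized stack data, on the assumption that the conversion path fills only some of the pt_regs fields before the struct is used. A trivial sketch of the guarantee aggregate zero-initialization gives (stand-in struct, not the real pt_regs):

#include <stdio.h>

struct regs {
	unsigned long ip, sp, flags;
};

int main(void)
{
	struct regs r = {0};	/* every member starts zeroed */

	r.ip = 0x1000;		/* partial fill, like the PMU path */
	printf("ip=%#lx sp=%lu flags=%lu\n", r.ip, r.sp, r.flags);
	return 0;
}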
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 516694937b7a..ea5d8d03e53b 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -4,6 +4,7 @@ config ZONE_DMA
config XTENSA
def_bool y
+ select ARCH_HAS_SG_CHAIN
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_NO_COHERENT_DMA_MMAP if !MMU
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 295c120ed099..d67e30faff9c 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -64,11 +64,7 @@ endif
vardirs := $(patsubst %,arch/xtensa/variants/%/,$(variant-y))
plfdirs := $(patsubst %,arch/xtensa/platforms/%/,$(platform-y))
-ifeq ($(KBUILD_SRC),)
-KBUILD_CPPFLAGS += $(patsubst %,-I%include,$(vardirs) $(plfdirs))
-else
KBUILD_CPPFLAGS += $(patsubst %,-I$(srctree)/%include,$(vardirs) $(plfdirs))
-endif
KBUILD_DEFCONFIG := iss_defconfig
diff --git a/arch/xtensa/platforms/iss/setup.c b/arch/xtensa/platforms/iss/setup.c
index f4bbb28026f8..58709e89a8ed 100644
--- a/arch/xtensa/platforms/iss/setup.c
+++ b/arch/xtensa/platforms/iss/setup.c
@@ -78,23 +78,28 @@ static struct notifier_block iss_panic_block = {
void __init platform_setup(char **p_cmdline)
{
+ static void *argv[COMMAND_LINE_SIZE / sizeof(void *)] __initdata;
+ static char cmdline[COMMAND_LINE_SIZE] __initdata;
int argc = simc_argc();
int argv_size = simc_argv_size();
if (argc > 1) {
- void **argv = alloc_bootmem(argv_size);
- char *cmdline = alloc_bootmem(argv_size);
- int i;
+ if (argv_size > sizeof(argv)) {
+ pr_err("%s: command line too long: argv_size = %d\n",
+ __func__, argv_size);
+ } else {
+ int i;
- cmdline[0] = 0;
- simc_argv((void *)argv);
+ cmdline[0] = 0;
+ simc_argv((void *)argv);
- for (i = 1; i < argc; ++i) {
- if (i > 1)
- strcat(cmdline, " ");
- strcat(cmdline, argv[i]);
+ for (i = 1; i < argc; ++i) {
+ if (i > 1)
+ strcat(cmdline, " ");
+ strcat(cmdline, argv[i]);
+ }
+ *p_cmdline = cmdline;
}
- *p_cmdline = cmdline;
}
atomic_notifier_chain_register(&panic_notifier_list, &iss_panic_block);
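The platform_setup() rewrite trades alloc_bootmem() for fixed __initdata buffers plus an explicit size check, refusing oversized input instead of allocating for it. A user-space sketch of the same bounded-static-buffer pattern, with a hypothetical CMDLINE_MAX standing in for COMMAND_LINE_SIZE:

#include <stdio.h>
#include <string.h>

#define CMDLINE_MAX 256

static char cmdline[CMDLINE_MAX];	/* fixed buffer, no allocation */

static int build_cmdline(int argc, char **argv, size_t need)
{
	int i;

	if (need > sizeof(cmdline)) {	/* mirrors the argv_size check */
		fprintf(stderr, "command line too long: %zu\n", need);
		return -1;
	}
	cmdline[0] = 0;
	for (i = 1; i < argc; i++) {
		if (i > 1)
			strcat(cmdline, " ");
		strcat(cmdline, argv[i]);
	}
	return 0;
}

int main(int argc, char **argv)
{
	size_t need = 0;
	int i;

	for (i = 1; i < argc; i++)
		need += strlen(argv[i]) + 1;
	if (!build_cmdline(argc, argv, need))
		printf("cmdline: %s\n", cmdline);
	return 0;
}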
diff --git a/block/Kconfig b/block/Kconfig
index 1f2469a0123c..f7045aa47edb 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -74,7 +74,6 @@ config BLK_DEV_BSG
config BLK_DEV_BSGLIB
bool "Block layer SG support v4 helper lib"
- default n
select BLK_DEV_BSG
select BLK_SCSI_REQUEST
help
@@ -107,7 +106,6 @@ config BLK_DEV_ZONED
config BLK_DEV_THROTTLING
bool "Block layer bio throttling support"
depends on BLK_CGROUP=y
- default n
---help---
Block layer bio throttling support. It can be used to limit
the IO rate to a device. IO rate policies are per cgroup and
@@ -119,7 +117,6 @@ config BLK_DEV_THROTTLING
config BLK_DEV_THROTTLING_LOW
bool "Block throttling .low limit interface support (EXPERIMENTAL)"
depends on BLK_DEV_THROTTLING
- default n
---help---
Add .low limit interface for block throttling. The low limit is a best
effort limit to prioritize cgroups. Depending on the setting, the limit
@@ -130,7 +127,6 @@ config BLK_DEV_THROTTLING_LOW
config BLK_CMDLINE_PARSER
bool "Block device command line partition parser"
- default n
---help---
Enabling this option allows you to specify the partition layout from
the kernel boot args. This is typically of use for embedded devices
@@ -141,7 +137,6 @@ config BLK_CMDLINE_PARSER
config BLK_WBT
bool "Enable support for block device writeback throttling"
- default n
---help---
Enabling this option enables the block layer to throttle buffered
background writeback from the VM, making it more smooth and having
@@ -152,7 +147,6 @@ config BLK_WBT
config BLK_CGROUP_IOLATENCY
bool "Enable support for latency based cgroup IO protection"
depends on BLK_CGROUP=y
- default n
---help---
Enabling this option enables the .latency interface for IO throttling.
The IO controller will attempt to maintain average IO latencies below
@@ -163,7 +157,6 @@ config BLK_CGROUP_IOLATENCY
config BLK_WBT_SQ
bool "Single queue writeback throttling"
- default n
depends on BLK_WBT
---help---
Enable writeback throttling by default on legacy single queue devices
@@ -228,4 +221,7 @@ config BLK_MQ_RDMA
depends on BLOCK && INFINIBAND
default y
+config BLK_PM
+ def_bool BLOCK && PM
+
source block/Kconfig.iosched
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index a4a8914bf7a4..f95a48b0d7b2 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -36,7 +36,6 @@ config IOSCHED_CFQ
config CFQ_GROUP_IOSCHED
bool "CFQ Group Scheduling support"
depends on IOSCHED_CFQ && BLK_CGROUP
- default n
---help---
Enable group IO scheduling in CFQ.
@@ -82,7 +81,6 @@ config MQ_IOSCHED_KYBER
config IOSCHED_BFQ
tristate "BFQ I/O scheduler"
- default n
---help---
BFQ I/O scheduler for BLK-MQ. BFQ distributes the bandwidth of
of the device among all processes according to their weights,
@@ -94,7 +92,6 @@ config IOSCHED_BFQ
config BFQ_GROUP_IOSCHED
bool "BFQ hierarchical scheduling support"
depends on IOSCHED_BFQ && BLK_CGROUP
- default n
---help---
Enable hierarchical scheduling in BFQ, using the blkio
diff --git a/block/Makefile b/block/Makefile
index 572b33f32c07..27eac600474f 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -37,3 +37,4 @@ obj-$(CONFIG_BLK_WBT) += blk-wbt.o
obj-$(CONFIG_BLK_DEBUG_FS) += blk-mq-debugfs.o
obj-$(CONFIG_BLK_DEBUG_FS_ZONED)+= blk-mq-debugfs-zoned.o
obj-$(CONFIG_BLK_SED_OPAL) += sed-opal.o
+obj-$(CONFIG_BLK_PM) += blk-pm.o
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 9fe5952d117d..d9a7916ff0ab 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -642,7 +642,7 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
uint64_t serial_nr;
rcu_read_lock();
- serial_nr = bio_blkcg(bio)->css.serial_nr;
+ serial_nr = __bio_blkcg(bio)->css.serial_nr;
/*
* Check whether blkcg has changed. The condition may trigger
@@ -651,7 +651,7 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
goto out;
- bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
+ bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
/*
* Update blkg_path for bfq_log_* functions. We cache this
* path, and update it here, for the following
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 653100fb719e..6075100f03a5 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -624,12 +624,13 @@ void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq)
}
/*
- * Tell whether there are active queues or groups with differentiated weights.
+ * Tell whether there are active queues with different weights or
+ * active groups.
*/
-static bool bfq_differentiated_weights(struct bfq_data *bfqd)
+static bool bfq_varied_queue_weights_or_active_groups(struct bfq_data *bfqd)
{
/*
- * For weights to differ, at least one of the trees must contain
+ * For queue weights to differ, queue_weights_tree must contain
* at least two nodes.
*/
return (!RB_EMPTY_ROOT(&bfqd->queue_weights_tree) &&
@@ -637,9 +638,7 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd)
bfqd->queue_weights_tree.rb_node->rb_right)
#ifdef CONFIG_BFQ_GROUP_IOSCHED
) ||
- (!RB_EMPTY_ROOT(&bfqd->group_weights_tree) &&
- (bfqd->group_weights_tree.rb_node->rb_left ||
- bfqd->group_weights_tree.rb_node->rb_right)
+ (bfqd->num_active_groups > 0
#endif
);
}
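The check above leans on an invariant of queue_weights_tree: it stores one counter node per distinct active weight, so a non-empty root that has any child already proves at least two distinct queue weights. A minimal sketch of that two-or-more-nodes test with a stand-in node type (not the kernel's rbtree API):

#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *left, *right;
};

static bool two_or_more_weights(const struct node *root)
{
	return root && (root->left || root->right);
}

int main(void)
{
	struct node a = {0}, b = {0};

	printf("%d\n", two_or_more_weights(NULL));	/* 0: no weights */
	printf("%d\n", two_or_more_weights(&a));	/* 0: one weight */
	a.left = &b;
	printf("%d\n", two_or_more_weights(&a));	/* 1: two weights */
	return 0;
}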
@@ -657,26 +656,25 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd)
* 3) all active groups at the same level in the groups tree have the same
* number of children.
*
- * Unfortunately, keeping the necessary state for evaluating exactly the
- * above symmetry conditions would be quite complex and time-consuming.
- * Therefore this function evaluates, instead, the following stronger
- * sub-conditions, for which it is much easier to maintain the needed
- * state:
+ * Unfortunately, keeping the necessary state for evaluating exactly
+ * the last two symmetry sub-conditions above would be quite complex
+ * and time-consuming. Therefore this function evaluates, instead,
+ * only the following two stronger sub-conditions, for which it is
+ * much easier to maintain the needed state:
* 1) all active queues have the same weight,
- * 2) all active groups have the same weight,
- * 3) all active groups have at most one active child each.
- * In particular, the last two conditions are always true if hierarchical
- * support and the cgroups interface are not enabled, thus no state needs
- * to be maintained in this case.
+ * 2) there are no active groups.
+ * In particular, the last condition is always true if hierarchical
+ * support or the cgroups interface are not enabled, thus no state
+ * needs to be maintained in this case.
*/
static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
{
- return !bfq_differentiated_weights(bfqd);
+ return !bfq_varied_queue_weights_or_active_groups(bfqd);
}
/*
* If the weight-counter tree passed as input contains no counter for
- * the weight of the input entity, then add that counter; otherwise just
+ * the weight of the input queue, then add that counter; otherwise just
* increment the existing counter.
*
* Note that weight-counter trees contain few nodes in mostly symmetric
@@ -687,25 +685,25 @@ static bool bfq_symmetric_scenario(struct bfq_data *bfqd)
* In most scenarios, the rate at which nodes are created/destroyed
* should be low too.
*/
-void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
+void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct rb_root *root)
{
+ struct bfq_entity *entity = &bfqq->entity;
struct rb_node **new = &(root->rb_node), *parent = NULL;
/*
- * Do not insert if the entity is already associated with a
+ * Do not insert if the queue is already associated with a
* counter, which happens if:
- * 1) the entity is associated with a queue,
- * 2) a request arrival has caused the queue to become both
+ * 1) a request arrival has caused the queue to become both
* non-weight-raised, and hence change its weight, and
* backlogged; in this respect, each of the two events
* causes an invocation of this function,
- * 3) this is the invocation of this function caused by the
+ * 2) this is the invocation of this function caused by the
* second event. This second invocation is actually useless,
* and we handle this fact by exiting immediately. More
* efficient or clearer solutions might possibly be adopted.
*/
- if (entity->weight_counter)
+ if (bfqq->weight_counter)
return;
while (*new) {
@@ -715,7 +713,7 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
parent = *new;
if (entity->weight == __counter->weight) {
- entity->weight_counter = __counter;
+ bfqq->weight_counter = __counter;
goto inc_counter;
}
if (entity->weight < __counter->weight)
@@ -724,66 +722,67 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
new = &((*new)->rb_right);
}
- entity->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
- GFP_ATOMIC);
+ bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
+ GFP_ATOMIC);
/*
* In the unlucky event of an allocation failure, we just
- * exit. This will cause the weight of entity to not be
- * considered in bfq_differentiated_weights, which, in its
- * turn, causes the scenario to be deemed wrongly symmetric in
- * case entity's weight would have been the only weight making
- * the scenario asymmetric. On the bright side, no unbalance
- * will however occur when entity becomes inactive again (the
- * invocation of this function is triggered by an activation
- * of entity). In fact, bfq_weights_tree_remove does nothing
- * if !entity->weight_counter.
+	 * exit. This will cause the weight of the queue not to be
+ * considered in bfq_varied_queue_weights_or_active_groups,
+ * which, in its turn, causes the scenario to be deemed
+ * wrongly symmetric in case bfqq's weight would have been
+ * the only weight making the scenario asymmetric. On the
+ * bright side, no unbalance will however occur when bfqq
+ * becomes inactive again (the invocation of this function
+	 * is triggered by an activation of the queue). In fact,
+ * bfq_weights_tree_remove does nothing if
+ * !bfqq->weight_counter.
*/
- if (unlikely(!entity->weight_counter))
+ if (unlikely(!bfqq->weight_counter))
return;
- entity->weight_counter->weight = entity->weight;
- rb_link_node(&entity->weight_counter->weights_node, parent, new);
- rb_insert_color(&entity->weight_counter->weights_node, root);
+ bfqq->weight_counter->weight = entity->weight;
+ rb_link_node(&bfqq->weight_counter->weights_node, parent, new);
+ rb_insert_color(&bfqq->weight_counter->weights_node, root);
inc_counter:
- entity->weight_counter->num_active++;
+ bfqq->weight_counter->num_active++;
}
/*
- * Decrement the weight counter associated with the entity, and, if the
+ * Decrement the weight counter associated with the queue, and, if the
* counter reaches 0, remove the counter from the tree.
* See the comments to the function bfq_weights_tree_add() for considerations
* about overhead.
*/
void __bfq_weights_tree_remove(struct bfq_data *bfqd,
- struct bfq_entity *entity,
+ struct bfq_queue *bfqq,
struct rb_root *root)
{
- if (!entity->weight_counter)
+ if (!bfqq->weight_counter)
return;
- entity->weight_counter->num_active--;
- if (entity->weight_counter->num_active > 0)
+ bfqq->weight_counter->num_active--;
+ if (bfqq->weight_counter->num_active > 0)
goto reset_entity_pointer;
- rb_erase(&entity->weight_counter->weights_node, root);
- kfree(entity->weight_counter);
+ rb_erase(&bfqq->weight_counter->weights_node, root);
+ kfree(bfqq->weight_counter);
reset_entity_pointer:
- entity->weight_counter = NULL;
+ bfqq->weight_counter = NULL;
}
/*
- * Invoke __bfq_weights_tree_remove on bfqq and all its inactive
- * parent entities.
+ * Invoke __bfq_weights_tree_remove on bfqq and decrement the number
+ * of active groups for each queue's inactive parent entity.
*/
void bfq_weights_tree_remove(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
struct bfq_entity *entity = bfqq->entity.parent;
- __bfq_weights_tree_remove(bfqd, &bfqq->entity,
+ __bfq_weights_tree_remove(bfqd, bfqq,
&bfqd->queue_weights_tree);
for_each_entity(entity) {
@@ -797,17 +796,13 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
* next_in_service for details on why
* in_service_entity must be checked too).
*
- * As a consequence, the weight of entity is
- * not to be removed. In addition, if entity
- * is active, then its parent entities are
- * active as well, and thus their weights are
- * not to be removed either. In the end, this
- * loop must stop here.
+ * As a consequence, its parent entities are
+ * active as well, and thus this loop must
+ * stop here.
*/
break;
}
- __bfq_weights_tree_remove(bfqd, entity,
- &bfqd->group_weights_tree);
+ bfqd->num_active_groups--;
}
}
@@ -3182,6 +3177,13 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd,
jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4);
}
+static bool bfq_bfqq_injectable(struct bfq_queue *bfqq)
+{
+ return BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 &&
+ blk_queue_nonrot(bfqq->bfqd->queue) &&
+ bfqq->bfqd->hw_tag;
+}
+
/**
* bfq_bfqq_expire - expire a queue.
* @bfqd: device owning the queue.
@@ -3291,6 +3293,8 @@ void bfq_bfqq_expire(struct bfq_data *bfqd,
if (ref == 1) /* bfqq is gone, no more actions on it */
return;
+ bfqq->injected_service = 0;
+
/* mark bfqq as waiting a request only if a bic still points to it */
if (!bfq_bfqq_busy(bfqq) &&
reason != BFQQE_BUDGET_TIMEOUT &&
@@ -3497,9 +3501,11 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
* symmetric scenario where:
* (i) each of these processes must get the same throughput as
* the others;
- * (ii) all these processes have the same I/O pattern
- (either sequential or random).
- * In fact, in such a scenario, the drive will tend to treat
+ * (ii) the I/O of each process has the same properties, in
+ * terms of locality (sequential or random), direction
+ * (reads or writes), request sizes, greediness
+ * (from I/O-bound to sporadic), and so on.
+ * In fact, in such a scenario, the drive tends to treat
* the requests of each of these processes in about the same
* way as the requests of the others, and thus to provide
* each of these processes with about the same throughput
@@ -3508,18 +3514,50 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
* certainly needed to guarantee that bfqq receives its
* assigned fraction of the device throughput (see [1] for
* details).
+ * The problem is that idling may significantly reduce
+ * throughput with certain combinations of types of I/O and
+ * devices. An important example is sync random I/O, on flash
+ * storage with command queueing. So, unless bfqq falls in the
+ * above cases where idling also boosts throughput, it would
+ * be important to check conditions (i) and (ii) accurately,
+ * so as to avoid idling when not strictly needed for service
+ * guarantees.
+ *
+ * Unfortunately, it is extremely difficult to thoroughly
+ * check condition (ii). And, in case there are active groups,
+ * it becomes very difficult to check condition (i) too. In
+ * fact, if there are active groups, then, for condition (i)
+ * to become false, it is enough that an active group contains
+ * more active processes or sub-groups than some other active
+ * group. We address this issue with the following bi-modal
+ * behavior, implemented in the function
+ * bfq_symmetric_scenario().
*
- * We address this issue by controlling, actually, only the
- * symmetry sub-condition (i), i.e., provided that
- * sub-condition (i) holds, idling is not performed,
- * regardless of whether sub-condition (ii) holds. In other
- * words, only if sub-condition (i) holds, then idling is
+ * If there are active groups, then the scenario is tagged as
+ * asymmetric, conservatively, without checking any of the
+ * conditions (i) and (ii). So the device is idled for bfqq.
+	 * This behavior also matches the fact that groups are created
+ * exactly if controlling I/O (to preserve bandwidth and
+ * latency guarantees) is a primary concern.
+ *
+ * On the opposite end, if there are no active groups, then
+ * only condition (i) is actually controlled, i.e., provided
+ * that condition (i) holds, idling is not performed,
+ * regardless of whether condition (ii) holds. In other words,
+ * only if condition (i) does not hold, then idling is
* allowed, and the device tends to be prevented from queueing
- * many requests, possibly of several processes. The reason
- * for not controlling also sub-condition (ii) is that we
- * exploit preemption to preserve guarantees in case of
- * symmetric scenarios, even if (ii) does not hold, as
- * explained in the next two paragraphs.
+ * many requests, possibly of several processes. Since there
+ * are no active groups, then, to control condition (i) it is
+ * enough to check whether all active queues have the same
+ * weight.
+ *
+ * Not checking condition (ii) evidently exposes bfqq to the
+ * risk of getting less throughput than its fair share.
+ * However, for queues with the same weight, a further
+ * mechanism, preemption, mitigates or even eliminates this
+ * problem. And it does so without consequences on overall
+ * throughput. This mechanism and its benefits are explained
+ * in the next three paragraphs.
*
* Even if a queue, say Q, is expired when it remains idle, Q
* can still preempt the new in-service queue if the next
@@ -3533,11 +3571,7 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
* idling allows the internal queues of the device to contain
* many requests, and thus to reorder requests, we can rather
* safely assume that the internal scheduler still preserves a
- * minimum of mid-term fairness. The motivation for using
- * preemption instead of idling is that, by not idling,
- * service guarantees are preserved without minimally
- * sacrificing throughput. In other words, both a high
- * throughput and its desired distribution are obtained.
+ * minimum of mid-term fairness.
*
* More precisely, this preemption-based, idleless approach
* provides fairness in terms of IOPS, and not sectors per
@@ -3556,22 +3590,27 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
* 1024/8 times as high as the service received by the other
* queue.
*
- * On the other hand, device idling is performed, and thus
- * pure sector-domain guarantees are provided, for the
- * following queues, which are likely to need stronger
- * throughput guarantees: weight-raised queues, and queues
- * with a higher weight than other queues. When such queues
- * are active, sub-condition (i) is false, which triggers
- * device idling.
+ * The motivation for using preemption instead of idling (for
+ * queues with the same weight) is that, by not idling,
+ * service guarantees are preserved (completely or at least in
+ * part) without minimally sacrificing throughput. And, if
+ * there is no active group, then the primary expectation for
+ * this device is probably a high throughput.
*
- * According to the above considerations, the next variable is
- * true (only) if sub-condition (i) holds. To compute the
- * value of this variable, we not only use the return value of
- * the function bfq_symmetric_scenario(), but also check
- * whether bfqq is being weight-raised, because
- * bfq_symmetric_scenario() does not take into account also
- * weight-raised queues (see comments on
- * bfq_weights_tree_add()).
+ * We are now left only with explaining the additional
+ * compound condition that is checked below for deciding
+ * whether the scenario is asymmetric. To explain this
+ * compound condition, we need to add that the function
+ * bfq_symmetric_scenario checks the weights of only
+ * non-weight-raised queues, for efficiency reasons (see
+ * comments on bfq_weights_tree_add()). Then the fact that
+ * bfqq is weight-raised is checked explicitly here. More
+ * precisely, the compound condition below takes into account
+ * also the fact that, even if bfqq is being weight-raised,
+ * the scenario is still symmetric if all active queues happen
+ * to be weight-raised. Actually, we should be even more
+ * precise here, and differentiate between interactive weight
+ * raising and soft real-time weight raising.
*
* As a side note, it is worth considering that the above
* device-idling countermeasures may however fail in the
@@ -3583,7 +3622,8 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
* to let requests be served in the desired order until all
* the requests already queued in the device have been served.
*/
- asymmetric_scenario = bfqq->wr_coeff > 1 ||
+ asymmetric_scenario = (bfqq->wr_coeff > 1 &&
+ bfqd->wr_busy_queues < bfqd->busy_queues) ||
!bfq_symmetric_scenario(bfqd);
/*
@@ -3629,6 +3669,30 @@ static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq)
return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq);
}
+static struct bfq_queue *bfq_choose_bfqq_for_injection(struct bfq_data *bfqd)
+{
+ struct bfq_queue *bfqq;
+
+ /*
+	 * This is a linear search, but with high probability very few
+ * steps are needed to find a candidate queue, i.e., a queue
+ * with enough budget left for its next request. In fact:
+ * - BFQ dynamically updates the budget of every queue so as
+ * to accommodate the expected backlog of the queue;
+ * - if a queue gets all its requests dispatched as injected
+ * service, then the queue is removed from the active list
+ * (and re-added only if it gets new requests, but with
+ * enough budget for its new backlog).
+ */
+ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list)
+ if (!RB_EMPTY_ROOT(&bfqq->sort_list) &&
+ bfq_serv_to_charge(bfqq->next_rq, bfqq) <=
+ bfq_bfqq_budget_left(bfqq))
+ return bfqq;
+
+ return NULL;
+}
+
/*
* Select a queue for service. If we have a current queue in service,
* check whether to continue servicing it, or retrieve and set a new one.
@@ -3710,10 +3774,19 @@ check_queue:
* No requests pending. However, if the in-service queue is idling
* for a new request, or has requests waiting for a completion and
* may idle after their completion, then keep it anyway.
+ *
+ * Yet, to boost throughput, inject service from other queues if
+ * possible.
*/
if (bfq_bfqq_wait_request(bfqq) ||
(bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) {
- bfqq = NULL;
+ if (bfq_bfqq_injectable(bfqq) &&
+ bfqq->injected_service * bfqq->inject_coeff <
+ bfqq->entity.service * 10)
+ bfqq = bfq_choose_bfqq_for_injection(bfqd);
+ else
+ bfqq = NULL;
+
goto keep_queue;
}
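The injection gate above, injected_service * inject_coeff < entity.service * 10, caps injected service as a function of the coefficient: with inject_coeff == 1, injection stays allowed until injected service reaches ten times the queue's own service, i.e. roughly 90% of the total, matching the coefficient table added to bfq-iosched.h further down. A worked check of that arithmetic (plain sketch, not kernel code):

#include <stdbool.h>
#include <stdio.h>

static bool may_inject(unsigned int injected, unsigned int coeff,
		       unsigned int service)
{
	return injected * coeff < service * 10;
}

int main(void)
{
	unsigned int service = 100;

	/* coeff 1: injection allowed until injected reaches 10x service */
	printf("%d\n", may_inject(999, 1, service));	/* 1 */
	printf("%d\n", may_inject(1000, 1, service));	/* 0 */

	/* coeff 10: injection capped at parity with own service */
	printf("%d\n", may_inject(99, 10, service));	/* 1 */
	printf("%d\n", may_inject(100, 10, service));	/* 0 */
	return 0;
}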
@@ -3803,6 +3876,14 @@ static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
bfq_dispatch_remove(bfqd->queue, rq);
+ if (bfqq != bfqd->in_service_queue) {
+ if (likely(bfqd->in_service_queue))
+ bfqd->in_service_queue->injected_service +=
+ bfq_serv_to_charge(rq, bfqq);
+
+ goto return_rq;
+ }
+
/*
* If weight raising has to terminate for bfqq, then next
* function causes an immediate update of bfqq's weight,
@@ -3821,13 +3902,12 @@ static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd,
* belongs to CLASS_IDLE and other queues are waiting for
* service.
*/
- if (bfqd->busy_queues > 1 && bfq_class_idle(bfqq))
- goto expire;
-
- return rq;
+ if (!(bfqd->busy_queues > 1 && bfq_class_idle(bfqq)))
+ goto return_rq;
-expire:
bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED);
+
+return_rq:
return rq;
}
@@ -4232,6 +4312,13 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfq_mark_bfqq_has_short_ttime(bfqq);
bfq_mark_bfqq_sync(bfqq);
bfq_mark_bfqq_just_created(bfqq);
+ /*
+ * Aggressively inject a lot of service: up to 90%.
+		 * This coefficient remains constant during the bfqq's
+		 * lifetime, but this behavior might be changed after enough
+ * testing and tuning.
+ */
+ bfqq->inject_coeff = 1;
} else
bfq_clear_bfqq_sync(bfqq);
@@ -4297,7 +4384,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
rcu_read_lock();
- bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio));
+ bfqg = bfq_find_set_group(bfqd, __bio_blkcg(bio));
if (!bfqg) {
bfqq = &bfqd->oom_bfqq;
goto out;
@@ -5330,7 +5417,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
bfqd->queue_weights_tree = RB_ROOT;
- bfqd->group_weights_tree = RB_ROOT;
+ bfqd->num_active_groups = 0;
INIT_LIST_HEAD(&bfqd->active_list);
INIT_LIST_HEAD(&bfqd->idle_list);
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index a8a2e5aca4d4..77651d817ecd 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -108,15 +108,14 @@ struct bfq_sched_data {
};
/**
- * struct bfq_weight_counter - counter of the number of all active entities
+ * struct bfq_weight_counter - counter of the number of all active queues
* with a given weight.
*/
struct bfq_weight_counter {
- unsigned int weight; /* weight of the entities this counter refers to */
- unsigned int num_active; /* nr of active entities with this weight */
+ unsigned int weight; /* weight of the queues this counter refers to */
+ unsigned int num_active; /* nr of active queues with this weight */
/*
- * Weights tree member (see bfq_data's @queue_weights_tree and
- * @group_weights_tree)
+ * Weights tree member (see bfq_data's @queue_weights_tree)
*/
struct rb_node weights_node;
};
@@ -151,8 +150,6 @@ struct bfq_weight_counter {
struct bfq_entity {
/* service_tree member */
struct rb_node rb_node;
- /* pointer to the weight counter associated with this entity */
- struct bfq_weight_counter *weight_counter;
/*
* Flag, true if the entity is on a tree (either the active or
@@ -266,6 +263,9 @@ struct bfq_queue {
/* entity representing this queue in the scheduler */
struct bfq_entity entity;
+ /* pointer to the weight counter associated with this entity */
+ struct bfq_weight_counter *weight_counter;
+
/* maximum budget allowed from the feedback mechanism */
int max_budget;
/* budget expiration (in jiffies) */
@@ -351,6 +351,32 @@ struct bfq_queue {
unsigned long split_time; /* time of last split */
unsigned long first_IO_time; /* time of first I/O for this queue */
+
+ /* max service rate measured so far */
+ u32 max_service_rate;
+ /*
+ * Ratio between the service received by bfqq while it is in
+ * service, and the cumulative service (of requests of other
+ * queues) that may be injected while bfqq is empty but still
+ * in service. To increase precision, the coefficient is
+	 * measured in tenths of a unit. Here are some examples of (1)
+ * ratios, (2) resulting percentages of service injected
+	 * w.r.t. the total service dispatched while bfqq is in
+ * service, and (3) corresponding values of the coefficient:
+ * 1 (50%) -> 10
+ * 2 (33%) -> 20
+ * 10 (9%) -> 100
+ * 9.9 (9%) -> 99
+ * 1.5 (40%) -> 15
+ * 0.5 (66%) -> 5
+ * 0.1 (90%) -> 1
+ *
+ * So, if the coefficient is lower than 10, then
+ * injected service is more than bfqq service.
+ */
+ unsigned int inject_coeff;
+ /* amount of service injected in current service slot */
+ unsigned int injected_service;
};
/**
@@ -423,14 +449,9 @@ struct bfq_data {
*/
struct rb_root queue_weights_tree;
/*
- * rbtree of non-queue @bfq_entity weight counters, sorted by
- * weight. Used to keep track of whether all @bfq_groups have
- * the same weight. The tree contains one counter for each
- * distinct weight associated to some active @bfq_group (see
- * the comments to the functions bfq_weights_tree_[add|remove]
- * for further details).
+ * number of groups with requests still waiting for completion
*/
- struct rb_root group_weights_tree;
+ unsigned int num_active_groups;
/*
* Number of bfq_queues containing requests (including the
@@ -825,10 +846,10 @@ struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync);
void bic_set_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq, bool is_sync);
struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic);
void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq);
-void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_entity *entity,
+void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct rb_root *root);
void __bfq_weights_tree_remove(struct bfq_data *bfqd,
- struct bfq_entity *entity,
+ struct bfq_queue *bfqq,
struct rb_root *root);
void bfq_weights_tree_remove(struct bfq_data *bfqd,
struct bfq_queue *bfqq);
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index ae52bff43ce4..476b5a90a5a4 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -788,25 +788,29 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
new_weight = entity->orig_weight *
(bfqq ? bfqq->wr_coeff : 1);
/*
- * If the weight of the entity changes, remove the entity
- * from its old weight counter (if there is a counter
- * associated with the entity), and add it to the counter
- * associated with its new weight.
+ * If the weight of the entity changes, and the entity is a
+ * queue, remove the entity from its old weight counter (if
+ * there is a counter associated with the entity).
*/
if (prev_weight != new_weight) {
- root = bfqq ? &bfqd->queue_weights_tree :
- &bfqd->group_weights_tree;
- __bfq_weights_tree_remove(bfqd, entity, root);
+ if (bfqq) {
+ root = &bfqd->queue_weights_tree;
+ __bfq_weights_tree_remove(bfqd, bfqq, root);
+ } else
+ bfqd->num_active_groups--;
}
entity->weight = new_weight;
/*
- * Add the entity to its weights tree only if it is
- * not associated with a weight-raised queue.
+ * Add the entity, if it is not a weight-raised queue,
+ * to the counter associated with its new weight.
*/
- if (prev_weight != new_weight &&
- (bfqq ? bfqq->wr_coeff == 1 : 1))
- /* If we get here, root has been initialized. */
- bfq_weights_tree_add(bfqd, entity, root);
+ if (prev_weight != new_weight) {
+ if (bfqq && bfqq->wr_coeff == 1) {
+ /* If we get here, root has been initialized. */
+ bfq_weights_tree_add(bfqd, bfqq, root);
+ } else
+ bfqd->num_active_groups++;
+ }
new_st->wsum += entity->weight;
@@ -1012,9 +1016,9 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
if (!bfq_entity_to_bfqq(entity)) { /* bfq_group */
struct bfq_group *bfqg =
container_of(entity, struct bfq_group, entity);
+ struct bfq_data *bfqd = bfqg->bfqd;
- bfq_weights_tree_add(bfqg->bfqd, entity,
- &bfqd->group_weights_tree);
+ bfqd->num_active_groups++;
}
#endif
@@ -1181,10 +1185,17 @@ bool __bfq_deactivate_entity(struct bfq_entity *entity, bool ins_into_idle_tree)
st = bfq_entity_service_tree(entity);
is_in_service = entity == sd->in_service_entity;
- if (is_in_service) {
- bfq_calc_finish(entity, entity->service);
+ bfq_calc_finish(entity, entity->service);
+
+ if (is_in_service)
sd->in_service_entity = NULL;
- }
+ else
+ /*
+ * Non in-service entity: nobody will take care of
+ * resetting its service counter on expiration. Do it
+ * now.
+ */
+ entity->service = 0;
if (entity->tree == &st->active)
bfq_active_extract(st, entity);
@@ -1685,7 +1696,7 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
if (!bfqq->dispatched)
if (bfqq->wr_coeff == 1)
- bfq_weights_tree_add(bfqd, &bfqq->entity,
+ bfq_weights_tree_add(bfqd, bfqq,
&bfqd->queue_weights_tree);
if (bfqq->wr_coeff > 1)
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 67b5fb861a51..290af497997b 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -306,6 +306,8 @@ bool bio_integrity_prep(struct bio *bio)
if (bio_data_dir(bio) == WRITE) {
bio_integrity_process(bio, &bio->bi_iter,
bi->profile->generate_fn);
+ } else {
+ bip->bio_iter = bio->bi_iter;
}
return true;
@@ -331,20 +333,14 @@ static void bio_integrity_verify_fn(struct work_struct *work)
container_of(work, struct bio_integrity_payload, bip_work);
struct bio *bio = bip->bip_bio;
struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
- struct bvec_iter iter = bio->bi_iter;
/*
* At the moment verify is called bio's iterator was advanced
* during split and completion, we need to rewind iterator to
* it's original position.
*/
- if (bio_rewind_iter(bio, &iter, iter.bi_done)) {
- bio->bi_status = bio_integrity_process(bio, &iter,
- bi->profile->verify_fn);
- } else {
- bio->bi_status = BLK_STS_IOERR;
- }
-
+ bio->bi_status = bio_integrity_process(bio, &bip->bio_iter,
+ bi->profile->verify_fn);
bio_integrity_free(bio);
bio_endio(bio);
}
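The fix above replaces the fragile bio_rewind_iter() path with a snapshot: struct bvec_iter is a small value type, so bio_integrity_prep() saves a copy in bip->bio_iter on reads and bio_integrity_verify_fn() verifies against that copy. A sketch of the snapshot-instead-of-rewind idea with a stand-in iterator:

#include <stdio.h>

struct iter {
	unsigned int idx, done;		/* stand-in for struct bvec_iter */
};

static void advance(struct iter *it, unsigned int n)
{
	it->idx += n;
	it->done += n;
}

int main(void)
{
	struct iter cur = {0, 0};
	struct iter saved = cur;	/* snapshot at "prep" time */

	advance(&cur, 4);		/* split/completion moves cur on */

	/* "verify" runs on the saved copy; no rewind needed */
	printf("cur.idx=%u saved.idx=%u\n", cur.idx, saved.idx);
	return 0;
}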
diff --git a/block/bio.c b/block/bio.c
index 8c680a776171..bbfeb4ee2892 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -609,7 +609,9 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
bio->bi_iter = bio_src->bi_iter;
bio->bi_io_vec = bio_src->bi_io_vec;
- bio_clone_blkcg_association(bio, bio_src);
+ bio_clone_blkg_association(bio, bio_src);
+
+ blkcg_bio_issue_init(bio);
}
EXPORT_SYMBOL(__bio_clone_fast);
@@ -729,7 +731,7 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
}
/* If we may be able to merge these biovecs, force a recount */
- if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
+ if (bio->bi_vcnt > 1 && biovec_phys_mergeable(q, bvec - 1, bvec))
bio_clear_flag(bio, BIO_SEG_VALID);
done:
@@ -827,6 +829,8 @@ int bio_add_page(struct bio *bio, struct page *page,
}
EXPORT_SYMBOL(bio_add_page);
+#define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))
+
/**
* __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
* @bio: bio to add pages to
@@ -839,38 +843,35 @@ EXPORT_SYMBOL(bio_add_page);
*/
static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
- unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
+ unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
+ unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
struct page **pages = (struct page **)bv;
+ ssize_t size, left;
+ unsigned len, i;
size_t offset;
- ssize_t size;
+
+ /*
+ * Move page array up in the allocated memory for the bio vecs as far as
+ * possible so that we can start filling biovecs from the beginning
+ * without overwriting the temporary page array.
+ */
+ BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
+ pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
if (unlikely(size <= 0))
return size ? size : -EFAULT;
- idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
- /*
- * Deep magic below: We need to walk the pinned pages backwards
- * because we are abusing the space allocated for the bio_vecs
- * for the page array. Because the bio_vecs are larger than the
- * page pointers by definition this will always work. But it also
- * means we can't use bio_add_page, so any changes to it's semantics
- * need to be reflected here as well.
- */
- bio->bi_iter.bi_size += size;
- bio->bi_vcnt += nr_pages;
+ for (left = size, i = 0; left > 0; left -= len, i++) {
+ struct page *page = pages[i];
- while (idx--) {
- bv[idx].bv_page = pages[idx];
- bv[idx].bv_len = PAGE_SIZE;
- bv[idx].bv_offset = 0;
+ len = min_t(size_t, PAGE_SIZE - offset, left);
+ if (WARN_ON_ONCE(bio_add_page(bio, page, len, offset) != len))
+ return -EINVAL;
+ offset = 0;
}
- bv[0].bv_offset += offset;
- bv[0].bv_len -= offset;
- bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size;
-
iov_iter_advance(iter, size);
return 0;
}
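The rework above keeps the trick of overlaying the temporary page-pointer array on the bio_vec storage, but shifts it to the tail (pages += entries_left * (PAGE_PTRS_PER_BVEC - 1)) so that filling bio_vecs from the front via bio_add_page() never clobbers a page pointer that has not been consumed yet. A sketch of that layout with stand-in types; like the kernel, the pointer pun assumes -fno-strict-aliasing:

#include <stdio.h>

struct vec { void *page; unsigned int len, off; };	/* stand-in bio_vec */

#define NVECS 4
#define PTRS_PER_VEC (sizeof(struct vec) / sizeof(void *))

int main(void)
{
	struct vec vecs[NVECS];
	void **pages = (void **)vecs;
	int backing[NVECS];
	int i;

	pages += NVECS * (PTRS_PER_VEC - 1);	/* shift array to the tail */

	for (i = 0; i < NVECS; i++)		/* "pin" some pages */
		pages[i] = &backing[i];

	for (i = 0; i < NVECS; i++) {		/* fill vecs from the front */
		void *p = pages[i];		/* read before slot i is hit */
		vecs[i] = (struct vec){ .page = p, .len = 4096, .off = 0 };
	}

	printf("ok: %d\n", vecs[NVECS - 1].page == &backing[NVECS - 1]);
	return 0;
}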
@@ -1684,7 +1685,7 @@ void generic_end_io_acct(struct request_queue *q, int req_op,
const int sgrp = op_stat_group(req_op);
int cpu = part_stat_lock();
- part_stat_add(cpu, part, ticks[sgrp], duration);
+ part_stat_add(cpu, part, nsecs[sgrp], jiffies_to_nsecs(duration));
part_round_stats(q, cpu, part);
part_dec_in_flight(q, part, op_is_write(req_op));
@@ -1807,7 +1808,6 @@ struct bio *bio_split(struct bio *bio, int sectors,
bio_integrity_trim(split);
bio_advance(bio, split->bi_iter.bi_size);
- bio->bi_iter.bi_done = 0;
if (bio_flagged(bio, BIO_TRACE_COMPLETION))
bio_set_flag(split, BIO_TRACE_COMPLETION);
@@ -1956,69 +1956,151 @@ EXPORT_SYMBOL(bioset_init_from_src);
#ifdef CONFIG_BLK_CGROUP
+/**
+ * bio_associate_blkg - associate a bio with a blkg
+ * @bio: target bio
+ * @blkg: the blkg to associate
+ *
+ * This tries to associate @bio with the specified blkg. Association failure
+ * is handled by walking up the blkg tree. Therefore, the blkg associated can
+ * be anything between @blkg and the root_blkg. This situation only happens
+ * when a cgroup is dying and then the remaining bios will spill to the closest
+ * when a cgroup is dying, in which case the remaining bios spill to the
+ * closest alive blkg.
+ * A reference will be taken on the @blkg and will be released when @bio is
+ * freed.
+ */
+int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
+{
+ if (unlikely(bio->bi_blkg))
+ return -EBUSY;
+ bio->bi_blkg = blkg_tryget_closest(blkg);
+ return 0;
+}
+
+/**
+ * __bio_associate_blkg_from_css - internal blkg association function
+ *
+ * This is the core association function that all association paths rely on.
+ * A blkg reference is taken which is released upon freeing of the bio.
+ */
+static int __bio_associate_blkg_from_css(struct bio *bio,
+ struct cgroup_subsys_state *css)
+{
+ struct request_queue *q = bio->bi_disk->queue;
+ struct blkcg_gq *blkg;
+ int ret;
+
+ rcu_read_lock();
+
+ if (!css || !css->parent)
+ blkg = q->root_blkg;
+ else
+ blkg = blkg_lookup_create(css_to_blkcg(css), q);
+
+ ret = bio_associate_blkg(bio, blkg);
+
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * bio_associate_blkg_from_css - associate a bio with a specified css
+ * @bio: target bio
+ * @css: target css
+ *
+ * Associate @bio with the blkg found from the @css and the @bio's
+ * request_queue. This falls back to the queue's root_blkg if
+ * the association fails with the css.
+ */
+int bio_associate_blkg_from_css(struct bio *bio,
+ struct cgroup_subsys_state *css)
+{
+ if (unlikely(bio->bi_blkg))
+ return -EBUSY;
+ return __bio_associate_blkg_from_css(bio, css);
+}
+EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
+
#ifdef CONFIG_MEMCG
/**
- * bio_associate_blkcg_from_page - associate a bio with the page's blkcg
+ * bio_associate_blkg_from_page - associate a bio with the page's blkg
* @bio: target bio
* @page: the page to lookup the blkcg from
*
- * Associate @bio with the blkcg from @page's owning memcg. This works like
- * every other associate function wrt references.
+ * Associate @bio with the blkg from @page's owning memcg and the respective
+ * request_queue. If cgroup_e_css returns NULL, fall back to the queue's
+ * root_blkg.
+ *
+ * Note: this must be called after bio has an associated device.
*/
-int bio_associate_blkcg_from_page(struct bio *bio, struct page *page)
+int bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
- struct cgroup_subsys_state *blkcg_css;
+ struct cgroup_subsys_state *css;
+ int ret;
- if (unlikely(bio->bi_css))
+ if (unlikely(bio->bi_blkg))
return -EBUSY;
if (!page->mem_cgroup)
return 0;
- blkcg_css = cgroup_get_e_css(page->mem_cgroup->css.cgroup,
- &io_cgrp_subsys);
- bio->bi_css = blkcg_css;
- return 0;
+
+ rcu_read_lock();
+
+ css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
+
+ ret = __bio_associate_blkg_from_css(bio, css);
+
+ rcu_read_unlock();
+ return ret;
}
#endif /* CONFIG_MEMCG */
/**
- * bio_associate_blkcg - associate a bio with the specified blkcg
+ * bio_associate_create_blkg - associate a bio with a blkg from q
+ * @q: request_queue where bio is going
* @bio: target bio
- * @blkcg_css: css of the blkcg to associate
- *
- * Associate @bio with the blkcg specified by @blkcg_css. Block layer will
- * treat @bio as if it were issued by a task which belongs to the blkcg.
*
- * This function takes an extra reference of @blkcg_css which will be put
- * when @bio is released. The caller must own @bio and is responsible for
- * synchronizing calls to this function.
+ * Associate @bio with the blkg found from the bio's css and the request_queue.
+ * If one is not found, blkg_lookup_create() creates the blkg. This falls back to
+ * the queue's root_blkg if association fails.
*/
-int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
+int bio_associate_create_blkg(struct request_queue *q, struct bio *bio)
{
- if (unlikely(bio->bi_css))
- return -EBUSY;
- css_get(blkcg_css);
- bio->bi_css = blkcg_css;
- return 0;
+ struct cgroup_subsys_state *css;
+ int ret = 0;
+
+ /* someone has already associated this bio with a blkg */
+ if (bio->bi_blkg)
+ return ret;
+
+ rcu_read_lock();
+
+ css = blkcg_css();
+
+ ret = __bio_associate_blkg_from_css(bio, css);
+
+ rcu_read_unlock();
+ return ret;
}
-EXPORT_SYMBOL_GPL(bio_associate_blkcg);
/**
- * bio_associate_blkg - associate a bio with the specified blkg
+ * bio_reassociate_blkg - reassociate a bio with a blkg from q
+ * @q: request_queue where bio is going
* @bio: target bio
- * @blkg: the blkg to associate
*
- * Associate @bio with the blkg specified by @blkg. This is the queue specific
- * blkcg information associated with the @bio, a reference will be taken on the
- * @blkg and will be freed when the bio is freed.
+ * When submitting a bio, multiple recursive calls to make_request() may occur.
+ * This can cause the initial association done in blkcg_bio_issue_check() to
+ * be incorrect and reference the prior request_queue. This function performs
+ * the reassociation when that situation happens.
*/
-int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
+int bio_reassociate_blkg(struct request_queue *q, struct bio *bio)
{
- if (unlikely(bio->bi_blkg))
- return -EBUSY;
- if (!blkg_try_get(blkg))
- return -ENODEV;
- bio->bi_blkg = blkg;
- return 0;
+ if (bio->bi_blkg) {
+ blkg_put(bio->bi_blkg);
+ bio->bi_blkg = NULL;
+ }
+
+ return bio_associate_create_blkg(q, bio);
}
/**
@@ -2031,10 +2113,6 @@ void bio_disassociate_task(struct bio *bio)
put_io_context(bio->bi_ioc);
bio->bi_ioc = NULL;
}
- if (bio->bi_css) {
- css_put(bio->bi_css);
- bio->bi_css = NULL;
- }
if (bio->bi_blkg) {
blkg_put(bio->bi_blkg);
bio->bi_blkg = NULL;
@@ -2042,16 +2120,16 @@ void bio_disassociate_task(struct bio *bio)
}
/**
- * bio_clone_blkcg_association - clone blkcg association from src to dst bio
+ * bio_clone_blkg_association - clone blkg association from src to dst bio
* @dst: destination bio
* @src: source bio
*/
-void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
+void bio_clone_blkg_association(struct bio *dst, struct bio *src)
{
- if (src->bi_css)
- WARN_ON(bio_associate_blkcg(dst, src->bi_css));
+ if (src->bi_blkg)
+ bio_associate_blkg(dst, src->bi_blkg);
}
-EXPORT_SYMBOL_GPL(bio_clone_blkcg_association);
+EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
#endif /* CONFIG_BLK_CGROUP */
static void __init biovec_init_slabs(void)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index c19f9078da1e..992da5592c6e 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -84,6 +84,37 @@ static void blkg_free(struct blkcg_gq *blkg)
kfree(blkg);
}
+static void __blkg_release(struct rcu_head *rcu)
+{
+ struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
+
+ percpu_ref_exit(&blkg->refcnt);
+
+ /* release the blkcg and parent blkg refs this blkg has been holding */
+ css_put(&blkg->blkcg->css);
+ if (blkg->parent)
+ blkg_put(blkg->parent);
+
+ wb_congested_put(blkg->wb_congested);
+
+ blkg_free(blkg);
+}
+
+/*
+ * A group is RCU-protected, but holding the RCU lock alone does not mean
+ * that one can access all the fields of a blkg and assume they are valid.
+ * For example, don't try to follow throtl_data and request queue links.
+ *
+ * Having a reference to a blkg under RCU allows access only to values
+ * local to the group, such as group stats and group rate limits.
+ */
+static void blkg_release(struct percpu_ref *ref)
+{
+ struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
+
+ call_rcu(&blkg->rcu_head, __blkg_release);
+}
+
/**
* blkg_alloc - allocate a blkg
* @blkcg: block cgroup the new blkg is associated with
@@ -110,7 +141,6 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
blkg->q = q;
INIT_LIST_HEAD(&blkg->q_node);
blkg->blkcg = blkcg;
- atomic_set(&blkg->refcnt, 1);
/* root blkg uses @q->root_rl, init rl only for !root blkgs */
if (blkcg != &blkcg_root) {
@@ -217,6 +247,11 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
blkg_get(blkg->parent);
}
+ ret = percpu_ref_init(&blkg->refcnt, blkg_release, 0,
+ GFP_NOWAIT | __GFP_NOWARN);
+ if (ret)
+ goto err_cancel_ref;
+
/* invoke per-policy init */
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
@@ -249,6 +284,8 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
blkg_put(blkg);
return ERR_PTR(ret);
+err_cancel_ref:
+ percpu_ref_exit(&blkg->refcnt);
err_put_congested:
wb_congested_put(wb_congested);
err_put_css:
@@ -259,7 +296,7 @@ err_free_blkg:
}
/**
- * blkg_lookup_create - lookup blkg, try to create one if not there
+ * __blkg_lookup_create - lookup blkg, try to create one if not there
* @blkcg: blkcg of interest
* @q: request_queue of interest
*
@@ -268,12 +305,11 @@ err_free_blkg:
* that all non-root blkg's have access to the parent blkg. This function
* should be called under RCU read lock and @q->queue_lock.
*
- * Returns pointer to the looked up or created blkg on success, ERR_PTR()
- * value on error. If @q is dead, returns ERR_PTR(-EINVAL). If @q is not
- * dead and bypassing, returns ERR_PTR(-EBUSY).
+ * Returns the blkg, or the closest ancestor blkg if blkg_create() fails
+ * while walking down from the root.
*/
-struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
- struct request_queue *q)
+struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q)
{
struct blkcg_gq *blkg;
@@ -285,7 +321,7 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
* we shouldn't allow anything to go through for a bypassing queue.
*/
if (unlikely(blk_queue_bypass(q)))
- return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
+ return q->root_blkg;
blkg = __blkg_lookup(blkcg, q, true);
if (blkg)
@@ -293,23 +329,58 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
/*
* Create blkgs walking down from blkcg_root to @blkcg, so that all
- * non-root blkgs have access to their parents.
+ * non-root blkgs have access to their parents. Returns the closest
+ * blkg to the intended blkg should blkg_create() fail.
*/
while (true) {
struct blkcg *pos = blkcg;
struct blkcg *parent = blkcg_parent(blkcg);
-
- while (parent && !__blkg_lookup(parent, q, false)) {
+ struct blkcg_gq *ret_blkg = q->root_blkg;
+
+ while (parent) {
+ blkg = __blkg_lookup(parent, q, false);
+ if (blkg) {
+ /* remember closest blkg */
+ ret_blkg = blkg;
+ break;
+ }
pos = parent;
parent = blkcg_parent(parent);
}
blkg = blkg_create(pos, q, NULL);
- if (pos == blkcg || IS_ERR(blkg))
+ if (IS_ERR(blkg))
+ return ret_blkg;
+ if (pos == blkcg)
return blkg;
}
}
+/**
+ * blkg_lookup_create - find or create a blkg
+ * @blkcg: target block cgroup
+ * @q: target request_queue
+ *
+ * This looks up or creates the blkg representing the unique pair
+ * of the blkcg and the request_queue.
+ */
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q)
+{
+ struct blkcg_gq *blkg = blkg_lookup(blkcg, q);
+ unsigned long flags;
+
+ if (unlikely(!blkg)) {
+ spin_lock_irqsave(q->queue_lock, flags);
+
+ blkg = __blkg_lookup_create(blkcg, q);
+
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
+
+ return blkg;
+}
+
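The new blkg_lookup_create() has a classic double-checked shape: a lockless read-side lookup first, and the queue lock plus a re-check (done inside __blkg_lookup_create() via __blkg_lookup()) only on a miss. A sketch of that shape, with hypothetical helpers and a pthread mutex standing in for q->queue_lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *cached;			/* stand-in for the blkg lookup */

static void *slow_create(void)
{
	static int object = 42;		/* stand-in blkg */
	return &object;
}

static void *lookup_create(void)
{
	void *obj = cached;		/* lockless fast path */

	if (!obj) {
		pthread_mutex_lock(&lock);
		if (!cached)		/* re-check under the lock */
			cached = slow_create();
		obj = cached;
		pthread_mutex_unlock(&lock);
	}
	return obj;
}

int main(void)
{
	printf("first:  %p\n", lookup_create());	/* takes the lock */
	printf("second: %p\n", lookup_create());	/* lockless hit */
	return 0;
}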
static void blkg_destroy(struct blkcg_gq *blkg)
{
struct blkcg *blkcg = blkg->blkcg;
@@ -353,7 +424,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
* Put the reference taken at the time of creation so that when all
* queues are gone, group can be destroyed.
*/
- blkg_put(blkg);
+ percpu_ref_kill(&blkg->refcnt);
}
/**
@@ -381,29 +452,6 @@ static void blkg_destroy_all(struct request_queue *q)
}
/*
- * A group is RCU protected, but having an rcu lock does not mean that one
- * can access all the fields of blkg and assume these are valid. For
- * example, don't try to follow throtl_data and request queue links.
- *
- * Having a reference to blkg under an rcu allows accesses to only values
- * local to groups like group stats and group rate limits.
- */
-void __blkg_release_rcu(struct rcu_head *rcu_head)
-{
- struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
-
- /* release the blkcg and parent blkg refs this blkg has been holding */
- css_put(&blkg->blkcg->css);
- if (blkg->parent)
- blkg_put(blkg->parent);
-
- wb_congested_put(blkg->wb_congested);
-
- blkg_free(blkg);
-}
-EXPORT_SYMBOL_GPL(__blkg_release_rcu);
-
-/*
* The next function used by blk_queue_for_each_rl(). It's a bit tricky
* because the root blkg uses @q->root_rl instead of its own rl.
*/
@@ -1510,8 +1558,10 @@ int blkcg_policy_register(struct blkcg_policy *pol)
for (i = 0; i < BLKCG_MAX_POLS; i++)
if (!blkcg_policy[i])
break;
- if (i >= BLKCG_MAX_POLS)
+ if (i >= BLKCG_MAX_POLS) {
+ pr_warn("blkcg_policy_register: BLKCG_MAX_POLS too small\n");
goto err_unlock;
+ }
/* Make sure cpd/pd_alloc_fn and cpd/pd_free_fn in pairs */
if ((!pol->cpd_alloc_fn ^ !pol->cpd_free_fn) ||
@@ -1746,8 +1796,7 @@ void blkcg_maybe_throttle_current(void)
blkg = blkg_lookup(blkcg, q);
if (!blkg)
goto out;
- blkg = blkg_try_get(blkg);
- if (!blkg)
+ if (!blkg_tryget(blkg))
goto out;
rcu_read_unlock();
diff --git a/block/blk-core.c b/block/blk-core.c
index 4dbc93f43b38..3ed60723e242 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -42,6 +42,7 @@
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"
+#include "blk-pm.h"
#include "blk-rq-qos.h"
#ifdef CONFIG_DEBUG_FS
@@ -421,24 +422,25 @@ void blk_sync_queue(struct request_queue *q)
EXPORT_SYMBOL(blk_sync_queue);
/**
- * blk_set_preempt_only - set QUEUE_FLAG_PREEMPT_ONLY
+ * blk_set_pm_only - increment pm_only counter
* @q: request queue pointer
- *
- * Returns the previous value of the PREEMPT_ONLY flag - 0 if the flag was not
- * set and 1 if the flag was already set.
*/
-int blk_set_preempt_only(struct request_queue *q)
+void blk_set_pm_only(struct request_queue *q)
{
- return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
+ atomic_inc(&q->pm_only);
}
-EXPORT_SYMBOL_GPL(blk_set_preempt_only);
+EXPORT_SYMBOL_GPL(blk_set_pm_only);
-void blk_clear_preempt_only(struct request_queue *q)
+void blk_clear_pm_only(struct request_queue *q)
{
- blk_queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
- wake_up_all(&q->mq_freeze_wq);
+ int pm_only;
+
+ pm_only = atomic_dec_return(&q->pm_only);
+ WARN_ON_ONCE(pm_only < 0);
+ if (pm_only == 0)
+ wake_up_all(&q->mq_freeze_wq);
}
-EXPORT_SYMBOL_GPL(blk_clear_preempt_only);
+EXPORT_SYMBOL_GPL(blk_clear_pm_only);
/**
* __blk_run_queue_uncond - run a queue whether or not it has been stopped
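blk_set_pm_only()/blk_clear_pm_only() turn the old PREEMPT_ONLY test-and-set flag into a counter, so overlapping PM transitions nest naturally and the mq_freeze_wq wakeup only fires when the last user drops the count. A minimal sketch of balanced use (example_hw_quiesce() is a hypothetical stand-in):

/* Hedged sketch: calls must balance; waiters are only woken when
 * the pm_only counter drops back to zero. */
static int example_enter_pm_state(struct request_queue *q)
{
	blk_set_pm_only(q);		/* pm_only: n -> n + 1 */

	if (!example_hw_quiesce(q)) {	/* hypothetical helper */
		blk_clear_pm_only(q);	/* n + 1 -> n, may wake waiters */
		return -EBUSY;
	}
	return 0;			/* leave pm_only raised */
}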
@@ -917,7 +919,7 @@ EXPORT_SYMBOL(blk_alloc_queue);
*/
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
- const bool preempt = flags & BLK_MQ_REQ_PREEMPT;
+ const bool pm = flags & BLK_MQ_REQ_PREEMPT;
while (true) {
bool success = false;
@@ -925,11 +927,11 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
rcu_read_lock();
if (percpu_ref_tryget_live(&q->q_usage_counter)) {
/*
- * The code that sets the PREEMPT_ONLY flag is
- * responsible for ensuring that that flag is globally
- * visible before the queue is unfrozen.
+ * The code that increments the pm_only counter is
+ * responsible for ensuring that that counter is
+ * globally visible before the queue is unfrozen.
*/
- if (preempt || !blk_queue_preempt_only(q)) {
+ if (pm || !blk_queue_pm_only(q)) {
success = true;
} else {
percpu_ref_put(&q->q_usage_counter);
@@ -954,7 +956,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
wait_event(q->mq_freeze_wq,
(atomic_read(&q->mq_freeze_depth) == 0 &&
- (preempt || !blk_queue_preempt_only(q))) ||
+ (pm || (blk_pm_request_resume(q),
+ !blk_queue_pm_only(q)))) ||
blk_queue_dying(q));
if (blk_queue_dying(q))
return -ENODEV;
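Note the comma operator in the new wait_event() condition: blk_pm_request_resume() runs for its side effect (kicking a runtime resume of a suspended queue) each time the condition is re-evaluated, before pm_only is re-checked. Unrolled into a plain predicate for readability (a sketch, not the committed form):

/* Equivalent of the wait_event() condition above, written out. */
static bool example_queue_enter_may_proceed(struct request_queue *q,
					    bool pm)
{
	if (blk_queue_dying(q))
		return true;	/* wake up; blk_queue_enter() returns -ENODEV */
	if (atomic_read(&q->mq_freeze_depth) != 0)
		return false;
	if (pm)
		return true;
	blk_pm_request_resume(q);	/* side effect of the comma operator */
	return !blk_queue_pm_only(q);
}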
@@ -1051,8 +1054,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
mutex_init(&q->sysfs_lock);
spin_lock_init(&q->__queue_lock);
- if (!q->mq_ops)
- q->queue_lock = lock ? : &q->__queue_lock;
+ q->queue_lock = lock ? : &q->__queue_lock;
/*
* A queue starts its life with bypass turned on to avoid
@@ -1160,7 +1162,7 @@ int blk_init_allocated_queue(struct request_queue *q)
{
WARN_ON_ONCE(q->mq_ops);
- q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size);
+ q->fq = blk_alloc_flush_queue(q, NUMA_NO_NODE, q->cmd_size, GFP_KERNEL);
if (!q->fq)
return -ENOMEM;
@@ -1726,16 +1728,6 @@ void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part)
}
EXPORT_SYMBOL_GPL(part_round_stats);
-#ifdef CONFIG_PM
-static void blk_pm_put_request(struct request *rq)
-{
- if (rq->q->dev && !(rq->rq_flags & RQF_PM) && !--rq->q->nr_pending)
- pm_runtime_mark_last_busy(rq->q->dev);
-}
-#else
-static inline void blk_pm_put_request(struct request *rq) {}
-#endif
-
void __blk_put_request(struct request_queue *q, struct request *req)
{
req_flags_t rq_flags = req->rq_flags;
@@ -1752,6 +1744,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
blk_req_zone_write_unlock(req);
blk_pm_put_request(req);
+ blk_pm_mark_last_busy(req);
elv_completed_request(q, req);
@@ -2440,6 +2433,7 @@ blk_qc_t generic_make_request(struct bio *bio)
if (q)
blk_queue_exit(q);
q = bio->bi_disk->queue;
+ bio_reassociate_blkg(q, bio);
flags = 0;
if (bio->bi_opf & REQ_NOWAIT)
flags = BLK_MQ_REQ_NOWAIT;
@@ -2733,17 +2727,15 @@ void blk_account_io_done(struct request *req, u64 now)
* containing request is enough.
*/
if (blk_do_io_stat(req) && !(req->rq_flags & RQF_FLUSH_SEQ)) {
- unsigned long duration;
const int sgrp = op_stat_group(req_op(req));
struct hd_struct *part;
int cpu;
- duration = nsecs_to_jiffies(now - req->start_time_ns);
cpu = part_stat_lock();
part = req->part;
part_stat_inc(cpu, part, ios[sgrp]);
- part_stat_add(cpu, part, ticks[sgrp], duration);
+ part_stat_add(cpu, part, nsecs[sgrp], now - req->start_time_ns);
part_round_stats(req->q, cpu, part);
part_dec_in_flight(req->q, part, rq_data_dir(req));
@@ -2752,30 +2744,6 @@ void blk_account_io_done(struct request *req, u64 now)
}
}
-#ifdef CONFIG_PM
-/*
- * Don't process normal requests when queue is suspended
- * or in the process of suspending/resuming
- */
-static bool blk_pm_allow_request(struct request *rq)
-{
- switch (rq->q->rpm_status) {
- case RPM_RESUMING:
- case RPM_SUSPENDING:
- return rq->rq_flags & RQF_PM;
- case RPM_SUSPENDED:
- return false;
- default:
- return true;
- }
-}
-#else
-static bool blk_pm_allow_request(struct request *rq)
-{
- return true;
-}
-#endif
-
void blk_account_io_start(struct request *rq, bool new_io)
{
struct hd_struct *part;
@@ -2821,11 +2789,14 @@ static struct request *elv_next_request(struct request_queue *q)
while (1) {
list_for_each_entry(rq, &q->queue_head, queuelist) {
- if (blk_pm_allow_request(rq))
- return rq;
-
- if (rq->rq_flags & RQF_SOFTBARRIER)
- break;
+#ifdef CONFIG_PM
+ /*
+ * If a request gets queued in state RPM_SUSPENDED
+ * then that's a kernel bug.
+ */
+ WARN_ON_ONCE(q->rpm_status == RPM_SUSPENDED);
+#endif
+ return rq;
}
/*
@@ -3757,191 +3728,6 @@ void blk_finish_plug(struct blk_plug *plug)
}
EXPORT_SYMBOL(blk_finish_plug);
-#ifdef CONFIG_PM
-/**
- * blk_pm_runtime_init - Block layer runtime PM initialization routine
- * @q: the queue of the device
- * @dev: the device the queue belongs to
- *
- * Description:
- * Initialize runtime-PM-related fields for @q and start auto suspend for
- * @dev. Drivers that want to take advantage of request-based runtime PM
- * should call this function after @dev has been initialized, and its
- * request queue @q has been allocated, and runtime PM for it can not happen
- * yet(either due to disabled/forbidden or its usage_count > 0). In most
- * cases, driver should call this function before any I/O has taken place.
- *
- * This function takes care of setting up using auto suspend for the device,
- * the autosuspend delay is set to -1 to make runtime suspend impossible
- * until an updated value is either set by user or by driver. Drivers do
- * not need to touch other autosuspend settings.
- *
- * The block layer runtime PM is request based, so only works for drivers
- * that use request as their IO unit instead of those directly use bio's.
- */
-void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
-{
- /* Don't enable runtime PM for blk-mq until it is ready */
- if (q->mq_ops) {
- pm_runtime_disable(dev);
- return;
- }
-
- q->dev = dev;
- q->rpm_status = RPM_ACTIVE;
- pm_runtime_set_autosuspend_delay(q->dev, -1);
- pm_runtime_use_autosuspend(q->dev);
-}
-EXPORT_SYMBOL(blk_pm_runtime_init);
-
-/**
- * blk_pre_runtime_suspend - Pre runtime suspend check
- * @q: the queue of the device
- *
- * Description:
- * This function will check if runtime suspend is allowed for the device
- * by examining if there are any requests pending in the queue. If there
- * are requests pending, the device can not be runtime suspended; otherwise,
- * the queue's status will be updated to SUSPENDING and the driver can
- * proceed to suspend the device.
- *
- * For the not allowed case, we mark last busy for the device so that
- * runtime PM core will try to autosuspend it some time later.
- *
- * This function should be called near the start of the device's
- * runtime_suspend callback.
- *
- * Return:
- * 0 - OK to runtime suspend the device
- * -EBUSY - Device should not be runtime suspended
- */
-int blk_pre_runtime_suspend(struct request_queue *q)
-{
- int ret = 0;
-
- if (!q->dev)
- return ret;
-
- spin_lock_irq(q->queue_lock);
- if (q->nr_pending) {
- ret = -EBUSY;
- pm_runtime_mark_last_busy(q->dev);
- } else {
- q->rpm_status = RPM_SUSPENDING;
- }
- spin_unlock_irq(q->queue_lock);
- return ret;
-}
-EXPORT_SYMBOL(blk_pre_runtime_suspend);
-
-/**
- * blk_post_runtime_suspend - Post runtime suspend processing
- * @q: the queue of the device
- * @err: return value of the device's runtime_suspend function
- *
- * Description:
- * Update the queue's runtime status according to the return value of the
- * device's runtime suspend function and mark last busy for the device so
- * that PM core will try to auto suspend the device at a later time.
- *
- * This function should be called near the end of the device's
- * runtime_suspend callback.
- */
-void blk_post_runtime_suspend(struct request_queue *q, int err)
-{
- if (!q->dev)
- return;
-
- spin_lock_irq(q->queue_lock);
- if (!err) {
- q->rpm_status = RPM_SUSPENDED;
- } else {
- q->rpm_status = RPM_ACTIVE;
- pm_runtime_mark_last_busy(q->dev);
- }
- spin_unlock_irq(q->queue_lock);
-}
-EXPORT_SYMBOL(blk_post_runtime_suspend);
-
-/**
- * blk_pre_runtime_resume - Pre runtime resume processing
- * @q: the queue of the device
- *
- * Description:
- * Update the queue's runtime status to RESUMING in preparation for the
- * runtime resume of the device.
- *
- * This function should be called near the start of the device's
- * runtime_resume callback.
- */
-void blk_pre_runtime_resume(struct request_queue *q)
-{
- if (!q->dev)
- return;
-
- spin_lock_irq(q->queue_lock);
- q->rpm_status = RPM_RESUMING;
- spin_unlock_irq(q->queue_lock);
-}
-EXPORT_SYMBOL(blk_pre_runtime_resume);
-
-/**
- * blk_post_runtime_resume - Post runtime resume processing
- * @q: the queue of the device
- * @err: return value of the device's runtime_resume function
- *
- * Description:
- * Update the queue's runtime status according to the return value of the
- * device's runtime_resume function. If it is successfully resumed, process
- * the requests that are queued into the device's queue when it is resuming
- * and then mark last busy and initiate autosuspend for it.
- *
- * This function should be called near the end of the device's
- * runtime_resume callback.
- */
-void blk_post_runtime_resume(struct request_queue *q, int err)
-{
- if (!q->dev)
- return;
-
- spin_lock_irq(q->queue_lock);
- if (!err) {
- q->rpm_status = RPM_ACTIVE;
- __blk_run_queue(q);
- pm_runtime_mark_last_busy(q->dev);
- pm_request_autosuspend(q->dev);
- } else {
- q->rpm_status = RPM_SUSPENDED;
- }
- spin_unlock_irq(q->queue_lock);
-}
-EXPORT_SYMBOL(blk_post_runtime_resume);
-
-/**
- * blk_set_runtime_active - Force runtime status of the queue to be active
- * @q: the queue of the device
- *
- * If the device is left runtime suspended during system suspend the resume
- * hook typically resumes the device and corrects runtime status
- * accordingly. However, that does not affect the queue runtime PM status
- * which is still "suspended". This prevents processing requests from the
- * queue.
- *
- * This function can be used in driver's resume hook to correct queue
- * runtime PM status and re-enable peeking requests from the queue. It
- * should be called before first request is added to the queue.
- */
-void blk_set_runtime_active(struct request_queue *q)
-{
- spin_lock_irq(q->queue_lock);
- q->rpm_status = RPM_ACTIVE;
- pm_runtime_mark_last_busy(q->dev);
- pm_request_autosuspend(q->dev);
- spin_unlock_irq(q->queue_lock);
-}
-EXPORT_SYMBOL(blk_set_runtime_active);
-#endif
-
int __init blk_dev_init(void)
{
BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS));
diff --git a/block/blk-flush.c b/block/blk-flush.c
index ce41f666de3e..8b44b86779da 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -566,12 +566,12 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
EXPORT_SYMBOL(blkdev_issue_flush);
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
- int node, int cmd_size)
+ int node, int cmd_size, gfp_t flags)
{
struct blk_flush_queue *fq;
int rq_sz = sizeof(struct request);
- fq = kzalloc_node(sizeof(*fq), GFP_KERNEL, node);
+ fq = kzalloc_node(sizeof(*fq), flags, node);
if (!fq)
goto fail;
@@ -579,7 +579,7 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
spin_lock_init(&fq->mq_flush_lock);
rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
- fq->flush_rq = kzalloc_node(rq_sz, GFP_KERNEL, node);
+ fq->flush_rq = kzalloc_node(rq_sz, flags, node);
if (!fq->flush_rq)
goto fail_rq;
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 6121611e1316..d1ab089e0919 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -49,12 +49,8 @@ int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
bio_for_each_integrity_vec(iv, bio, iter) {
if (prev) {
- if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
+ if (!biovec_phys_mergeable(q, &ivprv, &iv))
goto new_segment;
-
- if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
- goto new_segment;
-
if (seg_size + iv.bv_len > queue_max_segment_size(q))
goto new_segment;
@@ -95,12 +91,8 @@ int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
bio_for_each_integrity_vec(iv, bio, iter) {
if (prev) {
- if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
+ if (!biovec_phys_mergeable(q, &ivprv, &iv))
goto new_segment;
-
- if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
- goto new_segment;
-
if (sg->length + iv.bv_len > queue_max_segment_size(q))
goto new_segment;
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 19923f8a029d..35c48d7b8f78 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -115,9 +115,22 @@ struct child_latency_info {
atomic_t scale_cookie;
};
+struct percentile_stats {
+ u64 total;
+ u64 missed;
+};
+
+struct latency_stat {
+ union {
+ struct percentile_stats ps;
+ struct blk_rq_stat rqs;
+ };
+};
+
struct iolatency_grp {
struct blkg_policy_data pd;
- struct blk_rq_stat __percpu *stats;
+ struct latency_stat __percpu *stats;
+ struct latency_stat cur_stat;
struct blk_iolatency *blkiolat;
struct rq_depth rq_depth;
struct rq_wait rq_wait;
@@ -132,6 +145,7 @@ struct iolatency_grp {
/* Our current number of IO's for the last summation. */
u64 nr_samples;
+ bool ssd;
struct child_latency_info child_lat;
};
@@ -172,6 +186,80 @@ static inline struct blkcg_gq *lat_to_blkg(struct iolatency_grp *iolat)
return pd_to_blkg(&iolat->pd);
}
+static inline void latency_stat_init(struct iolatency_grp *iolat,
+ struct latency_stat *stat)
+{
+ if (iolat->ssd) {
+ stat->ps.total = 0;
+ stat->ps.missed = 0;
+ } else
+ blk_rq_stat_init(&stat->rqs);
+}
+
+static inline void latency_stat_sum(struct iolatency_grp *iolat,
+ struct latency_stat *sum,
+ struct latency_stat *stat)
+{
+ if (iolat->ssd) {
+ sum->ps.total += stat->ps.total;
+ sum->ps.missed += stat->ps.missed;
+ } else
+ blk_rq_stat_sum(&sum->rqs, &stat->rqs);
+}
+
+static inline void latency_stat_record_time(struct iolatency_grp *iolat,
+ u64 req_time)
+{
+ struct latency_stat *stat = get_cpu_ptr(iolat->stats);
+ if (iolat->ssd) {
+ if (req_time >= iolat->min_lat_nsec)
+ stat->ps.missed++;
+ stat->ps.total++;
+ } else
+ blk_rq_stat_add(&stat->rqs, req_time);
+ put_cpu_ptr(stat);
+}
+
+static inline bool latency_sum_ok(struct iolatency_grp *iolat,
+ struct latency_stat *stat)
+{
+ if (iolat->ssd) {
+ u64 thresh = div64_u64(stat->ps.total, 10);
+ thresh = max(thresh, 1ULL);
+ return stat->ps.missed < thresh;
+ }
+ return stat->rqs.mean <= iolat->min_lat_nsec;
+}
+
+static inline u64 latency_stat_samples(struct iolatency_grp *iolat,
+ struct latency_stat *stat)
+{
+ if (iolat->ssd)
+ return stat->ps.total;
+ return stat->rqs.nr_samples;
+}
+
+static inline void iolat_update_total_lat_avg(struct iolatency_grp *iolat,
+ struct latency_stat *stat)
+{
+ int exp_idx;
+
+ if (iolat->ssd)
+ return;
+
+ /*
+ * CALC_LOAD takes in a number stored in fixed point representation.
+ * Because we are using this for IO time in ns, the values stored
+ * are significantly larger than the FIXED_1 denominator (2048).
+ * Therefore, rounding errors in the calculation are negligible and
+ * can be ignored.
+ */
+ exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
+ div64_u64(iolat->cur_win_nsec,
+ BLKIOLATENCY_EXP_BUCKET_SIZE));
+ CALC_LOAD(iolat->lat_avg, iolatency_exp_factors[exp_idx], stat->rqs.mean);
+}
+
static inline bool iolatency_may_queue(struct iolatency_grp *iolat,
wait_queue_entry_t *wait,
bool first_block)
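The latency_stat union lets a single percpu allocation serve both device classes: rotational devices keep a running mean in blk_rq_stat, while non-rotational devices only count missed/total and call a window good when fewer than 10% of requests missed the target, with the threshold clamped to at least one sample. A standalone worked sketch of that ssd-side rule (same arithmetic as latency_sum_ok() above):

#include <stdbool.h>
#include <stdint.h>

/* ssd-path acceptance rule: good when missed < max(total / 10, 1). */
static bool example_ssd_window_ok(uint64_t total, uint64_t missed)
{
	uint64_t thresh = total / 10;

	if (thresh < 1)
		thresh = 1;
	return missed < thresh;
}

/* e.g. total = 100, missed = 9 -> ok (9 < 10)
 *      total = 8,   missed = 1 -> not ok, since thresh clamps to 1 */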
@@ -255,7 +343,7 @@ static void scale_cookie_change(struct blk_iolatency *blkiolat,
struct child_latency_info *lat_info,
bool up)
{
- unsigned long qd = blk_queue_depth(blkiolat->rqos.q);
+ unsigned long qd = blkiolat->rqos.q->nr_requests;
unsigned long scale = scale_amount(qd, up);
unsigned long old = atomic_read(&lat_info->scale_cookie);
unsigned long max_scale = qd << 1;
@@ -295,10 +383,9 @@ static void scale_cookie_change(struct blk_iolatency *blkiolat,
*/
static void scale_change(struct iolatency_grp *iolat, bool up)
{
- unsigned long qd = blk_queue_depth(iolat->blkiolat->rqos.q);
+ unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
unsigned long scale = scale_amount(qd, up);
unsigned long old = iolat->rq_depth.max_depth;
- bool changed = false;
if (old > qd)
old = qd;
@@ -308,15 +395,13 @@ static void scale_change(struct iolatency_grp *iolat, bool up)
return;
if (old < qd) {
- changed = true;
old += scale;
old = min(old, qd);
iolat->rq_depth.max_depth = old;
wake_up_all(&iolat->rq_wait.wait);
}
- } else if (old > 1) {
+ } else {
old >>= 1;
- changed = true;
iolat->rq_depth.max_depth = max(old, 1UL);
}
}
@@ -369,7 +454,7 @@ static void check_scale_change(struct iolatency_grp *iolat)
* scale down event.
*/
samples_thresh = lat_info->nr_samples * 5;
- samples_thresh = div64_u64(samples_thresh, 100);
+ samples_thresh = max(1ULL, div64_u64(samples_thresh, 100));
if (iolat->nr_samples <= samples_thresh)
return;
}
@@ -395,34 +480,12 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
spinlock_t *lock)
{
struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
- struct blkcg *blkcg;
- struct blkcg_gq *blkg;
- struct request_queue *q = rqos->q;
+ struct blkcg_gq *blkg = bio->bi_blkg;
bool issue_as_root = bio_issue_as_root_blkg(bio);
if (!blk_iolatency_enabled(blkiolat))
return;
- rcu_read_lock();
- blkcg = bio_blkcg(bio);
- bio_associate_blkcg(bio, &blkcg->css);
- blkg = blkg_lookup(blkcg, q);
- if (unlikely(!blkg)) {
- if (!lock)
- spin_lock_irq(q->queue_lock);
- blkg = blkg_lookup_create(blkcg, q);
- if (IS_ERR(blkg))
- blkg = NULL;
- if (!lock)
- spin_unlock_irq(q->queue_lock);
- }
- if (!blkg)
- goto out;
-
- bio_issue_init(&bio->bi_issue, bio_sectors(bio));
- bio_associate_blkg(bio, blkg);
-out:
- rcu_read_unlock();
while (blkg && blkg->parent) {
struct iolatency_grp *iolat = blkg_to_lat(blkg);
if (!iolat) {
@@ -443,7 +506,6 @@ static void iolatency_record_time(struct iolatency_grp *iolat,
struct bio_issue *issue, u64 now,
bool issue_as_root)
{
- struct blk_rq_stat *rq_stat;
u64 start = bio_issue_time(issue);
u64 req_time;
@@ -469,9 +531,7 @@ static void iolatency_record_time(struct iolatency_grp *iolat,
return;
}
- rq_stat = get_cpu_ptr(iolat->stats);
- blk_rq_stat_add(rq_stat, req_time);
- put_cpu_ptr(rq_stat);
+ latency_stat_record_time(iolat, req_time);
}
#define BLKIOLATENCY_MIN_ADJUST_TIME (500 * NSEC_PER_MSEC)
@@ -482,17 +542,17 @@ static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
struct blkcg_gq *blkg = lat_to_blkg(iolat);
struct iolatency_grp *parent;
struct child_latency_info *lat_info;
- struct blk_rq_stat stat;
+ struct latency_stat stat;
unsigned long flags;
- int cpu, exp_idx;
+ int cpu;
- blk_rq_stat_init(&stat);
+ latency_stat_init(iolat, &stat);
preempt_disable();
for_each_online_cpu(cpu) {
- struct blk_rq_stat *s;
+ struct latency_stat *s;
s = per_cpu_ptr(iolat->stats, cpu);
- blk_rq_stat_sum(&stat, s);
- blk_rq_stat_init(s);
+ latency_stat_sum(iolat, &stat, s);
+ latency_stat_init(iolat, s);
}
preempt_enable();
@@ -502,41 +562,36 @@ static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
lat_info = &parent->child_lat;
- /*
- * CALC_LOAD takes in a number stored in fixed point representation.
- * Because we are using this for IO time in ns, the values stored
- * are significantly larger than the FIXED_1 denominator (2048).
- * Therefore, rounding errors in the calculation are negligible and
- * can be ignored.
- */
- exp_idx = min_t(int, BLKIOLATENCY_NR_EXP_FACTORS - 1,
- div64_u64(iolat->cur_win_nsec,
- BLKIOLATENCY_EXP_BUCKET_SIZE));
- CALC_LOAD(iolat->lat_avg, iolatency_exp_factors[exp_idx], stat.mean);
+ iolat_update_total_lat_avg(iolat, &stat);
/* Everything is ok and we don't need to adjust the scale. */
- if (stat.mean <= iolat->min_lat_nsec &&
+ if (latency_sum_ok(iolat, &stat) &&
atomic_read(&lat_info->scale_cookie) == DEFAULT_SCALE_COOKIE)
return;
/* Somebody beat us to the punch, just bail. */
spin_lock_irqsave(&lat_info->lock, flags);
+
+ latency_stat_sum(iolat, &iolat->cur_stat, &stat);
lat_info->nr_samples -= iolat->nr_samples;
- lat_info->nr_samples += stat.nr_samples;
- iolat->nr_samples = stat.nr_samples;
+ lat_info->nr_samples += latency_stat_samples(iolat, &iolat->cur_stat);
+ iolat->nr_samples = latency_stat_samples(iolat, &iolat->cur_stat);
if ((lat_info->last_scale_event >= now ||
- now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME) &&
- lat_info->scale_lat <= iolat->min_lat_nsec)
+ now - lat_info->last_scale_event < BLKIOLATENCY_MIN_ADJUST_TIME))
goto out;
- if (stat.mean <= iolat->min_lat_nsec &&
- stat.nr_samples >= BLKIOLATENCY_MIN_GOOD_SAMPLES) {
+ if (latency_sum_ok(iolat, &iolat->cur_stat) &&
+ latency_sum_ok(iolat, &stat)) {
+ if (latency_stat_samples(iolat, &iolat->cur_stat) <
+ BLKIOLATENCY_MIN_GOOD_SAMPLES)
+ goto out;
if (lat_info->scale_grp == iolat) {
lat_info->last_scale_event = now;
scale_cookie_change(iolat->blkiolat, lat_info, true);
}
- } else if (stat.mean > iolat->min_lat_nsec) {
+ } else if (lat_info->scale_lat == 0 ||
+ lat_info->scale_lat >= iolat->min_lat_nsec) {
lat_info->last_scale_event = now;
if (!lat_info->scale_grp ||
lat_info->scale_lat > iolat->min_lat_nsec) {
@@ -545,6 +600,7 @@ static void iolatency_check_latencies(struct iolatency_grp *iolat, u64 now)
}
scale_cookie_change(iolat->blkiolat, lat_info, false);
}
+ latency_stat_init(iolat, &iolat->cur_stat);
out:
spin_unlock_irqrestore(&lat_info->lock, flags);
}
@@ -650,7 +706,7 @@ static void blkiolatency_timer_fn(struct timer_list *t)
* We could be exiting, don't access the pd unless we have a
* ref on the blkg.
*/
- if (!blkg_try_get(blkg))
+ if (!blkg_tryget(blkg))
continue;
iolat = blkg_to_lat(blkg);
@@ -761,7 +817,6 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
{
struct blkcg *blkcg = css_to_blkcg(of_css(of));
struct blkcg_gq *blkg;
- struct blk_iolatency *blkiolat;
struct blkg_conf_ctx ctx;
struct iolatency_grp *iolat;
char *p, *tok;
@@ -774,7 +829,6 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
return ret;
iolat = blkg_to_lat(ctx.blkg);
- blkiolat = iolat->blkiolat;
p = ctx.body;
ret = -EINVAL;
@@ -835,13 +889,43 @@ static int iolatency_print_limit(struct seq_file *sf, void *v)
return 0;
}
+static size_t iolatency_ssd_stat(struct iolatency_grp *iolat, char *buf,
+ size_t size)
+{
+ struct latency_stat stat;
+ int cpu;
+
+ latency_stat_init(iolat, &stat);
+ preempt_disable();
+ for_each_online_cpu(cpu) {
+ struct latency_stat *s;
+ s = per_cpu_ptr(iolat->stats, cpu);
+ latency_stat_sum(iolat, &stat, s);
+ }
+ preempt_enable();
+
+ if (iolat->rq_depth.max_depth == UINT_MAX)
+ return scnprintf(buf, size, " missed=%llu total=%llu depth=max",
+ (unsigned long long)stat.ps.missed,
+ (unsigned long long)stat.ps.total);
+ return scnprintf(buf, size, " missed=%llu total=%llu depth=%u",
+ (unsigned long long)stat.ps.missed,
+ (unsigned long long)stat.ps.total,
+ iolat->rq_depth.max_depth);
+}
+
static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
size_t size)
{
struct iolatency_grp *iolat = pd_to_lat(pd);
- unsigned long long avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
- unsigned long long cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
+ unsigned long long avg_lat;
+ unsigned long long cur_win;
+
+ if (iolat->ssd)
+ return iolatency_ssd_stat(iolat, buf, size);
+ avg_lat = div64_u64(iolat->lat_avg, NSEC_PER_USEC);
+ cur_win = div64_u64(iolat->cur_win_nsec, NSEC_PER_MSEC);
if (iolat->rq_depth.max_depth == UINT_MAX)
return scnprintf(buf, size, " depth=max avg_lat=%llu win=%llu",
avg_lat, cur_win);
@@ -858,8 +942,8 @@ static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp, int node)
iolat = kzalloc_node(sizeof(*iolat), gfp, node);
if (!iolat)
return NULL;
- iolat->stats = __alloc_percpu_gfp(sizeof(struct blk_rq_stat),
- __alignof__(struct blk_rq_stat), gfp);
+ iolat->stats = __alloc_percpu_gfp(sizeof(struct latency_stat),
+ __alignof__(struct latency_stat), gfp);
if (!iolat->stats) {
kfree(iolat);
return NULL;
@@ -876,15 +960,21 @@ static void iolatency_pd_init(struct blkg_policy_data *pd)
u64 now = ktime_to_ns(ktime_get());
int cpu;
+ if (blk_queue_nonrot(blkg->q))
+ iolat->ssd = true;
+ else
+ iolat->ssd = false;
+
for_each_possible_cpu(cpu) {
- struct blk_rq_stat *stat;
+ struct latency_stat *stat;
stat = per_cpu_ptr(iolat->stats, cpu);
- blk_rq_stat_init(stat);
+ latency_stat_init(iolat, stat);
}
+ latency_stat_init(iolat, &iolat->cur_stat);
rq_wait_init(&iolat->rq_wait);
spin_lock_init(&iolat->child_lat.lock);
- iolat->rq_depth.queue_depth = blk_queue_depth(blkg->q);
+ iolat->rq_depth.queue_depth = blkg->q->nr_requests;
iolat->rq_depth.max_depth = UINT_MAX;
iolat->rq_depth.default_depth = iolat->rq_depth.queue_depth;
iolat->blkiolat = blkiolat;
diff --git a/block/blk-lib.c b/block/blk-lib.c
index d1b9dd03da25..bbd44666f2b5 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -29,9 +29,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
{
struct request_queue *q = bdev_get_queue(bdev);
struct bio *bio = *biop;
- unsigned int granularity;
unsigned int op;
- int alignment;
sector_t bs_mask;
if (!q)
@@ -54,38 +52,16 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
if ((sector | nr_sects) & bs_mask)
return -EINVAL;
- /* Zero-sector (unknown) and one-sector granularities are the same. */
- granularity = max(q->limits.discard_granularity >> 9, 1U);
- alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
-
while (nr_sects) {
- unsigned int req_sects;
- sector_t end_sect, tmp;
+ unsigned int req_sects = nr_sects;
+ sector_t end_sect;
- /*
- * Issue in chunks of the user defined max discard setting,
- * ensuring that bi_size doesn't overflow
- */
- req_sects = min_t(sector_t, nr_sects,
- q->limits.max_discard_sectors);
if (!req_sects)
goto fail;
if (req_sects > UINT_MAX >> 9)
req_sects = UINT_MAX >> 9;
- /*
- * If splitting a request, and the next starting sector would be
- * misaligned, stop the discard at the previous aligned sector.
- */
end_sect = sector + req_sects;
- tmp = end_sect;
- if (req_sects < nr_sects &&
- sector_div(tmp, granularity) != alignment) {
- end_sect = end_sect - alignment;
- sector_div(end_sect, granularity);
- end_sect = end_sect * granularity + alignment;
- req_sects = end_sect - sector;
- }
bio = next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector;
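With the granularity/alignment splitting gone, the only reason left to chunk a discard here is that bio->bi_iter.bi_size is a 32-bit byte count, so clamping to UINT_MAX >> 9 sectors keeps req_sects << 9 from overflowing it. A small worked check of that bound (plain C, outside the kernel):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Max sectors a single discard bio may carry after this change. */
	uint64_t max_sects = UINT32_MAX >> 9;	/* 8388607 sectors */
	uint64_t max_bytes = max_sects << 9;	/* just under 4 GiB */

	printf("max %llu sectors = %llu bytes per bio\n",
	       (unsigned long long)max_sects,
	       (unsigned long long)max_bytes);
	return 0;
}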
diff --git a/block/blk-merge.c b/block/blk-merge.c
index aaec38cc37b8..42a46744c11b 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -12,6 +12,69 @@
#include "blk.h"
+/*
+ * Check if two bvecs from two bios can be merged into one segment. If so,
+ * there is no need to check the gap between the two bios, since the last
+ * bvec of the 1st bio and the 1st bvec of the 2nd bio can be handled in
+ * one segment.
+ */
+static inline bool bios_segs_mergeable(struct request_queue *q,
+ struct bio *prev, struct bio_vec *prev_last_bv,
+ struct bio_vec *next_first_bv)
+{
+ if (!biovec_phys_mergeable(q, prev_last_bv, next_first_bv))
+ return false;
+ if (prev->bi_seg_back_size + next_first_bv->bv_len >
+ queue_max_segment_size(q))
+ return false;
+ return true;
+}
+
+static inline bool bio_will_gap(struct request_queue *q,
+ struct request *prev_rq, struct bio *prev, struct bio *next)
+{
+ struct bio_vec pb, nb;
+
+ if (!bio_has_data(prev) || !queue_virt_boundary(q))
+ return false;
+
+ /*
+ * Don't merge if the 1st bio starts at a non-zero offset, otherwise it
+ * is quite difficult to respect the sg gap limit. We work hard to
+ * merge a huge number of small single bios in the mkfs case.
+ */
+ if (prev_rq)
+ bio_get_first_bvec(prev_rq->bio, &pb);
+ else
+ bio_get_first_bvec(prev, &pb);
+ if (pb.bv_offset)
+ return true;
+
+ /*
+ * We don't need to worry about the situation that the merged segment
+ * ends in unaligned virt boundary:
+ *
+ * - if 'pb' ends aligned, the merged segment ends aligned
+ * - if 'pb' ends unaligned, the next bio must include
+ * one single bvec of 'nb', otherwise the 'nb' can't
+ * merge with 'pb'
+ */
+ bio_get_last_bvec(prev, &pb);
+ bio_get_first_bvec(next, &nb);
+ if (bios_segs_mergeable(q, prev, &pb, &nb))
+ return false;
+ return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
+}
+
+static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
+{
+ return bio_will_gap(req->q, req, req->biotail, bio);
+}
+
+static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
+{
+ return bio_will_gap(req->q, NULL, bio, req->bio);
+}
+
static struct bio *blk_bio_discard_split(struct request_queue *q,
struct bio *bio,
struct bio_set *bs,
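bio_will_gap() enforces the device's virt boundary: two bios may only merge if, relative to the queue_virt_boundary(q) mask, the previous segment ends on a boundary and the next one starts on a boundary. A hedged, standalone restatement of that rule (the helper below is illustrative and is not the kernel's __bvec_gap_to_prev()):

#include <stdbool.h>
#include <stdint.h>

/* Conceptual virt-boundary gap test; boundary_mask plays the role
 * of queue_virt_boundary(q). Illustrative only. */
static bool example_virt_gap(uint32_t prev_off, uint32_t prev_len,
			     uint32_t next_off, uint32_t boundary_mask)
{
	if (!boundary_mask)
		return false;		/* device imposes no boundary */
	if ((prev_off + prev_len) & boundary_mask)
		return true;		/* prev does not end on a boundary */
	return (next_off & boundary_mask) != 0;	/* next must start on one */
}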
@@ -134,9 +197,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
if (bvprvp && blk_queue_cluster(q)) {
if (seg_size + bv.bv_len > queue_max_segment_size(q))
goto new_segment;
- if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
- goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
+ if (!biovec_phys_mergeable(q, bvprvp, &bv))
goto new_segment;
seg_size += bv.bv_len;
@@ -267,9 +328,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
if (seg_size + bv.bv_len
> queue_max_segment_size(q))
goto new_segment;
- if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
- goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
+ if (!biovec_phys_mergeable(q, &bvprv, &bv))
goto new_segment;
seg_size += bv.bv_len;
@@ -349,17 +408,7 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
bio_get_last_bvec(bio, &end_bv);
bio_get_first_bvec(nxt, &nxt_bv);
- if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
- return 0;
-
- /*
- * bio and nxt are contiguous in memory; check if the queue allows
- * these two to be merged into one
- */
- if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
- return 1;
-
- return 0;
+ return biovec_phys_mergeable(q, &end_bv, &nxt_bv);
}
static inline void
@@ -373,10 +422,7 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
if (*sg && *cluster) {
if ((*sg)->length + nbytes > queue_max_segment_size(q))
goto new_segment;
-
- if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
- goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
+ if (!biovec_phys_mergeable(q, bvprv, bvec))
goto new_segment;
(*sg)->length += nbytes;
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index cb1e6cf7ac48..41b86f50d126 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -102,6 +102,14 @@ static int blk_flags_show(struct seq_file *m, const unsigned long flags,
return 0;
}
+static int queue_pm_only_show(void *data, struct seq_file *m)
+{
+ struct request_queue *q = data;
+
+ seq_printf(m, "%d\n", atomic_read(&q->pm_only));
+ return 0;
+}
+
#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(QUEUED),
@@ -132,7 +140,6 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(REGISTERED),
QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
QUEUE_FLAG_NAME(QUIESCED),
- QUEUE_FLAG_NAME(PREEMPT_ONLY),
};
#undef QUEUE_FLAG_NAME
@@ -209,6 +216,7 @@ static ssize_t queue_write_hint_store(void *data, const char __user *buf,
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
{ "poll_stat", 0400, queue_poll_stat_show },
{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
+ { "pm_only", 0600, queue_pm_only_show, NULL },
{ "state", 0600, queue_state_show, queue_state_write },
{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
@@ -423,8 +431,7 @@ static void hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
const struct show_busy_params *params = data;
- if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx &&
- blk_mq_rq_state(rq) != MQ_RQ_IDLE)
+ if (blk_mq_map_queue(rq->q, rq->mq_ctx->cpu) == params->hctx)
__blk_mq_debugfs_rq_show(params->m,
list_entry_rq(&rq->queuelist));
}
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 4e028ee42430..8a9544203173 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -49,12 +49,12 @@ blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
return true;
}
-static inline void blk_mq_sched_completed_request(struct request *rq)
+static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
{
struct elevator_queue *e = rq->q->elevator;
if (e && e->type->ops.mq.completed_request)
- e->type->ops.mq.completed_request(rq);
+ e->type->ops.mq.completed_request(rq, now);
}
static inline void blk_mq_sched_started_request(struct request *rq)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 94e1ed667b6e..cfda95b85d34 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -232,13 +232,26 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
/*
* We can hit rq == NULL here, because the tagging functions
- * test and set the bit before assining ->rqs[].
+ * test and set the bit before assigning ->rqs[].
*/
if (rq && rq->q == hctx->queue)
iter_data->fn(hctx, rq, iter_data->data, reserved);
return true;
}
+/**
+ * bt_for_each - iterate over the requests associated with a hardware queue
+ * @hctx: Hardware queue to examine.
+ * @bt: sbitmap to examine. This is either the breserved_tags member
+ * or the bitmap_tags member of struct blk_mq_tags.
+ * @fn: Pointer to the function that will be called for each request
+ * associated with @hctx that has been assigned a driver tag.
+ * @fn will be called as follows: @fn(@hctx, rq, @data, @reserved)
+ * where rq is a pointer to a request.
+ * @data: Will be passed as third argument to @fn.
+ * @reserved: Indicates whether @bt is the breserved_tags member or the
+ * bitmap_tags member of struct blk_mq_tags.
+ */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
busy_iter_fn *fn, void *data, bool reserved)
{
@@ -280,6 +293,18 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
return true;
}
+/**
+ * bt_tags_for_each - iterate over the requests in a tag map
+ * @tags: Tag map to iterate over.
+ * @bt: sbitmap to examine. This is either the breserved_tags member
+ * or the bitmap_tags member of struct blk_mq_tags.
+ * @fn: Pointer to the function that will be called for each started
+ * request. @fn will be called as follows: @fn(rq, @data,
+ * @reserved) where rq is a pointer to a request.
+ * @data: Will be passed as second argument to @fn.
+ * @reserved: Indicates whether @bt is the breserved_tags member or the
+ * bitmap_tags member of struct blk_mq_tags.
+ */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
busy_tag_iter_fn *fn, void *data, bool reserved)
{
@@ -294,6 +319,15 @@ static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}
+/**
+ * blk_mq_all_tag_busy_iter - iterate over all started requests in a tag map
+ * @tags: Tag map to iterate over.
+ * @fn: Pointer to the function that will be called for each started
+ * request. @fn will be called as follows: @fn(rq, @priv,
+ * reserved) where rq is a pointer to a request. 'reserved'
+ * indicates whether or not @rq is a reserved request.
+ * @priv: Will be passed as second argument to @fn.
+ */
static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
busy_tag_iter_fn *fn, void *priv)
{
@@ -302,6 +336,15 @@ static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}
+/**
+ * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
+ * @tagset: Tag set to iterate over.
+ * @fn: Pointer to the function that will be called for each started
+ * request. @fn will be called as follows: @fn(rq, @priv,
+ * reserved) where rq is a pointer to a request. 'reserved'
+ * indicates whether or not @rq is a reserved request.
+ * @priv: Will be passed as second argument to @fn.
+ */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv)
{
@@ -314,6 +357,20 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
+/**
+ * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
+ * @q: Request queue to examine.
+ * @fn: Pointer to the function that will be called for each request
+ * on @q. @fn will be called as follows: @fn(hctx, rq, @priv,
+ * reserved) where rq is a pointer to a request and hctx points
+ * to the hardware queue associated with the request. 'reserved'
+ * indicates whether or not @rq is a reserved request.
+ * @priv: Will be passed as third argument to @fn.
+ *
+ * Note: if @q->tag_set is shared with other request queues then @fn will be
+ * called for all requests on all queues that share that tag set and not only
+ * for requests associated with @q.
+ */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
void *priv)
{
@@ -321,23 +378,20 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
int i;
/*
- * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
- * queue_hw_ctx after freeze the queue. So we could use q_usage_counter
- * to avoid race with it. __blk_mq_update_nr_hw_queues will users
- * synchronize_rcu to ensure all of the users go out of the critical
- * section below and see zeroed q_usage_counter.
+ * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
+ * while the queue is frozen. So we can use q_usage_counter to avoid
+ * racing with it. __blk_mq_update_nr_hw_queues() uses
+ * synchronize_rcu() to ensure that this function has left the
+ * critical section below.
*/
- rcu_read_lock();
- if (percpu_ref_is_zero(&q->q_usage_counter)) {
- rcu_read_unlock();
+ if (!percpu_ref_tryget(&q->q_usage_counter))
return;
- }
queue_for_each_hw_ctx(q, hctx, i) {
struct blk_mq_tags *tags = hctx->tags;
/*
- * If not software queues are currently mapped to this
+ * If no software queues are currently mapped to this
* hardware queue, there's nothing to check
*/
if (!blk_mq_hw_queue_mapped(hctx))
@@ -347,7 +401,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
}
- rcu_read_unlock();
+ blk_queue_exit(q);
}
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
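Replacing the rcu_read_lock()/percpu_ref_is_zero() dance with percpu_ref_tryget() both checks for a frozen queue and pins it in one step, so the queue cannot be re-frozen and torn down mid-iteration; the reference is dropped via blk_queue_exit(). The guard pattern in isolation (a hedged sketch):

static void example_iterate_queue(struct request_queue *q)
{
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;		/* queue frozen or being torn down */

	/* ... walk hardware queues / tags safely here ... */

	blk_queue_exit(q);	/* drops the tryget reference */
}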
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 85a1c1a59c72..dcf10e39995a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -33,6 +33,7 @@
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
+#include "blk-pm.h"
#include "blk-stat.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
@@ -198,7 +199,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
WARN_ON_ONCE(freeze_depth < 0);
if (!freeze_depth) {
- percpu_ref_reinit(&q->q_usage_counter);
+ percpu_ref_resurrect(&q->q_usage_counter);
wake_up_all(&q->mq_freeze_wq);
}
}
@@ -475,6 +476,7 @@ static void __blk_mq_free_request(struct request *rq)
struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
const int sched_tag = rq->internal_tag;
+ blk_pm_mark_last_busy(rq);
if (rq->tag != -1)
blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
if (sched_tag != -1)
@@ -526,6 +528,9 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
blk_stat_add(rq, now);
}
+ if (rq->internal_tag != -1)
+ blk_mq_sched_completed_request(rq, now);
+
blk_account_io_done(rq, now);
if (rq->end_io) {
@@ -562,8 +567,20 @@ static void __blk_mq_complete_request(struct request *rq)
if (!blk_mq_mark_complete(rq))
return;
- if (rq->internal_tag != -1)
- blk_mq_sched_completed_request(rq);
+
+ /*
+ * For most single queue controllers there is only one irq vector
+ * for handling IO completion, and its affinity is set to all
+ * possible CPUs. On most architectures this means the irq is
+ * handled on one specific CPU.
+ *
+ * So complete the IO request in softirq context in the single
+ * queue case, to avoid degrading IO performance through irqs-off
+ * latency.
+ */
+ if (rq->q->nr_hw_queues == 1) {
+ __blk_complete_request(rq);
+ return;
+ }
if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
rq->q->softirq_done_fn(rq);
@@ -1628,7 +1645,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
BUG_ON(!rq->q);
if (rq->mq_ctx != this_ctx) {
if (this_ctx) {
- trace_block_unplug(this_q, depth, from_schedule);
+ trace_block_unplug(this_q, depth, !from_schedule);
blk_mq_sched_insert_requests(this_q, this_ctx,
&ctx_list,
from_schedule);
@@ -1648,7 +1665,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
* on 'ctx_list'. Do those.
*/
if (this_ctx) {
- trace_block_unplug(this_q, depth, from_schedule);
+ trace_block_unplug(this_q, depth, !from_schedule);
blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
from_schedule);
}
@@ -2137,8 +2154,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
- blk_mq_debugfs_unregister_hctx(hctx);
-
if (blk_mq_hw_queue_mapped(hctx))
blk_mq_tag_idle(hctx);
@@ -2165,6 +2180,7 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
queue_for_each_hw_ctx(q, hctx, i) {
if (i == nr_queue)
break;
+ blk_mq_debugfs_unregister_hctx(hctx);
blk_mq_exit_hctx(q, set, hctx, i);
}
}
@@ -2194,12 +2210,12 @@ static int blk_mq_init_hctx(struct request_queue *q,
* runtime
*/
hctx->ctxs = kmalloc_array_node(nr_cpu_ids, sizeof(void *),
- GFP_KERNEL, node);
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node);
if (!hctx->ctxs)
goto unregister_cpu_notifier;
- if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8), GFP_KERNEL,
- node))
+ if (sbitmap_init_node(&hctx->ctx_map, nr_cpu_ids, ilog2(8),
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY, node))
goto free_ctxs;
hctx->nr_ctx = 0;
@@ -2212,7 +2228,8 @@ static int blk_mq_init_hctx(struct request_queue *q,
set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
goto free_bitmap;
- hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size);
+ hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
if (!hctx->fq)
goto exit_hctx;
@@ -2222,8 +2239,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
if (hctx->flags & BLK_MQ_F_BLOCKING)
init_srcu_struct(hctx->srcu);
- blk_mq_debugfs_register_hctx(q, hctx);
-
return 0;
free_fq:
@@ -2492,6 +2507,39 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
}
EXPORT_SYMBOL(blk_mq_init_queue);
+/*
+ * Helper for setting up a single-queue request queue with mq ops, a
+ * given queue depth, and the passed-in tag set flags.
+ */
+struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops,
+ unsigned int queue_depth,
+ unsigned int set_flags)
+{
+ struct request_queue *q;
+ int ret;
+
+ memset(set, 0, sizeof(*set));
+ set->ops = ops;
+ set->nr_hw_queues = 1;
+ set->queue_depth = queue_depth;
+ set->numa_node = NUMA_NO_NODE;
+ set->flags = set_flags;
+
+ ret = blk_mq_alloc_tag_set(set);
+ if (ret)
+ return ERR_PTR(ret);
+
+ q = blk_mq_init_queue(set);
+ if (IS_ERR(q)) {
+ blk_mq_free_tag_set(set);
+ return q;
+ }
+
+ return q;
+}
+EXPORT_SYMBOL(blk_mq_init_sq_queue);
+
static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
{
int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
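blk_mq_init_sq_queue() folds the usual single-queue boilerplate (zero the tag set, fill it in, allocate it, create the queue, unwind the tag set on failure) into one call. A hedged usage sketch for a hypothetical driver (example_mq_ops and struct example_dev are stand-ins):

static int example_probe(struct example_dev *dev)
{
	struct request_queue *q;

	q = blk_mq_init_sq_queue(&dev->tag_set, &example_mq_ops,
				 64 /* queue depth */,
				 BLK_MQ_F_SHOULD_MERGE);
	if (IS_ERR(q))
		return PTR_ERR(q);	/* tag set already freed on failure */

	dev->queue = q;
	q->queuedata = dev;
	return 0;
}

Teardown is unchanged: blk_cleanup_queue() followed by blk_mq_free_tag_set().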
@@ -2506,48 +2554,90 @@ static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
return hw_ctx_size;
}
+static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
+ struct blk_mq_tag_set *set, struct request_queue *q,
+ int hctx_idx, int node)
+{
+ struct blk_mq_hw_ctx *hctx;
+
+ hctx = kzalloc_node(blk_mq_hw_ctx_size(set),
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
+ node);
+ if (!hctx)
+ return NULL;
+
+ if (!zalloc_cpumask_var_node(&hctx->cpumask,
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
+ node)) {
+ kfree(hctx);
+ return NULL;
+ }
+
+ atomic_set(&hctx->nr_active, 0);
+ hctx->numa_node = node;
+ hctx->queue_num = hctx_idx;
+
+ if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) {
+ free_cpumask_var(hctx->cpumask);
+ kfree(hctx);
+ return NULL;
+ }
+ blk_mq_hctx_kobj_init(hctx);
+
+ return hctx;
+}
+
static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
struct request_queue *q)
{
- int i, j;
+ int i, j, end;
struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
- blk_mq_sysfs_unregister(q);
-
/* protect against switching io scheduler */
mutex_lock(&q->sysfs_lock);
for (i = 0; i < set->nr_hw_queues; i++) {
int node;
-
- if (hctxs[i])
- continue;
+ struct blk_mq_hw_ctx *hctx;
node = blk_mq_hw_queue_to_node(q->mq_map, i);
- hctxs[i] = kzalloc_node(blk_mq_hw_ctx_size(set),
- GFP_KERNEL, node);
- if (!hctxs[i])
- break;
-
- if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
- node)) {
- kfree(hctxs[i]);
- hctxs[i] = NULL;
- break;
- }
-
- atomic_set(&hctxs[i]->nr_active, 0);
- hctxs[i]->numa_node = node;
- hctxs[i]->queue_num = i;
+ /*
+ * If the hw queue has been mapped to another numa node,
+ * we need to realloc the hctx. If allocation fails, fall back
+ * to using the previous one.
+ */
+ if (hctxs[i] && (hctxs[i]->numa_node == node))
+ continue;
- if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
- free_cpumask_var(hctxs[i]->cpumask);
- kfree(hctxs[i]);
- hctxs[i] = NULL;
- break;
+ hctx = blk_mq_alloc_and_init_hctx(set, q, i, node);
+ if (hctx) {
+ if (hctxs[i]) {
+ blk_mq_exit_hctx(q, set, hctxs[i], i);
+ kobject_put(&hctxs[i]->kobj);
+ }
+ hctxs[i] = hctx;
+ } else {
+ if (hctxs[i])
+ pr_warn("Allocating new hctx on node %d failed, falling back to previous one on node %d\n",
+ node, hctxs[i]->numa_node);
+ else
+ break;
}
- blk_mq_hctx_kobj_init(hctxs[i]);
}
- for (j = i; j < q->nr_hw_queues; j++) {
+ /*
+ * If increasing nr_hw_queues failed, free the newly allocated
+ * hctxs and keep the previous q->nr_hw_queues.
+ */
+ if (i != set->nr_hw_queues) {
+ j = q->nr_hw_queues;
+ end = i;
+ } else {
+ j = i;
+ end = q->nr_hw_queues;
+ q->nr_hw_queues = set->nr_hw_queues;
+ }
+
+ for (; j < end; j++) {
struct blk_mq_hw_ctx *hctx = hctxs[j];
if (hctx) {
@@ -2559,9 +2649,7 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
}
}
- q->nr_hw_queues = i;
mutex_unlock(&q->sysfs_lock);
- blk_mq_sysfs_register(q);
}
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
@@ -2659,25 +2747,6 @@ void blk_mq_free_queue(struct request_queue *q)
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
}
-/* Basically redo blk_mq_init_queue with queue frozen */
-static void blk_mq_queue_reinit(struct request_queue *q)
-{
- WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
-
- blk_mq_debugfs_unregister_hctxs(q);
- blk_mq_sysfs_unregister(q);
-
- /*
- * redo blk_mq_init_cpu_queues and blk_mq_init_hw_queues. FIXME: maybe
- * we should change hctx numa_node according to the new topology (this
- * involves freeing and re-allocating memory, worth doing?)
- */
- blk_mq_map_swqueue(q);
-
- blk_mq_sysfs_register(q);
- blk_mq_debugfs_register_hctxs(q);
-}
-
static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
{
int i;
@@ -2964,6 +3033,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
{
struct request_queue *q;
LIST_HEAD(head);
+ int prev_nr_hw_queues;
lockdep_assert_held(&set->tag_list_lock);
@@ -2987,11 +3057,30 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
if (!blk_mq_elv_switch_none(&head, q))
goto switch_back;
+ list_for_each_entry(q, &set->tag_list, tag_set_list) {
+ blk_mq_debugfs_unregister_hctxs(q);
+ blk_mq_sysfs_unregister(q);
+ }
+
+ prev_nr_hw_queues = set->nr_hw_queues;
set->nr_hw_queues = nr_hw_queues;
blk_mq_update_queue_map(set);
+fallback:
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_realloc_hw_ctxs(set, q);
- blk_mq_queue_reinit(q);
+ if (q->nr_hw_queues != set->nr_hw_queues) {
+ pr_warn("Increasing nr_hw_queues to %d failed, falling back to %d\n",
+ nr_hw_queues, prev_nr_hw_queues);
+ set->nr_hw_queues = prev_nr_hw_queues;
+ blk_mq_map_queues(set);
+ goto fallback;
+ }
+ blk_mq_map_swqueue(q);
+ }
+
+ list_for_each_entry(q, &set->tag_list, tag_set_list) {
+ blk_mq_sysfs_register(q);
+ blk_mq_debugfs_register_hctxs(q);
}
switch_back:
diff --git a/block/blk-pm.c b/block/blk-pm.c
new file mode 100644
index 000000000000..f8fdae01bea2
--- /dev/null
+++ b/block/blk-pm.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/blk-mq.h>
+#include <linux/blk-pm.h>
+#include <linux/blkdev.h>
+#include <linux/pm_runtime.h>
+#include "blk-mq.h"
+#include "blk-mq-tag.h"
+
+/**
+ * blk_pm_runtime_init - Block layer runtime PM initialization routine
+ * @q: the queue of the device
+ * @dev: the device the queue belongs to
+ *
+ * Description:
+ * Initialize runtime-PM-related fields for @q and start auto suspend for
+ * @dev. Drivers that want to take advantage of request-based runtime PM
+ * should call this function after @dev has been initialized and its
+ * request queue @q has been allocated, but while runtime PM for it cannot
+ * happen yet (either because it is disabled/forbidden or because its
+ * usage_count > 0). In most cases, the driver should call this function
+ * before any I/O has taken place.
+ *
+ * This function sets up autosuspend for the device; the autosuspend delay
+ * is set to -1 to make runtime suspend impossible until an updated value
+ * is set either by the user or by the driver. Drivers do not need to
+ * touch other autosuspend settings.
+ *
+ * The block layer runtime PM is request based, so it only works for
+ * drivers that use requests as their IO unit instead of those that use
+ * bios directly.
+ */
+void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
+{
+ q->dev = dev;
+ q->rpm_status = RPM_ACTIVE;
+ pm_runtime_set_autosuspend_delay(q->dev, -1);
+ pm_runtime_use_autosuspend(q->dev);
+}
+EXPORT_SYMBOL(blk_pm_runtime_init);
+
+/**
+ * blk_pre_runtime_suspend - Pre runtime suspend check
+ * @q: the queue of the device
+ *
+ * Description:
+ * This function will check if runtime suspend is allowed for the device
+ * by examining if there are any requests pending in the queue. If there
+ * are requests pending, the device can not be runtime suspended; otherwise,
+ * the queue's status will be updated to SUSPENDING and the driver can
+ * proceed to suspend the device.
+ *
+ * For the not allowed case, we mark last busy for the device so that
+ * runtime PM core will try to autosuspend it some time later.
+ *
+ * This function should be called near the start of the device's
+ * runtime_suspend callback.
+ *
+ * Return:
+ * 0 - OK to runtime suspend the device
+ * -EBUSY - Device should not be runtime suspended
+ */
+int blk_pre_runtime_suspend(struct request_queue *q)
+{
+ int ret = 0;
+
+ if (!q->dev)
+ return ret;
+
+ WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);
+
+ /*
+ * Increase the pm_only counter before checking whether any
+ * non-PM blk_queue_enter() calls are in progress, so that no new
+ * non-PM blk_queue_enter() calls can succeed before the pm_only
+ * counter is decreased again.
+ */
+ blk_set_pm_only(q);
+ ret = -EBUSY;
+ /* Switch q_usage_counter from per-cpu to atomic mode. */
+ blk_freeze_queue_start(q);
+ /*
+ * Wait until atomic mode has been reached. Since that
+ * involves calling call_rcu(), it is guaranteed that later
+ * blk_queue_enter() calls see the pm-only state. See also
+ * http://lwn.net/Articles/573497/.
+ */
+ percpu_ref_switch_to_atomic_sync(&q->q_usage_counter);
+ if (percpu_ref_is_zero(&q->q_usage_counter))
+ ret = 0;
+ /* Switch q_usage_counter back to per-cpu mode. */
+ blk_mq_unfreeze_queue(q);
+
+ spin_lock_irq(q->queue_lock);
+ if (ret < 0)
+ pm_runtime_mark_last_busy(q->dev);
+ else
+ q->rpm_status = RPM_SUSPENDING;
+ spin_unlock_irq(q->queue_lock);
+
+ if (ret)
+ blk_clear_pm_only(q);
+
+ return ret;
+}
+EXPORT_SYMBOL(blk_pre_runtime_suspend);
+
+/**
+ * blk_post_runtime_suspend - Post runtime suspend processing
+ * @q: the queue of the device
+ * @err: return value of the device's runtime_suspend function
+ *
+ * Description:
+ * Update the queue's runtime status according to the return value of the
+ * device's runtime suspend function and mark last busy for the device so
+ * that PM core will try to auto suspend the device at a later time.
+ *
+ * This function should be called near the end of the device's
+ * runtime_suspend callback.
+ */
+void blk_post_runtime_suspend(struct request_queue *q, int err)
+{
+ if (!q->dev)
+ return;
+
+ spin_lock_irq(q->queue_lock);
+ if (!err) {
+ q->rpm_status = RPM_SUSPENDED;
+ } else {
+ q->rpm_status = RPM_ACTIVE;
+ pm_runtime_mark_last_busy(q->dev);
+ }
+ spin_unlock_irq(q->queue_lock);
+
+ if (err)
+ blk_clear_pm_only(q);
+}
+EXPORT_SYMBOL(blk_post_runtime_suspend);
+
+/**
+ * blk_pre_runtime_resume - Pre runtime resume processing
+ * @q: the queue of the device
+ *
+ * Description:
+ * Update the queue's runtime status to RESUMING in preparation for the
+ * runtime resume of the device.
+ *
+ * This function should be called near the start of the device's
+ * runtime_resume callback.
+ */
+void blk_pre_runtime_resume(struct request_queue *q)
+{
+ if (!q->dev)
+ return;
+
+ spin_lock_irq(q->queue_lock);
+ q->rpm_status = RPM_RESUMING;
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_pre_runtime_resume);
+
+/**
+ * blk_post_runtime_resume - Post runtime resume processing
+ * @q: the queue of the device
+ * @err: return value of the device's runtime_resume function
+ *
+ * Description:
+ * Update the queue's runtime status according to the return value of the
+ * device's runtime_resume function. If it is successfully resumed, process
+ * the requests that were queued while the device was resuming, and then
+ * mark last busy and initiate autosuspend for it.
+ *
+ * This function should be called near the end of the device's
+ * runtime_resume callback.
+ */
+void blk_post_runtime_resume(struct request_queue *q, int err)
+{
+ if (!q->dev)
+ return;
+
+ spin_lock_irq(q->queue_lock);
+ if (!err) {
+ q->rpm_status = RPM_ACTIVE;
+ pm_runtime_mark_last_busy(q->dev);
+ pm_request_autosuspend(q->dev);
+ } else {
+ q->rpm_status = RPM_SUSPENDED;
+ }
+ spin_unlock_irq(q->queue_lock);
+
+ if (!err)
+ blk_clear_pm_only(q);
+}
+EXPORT_SYMBOL(blk_post_runtime_resume);
+
+/**
+ * blk_set_runtime_active - Force runtime status of the queue to be active
+ * @q: the queue of the device
+ *
+ * If the device is left runtime suspended during system suspend the resume
+ * hook typically resumes the device and corrects runtime status
+ * accordingly. However, that does not affect the queue runtime PM status
+ * which is still "suspended". This prevents processing requests from the
+ * queue.
+ *
+ * This function can be used in the driver's resume hook to correct queue
+ * runtime PM status and re-enable peeking at requests from the queue. It
+ * should be called before the first request is added to the queue.
+ */
+void blk_set_runtime_active(struct request_queue *q)
+{
+ spin_lock_irq(q->queue_lock);
+ q->rpm_status = RPM_ACTIVE;
+ pm_runtime_mark_last_busy(q->dev);
+ pm_request_autosuspend(q->dev);
+ spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(blk_set_runtime_active);
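The ordering in blk_pre_runtime_suspend() is the crux of the new file: pm_only is raised first, then q_usage_counter is switched to atomic mode through a transient freeze, so the zero check cannot race with concurrent non-PM blk_queue_enter() calls. From the driver side the helpers pair up as in this hedged sketch (example_hw_suspend() and struct example_dev are stand-ins):

static int example_runtime_suspend(struct device *dev)
{
	struct example_dev *edev = dev_get_drvdata(dev);
	int err;

	err = blk_pre_runtime_suspend(edev->queue);
	if (err)
		return err;	/* -EBUSY: requests still in flight */

	err = example_hw_suspend(edev);		/* hypothetical */
	blk_post_runtime_suspend(edev->queue, err);
	return err;
}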
diff --git a/block/blk-pm.h b/block/blk-pm.h
new file mode 100644
index 000000000000..a8564ea72a41
--- /dev/null
+++ b/block/blk-pm.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _BLOCK_BLK_PM_H_
+#define _BLOCK_BLK_PM_H_
+
+#include <linux/pm_runtime.h>
+
+#ifdef CONFIG_PM
+static inline void blk_pm_request_resume(struct request_queue *q)
+{
+ if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
+ q->rpm_status == RPM_SUSPENDING))
+ pm_request_resume(q->dev);
+}
+
+static inline void blk_pm_mark_last_busy(struct request *rq)
+{
+ if (rq->q->dev && !(rq->rq_flags & RQF_PM))
+ pm_runtime_mark_last_busy(rq->q->dev);
+}
+
+static inline void blk_pm_requeue_request(struct request *rq)
+{
+ lockdep_assert_held(rq->q->queue_lock);
+
+ if (rq->q->dev && !(rq->rq_flags & RQF_PM))
+ rq->q->nr_pending--;
+}
+
+static inline void blk_pm_add_request(struct request_queue *q,
+ struct request *rq)
+{
+ lockdep_assert_held(q->queue_lock);
+
+ if (q->dev && !(rq->rq_flags & RQF_PM))
+ q->nr_pending++;
+}
+
+static inline void blk_pm_put_request(struct request *rq)
+{
+ lockdep_assert_held(rq->q->queue_lock);
+
+ if (rq->q->dev && !(rq->rq_flags & RQF_PM))
+ --rq->q->nr_pending;
+}
+#else
+static inline void blk_pm_request_resume(struct request_queue *q)
+{
+}
+
+static inline void blk_pm_mark_last_busy(struct request *rq)
+{
+}
+
+static inline void blk_pm_requeue_request(struct request *rq)
+{
+}
+
+static inline void blk_pm_add_request(struct request_queue *q,
+ struct request *rq)
+{
+}
+
+static inline void blk_pm_put_request(struct request *rq)
+{
+}
+#endif
+
+#endif /* _BLOCK_BLK_PM_H_ */
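For orientation, a hedged sketch of the kind of call site these stubs are meant for; the function name and surrounding logic are illustrative only, the real callers live in the request insertion and queue-enter paths.

    /* Illustrative caller; not part of this header. */
    static void example_add_request(struct request_queue *q, struct request *rq)
    {
    	lockdep_assert_held(q->queue_lock);

    	blk_pm_add_request(q, rq);	/* count non-PM requests as pending */
    	blk_pm_request_resume(q);	/* kick a suspended/suspending device */
    	/* ... hand the request to the elevator ... */
    }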
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 15c1f5e12eb8..e47a2f751884 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -97,8 +97,8 @@ static int blk_softirq_cpu_dead(unsigned int cpu)
void __blk_complete_request(struct request *req)
{
- int ccpu, cpu;
struct request_queue *q = req->q;
+ int cpu, ccpu = q->mq_ops ? req->mq_ctx->cpu : req->cpu;
unsigned long flags;
bool shared = false;
@@ -110,8 +110,7 @@ void __blk_complete_request(struct request *req)
/*
* Select completion CPU
*/
- if (req->cpu != -1) {
- ccpu = req->cpu;
+ if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && ccpu != -1) {
if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
shared = cpus_share_cache(cpu, ccpu);
} else
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 7587b1c3caaf..90561af85a62 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -190,6 +190,7 @@ void blk_stat_enable_accounting(struct request_queue *q)
blk_queue_flag_set(QUEUE_FLAG_STATS, q);
spin_unlock(&q->stats->lock);
}
+EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);
struct blk_queue_stats *blk_alloc_queue_stats(void)
{
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 01d0620a4e4a..4bda70e8db48 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -84,8 +84,7 @@ struct throtl_service_queue {
* RB tree of active children throtl_grp's, which are sorted by
* their ->disptime.
*/
- struct rb_root pending_tree; /* RB tree of active tgs */
- struct rb_node *first_pending; /* first node in the tree */
+ struct rb_root_cached pending_tree; /* RB tree of active tgs */
unsigned int nr_pending; /* # queued in the tree */
unsigned long first_pending_disptime; /* disptime of the first tg */
struct timer_list pending_timer; /* fires on first_pending_disptime */
@@ -475,7 +474,7 @@ static void throtl_service_queue_init(struct throtl_service_queue *sq)
{
INIT_LIST_HEAD(&sq->queued[0]);
INIT_LIST_HEAD(&sq->queued[1]);
- sq->pending_tree = RB_ROOT;
+ sq->pending_tree = RB_ROOT_CACHED;
timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
}
@@ -616,31 +615,23 @@ static void throtl_pd_free(struct blkg_policy_data *pd)
static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
+ struct rb_node *n;
/* Service tree is empty */
if (!parent_sq->nr_pending)
return NULL;
- if (!parent_sq->first_pending)
- parent_sq->first_pending = rb_first(&parent_sq->pending_tree);
-
- if (parent_sq->first_pending)
- return rb_entry_tg(parent_sq->first_pending);
-
- return NULL;
-}
-
-static void rb_erase_init(struct rb_node *n, struct rb_root *root)
-{
- rb_erase(n, root);
- RB_CLEAR_NODE(n);
+ n = rb_first_cached(&parent_sq->pending_tree);
+ WARN_ON_ONCE(!n);
+ if (!n)
+ return NULL;
+ return rb_entry_tg(n);
}
static void throtl_rb_erase(struct rb_node *n,
struct throtl_service_queue *parent_sq)
{
- if (parent_sq->first_pending == n)
- parent_sq->first_pending = NULL;
- rb_erase_init(n, &parent_sq->pending_tree);
+ rb_erase_cached(n, &parent_sq->pending_tree);
+ RB_CLEAR_NODE(n);
--parent_sq->nr_pending;
}
@@ -658,11 +649,11 @@ static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
static void tg_service_queue_add(struct throtl_grp *tg)
{
struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
- struct rb_node **node = &parent_sq->pending_tree.rb_node;
+ struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
struct rb_node *parent = NULL;
struct throtl_grp *__tg;
unsigned long key = tg->disptime;
- int left = 1;
+ bool leftmost = true;
while (*node != NULL) {
parent = *node;
@@ -672,15 +663,13 @@ static void tg_service_queue_add(struct throtl_grp *tg)
node = &parent->rb_left;
else {
node = &parent->rb_right;
- left = 0;
+ leftmost = false;
}
}
- if (left)
- parent_sq->first_pending = &tg->rb_node;
-
rb_link_node(&tg->rb_node, parent, node);
- rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
+ rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
+ leftmost);
}
static void __throtl_enqueue_tg(struct throtl_grp *tg)
@@ -2126,21 +2115,11 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
}
#endif
-static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
-{
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
- /* fallback to root_blkg if we fail to get a blkg ref */
- if (bio->bi_css && (bio_associate_blkg(bio, tg_to_blkg(tg)) == -ENODEV))
- bio_associate_blkg(bio, bio->bi_disk->queue->root_blkg);
- bio_issue_init(&bio->bi_issue, bio_sectors(bio));
-#endif
-}
-
bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
struct bio *bio)
{
struct throtl_qnode *qn = NULL;
- struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
+ struct throtl_grp *tg = blkg_to_tg(blkg);
struct throtl_service_queue *sq;
bool rw = bio_data_dir(bio);
bool throttled = false;
@@ -2159,7 +2138,6 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
if (unlikely(blk_queue_bypass(q)))
goto out_unlock;
- blk_throtl_assoc_bio(tg, bio);
blk_throtl_update_idletime(tg);
sq = &tg->service_queue;
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 8e20a0677dcf..8ac93fcbaa2e 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -310,6 +310,7 @@ static void scale_up(struct rq_wb *rwb)
rq_depth_scale_up(&rwb->rq_depth);
calc_wb_limits(rwb);
rwb->unknown_cnt = 0;
+ rwb_wake_all(rwb);
rwb_trace_step(rwb, "scale up");
}
@@ -318,7 +319,6 @@ static void scale_down(struct rq_wb *rwb, bool hard_throttle)
rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
calc_wb_limits(rwb);
rwb->unknown_cnt = 0;
- rwb_wake_all(rwb);
rwb_trace_step(rwb, "scale down");
}
diff --git a/block/blk.h b/block/blk.h
index 9db4e389582c..3d2aecba96a4 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -4,6 +4,7 @@
#include <linux/idr.h>
#include <linux/blk-mq.h>
+#include <xen/xen.h>
#include "blk-mq.h"
/* Amount of time in which a process may batch requests */
@@ -124,7 +125,7 @@ static inline void __blk_get_queue(struct request_queue *q)
}
struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
- int node, int cmd_size);
+ int node, int cmd_size, gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);
int blk_init_rl(struct request_list *rl, struct request_queue *q,
@@ -149,6 +150,41 @@ static inline void blk_queue_enter_live(struct request_queue *q)
percpu_ref_get(&q->q_usage_counter);
}
+static inline bool biovec_phys_mergeable(struct request_queue *q,
+ struct bio_vec *vec1, struct bio_vec *vec2)
+{
+ unsigned long mask = queue_segment_boundary(q);
+ phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
+ phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;
+
+ if (addr1 + vec1->bv_len != addr2)
+ return false;
+ if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2))
+ return false;
+ if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
+ return false;
+ return true;
+}
+
+static inline bool __bvec_gap_to_prev(struct request_queue *q,
+ struct bio_vec *bprv, unsigned int offset)
+{
+ return offset ||
+ ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+}
+
+/*
+ * Check if adding a bio_vec after bprv with offset would create a gap in
+ * the SG list. Most drivers don't care about this, but some do.
+ */
+static inline bool bvec_gap_to_prev(struct request_queue *q,
+ struct bio_vec *bprv, unsigned int offset)
+{
+ if (!queue_virt_boundary(q))
+ return false;
+ return __bvec_gap_to_prev(q, bprv, offset);
+}
+
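The checks above are terse, so here is a standalone userspace sketch of the segment-boundary arithmetic in biovec_phys_mergeable() (ignoring the Xen special case), with made-up addresses and a 64K segment boundary (mask 0xffff):

    #include <stdbool.h>
    #include <stdint.h>

    /* Mirror of the segment-boundary check in biovec_phys_mergeable(). */
    static bool phys_mergeable(uint64_t addr1, uint32_t len1,
    			   uint64_t addr2, uint32_t len2, uint64_t mask)
    {
    	if (addr1 + len1 != addr2)	/* must be physically contiguous */
    		return false;
    	/* the merged segment must not straddle the boundary */
    	return (addr1 | mask) == ((addr2 + len2 - 1) | mask);
    }

    /* phys_mergeable(0x1000, 0x1000, 0x2000, 0x1000, 0xffff) -> true   */
    /* phys_mergeable(0xf000, 0x1000, 0x10000, 0x1000, 0xffff) -> false,
     * because the merged segment would cross the 64K boundary. */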
#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
@@ -158,7 +194,38 @@ static inline bool bio_integrity_endio(struct bio *bio)
return __bio_integrity_endio(bio);
return true;
}
-#else
+
+static inline bool integrity_req_gap_back_merge(struct request *req,
+ struct bio *next)
+{
+ struct bio_integrity_payload *bip = bio_integrity(req->bio);
+ struct bio_integrity_payload *bip_next = bio_integrity(next);
+
+ return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+ bip_next->bip_vec[0].bv_offset);
+}
+
+static inline bool integrity_req_gap_front_merge(struct request *req,
+ struct bio *bio)
+{
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+ struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
+
+ return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+ bip_next->bip_vec[0].bv_offset);
+}
+#else /* CONFIG_BLK_DEV_INTEGRITY */
+static inline bool integrity_req_gap_back_merge(struct request *req,
+ struct bio *next)
+{
+ return false;
+}
+static inline bool integrity_req_gap_front_merge(struct request *req,
+ struct bio *bio)
+{
+ return false;
+}
+
static inline void blk_flush_integrity(void)
{
}
@@ -166,7 +233,7 @@ static inline bool bio_integrity_endio(struct bio *bio)
{
return true;
}
-#endif
+#endif /* CONFIG_BLK_DEV_INTEGRITY */
void blk_timeout_work(struct work_struct *work);
unsigned long blk_rq_timeout(unsigned long timeout);
diff --git a/block/bounce.c b/block/bounce.c
index bc63b3a2d18c..ec0d99995f5f 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -31,6 +31,24 @@
static struct bio_set bounce_bio_set, bounce_bio_split;
static mempool_t page_pool, isa_page_pool;
+static void init_bounce_bioset(void)
+{
+ static bool bounce_bs_setup;
+ int ret;
+
+ if (bounce_bs_setup)
+ return;
+
+ ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
+ BUG_ON(ret);
+ if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE))
+ BUG_ON(1);
+
+ ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0);
+ BUG_ON(ret);
+ bounce_bs_setup = true;
+}
+
#if defined(CONFIG_HIGHMEM)
static __init int init_emergency_pool(void)
{
@@ -44,14 +62,7 @@ static __init int init_emergency_pool(void)
BUG_ON(ret);
pr_info("pool size: %d pages\n", POOL_SIZE);
- ret = bioset_init(&bounce_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
- BUG_ON(ret);
- if (bioset_integrity_create(&bounce_bio_set, BIO_POOL_SIZE))
- BUG_ON(1);
-
- ret = bioset_init(&bounce_bio_split, BIO_POOL_SIZE, 0, 0);
- BUG_ON(ret);
-
+ init_bounce_bioset();
return 0;
}
@@ -86,6 +97,8 @@ static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}
+static DEFINE_MUTEX(isa_mutex);
+
/*
* gets called "every" time someone init's a queue with BLK_BOUNCE_ISA
* as the max address, so check if the pool has already been created.
@@ -94,14 +107,20 @@ int init_emergency_isa_pool(void)
{
int ret;
- if (mempool_initialized(&isa_page_pool))
+ mutex_lock(&isa_mutex);
+
+ if (mempool_initialized(&isa_page_pool)) {
+ mutex_unlock(&isa_mutex);
return 0;
+ }
ret = mempool_init(&isa_page_pool, ISA_POOL_SIZE, mempool_alloc_pages_isa,
mempool_free_pages, (void *) 0);
BUG_ON(ret);
pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);
+ init_bounce_bioset();
+ mutex_unlock(&isa_mutex);
return 0;
}
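The mutex closes a race where two concurrent callers could both observe an uninitialized pool and initialize it twice; the shape of the fix is the usual init-once-under-a-lock pattern, sketched generically below (all names illustrative):

    static DEFINE_MUTEX(setup_mutex);
    static bool setup_done;

    static void do_expensive_setup(void);	/* stand-in for the real work */

    static int setup_once(void)
    {
    	mutex_lock(&setup_mutex);
    	if (!setup_done) {
    		do_expensive_setup();	/* runs exactly once */
    		setup_done = true;
    	}
    	mutex_unlock(&setup_mutex);
    	return 0;
    }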
@@ -257,7 +276,9 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
}
}
- bio_clone_blkcg_association(bio, bio_src);
+ bio_clone_blkg_association(bio, bio_src);
+
+ blkcg_bio_issue_init(bio);
return bio;
}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 2eb87444b157..6a3d87dd3c1a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1644,14 +1644,20 @@ static void cfq_pd_offline(struct blkg_policy_data *pd)
int i;
for (i = 0; i < IOPRIO_BE_NR; i++) {
- if (cfqg->async_cfqq[0][i])
+ if (cfqg->async_cfqq[0][i]) {
cfq_put_queue(cfqg->async_cfqq[0][i]);
- if (cfqg->async_cfqq[1][i])
+ cfqg->async_cfqq[0][i] = NULL;
+ }
+ if (cfqg->async_cfqq[1][i]) {
cfq_put_queue(cfqg->async_cfqq[1][i]);
+ cfqg->async_cfqq[1][i] = NULL;
+ }
}
- if (cfqg->async_idle_cfqq)
+ if (cfqg->async_idle_cfqq) {
cfq_put_queue(cfqg->async_idle_cfqq);
+ cfqg->async_idle_cfqq = NULL;
+ }
/*
* @blkg is going offline and will be ignored by
@@ -3753,7 +3759,7 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
uint64_t serial_nr;
rcu_read_lock();
- serial_nr = bio_blkcg(bio)->css.serial_nr;
+ serial_nr = __bio_blkcg(bio)->css.serial_nr;
rcu_read_unlock();
/*
@@ -3818,7 +3824,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
struct cfq_group *cfqg;
rcu_read_lock();
- cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio));
+ cfqg = cfq_lookup_cfqg(cfqd, __bio_blkcg(bio));
if (!cfqg) {
cfqq = &cfqd->oom_cfqq;
goto out;
diff --git a/block/elevator.c b/block/elevator.c
index 6a06b5d040e5..8fdcd64ae12e 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -41,6 +41,7 @@
#include "blk.h"
#include "blk-mq-sched.h"
+#include "blk-pm.h"
#include "blk-wbt.h"
static DEFINE_SPINLOCK(elv_list_lock);
@@ -557,27 +558,6 @@ void elv_bio_merged(struct request_queue *q, struct request *rq,
e->type->ops.sq.elevator_bio_merged_fn(q, rq, bio);
}
-#ifdef CONFIG_PM
-static void blk_pm_requeue_request(struct request *rq)
-{
- if (rq->q->dev && !(rq->rq_flags & RQF_PM))
- rq->q->nr_pending--;
-}
-
-static void blk_pm_add_request(struct request_queue *q, struct request *rq)
-{
- if (q->dev && !(rq->rq_flags & RQF_PM) && q->nr_pending++ == 0 &&
- (q->rpm_status == RPM_SUSPENDED || q->rpm_status == RPM_SUSPENDING))
- pm_request_resume(q->dev);
-}
-#else
-static inline void blk_pm_requeue_request(struct request *rq) {}
-static inline void blk_pm_add_request(struct request_queue *q,
- struct request *rq)
-{
-}
-#endif
-
void elv_requeue_request(struct request_queue *q, struct request *rq)
{
/*
@@ -609,7 +589,7 @@ void elv_drain_elevator(struct request_queue *q)
while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
;
- if (q->nr_sorted && printed++ < 10) {
+	if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10) {
printk(KERN_ERR "%s: forced dispatching is broken "
"(nr_sorted=%u), please report this\n",
q->elevator->type->elevator_name, q->nr_sorted);
diff --git a/block/genhd.c b/block/genhd.c
index 8cc719a37b32..cff6bdf27226 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -567,7 +567,8 @@ static int exact_lock(dev_t devt, void *data)
return 0;
}
-static void register_disk(struct device *parent, struct gendisk *disk)
+static void register_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups)
{
struct device *ddev = disk_to_dev(disk);
struct block_device *bdev;
@@ -582,6 +583,10 @@ static void register_disk(struct device *parent, struct gendisk *disk)
/* delay uevents, until we scanned partition table */
dev_set_uevent_suppress(ddev, 1);
+ if (groups) {
+ WARN_ON(ddev->groups);
+ ddev->groups = groups;
+ }
if (device_add(ddev))
return;
if (!sysfs_deprecated) {
@@ -647,6 +652,7 @@ exit:
* __device_add_disk - add disk information to kernel list
* @parent: parent device for the disk
* @disk: per-device partitioning information
+ * @groups: Additional per-device sysfs groups
* @register_queue: register the queue if set to true
*
* This function registers the partitioning information in @disk
@@ -655,6 +661,7 @@ exit:
* FIXME: error handling
*/
static void __device_add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups,
bool register_queue)
{
dev_t devt;
@@ -698,7 +705,7 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
blk_register_region(disk_devt(disk), disk->minors, NULL,
exact_match, exact_lock, disk);
}
- register_disk(parent, disk);
+ register_disk(parent, disk, groups);
if (register_queue)
blk_register_queue(disk);
@@ -712,15 +719,17 @@ static void __device_add_disk(struct device *parent, struct gendisk *disk,
blk_integrity_add(disk);
}
-void device_add_disk(struct device *parent, struct gendisk *disk)
+void device_add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups)
{
- __device_add_disk(parent, disk, true);
+ __device_add_disk(parent, disk, groups, true);
}
EXPORT_SYMBOL(device_add_disk);
void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk)
{
- __device_add_disk(parent, disk, false);
+ __device_add_disk(parent, disk, NULL, false);
}
EXPORT_SYMBOL(device_add_disk_no_queue_reg);
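The new @groups argument lets a driver register its sysfs attributes atomically with the disk's device, instead of adding them after device_add_disk() returns and leaving a window where userspace sees the disk without them. A hedged sketch with an illustrative read-only attribute; all exdisk_* names are made up:

    static ssize_t model_show(struct device *dev,
    			  struct device_attribute *attr, char *buf)
    {
    	return sprintf(buf, "example-model\n");	/* illustrative value */
    }
    static DEVICE_ATTR_RO(model);

    static struct attribute *exdisk_attrs[] = {
    	&dev_attr_model.attr,
    	NULL,
    };
    ATTRIBUTE_GROUPS(exdisk);

    /* illustrative probe-path fragment */
    static void exdisk_register(struct device *parent, struct gendisk *disk)
    {
    	/* attributes appear in sysfs together with the disk's device */
    	device_add_disk(parent, disk, exdisk_groups);
    }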
@@ -1343,18 +1352,18 @@ static int diskstats_show(struct seq_file *seqf, void *v)
part_stat_read(hd, ios[STAT_READ]),
part_stat_read(hd, merges[STAT_READ]),
part_stat_read(hd, sectors[STAT_READ]),
- jiffies_to_msecs(part_stat_read(hd, ticks[STAT_READ])),
+ (unsigned int)part_stat_read_msecs(hd, STAT_READ),
part_stat_read(hd, ios[STAT_WRITE]),
part_stat_read(hd, merges[STAT_WRITE]),
part_stat_read(hd, sectors[STAT_WRITE]),
- jiffies_to_msecs(part_stat_read(hd, ticks[STAT_WRITE])),
+ (unsigned int)part_stat_read_msecs(hd, STAT_WRITE),
inflight[0],
jiffies_to_msecs(part_stat_read(hd, io_ticks)),
jiffies_to_msecs(part_stat_read(hd, time_in_queue)),
part_stat_read(hd, ios[STAT_DISCARD]),
part_stat_read(hd, merges[STAT_DISCARD]),
part_stat_read(hd, sectors[STAT_DISCARD]),
- jiffies_to_msecs(part_stat_read(hd, ticks[STAT_DISCARD]))
+ (unsigned int)part_stat_read_msecs(hd, STAT_DISCARD)
);
}
disk_part_iter_exit(&piter);
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index a1660bafc912..eccac01a10b6 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -29,19 +29,30 @@
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
-#include "blk-stat.h"
-/* Scheduling domains. */
+#define CREATE_TRACE_POINTS
+#include <trace/events/kyber.h>
+
+/*
+ * Scheduling domains: the device is divided into multiple domains based on the
+ * request type.
+ */
enum {
KYBER_READ,
- KYBER_SYNC_WRITE,
- KYBER_OTHER, /* Async writes, discard, etc. */
+ KYBER_WRITE,
+ KYBER_DISCARD,
+ KYBER_OTHER,
KYBER_NUM_DOMAINS,
};
-enum {
- KYBER_MIN_DEPTH = 256,
+static const char *kyber_domain_names[] = {
+ [KYBER_READ] = "READ",
+ [KYBER_WRITE] = "WRITE",
+ [KYBER_DISCARD] = "DISCARD",
+ [KYBER_OTHER] = "OTHER",
+};
+enum {
/*
* In order to prevent starvation of synchronous requests by a flood of
* asynchronous requests, we reserve 25% of requests for synchronous
@@ -51,25 +62,87 @@ enum {
};
/*
- * Initial device-wide depths for each scheduling domain.
+ * Maximum device-wide depth for each scheduling domain.
*
- * Even for fast devices with lots of tags like NVMe, you can saturate
- * the device with only a fraction of the maximum possible queue depth.
- * So, we cap these to a reasonable value.
+ * Even for fast devices with lots of tags like NVMe, you can saturate the
+ * device with only a fraction of the maximum possible queue depth. So, we cap
+ * these to a reasonable value.
*/
static const unsigned int kyber_depth[] = {
[KYBER_READ] = 256,
- [KYBER_SYNC_WRITE] = 128,
- [KYBER_OTHER] = 64,
+ [KYBER_WRITE] = 128,
+ [KYBER_DISCARD] = 64,
+ [KYBER_OTHER] = 16,
};
/*
- * Scheduling domain batch sizes. We favor reads.
+ * Default latency targets for each scheduling domain.
+ */
+static const u64 kyber_latency_targets[] = {
+ [KYBER_READ] = 2ULL * NSEC_PER_MSEC,
+ [KYBER_WRITE] = 10ULL * NSEC_PER_MSEC,
+ [KYBER_DISCARD] = 5ULL * NSEC_PER_SEC,
+};
+
+/*
+ * Batch size (number of requests we'll dispatch in a row) for each scheduling
+ * domain.
*/
static const unsigned int kyber_batch_size[] = {
[KYBER_READ] = 16,
- [KYBER_SYNC_WRITE] = 8,
- [KYBER_OTHER] = 8,
+ [KYBER_WRITE] = 8,
+ [KYBER_DISCARD] = 1,
+ [KYBER_OTHER] = 1,
+};
+
+/*
+ * Request latencies are recorded in a histogram with buckets defined relative
+ * to the target latency:
+ *
+ * <= 1/4 * target latency
+ * <= 1/2 * target latency
+ * <= 3/4 * target latency
+ * <= target latency
+ * <= 1 1/4 * target latency
+ * <= 1 1/2 * target latency
+ * <= 1 3/4 * target latency
+ * > 1 3/4 * target latency
+ */
+enum {
+ /*
+ * The width of the latency histogram buckets is
+ * 1 / (1 << KYBER_LATENCY_SHIFT) * target latency.
+ */
+ KYBER_LATENCY_SHIFT = 2,
+ /*
+ * The first (1 << KYBER_LATENCY_SHIFT) buckets are <= target latency,
+ * thus, "good".
+ */
+ KYBER_GOOD_BUCKETS = 1 << KYBER_LATENCY_SHIFT,
+ /* There are also (1 << KYBER_LATENCY_SHIFT) "bad" buckets. */
+ KYBER_LATENCY_BUCKETS = 2 << KYBER_LATENCY_SHIFT,
+};
+
+/*
+ * We measure both the total latency and the I/O latency (i.e., latency after
+ * submitting to the device).
+ */
+enum {
+ KYBER_TOTAL_LATENCY,
+ KYBER_IO_LATENCY,
+};
+
+static const char *kyber_latency_type_names[] = {
+ [KYBER_TOTAL_LATENCY] = "total",
+ [KYBER_IO_LATENCY] = "I/O",
+};
+
+/*
+ * Per-cpu latency histograms: total latency and I/O latency for each scheduling
+ * domain except for KYBER_OTHER.
+ */
+struct kyber_cpu_latency {
+ atomic_t buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];
};
/*
@@ -88,12 +161,9 @@ struct kyber_ctx_queue {
struct kyber_queue_data {
struct request_queue *q;
- struct blk_stat_callback *cb;
-
/*
- * The device is divided into multiple scheduling domains based on the
- * request type. Each domain has a fixed number of in-flight requests of
- * that type device-wide, limited by these tokens.
+	 * Each scheduling domain has a bounded number of in-flight requests
+	 * device-wide, enforced by these tokens.
*/
struct sbitmap_queue domain_tokens[KYBER_NUM_DOMAINS];
@@ -103,8 +173,19 @@ struct kyber_queue_data {
*/
unsigned int async_depth;
+ struct kyber_cpu_latency __percpu *cpu_latency;
+
+ /* Timer for stats aggregation and adjusting domain tokens. */
+ struct timer_list timer;
+
+ unsigned int latency_buckets[KYBER_OTHER][2][KYBER_LATENCY_BUCKETS];
+
+ unsigned long latency_timeout[KYBER_OTHER];
+
+ int domain_p99[KYBER_OTHER];
+
/* Target latencies in nanoseconds. */
- u64 read_lat_nsec, write_lat_nsec;
+ u64 latency_targets[KYBER_OTHER];
};
struct kyber_hctx_data {
@@ -124,233 +205,219 @@ static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
static unsigned int kyber_sched_domain(unsigned int op)
{
- if ((op & REQ_OP_MASK) == REQ_OP_READ)
+ switch (op & REQ_OP_MASK) {
+ case REQ_OP_READ:
return KYBER_READ;
- else if ((op & REQ_OP_MASK) == REQ_OP_WRITE && op_is_sync(op))
- return KYBER_SYNC_WRITE;
- else
+ case REQ_OP_WRITE:
+ return KYBER_WRITE;
+ case REQ_OP_DISCARD:
+ return KYBER_DISCARD;
+ default:
return KYBER_OTHER;
+ }
}
-enum {
- NONE = 0,
- GOOD = 1,
- GREAT = 2,
- BAD = -1,
- AWFUL = -2,
-};
-
-#define IS_GOOD(status) ((status) > 0)
-#define IS_BAD(status) ((status) < 0)
-
-static int kyber_lat_status(struct blk_stat_callback *cb,
- unsigned int sched_domain, u64 target)
+static void flush_latency_buckets(struct kyber_queue_data *kqd,
+ struct kyber_cpu_latency *cpu_latency,
+ unsigned int sched_domain, unsigned int type)
{
- u64 latency;
-
- if (!cb->stat[sched_domain].nr_samples)
- return NONE;
+ unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
+ atomic_t *cpu_buckets = cpu_latency->buckets[sched_domain][type];
+ unsigned int bucket;
- latency = cb->stat[sched_domain].mean;
- if (latency >= 2 * target)
- return AWFUL;
- else if (latency > target)
- return BAD;
- else if (latency <= target / 2)
- return GREAT;
- else /* (latency <= target) */
- return GOOD;
+ for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
+ buckets[bucket] += atomic_xchg(&cpu_buckets[bucket], 0);
}
/*
- * Adjust the read or synchronous write depth given the status of reads and
- * writes. The goal is that the latencies of the two domains are fair (i.e., if
- * one is good, then the other is good).
+ * Calculate the histogram bucket with the given percentile rank, or -1 if there
+ * aren't enough samples yet.
*/
-static void kyber_adjust_rw_depth(struct kyber_queue_data *kqd,
- unsigned int sched_domain, int this_status,
- int other_status)
+static int calculate_percentile(struct kyber_queue_data *kqd,
+ unsigned int sched_domain, unsigned int type,
+ unsigned int percentile)
{
- unsigned int orig_depth, depth;
+ unsigned int *buckets = kqd->latency_buckets[sched_domain][type];
+ unsigned int bucket, samples = 0, percentile_samples;
+
+ for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS; bucket++)
+ samples += buckets[bucket];
+
+ if (!samples)
+ return -1;
/*
- * If this domain had no samples, or reads and writes are both good or
- * both bad, don't adjust the depth.
+	 * We do the calculation once we have 500 samples, or after one second
+	 * has passed since the first sample was recorded, whichever comes first.
*/
- if (this_status == NONE ||
- (IS_GOOD(this_status) && IS_GOOD(other_status)) ||
- (IS_BAD(this_status) && IS_BAD(other_status)))
- return;
-
- orig_depth = depth = kqd->domain_tokens[sched_domain].sb.depth;
+ if (!kqd->latency_timeout[sched_domain])
+ kqd->latency_timeout[sched_domain] = max(jiffies + HZ, 1UL);
+ if (samples < 500 &&
+ time_is_after_jiffies(kqd->latency_timeout[sched_domain])) {
+ return -1;
+ }
+ kqd->latency_timeout[sched_domain] = 0;
- if (other_status == NONE) {
- depth++;
- } else {
- switch (this_status) {
- case GOOD:
- if (other_status == AWFUL)
- depth -= max(depth / 4, 1U);
- else
- depth -= max(depth / 8, 1U);
- break;
- case GREAT:
- if (other_status == AWFUL)
- depth /= 2;
- else
- depth -= max(depth / 4, 1U);
+ percentile_samples = DIV_ROUND_UP(samples * percentile, 100);
+ for (bucket = 0; bucket < KYBER_LATENCY_BUCKETS - 1; bucket++) {
+ if (buckets[bucket] >= percentile_samples)
break;
- case BAD:
- depth++;
- break;
- case AWFUL:
- if (other_status == GREAT)
- depth += 2;
- else
- depth++;
- break;
- }
+ percentile_samples -= buckets[bucket];
}
+ memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));
- depth = clamp(depth, 1U, kyber_depth[sched_domain]);
- if (depth != orig_depth)
- sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
+ trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain],
+ kyber_latency_type_names[type], percentile,
+ bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);
+
+ return bucket;
}
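Stripped of the sampling-window bookkeeping, calculate_percentile() is a cumulative walk over the histogram; a standalone sketch of just that walk:

    /* Find the first bucket whose cumulative count reaches the rank. */
    static int percentile_bucket(const unsigned int *buckets, unsigned int nr,
    			     unsigned int total, unsigned int percentile)
    {
    	unsigned int needed = (total * percentile + 99) / 100; /* DIV_ROUND_UP */
    	unsigned int b;

    	for (b = 0; b < nr - 1; b++) {
    		if (buckets[b] >= needed)
    			break;
    		needed -= buckets[b];
    	}
    	return b;
    }

    /* buckets {10, 70, 15, 5}, percentile 90: needed starts at 90,
     * drops to 80 then 10, and the walk stops at bucket 2. */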
-/*
- * Adjust the depth of other requests given the status of reads and synchronous
- * writes. As long as either domain is doing fine, we don't throttle, but if
- * both domains are doing badly, we throttle heavily.
- */
-static void kyber_adjust_other_depth(struct kyber_queue_data *kqd,
- int read_status, int write_status,
- bool have_samples)
-{
- unsigned int orig_depth, depth;
- int status;
-
- orig_depth = depth = kqd->domain_tokens[KYBER_OTHER].sb.depth;
-
- if (read_status == NONE && write_status == NONE) {
- depth += 2;
- } else if (have_samples) {
- if (read_status == NONE)
- status = write_status;
- else if (write_status == NONE)
- status = read_status;
- else
- status = max(read_status, write_status);
- switch (status) {
- case GREAT:
- depth += 2;
- break;
- case GOOD:
- depth++;
- break;
- case BAD:
- depth -= max(depth / 4, 1U);
- break;
- case AWFUL:
- depth /= 2;
- break;
- }
+static void kyber_resize_domain(struct kyber_queue_data *kqd,
+ unsigned int sched_domain, unsigned int depth)
+{
+ depth = clamp(depth, 1U, kyber_depth[sched_domain]);
+ if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
+ sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
+ trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain],
+ depth);
}
-
- depth = clamp(depth, 1U, kyber_depth[KYBER_OTHER]);
- if (depth != orig_depth)
- sbitmap_queue_resize(&kqd->domain_tokens[KYBER_OTHER], depth);
}
-/*
- * Apply heuristics for limiting queue depths based on gathered latency
- * statistics.
- */
-static void kyber_stat_timer_fn(struct blk_stat_callback *cb)
+static void kyber_timer_fn(struct timer_list *t)
{
- struct kyber_queue_data *kqd = cb->data;
- int read_status, write_status;
+ struct kyber_queue_data *kqd = from_timer(kqd, t, timer);
+ unsigned int sched_domain;
+ int cpu;
+ bool bad = false;
+
+ /* Sum all of the per-cpu latency histograms. */
+ for_each_online_cpu(cpu) {
+ struct kyber_cpu_latency *cpu_latency;
+
+ cpu_latency = per_cpu_ptr(kqd->cpu_latency, cpu);
+ for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
+ flush_latency_buckets(kqd, cpu_latency, sched_domain,
+ KYBER_TOTAL_LATENCY);
+ flush_latency_buckets(kqd, cpu_latency, sched_domain,
+ KYBER_IO_LATENCY);
+ }
+ }
- read_status = kyber_lat_status(cb, KYBER_READ, kqd->read_lat_nsec);
- write_status = kyber_lat_status(cb, KYBER_SYNC_WRITE, kqd->write_lat_nsec);
+ /*
+ * Check if any domains have a high I/O latency, which might indicate
+ * congestion in the device. Note that we use the p90; we don't want to
+ * be too sensitive to outliers here.
+ */
+ for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
+ int p90;
- kyber_adjust_rw_depth(kqd, KYBER_READ, read_status, write_status);
- kyber_adjust_rw_depth(kqd, KYBER_SYNC_WRITE, write_status, read_status);
- kyber_adjust_other_depth(kqd, read_status, write_status,
- cb->stat[KYBER_OTHER].nr_samples != 0);
+ p90 = calculate_percentile(kqd, sched_domain, KYBER_IO_LATENCY,
+ 90);
+ if (p90 >= KYBER_GOOD_BUCKETS)
+ bad = true;
+ }
/*
- * Continue monitoring latencies if we aren't hitting the targets or
- * we're still throttling other requests.
+ * Adjust the scheduling domain depths. If we determined that there was
+ * congestion, we throttle all domains with good latencies. Either way,
+ * we ease up on throttling domains with bad latencies.
*/
- if (!blk_stat_is_active(kqd->cb) &&
- ((IS_BAD(read_status) || IS_BAD(write_status) ||
- kqd->domain_tokens[KYBER_OTHER].sb.depth < kyber_depth[KYBER_OTHER])))
- blk_stat_activate_msecs(kqd->cb, 100);
+ for (sched_domain = 0; sched_domain < KYBER_OTHER; sched_domain++) {
+ unsigned int orig_depth, depth;
+ int p99;
+
+ p99 = calculate_percentile(kqd, sched_domain,
+ KYBER_TOTAL_LATENCY, 99);
+ /*
+ * This is kind of subtle: different domains will not
+ * necessarily have enough samples to calculate the latency
+ * percentiles during the same window, so we have to remember
+ * the p99 for the next time we observe congestion; once we do,
+ * we don't want to throttle again until we get more data, so we
+ * reset it to -1.
+ */
+ if (bad) {
+ if (p99 < 0)
+ p99 = kqd->domain_p99[sched_domain];
+ kqd->domain_p99[sched_domain] = -1;
+ } else if (p99 >= 0) {
+ kqd->domain_p99[sched_domain] = p99;
+ }
+ if (p99 < 0)
+ continue;
+
+ /*
+ * If this domain has bad latency, throttle less. Otherwise,
+ * throttle more iff we determined that there is congestion.
+ *
+ * The new depth is scaled linearly with the p99 latency vs the
+ * latency target. E.g., if the p99 is 3/4 of the target, then
+ * we throttle down to 3/4 of the current depth, and if the p99
+ * is 2x the target, then we double the depth.
+ */
+ if (bad || p99 >= KYBER_GOOD_BUCKETS) {
+ orig_depth = kqd->domain_tokens[sched_domain].sb.depth;
+ depth = (orig_depth * (p99 + 1)) >> KYBER_LATENCY_SHIFT;
+ kyber_resize_domain(kqd, sched_domain, depth);
+ }
+ }
}
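The adjustment in the final loop scales the depth linearly with the p99 bucket index; a minimal sketch of the arithmetic, assuming KYBER_LATENCY_SHIFT == 2 (the result is then clamped by kyber_resize_domain()):

    /* Mirror of the depth scaling above, with KYBER_LATENCY_SHIFT == 2. */
    static unsigned int scale_depth(unsigned int orig_depth, int p99)
    {
    	return (orig_depth * (p99 + 1)) >> 2;
    }

    /* scale_depth(64, 2) == 48: p99 at 3/4 of target, depth cut to 3/4 */
    /* scale_depth(64, 7) == 128: p99 near 2x target, depth doubled     */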
-static unsigned int kyber_sched_tags_shift(struct kyber_queue_data *kqd)
+static unsigned int kyber_sched_tags_shift(struct request_queue *q)
{
/*
* All of the hardware queues have the same depth, so we can just grab
* the shift of the first one.
*/
- return kqd->q->queue_hw_ctx[0]->sched_tags->bitmap_tags.sb.shift;
-}
-
-static int kyber_bucket_fn(const struct request *rq)
-{
- return kyber_sched_domain(rq->cmd_flags);
+ return q->queue_hw_ctx[0]->sched_tags->bitmap_tags.sb.shift;
}
static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
{
struct kyber_queue_data *kqd;
- unsigned int max_tokens;
unsigned int shift;
int ret = -ENOMEM;
int i;
- kqd = kmalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
+ kqd = kzalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
if (!kqd)
goto err;
+
kqd->q = q;
- kqd->cb = blk_stat_alloc_callback(kyber_stat_timer_fn, kyber_bucket_fn,
- KYBER_NUM_DOMAINS, kqd);
- if (!kqd->cb)
+ kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!kqd->cpu_latency)
goto err_kqd;
- /*
- * The maximum number of tokens for any scheduling domain is at least
- * the queue depth of a single hardware queue. If the hardware doesn't
- * have many tags, still provide a reasonable number.
- */
- max_tokens = max_t(unsigned int, q->tag_set->queue_depth,
- KYBER_MIN_DEPTH);
+ timer_setup(&kqd->timer, kyber_timer_fn, 0);
+
for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
WARN_ON(!kyber_depth[i]);
WARN_ON(!kyber_batch_size[i]);
ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
- max_tokens, -1, false, GFP_KERNEL,
- q->node);
+ kyber_depth[i], -1, false,
+ GFP_KERNEL, q->node);
if (ret) {
while (--i >= 0)
sbitmap_queue_free(&kqd->domain_tokens[i]);
- goto err_cb;
+ goto err_buckets;
}
- sbitmap_queue_resize(&kqd->domain_tokens[i], kyber_depth[i]);
}
- shift = kyber_sched_tags_shift(kqd);
- kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
+ for (i = 0; i < KYBER_OTHER; i++) {
+ kqd->domain_p99[i] = -1;
+ kqd->latency_targets[i] = kyber_latency_targets[i];
+ }
- kqd->read_lat_nsec = 2000000ULL;
- kqd->write_lat_nsec = 10000000ULL;
+ shift = kyber_sched_tags_shift(q);
+ kqd->async_depth = (1U << shift) * KYBER_ASYNC_PERCENT / 100U;
return kqd;
-err_cb:
- blk_stat_free_callback(kqd->cb);
+err_buckets:
+ free_percpu(kqd->cpu_latency);
err_kqd:
kfree(kqd);
err:
@@ -372,25 +439,24 @@ static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
return PTR_ERR(kqd);
}
+ blk_stat_enable_accounting(q);
+
eq->elevator_data = kqd;
q->elevator = eq;
- blk_stat_add_callback(q, kqd->cb);
-
return 0;
}
static void kyber_exit_sched(struct elevator_queue *e)
{
struct kyber_queue_data *kqd = e->elevator_data;
- struct request_queue *q = kqd->q;
int i;
- blk_stat_remove_callback(q, kqd->cb);
+ del_timer_sync(&kqd->timer);
for (i = 0; i < KYBER_NUM_DOMAINS; i++)
sbitmap_queue_free(&kqd->domain_tokens[i]);
- blk_stat_free_callback(kqd->cb);
+ free_percpu(kqd->cpu_latency);
kfree(kqd);
}
@@ -558,41 +624,44 @@ static void kyber_finish_request(struct request *rq)
rq_clear_domain_token(kqd, rq);
}
-static void kyber_completed_request(struct request *rq)
+static void add_latency_sample(struct kyber_cpu_latency *cpu_latency,
+ unsigned int sched_domain, unsigned int type,
+ u64 target, u64 latency)
{
- struct request_queue *q = rq->q;
- struct kyber_queue_data *kqd = q->elevator->elevator_data;
- unsigned int sched_domain;
- u64 now, latency, target;
+ unsigned int bucket;
+ u64 divisor;
- /*
- * Check if this request met our latency goal. If not, quickly gather
- * some statistics and start throttling.
- */
- sched_domain = kyber_sched_domain(rq->cmd_flags);
- switch (sched_domain) {
- case KYBER_READ:
- target = kqd->read_lat_nsec;
- break;
- case KYBER_SYNC_WRITE:
- target = kqd->write_lat_nsec;
- break;
- default:
- return;
+ if (latency > 0) {
+ divisor = max_t(u64, target >> KYBER_LATENCY_SHIFT, 1);
+ bucket = min_t(unsigned int, div64_u64(latency - 1, divisor),
+ KYBER_LATENCY_BUCKETS - 1);
+ } else {
+ bucket = 0;
}
- /* If we are already monitoring latencies, don't check again. */
- if (blk_stat_is_active(kqd->cb))
- return;
+ atomic_inc(&cpu_latency->buckets[sched_domain][type][bucket]);
+}
- now = ktime_get_ns();
- if (now < rq->io_start_time_ns)
+static void kyber_completed_request(struct request *rq, u64 now)
+{
+ struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
+ struct kyber_cpu_latency *cpu_latency;
+ unsigned int sched_domain;
+ u64 target;
+
+ sched_domain = kyber_sched_domain(rq->cmd_flags);
+ if (sched_domain == KYBER_OTHER)
return;
- latency = now - rq->io_start_time_ns;
+ cpu_latency = get_cpu_ptr(kqd->cpu_latency);
+ target = kqd->latency_targets[sched_domain];
+ add_latency_sample(cpu_latency, sched_domain, KYBER_TOTAL_LATENCY,
+ target, now - rq->start_time_ns);
+ add_latency_sample(cpu_latency, sched_domain, KYBER_IO_LATENCY, target,
+ now - rq->io_start_time_ns);
+ put_cpu_ptr(kqd->cpu_latency);
- if (latency > target)
- blk_stat_activate_msecs(kqd->cb, 10);
+ timer_reduce(&kqd->timer, jiffies + HZ / 10);
}
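The bucket selection in add_latency_sample() divides the latency by a quarter of the target and clamps to the last bucket; a standalone sketch with the 2 ms read target (buckets 0-3 are "good", i.e. at or under the target, per KYBER_GOOD_BUCKETS):

    #include <stdint.h>

    #define SHIFT	2
    #define BUCKETS	(2 << SHIFT)	/* 8 buckets */

    /* Mirror of the bucket selection in add_latency_sample(). */
    static unsigned int latency_bucket(uint64_t target, uint64_t latency)
    {
    	uint64_t divisor = target >> SHIFT;	/* a quarter of the target */
    	uint64_t bucket;

    	if (!latency)
    		return 0;
    	if (!divisor)
    		divisor = 1;
    	bucket = (latency - 1) / divisor;
    	return bucket < BUCKETS - 1 ? bucket : BUCKETS - 1;
    }

    /* target 2 ms: latency_bucket(2000000, 1200000) == 2 ("good") */
    /* target 2 ms: latency_bucket(2000000, 4500000) == 7 ("bad")  */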
struct flush_kcq_data {
@@ -713,6 +782,9 @@ kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
rq_set_domain_token(rq, nr);
list_del_init(&rq->queuelist);
return rq;
+ } else {
+ trace_kyber_throttled(kqd->q,
+ kyber_domain_names[khd->cur_domain]);
}
} else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
nr = kyber_get_domain_token(kqd, khd, hctx);
@@ -723,6 +795,9 @@ kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
rq_set_domain_token(rq, nr);
list_del_init(&rq->queuelist);
return rq;
+ } else {
+ trace_kyber_throttled(kqd->q,
+ kyber_domain_names[khd->cur_domain]);
}
}
@@ -790,17 +865,17 @@ static bool kyber_has_work(struct blk_mq_hw_ctx *hctx)
return false;
}
-#define KYBER_LAT_SHOW_STORE(op) \
-static ssize_t kyber_##op##_lat_show(struct elevator_queue *e, \
- char *page) \
+#define KYBER_LAT_SHOW_STORE(domain, name) \
+static ssize_t kyber_##name##_lat_show(struct elevator_queue *e, \
+ char *page) \
{ \
struct kyber_queue_data *kqd = e->elevator_data; \
\
- return sprintf(page, "%llu\n", kqd->op##_lat_nsec); \
+ return sprintf(page, "%llu\n", kqd->latency_targets[domain]); \
} \
\
-static ssize_t kyber_##op##_lat_store(struct elevator_queue *e, \
- const char *page, size_t count) \
+static ssize_t kyber_##name##_lat_store(struct elevator_queue *e, \
+ const char *page, size_t count) \
{ \
struct kyber_queue_data *kqd = e->elevator_data; \
unsigned long long nsec; \
@@ -810,12 +885,12 @@ static ssize_t kyber_##op##_lat_store(struct elevator_queue *e, \
if (ret) \
return ret; \
\
- kqd->op##_lat_nsec = nsec; \
+ kqd->latency_targets[domain] = nsec; \
\
return count; \
}
-KYBER_LAT_SHOW_STORE(read);
-KYBER_LAT_SHOW_STORE(write);
+KYBER_LAT_SHOW_STORE(KYBER_READ, read);
+KYBER_LAT_SHOW_STORE(KYBER_WRITE, write);
#undef KYBER_LAT_SHOW_STORE
#define KYBER_LAT_ATTR(op) __ATTR(op##_lat_nsec, 0644, kyber_##op##_lat_show, kyber_##op##_lat_store)
@@ -882,7 +957,8 @@ static int kyber_##name##_waiting_show(void *data, struct seq_file *m) \
return 0; \
}
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
-KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_SYNC_WRITE, sync_write)
+KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_WRITE, write)
+KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_DISCARD, discard)
KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_OTHER, other)
#undef KYBER_DEBUGFS_DOMAIN_ATTRS
@@ -900,20 +976,7 @@ static int kyber_cur_domain_show(void *data, struct seq_file *m)
struct blk_mq_hw_ctx *hctx = data;
struct kyber_hctx_data *khd = hctx->sched_data;
- switch (khd->cur_domain) {
- case KYBER_READ:
- seq_puts(m, "READ\n");
- break;
- case KYBER_SYNC_WRITE:
- seq_puts(m, "SYNC_WRITE\n");
- break;
- case KYBER_OTHER:
- seq_puts(m, "OTHER\n");
- break;
- default:
- seq_printf(m, "%u\n", khd->cur_domain);
- break;
- }
+ seq_printf(m, "%s\n", kyber_domain_names[khd->cur_domain]);
return 0;
}
@@ -930,7 +993,8 @@ static int kyber_batching_show(void *data, struct seq_file *m)
{#name "_tokens", 0400, kyber_##name##_tokens_show}
static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
KYBER_QUEUE_DOMAIN_ATTRS(read),
- KYBER_QUEUE_DOMAIN_ATTRS(sync_write),
+ KYBER_QUEUE_DOMAIN_ATTRS(write),
+ KYBER_QUEUE_DOMAIN_ATTRS(discard),
KYBER_QUEUE_DOMAIN_ATTRS(other),
{"async_depth", 0400, kyber_async_depth_show},
{},
@@ -942,7 +1006,8 @@ static const struct blk_mq_debugfs_attr kyber_queue_debugfs_attrs[] = {
{#name "_waiting", 0400, kyber_##name##_waiting_show}
static const struct blk_mq_debugfs_attr kyber_hctx_debugfs_attrs[] = {
KYBER_HCTX_DOMAIN_ATTRS(read),
- KYBER_HCTX_DOMAIN_ATTRS(sync_write),
+ KYBER_HCTX_DOMAIN_ATTRS(write),
+ KYBER_HCTX_DOMAIN_ATTRS(discard),
KYBER_HCTX_DOMAIN_ATTRS(other),
{"cur_domain", 0400, kyber_cur_domain_show},
{"batching", 0400, kyber_batching_show},
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 5a8975a1201c..d3d14e81fb12 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -136,18 +136,18 @@ ssize_t part_stat_show(struct device *dev,
part_stat_read(p, ios[STAT_READ]),
part_stat_read(p, merges[STAT_READ]),
(unsigned long long)part_stat_read(p, sectors[STAT_READ]),
- jiffies_to_msecs(part_stat_read(p, ticks[STAT_READ])),
+ (unsigned int)part_stat_read_msecs(p, STAT_READ),
part_stat_read(p, ios[STAT_WRITE]),
part_stat_read(p, merges[STAT_WRITE]),
(unsigned long long)part_stat_read(p, sectors[STAT_WRITE]),
- jiffies_to_msecs(part_stat_read(p, ticks[STAT_WRITE])),
+ (unsigned int)part_stat_read_msecs(p, STAT_WRITE),
inflight[0],
jiffies_to_msecs(part_stat_read(p, io_ticks)),
jiffies_to_msecs(part_stat_read(p, time_in_queue)),
part_stat_read(p, ios[STAT_DISCARD]),
part_stat_read(p, merges[STAT_DISCARD]),
(unsigned long long)part_stat_read(p, sectors[STAT_DISCARD]),
- jiffies_to_msecs(part_stat_read(p, ticks[STAT_DISCARD])));
+ (unsigned int)part_stat_read_msecs(p, STAT_DISCARD));
}
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 3f3b7b253445..64fd96eada31 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -332,6 +332,35 @@ err_no_vma:
return vma ? -ENOMEM : -ESRCH;
}
+
+static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+ struct vm_area_struct *vma)
+{
+ if (vma)
+ alloc->vma_vm_mm = vma->vm_mm;
+ /*
+	 * If we see alloc->vma is not NULL, the buffer data structures are set
+	 * up completely. This pairs with the smp_rmb() in binder_alloc_get_vma().
+	 * We also want to guarantee that the new alloc->vma_vm_mm is always
+	 * visible if alloc->vma is set.
+ */
+ smp_wmb();
+ alloc->vma = vma;
+}
+
+static inline struct vm_area_struct *binder_alloc_get_vma(
+ struct binder_alloc *alloc)
+{
+ struct vm_area_struct *vma = NULL;
+
+ if (alloc->vma) {
+		/* Pairs with the smp_wmb() in binder_alloc_set_vma() */
+ smp_rmb();
+ vma = alloc->vma;
+ }
+ return vma;
+}
+
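The smp_wmb()/smp_rmb() pair above is the classic publish/consume ordering; a minimal kernel-style sketch of the pattern in isolation (struct box and the function names are illustrative, not binder code):

    struct box {
    	int payload;
    	int ready;
    };

    static void publish(struct box *b, int val)
    {
    	b->payload = val;
    	smp_wmb();		/* order payload store before ready store */
    	WRITE_ONCE(b->ready, 1);
    }

    static int consume(struct box *b)
    {
    	if (!READ_ONCE(b->ready))
    		return -1;
    	smp_rmb();		/* pairs with smp_wmb() in publish() */
    	return b->payload;	/* guaranteed to see the published value */
    }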
static struct binder_buffer *binder_alloc_new_buf_locked(
struct binder_alloc *alloc,
size_t data_size,
@@ -348,7 +377,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
size_t size, data_offsets_size;
int ret;
- if (alloc->vma == NULL) {
+ if (!binder_alloc_get_vma(alloc)) {
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
"%d: binder_alloc_buf, no vma\n",
alloc->pid);
@@ -723,9 +752,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
- barrier();
- alloc->vma = vma;
- alloc->vma_vm_mm = vma->vm_mm;
+ binder_alloc_set_vma(alloc, vma);
mmgrab(alloc->vma_vm_mm);
return 0;
@@ -754,10 +781,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
int buffers, page_count;
struct binder_buffer *buffer;
- BUG_ON(alloc->vma);
-
buffers = 0;
mutex_lock(&alloc->mutex);
+ BUG_ON(alloc->vma);
+
while ((n = rb_first(&alloc->allocated_buffers))) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -900,7 +927,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
*/
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
- WRITE_ONCE(alloc->vma, NULL);
+ binder_alloc_set_vma(alloc, NULL);
}
/**
@@ -935,7 +962,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
index = page - alloc->pages;
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
- vma = alloc->vma;
+ vma = binder_alloc_get_vma(alloc);
if (vma) {
if (!mmget_not_zero(alloc->vma_vm_mm))
goto err_mmget;
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 39b181d6bd0d..4ca7a6b4eaae 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -33,7 +33,6 @@ if ATA
config ATA_NONSTANDARD
bool
- default n
config ATA_VERBOSE_ERROR
bool "Verbose ATA error reporting"
@@ -62,7 +61,6 @@ config ATA_ACPI
config SATA_ZPODD
bool "SATA Zero Power Optical Disc Drive (ZPODD) support"
depends on ATA_ACPI && PM
- default n
help
This option adds support for SATA Zero Power Optical Disc
Drive (ZPODD). It requires both the ODD and the platform
@@ -121,7 +119,8 @@ config SATA_AHCI_PLATFORM
config AHCI_BRCM
tristate "Broadcom AHCI SATA support"
- depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP || \
+ ARCH_BCM_63XX
help
This option enables support for the AHCI SATA3 controller found on
Broadcom SoC's.
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 6a1515f0da40..ef356e70e6de 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -352,6 +352,8 @@ struct ahci_host_priv {
struct clk *clks[AHCI_MAX_CLKS]; /* Optional */
struct reset_control *rsts; /* Optional */
struct regulator **target_pwrs; /* Optional */
+ struct regulator *ahci_regulator;/* Optional */
+ struct regulator *phy_regulator;/* Optional */
/*
	 * If the platform uses PHYs, there is a 1:1 relation between the port
	 * number and the PHY position in this array.
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index f3d557777d82..fba5a3044c8a 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/string.h>
#include "ahci.h"
@@ -94,6 +95,7 @@ struct brcm_ahci_priv {
u32 port_mask;
u32 quirks;
enum brcm_ahci_version version;
+ struct reset_control *rcdev;
};
static inline u32 brcm_sata_readreg(void __iomem *addr)
@@ -381,6 +383,7 @@ static struct scsi_host_template ahci_platform_sht = {
static const struct of_device_id ahci_of_match[] = {
{.compatible = "brcm,bcm7425-ahci", .data = (void *)BRCM_SATA_BCM7425},
{.compatible = "brcm,bcm7445-ahci", .data = (void *)BRCM_SATA_BCM7445},
+ {.compatible = "brcm,bcm63138-ahci", .data = (void *)BRCM_SATA_BCM7445},
{.compatible = "brcm,bcm-nsp-ahci", .data = (void *)BRCM_SATA_NSP},
{},
};
@@ -411,6 +414,11 @@ static int brcm_ahci_probe(struct platform_device *pdev)
if (IS_ERR(priv->top_ctrl))
return PTR_ERR(priv->top_ctrl);
+ /* Reset is optional depending on platform */
+ priv->rcdev = devm_reset_control_get(&pdev->dev, "ahci");
+ if (!IS_ERR_OR_NULL(priv->rcdev))
+ reset_control_deassert(priv->rcdev);
+
if ((priv->version == BRCM_SATA_BCM7425) ||
(priv->version == BRCM_SATA_NSP)) {
priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ;
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 46f0bd75eff7..cf1e0e18a7a9 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -33,6 +33,13 @@ static const struct ata_port_info ahci_port_info = {
.port_ops = &ahci_platform_ops,
};
+static const struct ata_port_info ahci_port_info_nolpm = {
+ .flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_LPM,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_platform_ops,
+};
+
static struct scsi_host_template ahci_platform_sht = {
AHCI_SHT(DRV_NAME),
};
@@ -41,6 +48,7 @@ static int ahci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ahci_host_priv *hpriv;
+ const struct ata_port_info *port;
int rc;
hpriv = ahci_platform_get_resources(pdev,
@@ -58,7 +66,11 @@ static int ahci_probe(struct platform_device *pdev)
if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
- rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info,
+ port = acpi_device_get_match_data(dev);
+ if (!port)
+ port = &ahci_port_info;
+
+ rc = ahci_platform_init_host(pdev, hpriv, port,
&ahci_platform_sht);
if (rc)
goto disable_resources;
@@ -85,6 +97,7 @@ static const struct of_device_id ahci_of_match[] = {
MODULE_DEVICE_TABLE(of, ahci_of_match);
static const struct acpi_device_id ahci_acpi_match[] = {
+ { "APMC0D33", (unsigned long)&ahci_port_info_nolpm },
{ ACPI_DEVICE_CLASS(PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff) },
{},
};
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
index 631610b72aa5..911710643305 100644
--- a/drivers/ata/ahci_sunxi.c
+++ b/drivers/ata/ahci_sunxi.c
@@ -181,7 +181,7 @@ static int ahci_sunxi_probe(struct platform_device *pdev)
struct ahci_host_priv *hpriv;
int rc;
- hpriv = ahci_platform_get_resources(pdev, 0);
+ hpriv = ahci_platform_get_resources(pdev, AHCI_PLATFORM_GET_RESETS);
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
@@ -250,6 +250,7 @@ static SIMPLE_DEV_PM_OPS(ahci_sunxi_pm_ops, ahci_platform_suspend,
static const struct of_device_id ahci_sunxi_of_match[] = {
{ .compatible = "allwinner,sun4i-a10-ahci", },
+ { .compatible = "allwinner,sun8i-r40-ahci", },
{ },
};
MODULE_DEVICE_TABLE(of, ahci_sunxi_of_match);
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index c92c10d55374..4b900fc659f7 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -139,7 +139,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_disable_clks);
* ahci_platform_enable_regulators - Enable regulators
* @hpriv: host private area to store config values
*
- * This function enables all the regulators found in
+ * This function enables the controller and PHY regulators and those in
* hpriv->target_pwrs, if any. If a regulator fails to be enabled, it
* disables all the regulators already enabled in reverse order and
* returns an error.
@@ -151,6 +151,18 @@ int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv)
{
int rc, i;
+ if (hpriv->ahci_regulator) {
+ rc = regulator_enable(hpriv->ahci_regulator);
+ if (rc)
+ return rc;
+ }
+
+ if (hpriv->phy_regulator) {
+ rc = regulator_enable(hpriv->phy_regulator);
+ if (rc)
+ goto disable_ahci_pwrs;
+ }
+
for (i = 0; i < hpriv->nports; i++) {
if (!hpriv->target_pwrs[i])
continue;
@@ -167,6 +179,11 @@ disable_target_pwrs:
if (hpriv->target_pwrs[i])
regulator_disable(hpriv->target_pwrs[i]);
+ if (hpriv->phy_regulator)
+ regulator_disable(hpriv->phy_regulator);
+disable_ahci_pwrs:
+ if (hpriv->ahci_regulator)
+ regulator_disable(hpriv->ahci_regulator);
return rc;
}
EXPORT_SYMBOL_GPL(ahci_platform_enable_regulators);
@@ -175,7 +192,8 @@ EXPORT_SYMBOL_GPL(ahci_platform_enable_regulators);
* ahci_platform_disable_regulators - Disable regulators
* @hpriv: host private area to store config values
*
- * This function disables all regulators found in hpriv->target_pwrs.
+ * This function disables all regulators found in hpriv->target_pwrs,
+ * as well as the controller and PHY regulators.
*/
void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv)
{
@@ -186,6 +204,11 @@ void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv)
continue;
regulator_disable(hpriv->target_pwrs[i]);
}
+
+ if (hpriv->ahci_regulator)
+ regulator_disable(hpriv->ahci_regulator);
+ if (hpriv->phy_regulator)
+ regulator_disable(hpriv->phy_regulator);
}
EXPORT_SYMBOL_GPL(ahci_platform_disable_regulators);
/**
@@ -303,8 +326,8 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
/* No PHY support. Check if PHY is required. */
if (of_find_property(node, "phys", NULL)) {
dev_err(dev,
- "couldn't get PHY in node %s: ENOSYS\n",
- node->name);
+ "couldn't get PHY in node %pOFn: ENOSYS\n",
+ node);
break;
}
/* fall through */
@@ -316,8 +339,8 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
default:
dev_err(dev,
- "couldn't get PHY in node %s: %d\n",
- node->name, rc);
+ "couldn't get PHY in node %pOFn: %d\n",
+ node, rc);
break;
}
@@ -351,6 +374,7 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
*
* 1) mmio registers (IORESOURCE_MEM 0, mandatory)
* 2) regulator for controlling the targets power (optional)
+ * regulator for controlling the AHCI controller (optional)
* 3) 0 - AHCI_MAX_CLKS clocks, as specified in the devs devicetree node,
* or for non devicetree enabled platforms a single clock
* 4) resets, if flags has AHCI_PLATFORM_GET_RESETS (optional)
@@ -408,6 +432,24 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
hpriv->clks[i] = clk;
}
+ hpriv->ahci_regulator = devm_regulator_get_optional(dev, "ahci");
+ if (IS_ERR(hpriv->ahci_regulator)) {
+ rc = PTR_ERR(hpriv->ahci_regulator);
+ if (rc == -EPROBE_DEFER)
+ goto err_out;
+ rc = 0;
+ hpriv->ahci_regulator = NULL;
+ }
+
+ hpriv->phy_regulator = devm_regulator_get_optional(dev, "phy");
+ if (IS_ERR(hpriv->phy_regulator)) {
+ rc = PTR_ERR(hpriv->phy_regulator);
+ if (rc == -EPROBE_DEFER)
+ goto err_out;
+ rc = 0;
+ hpriv->phy_regulator = NULL;
+ }
+
if (flags & AHCI_PLATFORM_GET_RESETS) {
hpriv->rsts = devm_reset_control_array_get_optional_shared(dev);
if (IS_ERR(hpriv->rsts)) {
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 599e01bcdef2..a9dd4ea7467d 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5359,10 +5359,20 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
*/
int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
{
+ u64 done_mask, ap_qc_active = ap->qc_active;
int nr_done = 0;
- u64 done_mask;
- done_mask = ap->qc_active ^ qc_active;
+ /*
+	 * If the internal tag is set on ap->qc_active, then we care about
+	 * bit 0 of the passed-in qc_active mask. Move that bit up to match
+	 * the internal tag.
+ */
+ if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
+ qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
+ qc_active ^= qc_active & 0x01;
+ }
+
+ done_mask = ap_qc_active ^ qc_active;
if (unlikely(done_mask & qc_active)) {
ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
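A worked example of the internal-tag fixup above, as a standalone userspace sketch; TAG_INTERNAL == 32 here mirrors ATA_TAG_INTERNAL in this kernel:

    #include <stdint.h>

    #define TAG_INTERNAL 32	/* mirrors ATA_TAG_INTERNAL */

    /* Move the internal command from bit 0 up to the internal tag bit. */
    static uint64_t fixup_qc_active(uint64_t qc_active)
    {
    	qc_active |= (qc_active & 0x01) << TAG_INTERNAL;	/* copy bit 0 */
    	qc_active ^= qc_active & 0x01;				/* clear bit 0 */
    	return qc_active;
    }

    /* fixup_qc_active(0x5) == 0x100000004: tag 2 plus the internal tag */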
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 1984fc78c750..3d4887d0e84a 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -639,8 +639,8 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
if (args[0] == ATA_CMD_SMART) { /* hack -- ide driver does this too */
scsi_cmd[6] = args[3];
scsi_cmd[8] = args[1];
- scsi_cmd[10] = 0x4f;
- scsi_cmd[12] = 0xc2;
+ scsi_cmd[10] = ATA_SMART_LBAM_PASS;
+ scsi_cmd[12] = ATA_SMART_LBAH_PASS;
} else {
scsi_cmd[6] = args[1];
}
diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
index 4d49fd3c927b..843bb200a1ee 100644
--- a/drivers/ata/pata_atiixp.c
+++ b/drivers/ata/pata_atiixp.c
@@ -279,7 +279,7 @@ static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
const struct ata_port_info *ppi[] = { &info, &info };
/* SB600 doesn't have secondary port wired */
- if((pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE))
+ if (pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE)
ppi[1] = &ata_dummy_port_info;
return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index 0a550190955a..cc6d06c1b2c7 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -659,7 +659,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
* start of new transfer.
*/
drv_data->dma_rx_data.port = EP93XX_DMA_IDE;
- drv_data->dma_rx_data.direction = DMA_FROM_DEVICE;
+ drv_data->dma_rx_data.direction = DMA_DEV_TO_MEM;
drv_data->dma_rx_data.name = "ep93xx-pata-rx";
drv_data->dma_rx_channel = dma_request_channel(mask,
ep93xx_pata_dma_filter, &drv_data->dma_rx_data);
@@ -667,7 +667,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
return;
drv_data->dma_tx_data.port = EP93XX_DMA_IDE;
- drv_data->dma_tx_data.direction = DMA_TO_DEVICE;
+ drv_data->dma_tx_data.direction = DMA_MEM_TO_DEV;
drv_data->dma_tx_data.name = "ep93xx-pata-tx";
drv_data->dma_tx_channel = dma_request_channel(mask,
ep93xx_pata_dma_filter, &drv_data->dma_tx_data);
@@ -678,7 +678,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
/* Configure receive channel direction and source address */
memset(&conf, 0, sizeof(conf));
- conf.direction = DMA_FROM_DEVICE;
+ conf.direction = DMA_DEV_TO_MEM;
conf.src_addr = drv_data->udma_in_phys;
conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
if (dmaengine_slave_config(drv_data->dma_rx_channel, &conf)) {
@@ -689,7 +689,7 @@ static void ep93xx_pata_dma_init(struct ep93xx_pata_data *drv_data)
/* Configure transmit channel direction and destination address */
memset(&conf, 0, sizeof(conf));
- conf.direction = DMA_TO_DEVICE;
+ conf.direction = DMA_MEM_TO_DEV;
conf.dst_addr = drv_data->udma_out_phys;
conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
if (dmaengine_slave_config(drv_data->dma_tx_channel, &conf)) {
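
Background on this fix: DMA_TO_DEVICE and DMA_FROM_DEVICE belong to
enum dma_data_direction (the DMA-mapping API), while the dmaengine slave
fields written here take enum dma_transfer_direction. As currently defined
the misused constants happen to coincide numerically (DMA_TO_DEVICE ==
DMA_MEM_TO_DEV == 1, DMA_FROM_DEVICE == DMA_DEV_TO_MEM == 2), so this is a
type-correctness cleanup rather than a behavioural change. A minimal sketch
of which enum goes with which API:

	#include <linux/dma-direction.h>
	#include <linux/dmaengine.h>

	/* Streaming mappings (dma_map_single() and friends): */
	enum dma_data_direction map_dir = DMA_FROM_DEVICE;

	/* Slave transfers (dmaengine_slave_config(), dma_slave_config): */
	enum dma_transfer_direction xfer_dir = DMA_DEV_TO_MEM;
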
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index 0943e7065e0e..8e9213b36e31 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -209,22 +209,28 @@ static struct fw_priv *__lookup_fw_priv(const char *fw_name)
static int alloc_lookup_fw_priv(const char *fw_name,
struct firmware_cache *fwc,
struct fw_priv **fw_priv, void *dbuf,
- size_t size)
+ size_t size, enum fw_opt opt_flags)
{
struct fw_priv *tmp;
spin_lock(&fwc->lock);
- tmp = __lookup_fw_priv(fw_name);
- if (tmp) {
- kref_get(&tmp->ref);
- spin_unlock(&fwc->lock);
- *fw_priv = tmp;
- pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
- return 1;
+ if (!(opt_flags & FW_OPT_NOCACHE)) {
+ tmp = __lookup_fw_priv(fw_name);
+ if (tmp) {
+ kref_get(&tmp->ref);
+ spin_unlock(&fwc->lock);
+ *fw_priv = tmp;
+ pr_debug("batched request - sharing the same struct fw_priv and lookup for multiple requests\n");
+ return 1;
+ }
}
+
tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size);
- if (tmp)
- list_add(&tmp->list, &fwc->head);
+ if (tmp) {
+ INIT_LIST_HEAD(&tmp->list);
+ if (!(opt_flags & FW_OPT_NOCACHE))
+ list_add(&tmp->list, &fwc->head);
+ }
spin_unlock(&fwc->lock);
*fw_priv = tmp;
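
For context, FW_OPT_NOCACHE is passed by request_firmware_into_buf(), which
loads firmware into a caller-provided buffer; after this change such a
request neither reuses a cached struct fw_priv nor publishes its own on
fwc->head, so concurrent loads into distinct buffers cannot alias each
other's data. A minimal caller sketch (the firmware name and buffer are
placeholders):

	const struct firmware *fw;
	int err;

	/* buf/bufsz: a driver-owned, appropriately sized buffer (assumed). */
	err = request_firmware_into_buf(&fw, "vendor/blob.bin", dev,
					buf, bufsz);
	if (err)
		return err;
	/* ... consume fw->data / fw->size ... */
	release_firmware(fw);
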
@@ -493,7 +499,8 @@ int assign_fw(struct firmware *fw, struct device *device,
*/
static int
_request_firmware_prepare(struct firmware **firmware_p, const char *name,
- struct device *device, void *dbuf, size_t size)
+ struct device *device, void *dbuf, size_t size,
+ enum fw_opt opt_flags)
{
struct firmware *firmware;
struct fw_priv *fw_priv;
@@ -511,7 +518,8 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name,
return 0; /* assigned */
}
- ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size);
+ ret = alloc_lookup_fw_priv(name, &fw_cache, &fw_priv, dbuf, size,
+ opt_flags);
/*
* bind with 'priv' now to avoid warning in failure path
@@ -571,7 +579,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
goto out;
}
- ret = _request_firmware_prepare(&fw, name, device, buf, size);
+ ret = _request_firmware_prepare(&fw, name, device, buf, size,
+ opt_flags);
if (ret <= 0) /* error or already assigned */
goto out;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 3f68e2919dc5..a690fd400260 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1713,8 +1713,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_wait_for_subordinate(dev, async);
- if (async_error)
+ if (async_error) {
+ dev->power.direct_complete = false;
goto Complete;
+ }
/*
* If a device configured to wake up the system from sleep states
@@ -1726,6 +1728,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
pm_wakeup_event(dev, 0);
if (pm_wakeup_pending()) {
+ dev->power.direct_complete = false;
async_error = -EBUSY;
goto Complete;
}
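
For context: direct_complete is the optimization that lets a
runtime-suspended device skip its system suspend/resume callbacks; a driver
opts in by returning a positive value from its .prepare callback. The hunk
above clears the flag whenever the transition is already failing or a
wakeup is pending, so such a device is not left in a half-handled state. A
minimal opt-in sketch (driver names are hypothetical):

	/* Hypothetical driver opting in to direct_complete. */
	static int mydrv_prepare(struct device *dev)
	{
		/*
		 * A positive return tells the PM core this device may keep
		 * its runtime-suspend state across the system transition.
		 */
		return pm_runtime_suspended(dev) ? 1 : 0;
	}

	static const struct dev_pm_ops mydrv_pm_ops = {
		.prepare = mydrv_prepare,
	};
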
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
deleted file mode 100644
index 581312ac375f..000000000000
--- a/drivers/block/DAC960.c
+++ /dev/null
@@ -1,7229 +0,0 @@
-/*
-
- Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
-
- Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
- Portions Copyright 2002 by Mylex (An IBM Business Unit)
-
- This program is free software; you may redistribute and/or modify it under
- the terms of the GNU General Public License Version 2 as published by the
- Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- for complete details.
-
-*/
-
-
-#define DAC960_DriverVersion "2.5.49"
-#define DAC960_DriverDate "21 Aug 2007"
-
-
-#include <linux/compiler.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/miscdevice.h>
-#include <linux/blkdev.h>
-#include <linux/bio.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/genhd.h>
-#include <linux/hdreg.h>
-#include <linux/blkpg.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/reboot.h>
-#include <linux/spinlock.h>
-#include <linux/timer.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/jiffies.h>
-#include <linux/random.h>
-#include <linux/scatterlist.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-#include "DAC960.h"
-
-#define DAC960_GAM_MINOR 252
-
-
-static DEFINE_MUTEX(DAC960_mutex);
-static DAC960_Controller_T *DAC960_Controllers[DAC960_MaxControllers];
-static int DAC960_ControllerCount;
-static struct proc_dir_entry *DAC960_ProcDirectoryEntry;
-
-static long disk_size(DAC960_Controller_T *p, int drive_nr)
-{
- if (p->FirmwareType == DAC960_V1_Controller) {
- if (drive_nr >= p->LogicalDriveCount)
- return 0;
- return p->V1.LogicalDriveInformation[drive_nr].
- LogicalDriveSize;
- } else {
- DAC960_V2_LogicalDeviceInfo_T *i =
- p->V2.LogicalDeviceInformation[drive_nr];
- if (i == NULL)
- return 0;
- return i->ConfigurableDeviceSize;
- }
-}
-
-static int DAC960_open(struct block_device *bdev, fmode_t mode)
-{
- struct gendisk *disk = bdev->bd_disk;
- DAC960_Controller_T *p = disk->queue->queuedata;
- int drive_nr = (long)disk->private_data;
- int ret = -ENXIO;
-
- mutex_lock(&DAC960_mutex);
- if (p->FirmwareType == DAC960_V1_Controller) {
- if (p->V1.LogicalDriveInformation[drive_nr].
- LogicalDriveState == DAC960_V1_LogicalDrive_Offline)
- goto out;
- } else {
- DAC960_V2_LogicalDeviceInfo_T *i =
- p->V2.LogicalDeviceInformation[drive_nr];
- if (!i || i->LogicalDeviceState == DAC960_V2_LogicalDevice_Offline)
- goto out;
- }
-
- check_disk_change(bdev);
-
- if (!get_capacity(p->disks[drive_nr]))
- goto out;
- ret = 0;
-out:
- mutex_unlock(&DAC960_mutex);
- return ret;
-}
-
-static int DAC960_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- struct gendisk *disk = bdev->bd_disk;
- DAC960_Controller_T *p = disk->queue->queuedata;
- int drive_nr = (long)disk->private_data;
-
- if (p->FirmwareType == DAC960_V1_Controller) {
- geo->heads = p->V1.GeometryTranslationHeads;
- geo->sectors = p->V1.GeometryTranslationSectors;
- geo->cylinders = p->V1.LogicalDriveInformation[drive_nr].
- LogicalDriveSize / (geo->heads * geo->sectors);
- } else {
- DAC960_V2_LogicalDeviceInfo_T *i =
- p->V2.LogicalDeviceInformation[drive_nr];
- switch (i->DriveGeometry) {
- case DAC960_V2_Geometry_128_32:
- geo->heads = 128;
- geo->sectors = 32;
- break;
- case DAC960_V2_Geometry_255_63:
- geo->heads = 255;
- geo->sectors = 63;
- break;
- default:
- DAC960_Error("Illegal Logical Device Geometry %d\n",
- p, i->DriveGeometry);
- return -EINVAL;
- }
-
- geo->cylinders = i->ConfigurableDeviceSize /
- (geo->heads * geo->sectors);
- }
-
- return 0;
-}
-
-static unsigned int DAC960_check_events(struct gendisk *disk,
- unsigned int clearing)
-{
- DAC960_Controller_T *p = disk->queue->queuedata;
- int drive_nr = (long)disk->private_data;
-
- if (!p->LogicalDriveInitiallyAccessible[drive_nr])
- return DISK_EVENT_MEDIA_CHANGE;
- return 0;
-}
-
-static int DAC960_revalidate_disk(struct gendisk *disk)
-{
- DAC960_Controller_T *p = disk->queue->queuedata;
- int unit = (long)disk->private_data;
-
- set_capacity(disk, disk_size(p, unit));
- return 0;
-}
-
-static const struct block_device_operations DAC960_BlockDeviceOperations = {
- .owner = THIS_MODULE,
- .open = DAC960_open,
- .getgeo = DAC960_getgeo,
- .check_events = DAC960_check_events,
- .revalidate_disk = DAC960_revalidate_disk,
-};
-
-
-/*
- DAC960_AnnounceDriver announces the Driver Version and Date, Author's Name,
- Copyright Notice, and Electronic Mail Address.
-*/
-
-static void DAC960_AnnounceDriver(DAC960_Controller_T *Controller)
-{
- DAC960_Announce("***** DAC960 RAID Driver Version "
- DAC960_DriverVersion " of "
- DAC960_DriverDate " *****\n", Controller);
- DAC960_Announce("Copyright 1998-2001 by Leonard N. Zubkoff "
- "<lnz@dandelion.com>\n", Controller);
-}
-
-
-/*
- DAC960_Failure prints a standardized error message, and then returns false.
-*/
-
-static bool DAC960_Failure(DAC960_Controller_T *Controller,
- unsigned char *ErrorMessage)
-{
- DAC960_Error("While configuring DAC960 PCI RAID Controller at\n",
- Controller);
- if (Controller->IO_Address == 0)
- DAC960_Error("PCI Bus %d Device %d Function %d I/O Address N/A "
- "PCI Address 0x%X\n", Controller,
- Controller->Bus, Controller->Device,
- Controller->Function, Controller->PCI_Address);
- else DAC960_Error("PCI Bus %d Device %d Function %d I/O Address "
- "0x%X PCI Address 0x%X\n", Controller,
- Controller->Bus, Controller->Device,
- Controller->Function, Controller->IO_Address,
- Controller->PCI_Address);
- DAC960_Error("%s FAILED - DETACHING\n", Controller, ErrorMessage);
- return false;
-}
-
-/*
- init_dma_loaf() and slice_dma_loaf() are helper functions for
- aggregating the dma-mapped memory for a well-known collection of
- data structures that are of different lengths.
-
- These routines don't guarantee any alignment. The caller must
- include any space needed for alignment in the sizes of the structures
- that are passed in.
- */
-
-static bool init_dma_loaf(struct pci_dev *dev, struct dma_loaf *loaf,
- size_t len)
-{
- void *cpu_addr;
- dma_addr_t dma_handle;
-
- cpu_addr = pci_alloc_consistent(dev, len, &dma_handle);
- if (cpu_addr == NULL)
- return false;
-
- loaf->cpu_free = loaf->cpu_base = cpu_addr;
- loaf->dma_free = loaf->dma_base = dma_handle;
- loaf->length = len;
- memset(cpu_addr, 0, len);
- return true;
-}
-
-static void *slice_dma_loaf(struct dma_loaf *loaf, size_t len,
- dma_addr_t *dma_handle)
-{
- void *cpu_end = loaf->cpu_free + len;
- void *cpu_addr = loaf->cpu_free;
-
- BUG_ON(cpu_end > loaf->cpu_base + loaf->length);
- *dma_handle = loaf->dma_free;
- loaf->cpu_free = cpu_end;
- loaf->dma_free += len;
- return cpu_addr;
-}
-
-static void free_dma_loaf(struct pci_dev *dev, struct dma_loaf *loaf_handle)
-{
- if (loaf_handle->cpu_base != NULL)
- pci_free_consistent(dev, loaf_handle->length,
- loaf_handle->cpu_base, loaf_handle->dma_base);
-}
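
A minimal usage sketch of the loaf helpers (mirroring how the deleted
DAC960_V1_ReadControllerConfiguration below used them): allocate one
coherent region sized for every structure up front, then carve out
per-structure slices; pdev stands in for the controller's struct pci_dev.

	struct dma_loaf local_dma;
	dma_addr_t enquiry2_dma, config2_dma;
	DAC960_V1_Enquiry2_T *enquiry2;
	DAC960_V1_Config2_T *config2;

	if (!init_dma_loaf(pdev, &local_dma,
			   sizeof(*enquiry2) + sizeof(*config2)))
		return false;
	enquiry2 = slice_dma_loaf(&local_dma, sizeof(*enquiry2),
				  &enquiry2_dma);
	config2 = slice_dma_loaf(&local_dma, sizeof(*config2), &config2_dma);
	/* ... hand enquiry2_dma/config2_dma to the controller ... */
	free_dma_loaf(pdev, &local_dma);
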
-
-
-/*
- DAC960_CreateAuxiliaryStructures allocates and initializes the auxiliary
- data structures for Controller. It returns true on success and false on
- failure.
-*/
-
-static bool DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
-{
- int CommandAllocationLength, CommandAllocationGroupSize;
- int CommandsRemaining = 0, CommandIdentifier, CommandGroupByteCount;
- void *AllocationPointer = NULL;
- void *ScatterGatherCPU = NULL;
- dma_addr_t ScatterGatherDMA;
- struct dma_pool *ScatterGatherPool;
- void *RequestSenseCPU = NULL;
- dma_addr_t RequestSenseDMA;
- struct dma_pool *RequestSensePool = NULL;
-
- if (Controller->FirmwareType == DAC960_V1_Controller)
- {
- CommandAllocationLength = offsetof(DAC960_Command_T, V1.EndMarker);
- CommandAllocationGroupSize = DAC960_V1_CommandAllocationGroupSize;
- ScatterGatherPool = dma_pool_create("DAC960_V1_ScatterGather",
- &Controller->PCIDevice->dev,
- DAC960_V1_ScatterGatherLimit * sizeof(DAC960_V1_ScatterGatherSegment_T),
- sizeof(DAC960_V1_ScatterGatherSegment_T), 0);
- if (ScatterGatherPool == NULL)
- return DAC960_Failure(Controller,
- "AUXILIARY STRUCTURE CREATION (SG)");
- Controller->ScatterGatherPool = ScatterGatherPool;
- }
- else
- {
- CommandAllocationLength = offsetof(DAC960_Command_T, V2.EndMarker);
- CommandAllocationGroupSize = DAC960_V2_CommandAllocationGroupSize;
- ScatterGatherPool = dma_pool_create("DAC960_V2_ScatterGather",
- &Controller->PCIDevice->dev,
- DAC960_V2_ScatterGatherLimit * sizeof(DAC960_V2_ScatterGatherSegment_T),
- sizeof(DAC960_V2_ScatterGatherSegment_T), 0);
- if (ScatterGatherPool == NULL)
- return DAC960_Failure(Controller,
- "AUXILIARY STRUCTURE CREATION (SG)");
- RequestSensePool = dma_pool_create("DAC960_V2_RequestSense",
- &Controller->PCIDevice->dev, sizeof(DAC960_SCSI_RequestSense_T),
- sizeof(int), 0);
- if (RequestSensePool == NULL) {
- dma_pool_destroy(ScatterGatherPool);
- return DAC960_Failure(Controller,
- "AUXILIARY STRUCTURE CREATION (SG)");
- }
- Controller->ScatterGatherPool = ScatterGatherPool;
- Controller->V2.RequestSensePool = RequestSensePool;
- }
- Controller->CommandAllocationGroupSize = CommandAllocationGroupSize;
- Controller->FreeCommands = NULL;
- for (CommandIdentifier = 1;
- CommandIdentifier <= Controller->DriverQueueDepth;
- CommandIdentifier++)
- {
- DAC960_Command_T *Command;
- if (--CommandsRemaining <= 0)
- {
- CommandsRemaining =
- Controller->DriverQueueDepth - CommandIdentifier + 1;
- if (CommandsRemaining > CommandAllocationGroupSize)
- CommandsRemaining = CommandAllocationGroupSize;
- CommandGroupByteCount =
- CommandsRemaining * CommandAllocationLength;
- AllocationPointer = kzalloc(CommandGroupByteCount, GFP_ATOMIC);
- if (AllocationPointer == NULL)
- return DAC960_Failure(Controller,
- "AUXILIARY STRUCTURE CREATION");
- }
- Command = (DAC960_Command_T *) AllocationPointer;
- AllocationPointer += CommandAllocationLength;
- Command->CommandIdentifier = CommandIdentifier;
- Command->Controller = Controller;
- Command->Next = Controller->FreeCommands;
- Controller->FreeCommands = Command;
- Controller->Commands[CommandIdentifier-1] = Command;
- ScatterGatherCPU = dma_pool_alloc(ScatterGatherPool, GFP_ATOMIC,
- &ScatterGatherDMA);
- if (ScatterGatherCPU == NULL)
- return DAC960_Failure(Controller, "AUXILIARY STRUCTURE CREATION");
-
- if (RequestSensePool != NULL) {
- RequestSenseCPU = dma_pool_alloc(RequestSensePool, GFP_ATOMIC,
- &RequestSenseDMA);
- if (RequestSenseCPU == NULL) {
- dma_pool_free(ScatterGatherPool, ScatterGatherCPU,
- ScatterGatherDMA);
- return DAC960_Failure(Controller,
- "AUXILIARY STRUCTURE CREATION");
- }
- }
- if (Controller->FirmwareType == DAC960_V1_Controller) {
- Command->cmd_sglist = Command->V1.ScatterList;
- Command->V1.ScatterGatherList =
- (DAC960_V1_ScatterGatherSegment_T *)ScatterGatherCPU;
- Command->V1.ScatterGatherListDMA = ScatterGatherDMA;
- sg_init_table(Command->cmd_sglist, DAC960_V1_ScatterGatherLimit);
- } else {
- Command->cmd_sglist = Command->V2.ScatterList;
- Command->V2.ScatterGatherList =
- (DAC960_V2_ScatterGatherSegment_T *)ScatterGatherCPU;
- Command->V2.ScatterGatherListDMA = ScatterGatherDMA;
- Command->V2.RequestSense =
- (DAC960_SCSI_RequestSense_T *)RequestSenseCPU;
- Command->V2.RequestSenseDMA = RequestSenseDMA;
- sg_init_table(Command->cmd_sglist, DAC960_V2_ScatterGatherLimit);
- }
- }
- return true;
-}
-
-
-/*
- DAC960_DestroyAuxiliaryStructures deallocates the auxiliary data
- structures for Controller.
-*/
-
-static void DAC960_DestroyAuxiliaryStructures(DAC960_Controller_T *Controller)
-{
- int i;
- struct dma_pool *ScatterGatherPool = Controller->ScatterGatherPool;
- struct dma_pool *RequestSensePool = NULL;
- void *ScatterGatherCPU;
- dma_addr_t ScatterGatherDMA;
- void *RequestSenseCPU;
- dma_addr_t RequestSenseDMA;
- DAC960_Command_T *CommandGroup = NULL;
-
-
- if (Controller->FirmwareType == DAC960_V2_Controller)
- RequestSensePool = Controller->V2.RequestSensePool;
-
- Controller->FreeCommands = NULL;
- for (i = 0; i < Controller->DriverQueueDepth; i++)
- {
- DAC960_Command_T *Command = Controller->Commands[i];
-
- if (Command == NULL)
- continue;
-
- if (Controller->FirmwareType == DAC960_V1_Controller) {
- ScatterGatherCPU = (void *)Command->V1.ScatterGatherList;
- ScatterGatherDMA = Command->V1.ScatterGatherListDMA;
- RequestSenseCPU = NULL;
- RequestSenseDMA = (dma_addr_t)0;
- } else {
- ScatterGatherCPU = (void *)Command->V2.ScatterGatherList;
- ScatterGatherDMA = Command->V2.ScatterGatherListDMA;
- RequestSenseCPU = (void *)Command->V2.RequestSense;
- RequestSenseDMA = Command->V2.RequestSenseDMA;
- }
- if (ScatterGatherCPU != NULL)
- dma_pool_free(ScatterGatherPool, ScatterGatherCPU, ScatterGatherDMA);
- if (RequestSenseCPU != NULL)
- dma_pool_free(RequestSensePool, RequestSenseCPU, RequestSenseDMA);
-
- if ((Command->CommandIdentifier
- % Controller->CommandAllocationGroupSize) == 1) {
- /*
- * We can't free the group of commands until all of the
- * request sense and scatter gather dma structures are free.
- * Remember the beginning of the group, but don't free it
- * until we've reached the beginning of the next group.
- */
- kfree(CommandGroup);
- CommandGroup = Command;
- }
- Controller->Commands[i] = NULL;
- }
- kfree(CommandGroup);
-
- if (Controller->CombinedStatusBuffer != NULL)
- {
- kfree(Controller->CombinedStatusBuffer);
- Controller->CombinedStatusBuffer = NULL;
- Controller->CurrentStatusBuffer = NULL;
- }
-
- dma_pool_destroy(ScatterGatherPool);
- if (Controller->FirmwareType == DAC960_V1_Controller)
- return;
-
- dma_pool_destroy(RequestSensePool);
-
- for (i = 0; i < DAC960_MaxLogicalDrives; i++) {
- kfree(Controller->V2.LogicalDeviceInformation[i]);
- Controller->V2.LogicalDeviceInformation[i] = NULL;
- }
-
- for (i = 0; i < DAC960_V2_MaxPhysicalDevices; i++)
- {
- kfree(Controller->V2.PhysicalDeviceInformation[i]);
- Controller->V2.PhysicalDeviceInformation[i] = NULL;
- kfree(Controller->V2.InquiryUnitSerialNumber[i]);
- Controller->V2.InquiryUnitSerialNumber[i] = NULL;
- }
-}
-
-
-/*
- DAC960_V1_ClearCommand clears critical fields of Command for DAC960 V1
- Firmware Controllers.
-*/
-
-static inline void DAC960_V1_ClearCommand(DAC960_Command_T *Command)
-{
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- memset(CommandMailbox, 0, sizeof(DAC960_V1_CommandMailbox_T));
- Command->V1.CommandStatus = 0;
-}
-
-
-/*
- DAC960_V2_ClearCommand clears critical fields of Command for DAC960 V2
- Firmware Controllers.
-*/
-
-static inline void DAC960_V2_ClearCommand(DAC960_Command_T *Command)
-{
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- memset(CommandMailbox, 0, sizeof(DAC960_V2_CommandMailbox_T));
- Command->V2.CommandStatus = 0;
-}
-
-
-/*
- DAC960_AllocateCommand allocates a Command structure from Controller's
- free list. During driver initialization, a special initialization command
- has been placed on the free list to guarantee that command allocation can
- never fail.
-*/
-
-static inline DAC960_Command_T *DAC960_AllocateCommand(DAC960_Controller_T
- *Controller)
-{
- DAC960_Command_T *Command = Controller->FreeCommands;
- if (Command == NULL) return NULL;
- Controller->FreeCommands = Command->Next;
- Command->Next = NULL;
- return Command;
-}
-
-
-/*
- DAC960_DeallocateCommand deallocates Command, returning it to Controller's
- free list.
-*/
-
-static inline void DAC960_DeallocateCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
-
- Command->Request = NULL;
- Command->Next = Controller->FreeCommands;
- Controller->FreeCommands = Command;
-}
-
-
-/*
- DAC960_WaitForCommand waits for a wake_up on Controller's Command Wait Queue.
-*/
-
-static void DAC960_WaitForCommand(DAC960_Controller_T *Controller)
-{
- spin_unlock_irq(&Controller->queue_lock);
- __wait_event(Controller->CommandWaitQueue, Controller->FreeCommands);
- spin_lock_irq(&Controller->queue_lock);
-}
-
-/*
- DAC960_GEM_QueueCommand queues Command for DAC960 GEM Series Controllers.
-*/
-
-static void DAC960_GEM_QueueCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_CommandMailbox_T *NextCommandMailbox =
- Controller->V2.NextCommandMailbox;
-
- CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
- DAC960_GEM_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
-
- if (Controller->V2.PreviousCommandMailbox1->Words[0] == 0 ||
- Controller->V2.PreviousCommandMailbox2->Words[0] == 0)
- DAC960_GEM_MemoryMailboxNewCommand(ControllerBaseAddress);
-
- Controller->V2.PreviousCommandMailbox2 =
- Controller->V2.PreviousCommandMailbox1;
- Controller->V2.PreviousCommandMailbox1 = NextCommandMailbox;
-
- if (++NextCommandMailbox > Controller->V2.LastCommandMailbox)
- NextCommandMailbox = Controller->V2.FirstCommandMailbox;
-
- Controller->V2.NextCommandMailbox = NextCommandMailbox;
-}
-
-/*
- DAC960_BA_QueueCommand queues Command for DAC960 BA Series Controllers.
-*/
-
-static void DAC960_BA_QueueCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_CommandMailbox_T *NextCommandMailbox =
- Controller->V2.NextCommandMailbox;
- CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
- DAC960_BA_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
- if (Controller->V2.PreviousCommandMailbox1->Words[0] == 0 ||
- Controller->V2.PreviousCommandMailbox2->Words[0] == 0)
- DAC960_BA_MemoryMailboxNewCommand(ControllerBaseAddress);
- Controller->V2.PreviousCommandMailbox2 =
- Controller->V2.PreviousCommandMailbox1;
- Controller->V2.PreviousCommandMailbox1 = NextCommandMailbox;
- if (++NextCommandMailbox > Controller->V2.LastCommandMailbox)
- NextCommandMailbox = Controller->V2.FirstCommandMailbox;
- Controller->V2.NextCommandMailbox = NextCommandMailbox;
-}
-
-
-/*
- DAC960_LP_QueueCommand queues Command for DAC960 LP Series Controllers.
-*/
-
-static void DAC960_LP_QueueCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_CommandMailbox_T *NextCommandMailbox =
- Controller->V2.NextCommandMailbox;
- CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
- DAC960_LP_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
- if (Controller->V2.PreviousCommandMailbox1->Words[0] == 0 ||
- Controller->V2.PreviousCommandMailbox2->Words[0] == 0)
- DAC960_LP_MemoryMailboxNewCommand(ControllerBaseAddress);
- Controller->V2.PreviousCommandMailbox2 =
- Controller->V2.PreviousCommandMailbox1;
- Controller->V2.PreviousCommandMailbox1 = NextCommandMailbox;
- if (++NextCommandMailbox > Controller->V2.LastCommandMailbox)
- NextCommandMailbox = Controller->V2.FirstCommandMailbox;
- Controller->V2.NextCommandMailbox = NextCommandMailbox;
-}
-
-
-/*
- DAC960_LA_QueueCommandDualMode queues Command for DAC960 LA Series
- Controllers with Dual Mode Firmware.
-*/
-
-static void DAC960_LA_QueueCommandDualMode(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- DAC960_V1_CommandMailbox_T *NextCommandMailbox =
- Controller->V1.NextCommandMailbox;
- CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
- DAC960_LA_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
- if (Controller->V1.PreviousCommandMailbox1->Words[0] == 0 ||
- Controller->V1.PreviousCommandMailbox2->Words[0] == 0)
- DAC960_LA_MemoryMailboxNewCommand(ControllerBaseAddress);
- Controller->V1.PreviousCommandMailbox2 =
- Controller->V1.PreviousCommandMailbox1;
- Controller->V1.PreviousCommandMailbox1 = NextCommandMailbox;
- if (++NextCommandMailbox > Controller->V1.LastCommandMailbox)
- NextCommandMailbox = Controller->V1.FirstCommandMailbox;
- Controller->V1.NextCommandMailbox = NextCommandMailbox;
-}
-
-
-/*
- DAC960_LA_QueueCommandSingleMode queues Command for DAC960 LA Series
- Controllers with Single Mode Firmware.
-*/
-
-static void DAC960_LA_QueueCommandSingleMode(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- DAC960_V1_CommandMailbox_T *NextCommandMailbox =
- Controller->V1.NextCommandMailbox;
- CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
- DAC960_LA_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
- if (Controller->V1.PreviousCommandMailbox1->Words[0] == 0 ||
- Controller->V1.PreviousCommandMailbox2->Words[0] == 0)
- DAC960_LA_HardwareMailboxNewCommand(ControllerBaseAddress);
- Controller->V1.PreviousCommandMailbox2 =
- Controller->V1.PreviousCommandMailbox1;
- Controller->V1.PreviousCommandMailbox1 = NextCommandMailbox;
- if (++NextCommandMailbox > Controller->V1.LastCommandMailbox)
- NextCommandMailbox = Controller->V1.FirstCommandMailbox;
- Controller->V1.NextCommandMailbox = NextCommandMailbox;
-}
-
-
-/*
- DAC960_PG_QueueCommandDualMode queues Command for DAC960 PG Series
- Controllers with Dual Mode Firmware.
-*/
-
-static void DAC960_PG_QueueCommandDualMode(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- DAC960_V1_CommandMailbox_T *NextCommandMailbox =
- Controller->V1.NextCommandMailbox;
- CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
- DAC960_PG_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
- if (Controller->V1.PreviousCommandMailbox1->Words[0] == 0 ||
- Controller->V1.PreviousCommandMailbox2->Words[0] == 0)
- DAC960_PG_MemoryMailboxNewCommand(ControllerBaseAddress);
- Controller->V1.PreviousCommandMailbox2 =
- Controller->V1.PreviousCommandMailbox1;
- Controller->V1.PreviousCommandMailbox1 = NextCommandMailbox;
- if (++NextCommandMailbox > Controller->V1.LastCommandMailbox)
- NextCommandMailbox = Controller->V1.FirstCommandMailbox;
- Controller->V1.NextCommandMailbox = NextCommandMailbox;
-}
-
-
-/*
- DAC960_PG_QueueCommandSingleMode queues Command for DAC960 PG Series
- Controllers with Single Mode Firmware.
-*/
-
-static void DAC960_PG_QueueCommandSingleMode(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- DAC960_V1_CommandMailbox_T *NextCommandMailbox =
- Controller->V1.NextCommandMailbox;
- CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
- DAC960_PG_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
- if (Controller->V1.PreviousCommandMailbox1->Words[0] == 0 ||
- Controller->V1.PreviousCommandMailbox2->Words[0] == 0)
- DAC960_PG_HardwareMailboxNewCommand(ControllerBaseAddress);
- Controller->V1.PreviousCommandMailbox2 =
- Controller->V1.PreviousCommandMailbox1;
- Controller->V1.PreviousCommandMailbox1 = NextCommandMailbox;
- if (++NextCommandMailbox > Controller->V1.LastCommandMailbox)
- NextCommandMailbox = Controller->V1.FirstCommandMailbox;
- Controller->V1.NextCommandMailbox = NextCommandMailbox;
-}
-
-
-/*
- DAC960_PD_QueueCommand queues Command for DAC960 PD Series Controllers.
-*/
-
-static void DAC960_PD_QueueCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
- while (DAC960_PD_MailboxFullP(ControllerBaseAddress))
- udelay(1);
- DAC960_PD_WriteCommandMailbox(ControllerBaseAddress, CommandMailbox);
- DAC960_PD_NewCommand(ControllerBaseAddress);
-}
-
-
-/*
- DAC960_P_QueueCommand queues Command for DAC960 P Series Controllers.
-*/
-
-static void DAC960_P_QueueCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
- switch (CommandMailbox->Common.CommandOpcode)
- {
- case DAC960_V1_Enquiry:
- CommandMailbox->Common.CommandOpcode = DAC960_V1_Enquiry_Old;
- break;
- case DAC960_V1_GetDeviceState:
- CommandMailbox->Common.CommandOpcode = DAC960_V1_GetDeviceState_Old;
- break;
- case DAC960_V1_Read:
- CommandMailbox->Common.CommandOpcode = DAC960_V1_Read_Old;
- DAC960_PD_To_P_TranslateReadWriteCommand(CommandMailbox);
- break;
- case DAC960_V1_Write:
- CommandMailbox->Common.CommandOpcode = DAC960_V1_Write_Old;
- DAC960_PD_To_P_TranslateReadWriteCommand(CommandMailbox);
- break;
- case DAC960_V1_ReadWithScatterGather:
- CommandMailbox->Common.CommandOpcode =
- DAC960_V1_ReadWithScatterGather_Old;
- DAC960_PD_To_P_TranslateReadWriteCommand(CommandMailbox);
- break;
- case DAC960_V1_WriteWithScatterGather:
- CommandMailbox->Common.CommandOpcode =
- DAC960_V1_WriteWithScatterGather_Old;
- DAC960_PD_To_P_TranslateReadWriteCommand(CommandMailbox);
- break;
- default:
- break;
- }
- while (DAC960_PD_MailboxFullP(ControllerBaseAddress))
- udelay(1);
- DAC960_PD_WriteCommandMailbox(ControllerBaseAddress, CommandMailbox);
- DAC960_PD_NewCommand(ControllerBaseAddress);
-}
-
-
-/*
- DAC960_ExecuteCommand executes Command and waits for completion, except
- when called from interrupt context, where it cannot sleep and returns
- as soon as the command has been queued.
-*/
-
-static void DAC960_ExecuteCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- DECLARE_COMPLETION_ONSTACK(Completion);
- unsigned long flags;
- Command->Completion = &Completion;
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_QueueCommand(Command);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
-
- if (in_interrupt())
- return;
- wait_for_completion(&Completion);
-}
-
-
-/*
- DAC960_V1_ExecuteType3 executes a DAC960 V1 Firmware Controller Type 3
- Command and waits for completion. It returns true on success and false
- on failure.
-*/
-
-static bool DAC960_V1_ExecuteType3(DAC960_Controller_T *Controller,
- DAC960_V1_CommandOpcode_T CommandOpcode,
- dma_addr_t DataDMA)
-{
- DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- DAC960_V1_CommandStatus_T CommandStatus;
- DAC960_V1_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- CommandMailbox->Type3.CommandOpcode = CommandOpcode;
- CommandMailbox->Type3.BusAddress = DataDMA;
- DAC960_ExecuteCommand(Command);
- CommandStatus = Command->V1.CommandStatus;
- DAC960_DeallocateCommand(Command);
- return (CommandStatus == DAC960_V1_NormalCompletion);
-}
-
-
-/*
- DAC960_V1_ExecuteType3B executes a DAC960 V1 Firmware Controller Type 3B
- Command and waits for completion. It returns true on success and false
- on failure.
-*/
-
-static bool DAC960_V1_ExecuteType3B(DAC960_Controller_T *Controller,
- DAC960_V1_CommandOpcode_T CommandOpcode,
- unsigned char CommandOpcode2,
- dma_addr_t DataDMA)
-{
- DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- DAC960_V1_CommandStatus_T CommandStatus;
- DAC960_V1_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- CommandMailbox->Type3B.CommandOpcode = CommandOpcode;
- CommandMailbox->Type3B.CommandOpcode2 = CommandOpcode2;
- CommandMailbox->Type3B.BusAddress = DataDMA;
- DAC960_ExecuteCommand(Command);
- CommandStatus = Command->V1.CommandStatus;
- DAC960_DeallocateCommand(Command);
- return (CommandStatus == DAC960_V1_NormalCompletion);
-}
-
-
-/*
- DAC960_V1_ExecuteType3D executes a DAC960 V1 Firmware Controller Type 3D
- Command and waits for completion. It returns true on success and false
- on failure.
-*/
-
-static bool DAC960_V1_ExecuteType3D(DAC960_Controller_T *Controller,
- DAC960_V1_CommandOpcode_T CommandOpcode,
- unsigned char Channel,
- unsigned char TargetID,
- dma_addr_t DataDMA)
-{
- DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- DAC960_V1_CommandStatus_T CommandStatus;
- DAC960_V1_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- CommandMailbox->Type3D.CommandOpcode = CommandOpcode;
- CommandMailbox->Type3D.Channel = Channel;
- CommandMailbox->Type3D.TargetID = TargetID;
- CommandMailbox->Type3D.BusAddress = DataDMA;
- DAC960_ExecuteCommand(Command);
- CommandStatus = Command->V1.CommandStatus;
- DAC960_DeallocateCommand(Command);
- return (CommandStatus == DAC960_V1_NormalCompletion);
-}
-
-
-/*
- DAC960_V2_GeneralInfo executes a DAC960 V2 Firmware General Information
- Reading IOCTL Command and waits for completion. It returns true on success
- and false on failure.
-
- Return data is placed in the controller's HealthStatusBuffer, which is
- dma-able memory.
-*/
-
-static bool DAC960_V2_GeneralInfo(DAC960_Controller_T *Controller)
-{
- DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_CommandStatus_T CommandStatus;
- DAC960_V2_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- CommandMailbox->Common.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->Common.CommandControlBits
- .DataTransferControllerToHost = true;
- CommandMailbox->Common.CommandControlBits
- .NoAutoRequestSense = true;
- CommandMailbox->Common.DataTransferSize = sizeof(DAC960_V2_HealthStatusBuffer_T);
- CommandMailbox->Common.IOCTL_Opcode = DAC960_V2_GetHealthStatus;
- CommandMailbox->Common.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- Controller->V2.HealthStatusBufferDMA;
- CommandMailbox->Common.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->Common.DataTransferSize;
- DAC960_ExecuteCommand(Command);
- CommandStatus = Command->V2.CommandStatus;
- DAC960_DeallocateCommand(Command);
- return (CommandStatus == DAC960_V2_NormalCompletion);
-}
-
-
-/*
- DAC960_V2_NewControllerInfo executes a DAC960 V2 Firmware Controller
- Information Reading IOCTL Command and waits for completion. It returns
- true on success and false on failure.
-
- Data is returned in the controller's V2.NewControllerInformation dma-able
- memory buffer.
-*/
-
-static bool DAC960_V2_NewControllerInfo(DAC960_Controller_T *Controller)
-{
- DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_CommandStatus_T CommandStatus;
- DAC960_V2_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- CommandMailbox->ControllerInfo.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->ControllerInfo.CommandControlBits
- .DataTransferControllerToHost = true;
- CommandMailbox->ControllerInfo.CommandControlBits
- .NoAutoRequestSense = true;
- CommandMailbox->ControllerInfo.DataTransferSize = sizeof(DAC960_V2_ControllerInfo_T);
- CommandMailbox->ControllerInfo.ControllerNumber = 0;
- CommandMailbox->ControllerInfo.IOCTL_Opcode = DAC960_V2_GetControllerInfo;
- CommandMailbox->ControllerInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- Controller->V2.NewControllerInformationDMA;
- CommandMailbox->ControllerInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->ControllerInfo.DataTransferSize;
- DAC960_ExecuteCommand(Command);
- CommandStatus = Command->V2.CommandStatus;
- DAC960_DeallocateCommand(Command);
- return (CommandStatus == DAC960_V2_NormalCompletion);
-}
-
-
-/*
- DAC960_V2_NewLogicalDeviceInfo executes a DAC960 V2 Firmware Controller
- Logical Device Information Reading IOCTL Command and waits for completion.
- It returns true on success and false on failure.
-
- Data is returned in the controller's V2.NewLogicalDeviceInformation
- dma-able memory buffer.
-*/
-
-static bool DAC960_V2_NewLogicalDeviceInfo(DAC960_Controller_T *Controller,
- unsigned short LogicalDeviceNumber)
-{
- DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_CommandStatus_T CommandStatus;
-
- DAC960_V2_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- CommandMailbox->LogicalDeviceInfo.CommandOpcode =
- DAC960_V2_IOCTL;
- CommandMailbox->LogicalDeviceInfo.CommandControlBits
- .DataTransferControllerToHost = true;
- CommandMailbox->LogicalDeviceInfo.CommandControlBits
- .NoAutoRequestSense = true;
- CommandMailbox->LogicalDeviceInfo.DataTransferSize =
- sizeof(DAC960_V2_LogicalDeviceInfo_T);
- CommandMailbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber =
- LogicalDeviceNumber;
- CommandMailbox->LogicalDeviceInfo.IOCTL_Opcode = DAC960_V2_GetLogicalDeviceInfoValid;
- CommandMailbox->LogicalDeviceInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- Controller->V2.NewLogicalDeviceInformationDMA;
- CommandMailbox->LogicalDeviceInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->LogicalDeviceInfo.DataTransferSize;
- DAC960_ExecuteCommand(Command);
- CommandStatus = Command->V2.CommandStatus;
- DAC960_DeallocateCommand(Command);
- return (CommandStatus == DAC960_V2_NormalCompletion);
-}
-
-
-/*
- DAC960_V2_NewPhysicalDeviceInfo executes a DAC960 V2 Firmware Controller "Read
- Physical Device Information" IOCTL Command and waits for completion. It
- returns true on success and false on failure.
-
- The Channel, TargetID, LogicalUnit arguments should be 0 the first time
- this function is called for a given controller. This will return data
- for the "first" device on that controller. The returned data includes a
- Channel, TargetID, LogicalUnit that can be passed in to this routine to
- get data for the NEXT device on that controller.
-
- Data is stored in the controller's V2.NewPhysicalDeviceInfo dma-able
- memory buffer.
-
-*/
-
-static bool DAC960_V2_NewPhysicalDeviceInfo(DAC960_Controller_T *Controller,
- unsigned char Channel,
- unsigned char TargetID,
- unsigned char LogicalUnit)
-{
- DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_CommandStatus_T CommandStatus;
-
- DAC960_V2_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- CommandMailbox->PhysicalDeviceInfo.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->PhysicalDeviceInfo.CommandControlBits
- .DataTransferControllerToHost = true;
- CommandMailbox->PhysicalDeviceInfo.CommandControlBits
- .NoAutoRequestSense = true;
- CommandMailbox->PhysicalDeviceInfo.DataTransferSize =
- sizeof(DAC960_V2_PhysicalDeviceInfo_T);
- CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.LogicalUnit = LogicalUnit;
- CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.TargetID = TargetID;
- CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.Channel = Channel;
- CommandMailbox->PhysicalDeviceInfo.IOCTL_Opcode =
- DAC960_V2_GetPhysicalDeviceInfoValid;
- CommandMailbox->PhysicalDeviceInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- Controller->V2.NewPhysicalDeviceInformationDMA;
- CommandMailbox->PhysicalDeviceInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->PhysicalDeviceInfo.DataTransferSize;
- DAC960_ExecuteCommand(Command);
- CommandStatus = Command->V2.CommandStatus;
- DAC960_DeallocateCommand(Command);
- return (CommandStatus == DAC960_V2_NormalCompletion);
-}
-
-
-static void DAC960_V2_ConstructNewUnitSerialNumber(
- DAC960_Controller_T *Controller,
- DAC960_V2_CommandMailbox_T *CommandMailbox, int Channel, int TargetID,
- int LogicalUnit)
-{
- CommandMailbox->SCSI_10.CommandOpcode = DAC960_V2_SCSI_10_Passthru;
- CommandMailbox->SCSI_10.CommandControlBits
- .DataTransferControllerToHost = true;
- CommandMailbox->SCSI_10.CommandControlBits
- .NoAutoRequestSense = true;
- CommandMailbox->SCSI_10.DataTransferSize =
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
- CommandMailbox->SCSI_10.PhysicalDevice.LogicalUnit = LogicalUnit;
- CommandMailbox->SCSI_10.PhysicalDevice.TargetID = TargetID;
- CommandMailbox->SCSI_10.PhysicalDevice.Channel = Channel;
- CommandMailbox->SCSI_10.CDBLength = 6;
- CommandMailbox->SCSI_10.SCSI_CDB[0] = 0x12; /* INQUIRY */
- CommandMailbox->SCSI_10.SCSI_CDB[1] = 1; /* EVPD = 1 */
- CommandMailbox->SCSI_10.SCSI_CDB[2] = 0x80; /* Page Code */
- CommandMailbox->SCSI_10.SCSI_CDB[3] = 0; /* Reserved */
- CommandMailbox->SCSI_10.SCSI_CDB[4] =
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
- CommandMailbox->SCSI_10.SCSI_CDB[5] = 0; /* Control */
- CommandMailbox->SCSI_10.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- Controller->V2.NewInquiryUnitSerialNumberDMA;
- CommandMailbox->SCSI_10.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->SCSI_10.DataTransferSize;
-}
-
-
-/*
- DAC960_V2_NewInquiryUnitSerialNumber executes a SCSI pass-through
- Inquiry command to a SCSI device identified by Channel number,
- Target ID, and Logical Unit Number, and waits for completion of
- the command.
-
- The return data includes Unit Serial Number information for the
- specified device.
-
- Data is stored in the controller's V2.NewInquiryUnitSerialNumber dma-able
- memory buffer.
-*/
-
-static bool DAC960_V2_NewInquiryUnitSerialNumber(DAC960_Controller_T *Controller,
- int Channel, int TargetID, int LogicalUnit)
-{
- DAC960_Command_T *Command;
- DAC960_V2_CommandMailbox_T *CommandMailbox;
- DAC960_V2_CommandStatus_T CommandStatus;
-
- Command = DAC960_AllocateCommand(Controller);
- CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
-
- DAC960_V2_ConstructNewUnitSerialNumber(Controller, CommandMailbox,
- Channel, TargetID, LogicalUnit);
-
- DAC960_ExecuteCommand(Command);
- CommandStatus = Command->V2.CommandStatus;
- DAC960_DeallocateCommand(Command);
- return (CommandStatus == DAC960_V2_NormalCompletion);
-}
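
The pass-through constructed above is an ordinary 6-byte SCSI INQUIRY with
EVPD set, selecting Vital Product Data page 0x80 (Unit Serial Number);
spelled out as a bare CDB:

	u8 cdb[6] = {
		0x12,	/* INQUIRY */
		0x01,	/* EVPD = 1: return a vital product data page */
		0x80,	/* page code 0x80: Unit Serial Number */
		0x00,	/* reserved */
		sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T), /* alloc len */
		0x00,	/* control */
	};
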
-
-
-/*
- DAC960_V2_DeviceOperation executes a DAC960 V2 Firmware Controller Device
- Operation IOCTL Command and waits for completion. It returns true on
- success and false on failure.
-*/
-
-static bool DAC960_V2_DeviceOperation(DAC960_Controller_T *Controller,
- DAC960_V2_IOCTL_Opcode_T IOCTL_Opcode,
- DAC960_V2_OperationDevice_T
- OperationDevice)
-{
- DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_CommandStatus_T CommandStatus;
- DAC960_V2_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- CommandMailbox->DeviceOperation.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->DeviceOperation.CommandControlBits
- .DataTransferControllerToHost = true;
- CommandMailbox->DeviceOperation.CommandControlBits
- .NoAutoRequestSense = true;
- CommandMailbox->DeviceOperation.IOCTL_Opcode = IOCTL_Opcode;
- CommandMailbox->DeviceOperation.OperationDevice = OperationDevice;
- DAC960_ExecuteCommand(Command);
- CommandStatus = Command->V2.CommandStatus;
- DAC960_DeallocateCommand(Command);
- return (CommandStatus == DAC960_V2_NormalCompletion);
-}
-
-
-/*
- DAC960_V1_EnableMemoryMailboxInterface enables the Memory Mailbox Interface
- for DAC960 V1 Firmware Controllers.
-
- PD and P controller types have no memory mailbox, but still need the
- other dma mapped memory.
-*/
-
-static bool DAC960_V1_EnableMemoryMailboxInterface(DAC960_Controller_T
- *Controller)
-{
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_HardwareType_T hw_type = Controller->HardwareType;
- struct pci_dev *PCI_Device = Controller->PCIDevice;
- struct dma_loaf *DmaPages = &Controller->DmaPages;
- size_t DmaPagesSize;
- size_t CommandMailboxesSize;
- size_t StatusMailboxesSize;
-
- DAC960_V1_CommandMailbox_T *CommandMailboxesMemory;
- dma_addr_t CommandMailboxesMemoryDMA;
-
- DAC960_V1_StatusMailbox_T *StatusMailboxesMemory;
- dma_addr_t StatusMailboxesMemoryDMA;
-
- DAC960_V1_CommandMailbox_T CommandMailbox;
- DAC960_V1_CommandStatus_T CommandStatus;
- int TimeoutCounter;
- int i;
-
- memset(&CommandMailbox, 0, sizeof(DAC960_V1_CommandMailbox_T));
-
- if (pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(32)))
- return DAC960_Failure(Controller, "DMA mask out of range");
-
- if ((hw_type == DAC960_PD_Controller) || (hw_type == DAC960_P_Controller)) {
- CommandMailboxesSize = 0;
- StatusMailboxesSize = 0;
- } else {
- CommandMailboxesSize = DAC960_V1_CommandMailboxCount * sizeof(DAC960_V1_CommandMailbox_T);
- StatusMailboxesSize = DAC960_V1_StatusMailboxCount * sizeof(DAC960_V1_StatusMailbox_T);
- }
- DmaPagesSize = CommandMailboxesSize + StatusMailboxesSize +
- sizeof(DAC960_V1_DCDB_T) + sizeof(DAC960_V1_Enquiry_T) +
- sizeof(DAC960_V1_ErrorTable_T) + sizeof(DAC960_V1_EventLogEntry_T) +
- sizeof(DAC960_V1_RebuildProgress_T) +
- sizeof(DAC960_V1_LogicalDriveInformationArray_T) +
- sizeof(DAC960_V1_BackgroundInitializationStatus_T) +
- sizeof(DAC960_V1_DeviceState_T) + sizeof(DAC960_SCSI_Inquiry_T) +
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
-
- if (!init_dma_loaf(PCI_Device, DmaPages, DmaPagesSize))
- return false;
-
-
- if ((hw_type == DAC960_PD_Controller) || (hw_type == DAC960_P_Controller))
- goto skip_mailboxes;
-
- CommandMailboxesMemory = slice_dma_loaf(DmaPages,
- CommandMailboxesSize, &CommandMailboxesMemoryDMA);
-
- /* These are the base addresses for the command memory mailbox array */
- Controller->V1.FirstCommandMailbox = CommandMailboxesMemory;
- Controller->V1.FirstCommandMailboxDMA = CommandMailboxesMemoryDMA;
-
- CommandMailboxesMemory += DAC960_V1_CommandMailboxCount - 1;
- Controller->V1.LastCommandMailbox = CommandMailboxesMemory;
- Controller->V1.NextCommandMailbox = Controller->V1.FirstCommandMailbox;
- Controller->V1.PreviousCommandMailbox1 = Controller->V1.LastCommandMailbox;
- Controller->V1.PreviousCommandMailbox2 =
- Controller->V1.LastCommandMailbox - 1;
-
- /* These are the base addresses for the status memory mailbox array */
- StatusMailboxesMemory = slice_dma_loaf(DmaPages,
- StatusMailboxesSize, &StatusMailboxesMemoryDMA);
-
- Controller->V1.FirstStatusMailbox = StatusMailboxesMemory;
- Controller->V1.FirstStatusMailboxDMA = StatusMailboxesMemoryDMA;
- StatusMailboxesMemory += DAC960_V1_StatusMailboxCount - 1;
- Controller->V1.LastStatusMailbox = StatusMailboxesMemory;
- Controller->V1.NextStatusMailbox = Controller->V1.FirstStatusMailbox;
-
-skip_mailboxes:
- Controller->V1.MonitoringDCDB = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V1_DCDB_T),
- &Controller->V1.MonitoringDCDB_DMA);
-
- Controller->V1.NewEnquiry = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V1_Enquiry_T),
- &Controller->V1.NewEnquiryDMA);
-
- Controller->V1.NewErrorTable = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V1_ErrorTable_T),
- &Controller->V1.NewErrorTableDMA);
-
- Controller->V1.EventLogEntry = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V1_EventLogEntry_T),
- &Controller->V1.EventLogEntryDMA);
-
- Controller->V1.RebuildProgress = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V1_RebuildProgress_T),
- &Controller->V1.RebuildProgressDMA);
-
- Controller->V1.NewLogicalDriveInformation = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V1_LogicalDriveInformationArray_T),
- &Controller->V1.NewLogicalDriveInformationDMA);
-
- Controller->V1.BackgroundInitializationStatus = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V1_BackgroundInitializationStatus_T),
- &Controller->V1.BackgroundInitializationStatusDMA);
-
- Controller->V1.NewDeviceState = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V1_DeviceState_T),
- &Controller->V1.NewDeviceStateDMA);
-
- Controller->V1.NewInquiryStandardData = slice_dma_loaf(DmaPages,
- sizeof(DAC960_SCSI_Inquiry_T),
- &Controller->V1.NewInquiryStandardDataDMA);
-
- Controller->V1.NewInquiryUnitSerialNumber = slice_dma_loaf(DmaPages,
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T),
- &Controller->V1.NewInquiryUnitSerialNumberDMA);
-
- if ((hw_type == DAC960_PD_Controller) || (hw_type == DAC960_P_Controller))
- return true;
-
- /* Enable the Memory Mailbox Interface. */
- Controller->V1.DualModeMemoryMailboxInterface = true;
- CommandMailbox.TypeX.CommandOpcode = 0x2B;
- CommandMailbox.TypeX.CommandIdentifier = 0;
- CommandMailbox.TypeX.CommandOpcode2 = 0x14;
- CommandMailbox.TypeX.CommandMailboxesBusAddress =
- Controller->V1.FirstCommandMailboxDMA;
- CommandMailbox.TypeX.StatusMailboxesBusAddress =
- Controller->V1.FirstStatusMailboxDMA;
-#define TIMEOUT_COUNT 1000000
-
- for (i = 0; i < 2; i++)
- switch (Controller->HardwareType)
- {
- case DAC960_LA_Controller:
- TimeoutCounter = TIMEOUT_COUNT;
- while (--TimeoutCounter >= 0)
- {
- if (!DAC960_LA_HardwareMailboxFullP(ControllerBaseAddress))
- break;
- udelay(10);
- }
- if (TimeoutCounter < 0) return false;
- DAC960_LA_WriteHardwareMailbox(ControllerBaseAddress, &CommandMailbox);
- DAC960_LA_HardwareMailboxNewCommand(ControllerBaseAddress);
- TimeoutCounter = TIMEOUT_COUNT;
- while (--TimeoutCounter >= 0)
- {
- if (DAC960_LA_HardwareMailboxStatusAvailableP(
- ControllerBaseAddress))
- break;
- udelay(10);
- }
- if (TimeoutCounter < 0) return false;
- CommandStatus = DAC960_LA_ReadStatusRegister(ControllerBaseAddress);
- DAC960_LA_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress);
- DAC960_LA_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress);
- if (CommandStatus == DAC960_V1_NormalCompletion) return true;
- Controller->V1.DualModeMemoryMailboxInterface = false;
- CommandMailbox.TypeX.CommandOpcode2 = 0x10;
- break;
- case DAC960_PG_Controller:
- TimeoutCounter = TIMEOUT_COUNT;
- while (--TimeoutCounter >= 0)
- {
- if (!DAC960_PG_HardwareMailboxFullP(ControllerBaseAddress))
- break;
- udelay(10);
- }
- if (TimeoutCounter < 0) return false;
- DAC960_PG_WriteHardwareMailbox(ControllerBaseAddress, &CommandMailbox);
- DAC960_PG_HardwareMailboxNewCommand(ControllerBaseAddress);
-
- TimeoutCounter = TIMEOUT_COUNT;
- while (--TimeoutCounter >= 0)
- {
- if (DAC960_PG_HardwareMailboxStatusAvailableP(
- ControllerBaseAddress))
- break;
- udelay(10);
- }
- if (TimeoutCounter < 0) return false;
- CommandStatus = DAC960_PG_ReadStatusRegister(ControllerBaseAddress);
- DAC960_PG_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress);
- DAC960_PG_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress);
- if (CommandStatus == DAC960_V1_NormalCompletion) return true;
- Controller->V1.DualModeMemoryMailboxInterface = false;
- CommandMailbox.TypeX.CommandOpcode2 = 0x10;
- break;
- default:
- DAC960_Failure(Controller, "Unknown Controller Type\n");
- break;
- }
- return false;
-}
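
Note on the retry loop above: sub-opcode 0x14 of command 0x2B requests the
dual-mode memory mailbox interface. If that handshake fails, the code
clears DualModeMemoryMailboxInterface, rewrites the sub-opcode to 0x10
(single mode), and the two-iteration for loop retries the handshake once in
single mode before giving up.
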
-
-
-/*
- DAC960_V2_EnableMemoryMailboxInterface enables the Memory Mailbox Interface
- for DAC960 V2 Firmware Controllers.
-
- Aggregate the space needed for the controller's memory mailbox and
- the other data structures that will be targets of dma transfers with
- the controller. Allocate a dma-mapped region of memory to hold these
- structures. Then, save CPU pointers and dma_addr_t values to reference
- the structures that are contained in that region.
-*/
-
-static bool DAC960_V2_EnableMemoryMailboxInterface(DAC960_Controller_T
- *Controller)
-{
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- struct pci_dev *PCI_Device = Controller->PCIDevice;
- struct dma_loaf *DmaPages = &Controller->DmaPages;
- size_t DmaPagesSize;
- size_t CommandMailboxesSize;
- size_t StatusMailboxesSize;
-
- DAC960_V2_CommandMailbox_T *CommandMailboxesMemory;
- dma_addr_t CommandMailboxesMemoryDMA;
-
- DAC960_V2_StatusMailbox_T *StatusMailboxesMemory;
- dma_addr_t StatusMailboxesMemoryDMA;
-
- DAC960_V2_CommandMailbox_T *CommandMailbox;
- dma_addr_t CommandMailboxDMA;
- DAC960_V2_CommandStatus_T CommandStatus;
-
- if (pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(64)) &&
- pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(32)))
- return DAC960_Failure(Controller, "DMA mask out of range");
-
- /* This is a temporary dma mapping, used only in the scope of this function */
- CommandMailbox = pci_alloc_consistent(PCI_Device,
- sizeof(DAC960_V2_CommandMailbox_T), &CommandMailboxDMA);
- if (CommandMailbox == NULL)
- return false;
-
- CommandMailboxesSize = DAC960_V2_CommandMailboxCount * sizeof(DAC960_V2_CommandMailbox_T);
- StatusMailboxesSize = DAC960_V2_StatusMailboxCount * sizeof(DAC960_V2_StatusMailbox_T);
- DmaPagesSize =
- CommandMailboxesSize + StatusMailboxesSize +
- sizeof(DAC960_V2_HealthStatusBuffer_T) +
- sizeof(DAC960_V2_ControllerInfo_T) +
- sizeof(DAC960_V2_LogicalDeviceInfo_T) +
- sizeof(DAC960_V2_PhysicalDeviceInfo_T) +
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T) +
- sizeof(DAC960_V2_Event_T) +
- sizeof(DAC960_V2_PhysicalToLogicalDevice_T);
-
- if (!init_dma_loaf(PCI_Device, DmaPages, DmaPagesSize)) {
- pci_free_consistent(PCI_Device, sizeof(DAC960_V2_CommandMailbox_T),
- CommandMailbox, CommandMailboxDMA);
- return false;
- }
-
- CommandMailboxesMemory = slice_dma_loaf(DmaPages,
- CommandMailboxesSize, &CommandMailboxesMemoryDMA);
-
- /* These are the base addresses for the command memory mailbox array */
- Controller->V2.FirstCommandMailbox = CommandMailboxesMemory;
- Controller->V2.FirstCommandMailboxDMA = CommandMailboxesMemoryDMA;
-
- CommandMailboxesMemory += DAC960_V2_CommandMailboxCount - 1;
- Controller->V2.LastCommandMailbox = CommandMailboxesMemory;
- Controller->V2.NextCommandMailbox = Controller->V2.FirstCommandMailbox;
- Controller->V2.PreviousCommandMailbox1 = Controller->V2.LastCommandMailbox;
- Controller->V2.PreviousCommandMailbox2 =
- Controller->V2.LastCommandMailbox - 1;
-
- /* These are the base addresses for the status memory mailbox array */
- StatusMailboxesMemory = slice_dma_loaf(DmaPages,
- StatusMailboxesSize, &StatusMailboxesMemoryDMA);
-
- Controller->V2.FirstStatusMailbox = StatusMailboxesMemory;
- Controller->V2.FirstStatusMailboxDMA = StatusMailboxesMemoryDMA;
- StatusMailboxesMemory += DAC960_V2_StatusMailboxCount - 1;
- Controller->V2.LastStatusMailbox = StatusMailboxesMemory;
- Controller->V2.NextStatusMailbox = Controller->V2.FirstStatusMailbox;
-
- Controller->V2.HealthStatusBuffer = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V2_HealthStatusBuffer_T),
- &Controller->V2.HealthStatusBufferDMA);
-
- Controller->V2.NewControllerInformation = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V2_ControllerInfo_T),
- &Controller->V2.NewControllerInformationDMA);
-
- Controller->V2.NewLogicalDeviceInformation = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V2_LogicalDeviceInfo_T),
- &Controller->V2.NewLogicalDeviceInformationDMA);
-
- Controller->V2.NewPhysicalDeviceInformation = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V2_PhysicalDeviceInfo_T),
- &Controller->V2.NewPhysicalDeviceInformationDMA);
-
- Controller->V2.NewInquiryUnitSerialNumber = slice_dma_loaf(DmaPages,
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T),
- &Controller->V2.NewInquiryUnitSerialNumberDMA);
-
- Controller->V2.Event = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V2_Event_T),
- &Controller->V2.EventDMA);
-
- Controller->V2.PhysicalToLogicalDevice = slice_dma_loaf(DmaPages,
- sizeof(DAC960_V2_PhysicalToLogicalDevice_T),
- &Controller->V2.PhysicalToLogicalDeviceDMA);
-
- /*
- Enable the Memory Mailbox Interface.
-
- I don't know why we can't just use one of the memory mailboxes
- we just allocated to do this, instead of using this temporary one.
- Try this change later.
- */
- memset(CommandMailbox, 0, sizeof(DAC960_V2_CommandMailbox_T));
- CommandMailbox->SetMemoryMailbox.CommandIdentifier = 1;
- CommandMailbox->SetMemoryMailbox.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->SetMemoryMailbox.CommandControlBits.NoAutoRequestSense = true;
- CommandMailbox->SetMemoryMailbox.FirstCommandMailboxSizeKB =
- (DAC960_V2_CommandMailboxCount * sizeof(DAC960_V2_CommandMailbox_T)) >> 10;
- CommandMailbox->SetMemoryMailbox.FirstStatusMailboxSizeKB =
- (DAC960_V2_StatusMailboxCount * sizeof(DAC960_V2_StatusMailbox_T)) >> 10;
- CommandMailbox->SetMemoryMailbox.SecondCommandMailboxSizeKB = 0;
- CommandMailbox->SetMemoryMailbox.SecondStatusMailboxSizeKB = 0;
- CommandMailbox->SetMemoryMailbox.RequestSenseSize = 0;
- CommandMailbox->SetMemoryMailbox.IOCTL_Opcode = DAC960_V2_SetMemoryMailbox;
- CommandMailbox->SetMemoryMailbox.HealthStatusBufferSizeKB = 1;
- CommandMailbox->SetMemoryMailbox.HealthStatusBufferBusAddress =
- Controller->V2.HealthStatusBufferDMA;
- CommandMailbox->SetMemoryMailbox.FirstCommandMailboxBusAddress =
- Controller->V2.FirstCommandMailboxDMA;
- CommandMailbox->SetMemoryMailbox.FirstStatusMailboxBusAddress =
- Controller->V2.FirstStatusMailboxDMA;
- switch (Controller->HardwareType)
- {
- case DAC960_GEM_Controller:
- while (DAC960_GEM_HardwareMailboxFullP(ControllerBaseAddress))
- udelay(1);
- DAC960_GEM_WriteHardwareMailbox(ControllerBaseAddress, CommandMailboxDMA);
- DAC960_GEM_HardwareMailboxNewCommand(ControllerBaseAddress);
- while (!DAC960_GEM_HardwareMailboxStatusAvailableP(ControllerBaseAddress))
- udelay(1);
- CommandStatus = DAC960_GEM_ReadCommandStatus(ControllerBaseAddress);
- DAC960_GEM_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress);
- DAC960_GEM_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress);
- break;
- case DAC960_BA_Controller:
- while (DAC960_BA_HardwareMailboxFullP(ControllerBaseAddress))
- udelay(1);
- DAC960_BA_WriteHardwareMailbox(ControllerBaseAddress, CommandMailboxDMA);
- DAC960_BA_HardwareMailboxNewCommand(ControllerBaseAddress);
- while (!DAC960_BA_HardwareMailboxStatusAvailableP(ControllerBaseAddress))
- udelay(1);
- CommandStatus = DAC960_BA_ReadCommandStatus(ControllerBaseAddress);
- DAC960_BA_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress);
- DAC960_BA_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress);
- break;
- case DAC960_LP_Controller:
- while (DAC960_LP_HardwareMailboxFullP(ControllerBaseAddress))
- udelay(1);
- DAC960_LP_WriteHardwareMailbox(ControllerBaseAddress, CommandMailboxDMA);
- DAC960_LP_HardwareMailboxNewCommand(ControllerBaseAddress);
- while (!DAC960_LP_HardwareMailboxStatusAvailableP(ControllerBaseAddress))
- udelay(1);
- CommandStatus = DAC960_LP_ReadCommandStatus(ControllerBaseAddress);
- DAC960_LP_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress);
- DAC960_LP_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress);
- break;
- default:
- DAC960_Failure(Controller, "Unknown Controller Type\n");
- CommandStatus = DAC960_V2_AbormalCompletion;
- break;
- }
- pci_free_consistent(PCI_Device, sizeof(DAC960_V2_CommandMailbox_T),
- CommandMailbox, CommandMailboxDMA);
- return (CommandStatus == DAC960_V2_NormalCompletion);
-}
-
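Each case in the switch above drives the same hardware-mailbox handshake through a different controller family's register accessors. A minimal sketch of the shared shape (the mbox_* names are placeholders for the per-family DAC960_*_ helpers, not driver symbols):

    /* Post one command by bus address and spin for its status. */
    while (mbox_full(base))                /* inbound mailbox busy  */
            udelay(1);
    mbox_write(base, CommandMailboxDMA);   /* post the command      */
    mbox_new_command(base);                /* ring the doorbell     */
    while (!mbox_status_available(base))   /* wait for completion   */
            udelay(1);
    CommandStatus = mbox_read_status(base);
    mbox_ack_interrupt(base);              /* clear both indicators */
    mbox_ack_status(base);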
-
-/*
- DAC960_V1_ReadControllerConfiguration reads the Configuration Information
- from DAC960 V1 Firmware Controllers and initializes the Controller structure.
-*/
-
-static bool DAC960_V1_ReadControllerConfiguration(DAC960_Controller_T
- *Controller)
-{
- DAC960_V1_Enquiry2_T *Enquiry2;
- dma_addr_t Enquiry2DMA;
- DAC960_V1_Config2_T *Config2;
- dma_addr_t Config2DMA;
- int LogicalDriveNumber, Channel, TargetID;
- struct dma_loaf local_dma;
-
- if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
- sizeof(DAC960_V1_Enquiry2_T) + sizeof(DAC960_V1_Config2_T)))
- return DAC960_Failure(Controller, "LOGICAL DEVICE ALLOCATION");
-
- Enquiry2 = slice_dma_loaf(&local_dma, sizeof(DAC960_V1_Enquiry2_T), &Enquiry2DMA);
- Config2 = slice_dma_loaf(&local_dma, sizeof(DAC960_V1_Config2_T), &Config2DMA);
-
- if (!DAC960_V1_ExecuteType3(Controller, DAC960_V1_Enquiry,
- Controller->V1.NewEnquiryDMA)) {
- free_dma_loaf(Controller->PCIDevice, &local_dma);
- return DAC960_Failure(Controller, "ENQUIRY");
- }
- memcpy(&Controller->V1.Enquiry, Controller->V1.NewEnquiry,
- sizeof(DAC960_V1_Enquiry_T));
-
- if (!DAC960_V1_ExecuteType3(Controller, DAC960_V1_Enquiry2, Enquiry2DMA)) {
- free_dma_loaf(Controller->PCIDevice, &local_dma);
- return DAC960_Failure(Controller, "ENQUIRY2");
- }
-
- if (!DAC960_V1_ExecuteType3(Controller, DAC960_V1_ReadConfig2, Config2DMA)) {
- free_dma_loaf(Controller->PCIDevice, &local_dma);
- return DAC960_Failure(Controller, "READ CONFIG2");
- }
-
- if (!DAC960_V1_ExecuteType3(Controller, DAC960_V1_GetLogicalDriveInformation,
- Controller->V1.NewLogicalDriveInformationDMA)) {
- free_dma_loaf(Controller->PCIDevice, &local_dma);
- return DAC960_Failure(Controller, "GET LOGICAL DRIVE INFORMATION");
- }
- memcpy(&Controller->V1.LogicalDriveInformation,
- Controller->V1.NewLogicalDriveInformation,
- sizeof(DAC960_V1_LogicalDriveInformationArray_T));
-
- for (Channel = 0; Channel < Enquiry2->ActualChannels; Channel++)
- for (TargetID = 0; TargetID < Enquiry2->MaxTargets; TargetID++) {
- if (!DAC960_V1_ExecuteType3D(Controller, DAC960_V1_GetDeviceState,
- Channel, TargetID,
- Controller->V1.NewDeviceStateDMA)) {
- free_dma_loaf(Controller->PCIDevice, &local_dma);
- return DAC960_Failure(Controller, "GET DEVICE STATE");
- }
- memcpy(&Controller->V1.DeviceState[Channel][TargetID],
- Controller->V1.NewDeviceState, sizeof(DAC960_V1_DeviceState_T));
- }
- /*
- Initialize the Controller Model Name and Full Model Name fields.
- */
- switch (Enquiry2->HardwareID.SubModel)
- {
- case DAC960_V1_P_PD_PU:
- if (Enquiry2->SCSICapability.BusSpeed == DAC960_V1_Ultra)
- strcpy(Controller->ModelName, "DAC960PU");
- else strcpy(Controller->ModelName, "DAC960PD");
- break;
- case DAC960_V1_PL:
- strcpy(Controller->ModelName, "DAC960PL");
- break;
- case DAC960_V1_PG:
- strcpy(Controller->ModelName, "DAC960PG");
- break;
- case DAC960_V1_PJ:
- strcpy(Controller->ModelName, "DAC960PJ");
- break;
- case DAC960_V1_PR:
- strcpy(Controller->ModelName, "DAC960PR");
- break;
- case DAC960_V1_PT:
- strcpy(Controller->ModelName, "DAC960PT");
- break;
- case DAC960_V1_PTL0:
- strcpy(Controller->ModelName, "DAC960PTL0");
- break;
- case DAC960_V1_PRL:
- strcpy(Controller->ModelName, "DAC960PRL");
- break;
- case DAC960_V1_PTL1:
- strcpy(Controller->ModelName, "DAC960PTL1");
- break;
- case DAC960_V1_1164P:
- strcpy(Controller->ModelName, "DAC1164P");
- break;
- default:
- free_dma_loaf(Controller->PCIDevice, &local_dma);
- return DAC960_Failure(Controller, "MODEL VERIFICATION");
- }
- strcpy(Controller->FullModelName, "Mylex ");
- strcat(Controller->FullModelName, Controller->ModelName);
- /*
- Initialize the Controller Firmware Version field and verify that it
- is a supported firmware version. The supported firmware versions are:
-
- DAC1164P 5.06 and above
- DAC960PTL/PRL/PJ/PG 4.06 and above
- DAC960PU/PD/PL 3.51 and above
- DAC960PU/PD/PL/P 2.73 and above
- */
-#if defined(CONFIG_ALPHA)
- /*
- DEC Alpha machines were often equipped with DAC960 cards that were
- OEMed from Mylex, and had their own custom firmware. Version 2.70,
- the last custom FW revision to be released by DEC for these older
- controllers, appears to work quite well with this driver.
-
- Cards tested successfully were several versions each of the PD and
- PU, called by DEC the KZPSC and KZPAC, respectively, and having
- the Manufacturer Numbers (from Mylex), usually on a sticker on the
- back of the board, of:
-
- KZPSC: D040347 (1-channel) or D040348 (2-channel) or D040349 (3-channel)
- KZPAC: D040395 (1-channel) or D040396 (2-channel) or D040397 (3-channel)
- */
-# define FIRMWARE_27X "2.70"
-#else
-# define FIRMWARE_27X "2.73"
-#endif
-
- if (Enquiry2->FirmwareID.MajorVersion == 0)
- {
- Enquiry2->FirmwareID.MajorVersion =
- Controller->V1.Enquiry.MajorFirmwareVersion;
- Enquiry2->FirmwareID.MinorVersion =
- Controller->V1.Enquiry.MinorFirmwareVersion;
- Enquiry2->FirmwareID.FirmwareType = '0';
- Enquiry2->FirmwareID.TurnID = 0;
- }
- snprintf(Controller->FirmwareVersion, sizeof(Controller->FirmwareVersion),
- "%d.%02d-%c-%02d",
- Enquiry2->FirmwareID.MajorVersion,
- Enquiry2->FirmwareID.MinorVersion,
- Enquiry2->FirmwareID.FirmwareType,
- Enquiry2->FirmwareID.TurnID);
- if (!((Controller->FirmwareVersion[0] == '5' &&
- strcmp(Controller->FirmwareVersion, "5.06") >= 0) ||
- (Controller->FirmwareVersion[0] == '4' &&
- strcmp(Controller->FirmwareVersion, "4.06") >= 0) ||
- (Controller->FirmwareVersion[0] == '3' &&
- strcmp(Controller->FirmwareVersion, "3.51") >= 0) ||
- (Controller->FirmwareVersion[0] == '2' &&
- strcmp(Controller->FirmwareVersion, FIRMWARE_27X) >= 0)))
- {
- DAC960_Failure(Controller, "FIRMWARE VERSION VERIFICATION");
- DAC960_Error("Firmware Version = '%s'\n", Controller,
- Controller->FirmwareVersion);
- free_dma_loaf(Controller->PCIDevice, &local_dma);
- return false;
- }
- /*
- Initialize the Controller Channels, Targets, Memory Size, and SAF-TE
- Enclosure Management Enabled fields.
- */
- Controller->Channels = Enquiry2->ActualChannels;
- Controller->Targets = Enquiry2->MaxTargets;
- Controller->MemorySize = Enquiry2->MemorySize >> 20;
- Controller->V1.SAFTE_EnclosureManagementEnabled =
- (Enquiry2->FaultManagementType == DAC960_V1_SAFTE);
- /*
- Initialize the Controller Queue Depth, Driver Queue Depth, Logical Drive
- Count, Maximum Blocks per Command, Controller Scatter/Gather Limit, and
- Driver Scatter/Gather Limit. The Driver Queue Depth must be at most one
- less than the Controller Queue Depth to allow for an automatic drive
- rebuild operation.
- */
- Controller->ControllerQueueDepth = Controller->V1.Enquiry.MaxCommands;
- Controller->DriverQueueDepth = Controller->ControllerQueueDepth - 1;
- if (Controller->DriverQueueDepth > DAC960_MaxDriverQueueDepth)
- Controller->DriverQueueDepth = DAC960_MaxDriverQueueDepth;
- Controller->LogicalDriveCount =
- Controller->V1.Enquiry.NumberOfLogicalDrives;
- Controller->MaxBlocksPerCommand = Enquiry2->MaxBlocksPerCommand;
- Controller->ControllerScatterGatherLimit = Enquiry2->MaxScatterGatherEntries;
- Controller->DriverScatterGatherLimit =
- Controller->ControllerScatterGatherLimit;
- if (Controller->DriverScatterGatherLimit > DAC960_V1_ScatterGatherLimit)
- Controller->DriverScatterGatherLimit = DAC960_V1_ScatterGatherLimit;
- /*
- Initialize the Stripe Size, Segment Size, and Geometry Translation.
- */
- Controller->V1.StripeSize = Config2->BlocksPerStripe * Config2->BlockFactor
- >> (10 - DAC960_BlockSizeBits);
- Controller->V1.SegmentSize = Config2->BlocksPerCacheLine * Config2->BlockFactor
- >> (10 - DAC960_BlockSizeBits);
- switch (Config2->DriveGeometry)
- {
- case DAC960_V1_Geometry_128_32:
- Controller->V1.GeometryTranslationHeads = 128;
- Controller->V1.GeometryTranslationSectors = 32;
- break;
- case DAC960_V1_Geometry_255_63:
- Controller->V1.GeometryTranslationHeads = 255;
- Controller->V1.GeometryTranslationSectors = 63;
- break;
- default:
- free_dma_loaf(Controller->PCIDevice, &local_dma);
- return DAC960_Failure(Controller, "CONFIG2 DRIVE GEOMETRY");
- }
- /*
- Initialize the Background Initialization Status.
- */
- if ((Controller->FirmwareVersion[0] == '4' &&
- strcmp(Controller->FirmwareVersion, "4.08") >= 0) ||
- (Controller->FirmwareVersion[0] == '5' &&
- strcmp(Controller->FirmwareVersion, "5.08") >= 0))
- {
- Controller->V1.BackgroundInitializationStatusSupported = true;
- DAC960_V1_ExecuteType3B(Controller,
- DAC960_V1_BackgroundInitializationControl, 0x20,
- Controller->V1.BackgroundInitializationStatusDMA);
- memcpy(&Controller->V1.LastBackgroundInitializationStatus,
- Controller->V1.BackgroundInitializationStatus,
- sizeof(DAC960_V1_BackgroundInitializationStatus_T));
- }
- /*
- Initialize the Logical Drive Initially Accessible flag.
- */
- for (LogicalDriveNumber = 0;
- LogicalDriveNumber < Controller->LogicalDriveCount;
- LogicalDriveNumber++)
- if (Controller->V1.LogicalDriveInformation
- [LogicalDriveNumber].LogicalDriveState !=
- DAC960_V1_LogicalDrive_Offline)
- Controller->LogicalDriveInitiallyAccessible[LogicalDriveNumber] = true;
- Controller->V1.LastRebuildStatus = DAC960_V1_NoRebuildOrCheckInProgress;
- free_dma_loaf(Controller->PCIDevice, &local_dma);
- return true;
-}
-
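The firmware gate above works because the version string is built with zero-padded fields ("%d.%02d-..."), so within a single major version strcmp() orders the strings the same way the numbers order; the driver also checks FirmwareVersion[0] first, so the comparison never crosses major versions. A standalone illustration (plain C, compilable outside the kernel):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char have[8], need[8];

            /* Zero-padding keeps lexicographic == numeric order:
             * "3.07" < "3.51" as strings, just as 3.07 < 3.51. */
            snprintf(have, sizeof(have), "%d.%02d", 3, 7);
            snprintf(need, sizeof(need), "%d.%02d", 3, 51);
            printf("supported: %d\n", strcmp(have, need) >= 0); /* 0 */
            return 0;
    }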
-
-/*
- DAC960_V2_ReadControllerConfiguration reads the Configuration Information
- from DAC960 V2 Firmware Controllers and initializes the Controller structure.
-*/
-
-static bool DAC960_V2_ReadControllerConfiguration(DAC960_Controller_T
- *Controller)
-{
- DAC960_V2_ControllerInfo_T *ControllerInfo =
- &Controller->V2.ControllerInformation;
- unsigned short LogicalDeviceNumber = 0;
- int ModelNameLength;
-
- /* Get data into dma-able area, then copy into permanent location */
- if (!DAC960_V2_NewControllerInfo(Controller))
- return DAC960_Failure(Controller, "GET CONTROLLER INFO");
- memcpy(ControllerInfo, Controller->V2.NewControllerInformation,
- sizeof(DAC960_V2_ControllerInfo_T));
-
-
- if (!DAC960_V2_GeneralInfo(Controller))
- return DAC960_Failure(Controller, "GET HEALTH STATUS");
-
- /*
- Initialize the Controller Model Name and Full Model Name fields.
- */
- ModelNameLength = sizeof(ControllerInfo->ControllerName);
- if (ModelNameLength > sizeof(Controller->ModelName)-1)
- ModelNameLength = sizeof(Controller->ModelName)-1;
- memcpy(Controller->ModelName, ControllerInfo->ControllerName,
- ModelNameLength);
- ModelNameLength--;
- while (Controller->ModelName[ModelNameLength] == ' ' ||
- Controller->ModelName[ModelNameLength] == '\0')
- ModelNameLength--;
- Controller->ModelName[++ModelNameLength] = '\0';
- strcpy(Controller->FullModelName, "Mylex ");
- strcat(Controller->FullModelName, Controller->ModelName);
- /*
- Initialize the Controller Firmware Version field.
- */
- sprintf(Controller->FirmwareVersion, "%d.%02d-%02d",
- ControllerInfo->FirmwareMajorVersion,
- ControllerInfo->FirmwareMinorVersion,
- ControllerInfo->FirmwareTurnNumber);
- if (ControllerInfo->FirmwareMajorVersion == 6 &&
- ControllerInfo->FirmwareMinorVersion == 0 &&
- ControllerInfo->FirmwareTurnNumber < 1)
- {
- DAC960_Info("FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n",
- Controller, Controller->FirmwareVersion);
- DAC960_Info("STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n",
- Controller);
- DAC960_Info("PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n",
- Controller);
- }
- /*
- Initialize the Controller Channels, Targets, and Memory Size.
- */
- Controller->Channels = ControllerInfo->NumberOfPhysicalChannelsPresent;
- Controller->Targets =
- ControllerInfo->MaximumTargetsPerChannel
- [ControllerInfo->NumberOfPhysicalChannelsPresent-1];
- Controller->MemorySize = ControllerInfo->MemorySizeMB;
- /*
- Initialize the Controller Queue Depth, Driver Queue Depth, Logical Drive
- Count, Maximum Blocks per Command, Controller Scatter/Gather Limit, and
- Driver Scatter/Gather Limit. The Driver Queue Depth must be at most one
- less than the Controller Queue Depth to allow for an automatic drive
- rebuild operation.
- */
- Controller->ControllerQueueDepth = ControllerInfo->MaximumParallelCommands;
- Controller->DriverQueueDepth = Controller->ControllerQueueDepth - 1;
- if (Controller->DriverQueueDepth > DAC960_MaxDriverQueueDepth)
- Controller->DriverQueueDepth = DAC960_MaxDriverQueueDepth;
- Controller->LogicalDriveCount = ControllerInfo->LogicalDevicesPresent;
- Controller->MaxBlocksPerCommand =
- ControllerInfo->MaximumDataTransferSizeInBlocks;
- Controller->ControllerScatterGatherLimit =
- ControllerInfo->MaximumScatterGatherEntries;
- Controller->DriverScatterGatherLimit =
- Controller->ControllerScatterGatherLimit;
- if (Controller->DriverScatterGatherLimit > DAC960_V2_ScatterGatherLimit)
- Controller->DriverScatterGatherLimit = DAC960_V2_ScatterGatherLimit;
- /*
- Initialize the Logical Device Information.
- */
- while (true)
- {
- DAC960_V2_LogicalDeviceInfo_T *NewLogicalDeviceInfo =
- Controller->V2.NewLogicalDeviceInformation;
- DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo;
- DAC960_V2_PhysicalDevice_T PhysicalDevice;
-
- if (!DAC960_V2_NewLogicalDeviceInfo(Controller, LogicalDeviceNumber))
- break;
- LogicalDeviceNumber = NewLogicalDeviceInfo->LogicalDeviceNumber;
- if (LogicalDeviceNumber >= DAC960_MaxLogicalDrives) {
- DAC960_Error("DAC960: Logical Drive Number %d not supported\n",
- Controller, LogicalDeviceNumber);
- break;
- }
- if (NewLogicalDeviceInfo->DeviceBlockSizeInBytes != DAC960_BlockSize) {
- DAC960_Error("DAC960: Logical Drive Block Size %d not supported\n",
- Controller, NewLogicalDeviceInfo->DeviceBlockSizeInBytes);
- LogicalDeviceNumber++;
- continue;
- }
- PhysicalDevice.Controller = 0;
- PhysicalDevice.Channel = NewLogicalDeviceInfo->Channel;
- PhysicalDevice.TargetID = NewLogicalDeviceInfo->TargetID;
- PhysicalDevice.LogicalUnit = NewLogicalDeviceInfo->LogicalUnit;
- Controller->V2.LogicalDriveToVirtualDevice[LogicalDeviceNumber] =
- PhysicalDevice;
- if (NewLogicalDeviceInfo->LogicalDeviceState !=
- DAC960_V2_LogicalDevice_Offline)
- Controller->LogicalDriveInitiallyAccessible[LogicalDeviceNumber] = true;
- LogicalDeviceInfo = kmalloc(sizeof(DAC960_V2_LogicalDeviceInfo_T),
- GFP_ATOMIC);
- if (LogicalDeviceInfo == NULL)
- return DAC960_Failure(Controller, "LOGICAL DEVICE ALLOCATION");
- Controller->V2.LogicalDeviceInformation[LogicalDeviceNumber] =
- LogicalDeviceInfo;
- memcpy(LogicalDeviceInfo, NewLogicalDeviceInfo,
- sizeof(DAC960_V2_LogicalDeviceInfo_T));
- LogicalDeviceNumber++;
- }
- return true;
-}
-
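One subtlety in the model-name handling above: the backwards trim loop assumes ControllerName contains at least one character that is neither a blank nor a NUL, otherwise ModelNameLength walks below zero. A bounds-checked version of the same trim (plain C, an illustration rather than driver code):

    #include <stddef.h>

    /* Trim trailing spaces and NULs from a fixed-width name of n
     * bytes, NUL-terminating in place; stops at index 0 even when
     * every byte is blank.  dst must hold at least n + 1 bytes. */
    static void trim_name(char *dst, size_t n)
    {
            while (n > 0 && (dst[n - 1] == ' ' || dst[n - 1] == '\0'))
                    n--;
            dst[n] = '\0';
    }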
-
-/*
- DAC960_ReportControllerConfiguration reports the Configuration Information
- for Controller.
-*/
-
-static bool DAC960_ReportControllerConfiguration(DAC960_Controller_T
- *Controller)
-{
- DAC960_Info("Configuring Mylex %s PCI RAID Controller\n",
- Controller, Controller->ModelName);
- DAC960_Info(" Firmware Version: %s, Channels: %d, Memory Size: %dMB\n",
- Controller, Controller->FirmwareVersion,
- Controller->Channels, Controller->MemorySize);
- DAC960_Info(" PCI Bus: %d, Device: %d, Function: %d, I/O Address: ",
- Controller, Controller->Bus,
- Controller->Device, Controller->Function);
- if (Controller->IO_Address == 0)
- DAC960_Info("Unassigned\n", Controller);
- else DAC960_Info("0x%X\n", Controller, Controller->IO_Address);
- DAC960_Info(" PCI Address: 0x%X mapped at 0x%lX, IRQ Channel: %d\n",
- Controller, Controller->PCI_Address,
- (unsigned long) Controller->BaseAddress,
- Controller->IRQ_Channel);
- DAC960_Info(" Controller Queue Depth: %d, "
- "Maximum Blocks per Command: %d\n",
- Controller, Controller->ControllerQueueDepth,
- Controller->MaxBlocksPerCommand);
- DAC960_Info(" Driver Queue Depth: %d, "
- "Scatter/Gather Limit: %d of %d Segments\n",
- Controller, Controller->DriverQueueDepth,
- Controller->DriverScatterGatherLimit,
- Controller->ControllerScatterGatherLimit);
- if (Controller->FirmwareType == DAC960_V1_Controller)
- {
- DAC960_Info(" Stripe Size: %dKB, Segment Size: %dKB, "
- "BIOS Geometry: %d/%d\n", Controller,
- Controller->V1.StripeSize,
- Controller->V1.SegmentSize,
- Controller->V1.GeometryTranslationHeads,
- Controller->V1.GeometryTranslationSectors);
- if (Controller->V1.SAFTE_EnclosureManagementEnabled)
- DAC960_Info(" SAF-TE Enclosure Management Enabled\n", Controller);
- }
- return true;
-}
-
-
-/*
- DAC960_V1_ReadDeviceConfiguration reads the Device Configuration Information
- for DAC960 V1 Firmware Controllers by requesting the SCSI Inquiry and SCSI
- Inquiry Unit Serial Number information for each device connected to
- Controller.
-*/
-
-static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
- *Controller)
-{
- struct dma_loaf local_dma;
-
- dma_addr_t DCDBs_dma[DAC960_V1_MaxChannels];
- DAC960_V1_DCDB_T *DCDBs_cpu[DAC960_V1_MaxChannels];
-
- dma_addr_t SCSI_Inquiry_dma[DAC960_V1_MaxChannels];
- DAC960_SCSI_Inquiry_T *SCSI_Inquiry_cpu[DAC960_V1_MaxChannels];
-
- dma_addr_t SCSI_NewInquiryUnitSerialNumberDMA[DAC960_V1_MaxChannels];
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *SCSI_NewInquiryUnitSerialNumberCPU[DAC960_V1_MaxChannels];
-
- struct completion Completions[DAC960_V1_MaxChannels];
- unsigned long flags;
- int Channel, TargetID;
-
- if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
- DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
- sizeof(DAC960_SCSI_Inquiry_T) +
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T))))
- return DAC960_Failure(Controller,
- "DMA ALLOCATION FAILED IN ReadDeviceConfiguration");
-
- for (Channel = 0; Channel < Controller->Channels; Channel++) {
- DCDBs_cpu[Channel] = slice_dma_loaf(&local_dma,
- sizeof(DAC960_V1_DCDB_T), DCDBs_dma + Channel);
- SCSI_Inquiry_cpu[Channel] = slice_dma_loaf(&local_dma,
- sizeof(DAC960_SCSI_Inquiry_T),
- SCSI_Inquiry_dma + Channel);
- SCSI_NewInquiryUnitSerialNumberCPU[Channel] = slice_dma_loaf(&local_dma,
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T),
- SCSI_NewInquiryUnitSerialNumberDMA + Channel);
- }
-
- for (TargetID = 0; TargetID < Controller->Targets; TargetID++)
- {
- /*
- * For each channel, submit a probe for a device on that channel.
- * The timeout interval for a device that is present is 10 seconds.
- * With this approach, the timeout periods can elapse in parallel
- * on each channel.
- */
- for (Channel = 0; Channel < Controller->Channels; Channel++)
- {
- dma_addr_t NewInquiryStandardDataDMA = SCSI_Inquiry_dma[Channel];
- DAC960_V1_DCDB_T *DCDB = DCDBs_cpu[Channel];
- dma_addr_t DCDB_dma = DCDBs_dma[Channel];
- DAC960_Command_T *Command = Controller->Commands[Channel];
- struct completion *Completion = &Completions[Channel];
-
- init_completion(Completion);
- DAC960_V1_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- Command->Completion = Completion;
- Command->V1.CommandMailbox.Type3.CommandOpcode = DAC960_V1_DCDB;
- Command->V1.CommandMailbox.Type3.BusAddress = DCDB_dma;
- DCDB->Channel = Channel;
- DCDB->TargetID = TargetID;
- DCDB->Direction = DAC960_V1_DCDB_DataTransferDeviceToSystem;
- DCDB->EarlyStatus = false;
- DCDB->Timeout = DAC960_V1_DCDB_Timeout_10_seconds;
- DCDB->NoAutomaticRequestSense = false;
- DCDB->DisconnectPermitted = true;
- DCDB->TransferLength = sizeof(DAC960_SCSI_Inquiry_T);
- DCDB->BusAddress = NewInquiryStandardDataDMA;
- DCDB->CDBLength = 6;
- DCDB->TransferLengthHigh4 = 0;
- DCDB->SenseLength = sizeof(DCDB->SenseData);
- DCDB->CDB[0] = 0x12; /* INQUIRY */
- DCDB->CDB[1] = 0; /* EVPD = 0 */
- DCDB->CDB[2] = 0; /* Page Code */
- DCDB->CDB[3] = 0; /* Reserved */
- DCDB->CDB[4] = sizeof(DAC960_SCSI_Inquiry_T);
- DCDB->CDB[5] = 0; /* Control */
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_QueueCommand(Command);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- }
- /*
- * Wait for the probes submitted in the previous loop
- * to complete. On the probes that are successful,
- * get the serial number of the device that was found.
- */
- for (Channel = 0; Channel < Controller->Channels; Channel++)
- {
- DAC960_SCSI_Inquiry_T *InquiryStandardData =
- &Controller->V1.InquiryStandardData[Channel][TargetID];
- DAC960_SCSI_Inquiry_T *NewInquiryStandardData = SCSI_Inquiry_cpu[Channel];
- dma_addr_t NewInquiryUnitSerialNumberDMA =
- SCSI_NewInquiryUnitSerialNumberDMA[Channel];
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *NewInquiryUnitSerialNumber =
- SCSI_NewInquiryUnitSerialNumberCPU[Channel];
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
- &Controller->V1.InquiryUnitSerialNumber[Channel][TargetID];
- DAC960_Command_T *Command = Controller->Commands[Channel];
- DAC960_V1_DCDB_T *DCDB = DCDBs_cpu[Channel];
- struct completion *Completion = &Completions[Channel];
-
- wait_for_completion(Completion);
-
- if (Command->V1.CommandStatus != DAC960_V1_NormalCompletion) {
- memset(InquiryStandardData, 0, sizeof(DAC960_SCSI_Inquiry_T));
- InquiryStandardData->PeripheralDeviceType = 0x1F;
- continue;
- } else
- memcpy(InquiryStandardData, NewInquiryStandardData, sizeof(DAC960_SCSI_Inquiry_T));
-
- /* Preserve Channel and TargetID values from the previous loop */
- Command->Completion = Completion;
- DCDB->TransferLength = sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
- DCDB->BusAddress = NewInquiryUnitSerialNumberDMA;
- DCDB->SenseLength = sizeof(DCDB->SenseData);
- DCDB->CDB[0] = 0x12; /* INQUIRY */
- DCDB->CDB[1] = 1; /* EVPD = 1 */
- DCDB->CDB[2] = 0x80; /* Page Code */
- DCDB->CDB[3] = 0; /* Reserved */
- DCDB->CDB[4] = sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
- DCDB->CDB[5] = 0; /* Control */
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_QueueCommand(Command);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- wait_for_completion(Completion);
-
- if (Command->V1.CommandStatus != DAC960_V1_NormalCompletion) {
- memset(InquiryUnitSerialNumber, 0,
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
- InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
- } else
- memcpy(InquiryUnitSerialNumber, NewInquiryUnitSerialNumber,
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
- }
- }
- free_dma_loaf(Controller->PCIDevice, &local_dma);
- return true;
-}
-
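The two-pass structure above generalizes to any multi-channel scan: issue one asynchronous command per channel so the (up to ten-second) timeouts for absent devices elapse in parallel, then reap the completions in submission order. A condensed sketch of that shape, where submit_inquiry() is a hypothetical helper standing in for the DCDB setup plus DAC960_QueueCommand(), assumed to call complete() on the passed completion when the command finishes:

    #include <linux/completion.h>

    #define NCHANNELS 3     /* DAC960_V1_MaxChannels in this driver */

    void submit_inquiry(int ch, int target,
                        struct completion *done);  /* hypothetical */

    static void scan_one_target(int channels, int target)
    {
            struct completion done[NCHANNELS];
            int ch;

            for (ch = 0; ch < channels; ch++) {
                    init_completion(&done[ch]);
                    submit_inquiry(ch, target, &done[ch]); /* async */
            }
            for (ch = 0; ch < channels; ch++)
                    wait_for_completion(&done[ch]); /* waits overlap */
    }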
-
-/*
- DAC960_V2_ReadDeviceConfiguration reads the Device Configuration Information
- for DAC960 V2 Firmware Controllers by requesting the Physical Device
- Information and SCSI Inquiry Unit Serial Number information for each
- device connected to Controller.
-*/
-
-static bool DAC960_V2_ReadDeviceConfiguration(DAC960_Controller_T
- *Controller)
-{
- unsigned char Channel = 0, TargetID = 0, LogicalUnit = 0;
- unsigned short PhysicalDeviceIndex = 0;
-
- while (true)
- {
- DAC960_V2_PhysicalDeviceInfo_T *NewPhysicalDeviceInfo =
- Controller->V2.NewPhysicalDeviceInformation;
- DAC960_V2_PhysicalDeviceInfo_T *PhysicalDeviceInfo;
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *NewInquiryUnitSerialNumber =
- Controller->V2.NewInquiryUnitSerialNumber;
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber;
-
- if (!DAC960_V2_NewPhysicalDeviceInfo(Controller, Channel, TargetID, LogicalUnit))
- break;
-
- PhysicalDeviceInfo = kmalloc(sizeof(DAC960_V2_PhysicalDeviceInfo_T),
- GFP_ATOMIC);
- if (PhysicalDeviceInfo == NULL)
- return DAC960_Failure(Controller, "PHYSICAL DEVICE ALLOCATION");
- Controller->V2.PhysicalDeviceInformation[PhysicalDeviceIndex] =
- PhysicalDeviceInfo;
- memcpy(PhysicalDeviceInfo, NewPhysicalDeviceInfo,
- sizeof(DAC960_V2_PhysicalDeviceInfo_T));
-
- InquiryUnitSerialNumber = kmalloc(
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T), GFP_ATOMIC);
- if (InquiryUnitSerialNumber == NULL) {
- kfree(PhysicalDeviceInfo);
- return DAC960_Failure(Controller, "SERIAL NUMBER ALLOCATION");
- }
- Controller->V2.InquiryUnitSerialNumber[PhysicalDeviceIndex] =
- InquiryUnitSerialNumber;
-
- Channel = NewPhysicalDeviceInfo->Channel;
- TargetID = NewPhysicalDeviceInfo->TargetID;
- LogicalUnit = NewPhysicalDeviceInfo->LogicalUnit;
-
- /*
- Some devices do NOT have Unit Serial Numbers, and this
- command fails for them. We still want to remember that
- those devices are present, so construct a placeholder
- UnitSerialNumber structure for the failure case.
- */
- if (!DAC960_V2_NewInquiryUnitSerialNumber(Controller, Channel, TargetID, LogicalUnit)) {
- memset(InquiryUnitSerialNumber, 0,
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
- InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
- } else
- memcpy(InquiryUnitSerialNumber, NewInquiryUnitSerialNumber,
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
-
- PhysicalDeviceIndex++;
- LogicalUnit++;
- }
- return true;
-}
-
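Both scan paths synthesize the same sentinel when a probe fails: SCSI peripheral device type 0x1F means "unknown or no device type", so the reporting code later in the file can treat "probe failed" and "no device present" uniformly by testing that single field instead of carrying a separate validity flag.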
-
-/*
- DAC960_SanitizeInquiryData sanitizes the Vendor, Model, Revision, and
- Product Serial Number fields of the Inquiry Standard Data and Inquiry
- Unit Serial Number structures.
-*/
-
-static void DAC960_SanitizeInquiryData(DAC960_SCSI_Inquiry_T
- *InquiryStandardData,
- DAC960_SCSI_Inquiry_UnitSerialNumber_T
- *InquiryUnitSerialNumber,
- unsigned char *Vendor,
- unsigned char *Model,
- unsigned char *Revision,
- unsigned char *SerialNumber)
-{
- int SerialNumberLength, i;
- if (InquiryStandardData->PeripheralDeviceType == 0x1F) return;
- for (i = 0; i < sizeof(InquiryStandardData->VendorIdentification); i++)
- {
- unsigned char VendorCharacter =
- InquiryStandardData->VendorIdentification[i];
- Vendor[i] = (VendorCharacter >= ' ' && VendorCharacter <= '~'
- ? VendorCharacter : ' ');
- }
- Vendor[sizeof(InquiryStandardData->VendorIdentification)] = '\0';
- for (i = 0; i < sizeof(InquiryStandardData->ProductIdentification); i++)
- {
- unsigned char ModelCharacter =
- InquiryStandardData->ProductIdentification[i];
- Model[i] = (ModelCharacter >= ' ' && ModelCharacter <= '~'
- ? ModelCharacter : ' ');
- }
- Model[sizeof(InquiryStandardData->ProductIdentification)] = '\0';
- for (i = 0; i < sizeof(InquiryStandardData->ProductRevisionLevel); i++)
- {
- unsigned char RevisionCharacter =
- InquiryStandardData->ProductRevisionLevel[i];
- Revision[i] = (RevisionCharacter >= ' ' && RevisionCharacter <= '~'
- ? RevisionCharacter : ' ');
- }
- Revision[sizeof(InquiryStandardData->ProductRevisionLevel)] = '\0';
- if (InquiryUnitSerialNumber->PeripheralDeviceType == 0x1F) return;
- SerialNumberLength = InquiryUnitSerialNumber->PageLength;
- if (SerialNumberLength >
- sizeof(InquiryUnitSerialNumber->ProductSerialNumber))
- SerialNumberLength = sizeof(InquiryUnitSerialNumber->ProductSerialNumber);
- for (i = 0; i < SerialNumberLength; i++)
- {
- unsigned char SerialNumberCharacter =
- InquiryUnitSerialNumber->ProductSerialNumber[i];
- SerialNumber[i] =
- (SerialNumberCharacter >= ' ' && SerialNumberCharacter <= '~'
- ? SerialNumberCharacter : ' ');
- }
- SerialNumber[SerialNumberLength] = '\0';
-}
-
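The three field loops above apply one clamp at different widths; a compact equivalent (plain C) makes the shared invariant explicit:

    #include <stddef.h>

    /* Copy a fixed-width INQUIRY field, replacing anything outside
     * printable ASCII (' '..'~') with a space and NUL-terminating.
     * dst must hold n + 1 bytes, exactly as the Vendor/Model/Revision
     * buffers in the report functions reserve. */
    static void sanitize_field(char *dst, const unsigned char *src,
                               size_t n)
    {
            size_t i;

            for (i = 0; i < n; i++)
                    dst[i] = (src[i] >= ' ' && src[i] <= '~')
                             ? src[i] : ' ';
            dst[n] = '\0';
    }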
-
-/*
- DAC960_V1_ReportDeviceConfiguration reports the Device Configuration
- Information for DAC960 V1 Firmware Controllers.
-*/
-
-static bool DAC960_V1_ReportDeviceConfiguration(DAC960_Controller_T
- *Controller)
-{
- int LogicalDriveNumber, Channel, TargetID;
- DAC960_Info(" Physical Devices:\n", Controller);
- for (Channel = 0; Channel < Controller->Channels; Channel++)
- for (TargetID = 0; TargetID < Controller->Targets; TargetID++)
- {
- DAC960_SCSI_Inquiry_T *InquiryStandardData =
- &Controller->V1.InquiryStandardData[Channel][TargetID];
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
- &Controller->V1.InquiryUnitSerialNumber[Channel][TargetID];
- DAC960_V1_DeviceState_T *DeviceState =
- &Controller->V1.DeviceState[Channel][TargetID];
- DAC960_V1_ErrorTableEntry_T *ErrorEntry =
- &Controller->V1.ErrorTable.ErrorTableEntries[Channel][TargetID];
- char Vendor[1+sizeof(InquiryStandardData->VendorIdentification)];
- char Model[1+sizeof(InquiryStandardData->ProductIdentification)];
- char Revision[1+sizeof(InquiryStandardData->ProductRevisionLevel)];
- char SerialNumber[1+sizeof(InquiryUnitSerialNumber
- ->ProductSerialNumber)];
- if (InquiryStandardData->PeripheralDeviceType == 0x1F) continue;
- DAC960_SanitizeInquiryData(InquiryStandardData, InquiryUnitSerialNumber,
- Vendor, Model, Revision, SerialNumber);
- DAC960_Info(" %d:%d%s Vendor: %s Model: %s Revision: %s\n",
- Controller, Channel, TargetID, (TargetID < 10 ? " " : ""),
- Vendor, Model, Revision);
- if (InquiryUnitSerialNumber->PeripheralDeviceType != 0x1F)
- DAC960_Info(" Serial Number: %s\n", Controller, SerialNumber);
- if (DeviceState->Present &&
- DeviceState->DeviceType == DAC960_V1_DiskType)
- {
- if (Controller->V1.DeviceResetCount[Channel][TargetID] > 0)
- DAC960_Info(" Disk Status: %s, %u blocks, %d resets\n",
- Controller,
- (DeviceState->DeviceState == DAC960_V1_Device_Dead
- ? "Dead"
- : DeviceState->DeviceState
- == DAC960_V1_Device_WriteOnly
- ? "Write-Only"
- : DeviceState->DeviceState
- == DAC960_V1_Device_Online
- ? "Online" : "Standby"),
- DeviceState->DiskSize,
- Controller->V1.DeviceResetCount[Channel][TargetID]);
- else
- DAC960_Info(" Disk Status: %s, %u blocks\n", Controller,
- (DeviceState->DeviceState == DAC960_V1_Device_Dead
- ? "Dead"
- : DeviceState->DeviceState
- == DAC960_V1_Device_WriteOnly
- ? "Write-Only"
- : DeviceState->DeviceState
- == DAC960_V1_Device_Online
- ? "Online" : "Standby"),
- DeviceState->DiskSize);
- }
- if (ErrorEntry->ParityErrorCount > 0 ||
- ErrorEntry->SoftErrorCount > 0 ||
- ErrorEntry->HardErrorCount > 0 ||
- ErrorEntry->MiscErrorCount > 0)
- DAC960_Info(" Errors - Parity: %d, Soft: %d, "
- "Hard: %d, Misc: %d\n", Controller,
- ErrorEntry->ParityErrorCount,
- ErrorEntry->SoftErrorCount,
- ErrorEntry->HardErrorCount,
- ErrorEntry->MiscErrorCount);
- }
- DAC960_Info(" Logical Drives:\n", Controller);
- for (LogicalDriveNumber = 0;
- LogicalDriveNumber < Controller->LogicalDriveCount;
- LogicalDriveNumber++)
- {
- DAC960_V1_LogicalDriveInformation_T *LogicalDriveInformation =
- &Controller->V1.LogicalDriveInformation[LogicalDriveNumber];
- DAC960_Info(" /dev/rd/c%dd%d: RAID-%d, %s, %u blocks, %s\n",
- Controller, Controller->ControllerNumber, LogicalDriveNumber,
- LogicalDriveInformation->RAIDLevel,
- (LogicalDriveInformation->LogicalDriveState
- == DAC960_V1_LogicalDrive_Online
- ? "Online"
- : LogicalDriveInformation->LogicalDriveState
- == DAC960_V1_LogicalDrive_Critical
- ? "Critical" : "Offline"),
- LogicalDriveInformation->LogicalDriveSize,
- (LogicalDriveInformation->WriteBack
- ? "Write Back" : "Write Thru"));
- }
- return true;
-}
-
-
-/*
- DAC960_V2_ReportDeviceConfiguration reports the Device Configuration
- Information for DAC960 V2 Firmware Controllers.
-*/
-
-static bool DAC960_V2_ReportDeviceConfiguration(DAC960_Controller_T
- *Controller)
-{
- int PhysicalDeviceIndex, LogicalDriveNumber;
- DAC960_Info(" Physical Devices:\n", Controller);
- for (PhysicalDeviceIndex = 0;
- PhysicalDeviceIndex < DAC960_V2_MaxPhysicalDevices;
- PhysicalDeviceIndex++)
- {
- DAC960_V2_PhysicalDeviceInfo_T *PhysicalDeviceInfo =
- Controller->V2.PhysicalDeviceInformation[PhysicalDeviceIndex];
- DAC960_SCSI_Inquiry_T *InquiryStandardData =
- (DAC960_SCSI_Inquiry_T *) &PhysicalDeviceInfo->SCSI_InquiryData;
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
- Controller->V2.InquiryUnitSerialNumber[PhysicalDeviceIndex];
- char Vendor[1+sizeof(InquiryStandardData->VendorIdentification)];
- char Model[1+sizeof(InquiryStandardData->ProductIdentification)];
- char Revision[1+sizeof(InquiryStandardData->ProductRevisionLevel)];
- char SerialNumber[1+sizeof(InquiryUnitSerialNumber->ProductSerialNumber)];
- if (PhysicalDeviceInfo == NULL) break;
- DAC960_SanitizeInquiryData(InquiryStandardData, InquiryUnitSerialNumber,
- Vendor, Model, Revision, SerialNumber);
- DAC960_Info(" %d:%d%s Vendor: %s Model: %s Revision: %s\n",
- Controller,
- PhysicalDeviceInfo->Channel,
- PhysicalDeviceInfo->TargetID,
- (PhysicalDeviceInfo->TargetID < 10 ? " " : ""),
- Vendor, Model, Revision);
- if (PhysicalDeviceInfo->NegotiatedSynchronousMegaTransfers == 0)
- DAC960_Info(" %sAsynchronous\n", Controller,
- (PhysicalDeviceInfo->NegotiatedDataWidthBits == 16
- ? "Wide " :""));
- else
- DAC960_Info(" %sSynchronous at %d MB/sec\n", Controller,
- (PhysicalDeviceInfo->NegotiatedDataWidthBits == 16
- ? "Wide " :""),
- (PhysicalDeviceInfo->NegotiatedSynchronousMegaTransfers
- * PhysicalDeviceInfo->NegotiatedDataWidthBits/8));
- if (InquiryUnitSerialNumber->PeripheralDeviceType != 0x1F)
- DAC960_Info(" Serial Number: %s\n", Controller, SerialNumber);
- if (PhysicalDeviceInfo->PhysicalDeviceState ==
- DAC960_V2_Device_Unconfigured)
- continue;
- DAC960_Info(" Disk Status: %s, %u blocks\n", Controller,
- (PhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Online
- ? "Online"
- : PhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Rebuild
- ? "Rebuild"
- : PhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Missing
- ? "Missing"
- : PhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Critical
- ? "Critical"
- : PhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Dead
- ? "Dead"
- : PhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_SuspectedDead
- ? "Suspected-Dead"
- : PhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_CommandedOffline
- ? "Commanded-Offline"
- : PhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Standby
- ? "Standby" : "Unknown"),
- PhysicalDeviceInfo->ConfigurableDeviceSize);
- if (PhysicalDeviceInfo->ParityErrors == 0 &&
- PhysicalDeviceInfo->SoftErrors == 0 &&
- PhysicalDeviceInfo->HardErrors == 0 &&
- PhysicalDeviceInfo->MiscellaneousErrors == 0 &&
- PhysicalDeviceInfo->CommandTimeouts == 0 &&
- PhysicalDeviceInfo->Retries == 0 &&
- PhysicalDeviceInfo->Aborts == 0 &&
- PhysicalDeviceInfo->PredictedFailuresDetected == 0)
- continue;
- DAC960_Info(" Errors - Parity: %d, Soft: %d, "
- "Hard: %d, Misc: %d\n", Controller,
- PhysicalDeviceInfo->ParityErrors,
- PhysicalDeviceInfo->SoftErrors,
- PhysicalDeviceInfo->HardErrors,
- PhysicalDeviceInfo->MiscellaneousErrors);
- DAC960_Info(" Timeouts: %d, Retries: %d, "
- "Aborts: %d, Predicted: %d\n", Controller,
- PhysicalDeviceInfo->CommandTimeouts,
- PhysicalDeviceInfo->Retries,
- PhysicalDeviceInfo->Aborts,
- PhysicalDeviceInfo->PredictedFailuresDetected);
- }
- DAC960_Info(" Logical Drives:\n", Controller);
- for (LogicalDriveNumber = 0;
- LogicalDriveNumber < DAC960_MaxLogicalDrives;
- LogicalDriveNumber++)
- {
- DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo =
- Controller->V2.LogicalDeviceInformation[LogicalDriveNumber];
- static const unsigned char *ReadCacheStatus[] = {
- "Read Cache Disabled",
- "Read Cache Enabled",
- "Read Ahead Enabled",
- "Intelligent Read Ahead Enabled",
- "-", "-", "-", "-"
- };
- static const unsigned char *WriteCacheStatus[] = {
- "Write Cache Disabled",
- "Logical Device Read Only",
- "Write Cache Enabled",
- "Intelligent Write Cache Enabled",
- "-", "-", "-", "-"
- };
- unsigned char *GeometryTranslation;
- if (LogicalDeviceInfo == NULL) continue;
- switch (LogicalDeviceInfo->DriveGeometry)
- {
- case DAC960_V2_Geometry_128_32:
- GeometryTranslation = "128/32";
- break;
- case DAC960_V2_Geometry_255_63:
- GeometryTranslation = "255/63";
- break;
- default:
- GeometryTranslation = "Invalid";
- DAC960_Error("Illegal Logical Device Geometry %d\n",
- Controller, LogicalDeviceInfo->DriveGeometry);
- break;
- }
- DAC960_Info(" /dev/rd/c%dd%d: RAID-%d, %s, %u blocks\n",
- Controller, Controller->ControllerNumber, LogicalDriveNumber,
- LogicalDeviceInfo->RAIDLevel,
- (LogicalDeviceInfo->LogicalDeviceState
- == DAC960_V2_LogicalDevice_Online
- ? "Online"
- : LogicalDeviceInfo->LogicalDeviceState
- == DAC960_V2_LogicalDevice_Critical
- ? "Critical" : "Offline"),
- LogicalDeviceInfo->ConfigurableDeviceSize);
- DAC960_Info(" Logical Device %s, BIOS Geometry: %s\n",
- Controller,
- (LogicalDeviceInfo->LogicalDeviceControl
- .LogicalDeviceInitialized
- ? "Initialized" : "Uninitialized"),
- GeometryTranslation);
- if (LogicalDeviceInfo->StripeSize == 0)
- {
- if (LogicalDeviceInfo->CacheLineSize == 0)
- DAC960_Info(" Stripe Size: N/A, "
- "Segment Size: N/A\n", Controller);
- else
- DAC960_Info(" Stripe Size: N/A, "
- "Segment Size: %dKB\n", Controller,
- 1 << (LogicalDeviceInfo->CacheLineSize - 2));
- }
- else
- {
- if (LogicalDeviceInfo->CacheLineSize == 0)
- DAC960_Info(" Stripe Size: %dKB, "
- "Segment Size: N/A\n", Controller,
- 1 << (LogicalDeviceInfo->StripeSize - 2));
- else
- DAC960_Info(" Stripe Size: %dKB, "
- "Segment Size: %dKB\n", Controller,
- 1 << (LogicalDeviceInfo->StripeSize - 2),
- 1 << (LogicalDeviceInfo->CacheLineSize - 2));
- }
- DAC960_Info(" %s, %s\n", Controller,
- ReadCacheStatus[
- LogicalDeviceInfo->LogicalDeviceControl.ReadCache],
- WriteCacheStatus[
- LogicalDeviceInfo->LogicalDeviceControl.WriteCache]);
- if (LogicalDeviceInfo->SoftErrors > 0 ||
- LogicalDeviceInfo->CommandsFailed > 0 ||
- LogicalDeviceInfo->DeferredWriteErrors)
- DAC960_Info(" Errors - Soft: %d, Failed: %d, "
- "Deferred Write: %d\n", Controller,
- LogicalDeviceInfo->SoftErrors,
- LogicalDeviceInfo->CommandsFailed,
- LogicalDeviceInfo->DeferredWriteErrors);
-
- }
- return true;
-}
-
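Worked example of the log2 decode in the stripe/segment reporting above: a StripeSize field of 9 prints as 1 << (9 - 2) = 128KB and a CacheLineSize field of 2 prints as 1KB, while a field of 0 is reported as N/A rather than decoded.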
-/*
- DAC960_RegisterBlockDevice registers the Block Device structures
- associated with Controller.
-*/
-
-static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
-{
- int MajorNumber = DAC960_MAJOR + Controller->ControllerNumber;
- int n;
-
- /*
- Register the Block Device Major Number for this DAC960 Controller.
- */
- if (register_blkdev(MajorNumber, "dac960") < 0)
- return false;
-
- for (n = 0; n < DAC960_MaxLogicalDrives; n++) {
- struct gendisk *disk = Controller->disks[n];
- struct request_queue *RequestQueue;
-
- /* for now, let all request queues share the controller's lock */
- RequestQueue = blk_init_queue(DAC960_RequestFunction, &Controller->queue_lock);
- if (!RequestQueue) {
- printk("DAC960: failure to allocate request queue\n");
- continue;
- }
- Controller->RequestQueue[n] = RequestQueue;
- RequestQueue->queuedata = Controller;
- blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit);
- blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
- disk->queue = RequestQueue;
- sprintf(disk->disk_name, "rd/c%dd%d", Controller->ControllerNumber, n);
- disk->major = MajorNumber;
- disk->first_minor = n << DAC960_MaxPartitionsBits;
- disk->fops = &DAC960_BlockDeviceOperations;
- }
- /*
- Indicate that the Block Device Registration completed successfully.
- */
- return true;
-}
-
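The registration above is the pre-blk-mq model: register_blkdev() claims the major number, every logical drive gets its own gendisk, and each request_queue from blk_init_queue() shares the controller spinlock, which the block layer holds whenever it invokes DAC960_RequestFunction().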
-
-/*
- DAC960_UnregisterBlockDevice unregisters the Block Device structures
- associated with Controller.
-*/
-
-static void DAC960_UnregisterBlockDevice(DAC960_Controller_T *Controller)
-{
- int MajorNumber = DAC960_MAJOR + Controller->ControllerNumber;
- int disk;
-
- /* does order matter when deleting the gendisk and cleaning up the request queue? */
- for (disk = 0; disk < DAC960_MaxLogicalDrives; disk++) {
- del_gendisk(Controller->disks[disk]);
- blk_cleanup_queue(Controller->RequestQueue[disk]);
- Controller->RequestQueue[disk] = NULL;
- }
-
- /*
- Unregister the Block Device Major Number for this DAC960 Controller.
- */
- unregister_blkdev(MajorNumber, "dac960");
-}
-
-/*
- DAC960_ComputeGenericDiskInfo computes the values for the Generic Disk
- Information Partition Sector Counts and Block Sizes.
-*/
-
-static void DAC960_ComputeGenericDiskInfo(DAC960_Controller_T *Controller)
-{
- int disk;
- for (disk = 0; disk < DAC960_MaxLogicalDrives; disk++)
- set_capacity(Controller->disks[disk], disk_size(Controller, disk));
-}
-
-/*
- DAC960_ReportErrorStatus reports Controller BIOS Messages passed through
- the Error Status Register when the driver performs the BIOS handshaking.
- It returns true for fatal errors and false otherwise.
-*/
-
-static bool DAC960_ReportErrorStatus(DAC960_Controller_T *Controller,
- unsigned char ErrorStatus,
- unsigned char Parameter0,
- unsigned char Parameter1)
-{
- switch (ErrorStatus)
- {
- case 0x00:
- DAC960_Notice("Physical Device %d:%d Not Responding\n",
- Controller, Parameter1, Parameter0);
- break;
- case 0x08:
- if (Controller->DriveSpinUpMessageDisplayed) break;
- DAC960_Notice("Spinning Up Drives\n", Controller);
- Controller->DriveSpinUpMessageDisplayed = true;
- break;
- case 0x30:
- DAC960_Notice("Configuration Checksum Error\n", Controller);
- break;
- case 0x60:
- DAC960_Notice("Mirror Race Recovery Failed\n", Controller);
- break;
- case 0x70:
- DAC960_Notice("Mirror Race Recovery In Progress\n", Controller);
- break;
- case 0x90:
- DAC960_Notice("Physical Device %d:%d COD Mismatch\n",
- Controller, Parameter1, Parameter0);
- break;
- case 0xA0:
- DAC960_Notice("Logical Drive Installation Aborted\n", Controller);
- break;
- case 0xB0:
- DAC960_Notice("Mirror Race On A Critical Logical Drive\n", Controller);
- break;
- case 0xD0:
- DAC960_Notice("New Controller Configuration Found\n", Controller);
- break;
- case 0xF0:
- DAC960_Error("Fatal Memory Parity Error for Controller at\n", Controller);
- return true;
- default:
- DAC960_Error("Unknown Initialization Error %02X for Controller at\n",
- Controller, ErrorStatus);
- return true;
- }
- return false;
-}
-
-
-/*
- * DAC960_DetectCleanup releases the resources that were allocated
- * during DAC960_DetectController(). DAC960_DetectController
- * has several internal failure points, so not ALL resources may
- * have been allocated. It's important to free only the
- * resources that HAVE been allocated. The code below always
- * tests that a resource has been allocated before attempting to
- * free it.
- */
-static void DAC960_DetectCleanup(DAC960_Controller_T *Controller)
-{
- int i;
-
- /* Free the memory mailbox, status, and related structures */
- free_dma_loaf(Controller->PCIDevice, &Controller->DmaPages);
- if (Controller->MemoryMappedAddress) {
- switch(Controller->HardwareType)
- {
- case DAC960_GEM_Controller:
- DAC960_GEM_DisableInterrupts(Controller->BaseAddress);
- break;
- case DAC960_BA_Controller:
- DAC960_BA_DisableInterrupts(Controller->BaseAddress);
- break;
- case DAC960_LP_Controller:
- DAC960_LP_DisableInterrupts(Controller->BaseAddress);
- break;
- case DAC960_LA_Controller:
- DAC960_LA_DisableInterrupts(Controller->BaseAddress);
- break;
- case DAC960_PG_Controller:
- DAC960_PG_DisableInterrupts(Controller->BaseAddress);
- break;
- case DAC960_PD_Controller:
- DAC960_PD_DisableInterrupts(Controller->BaseAddress);
- break;
- case DAC960_P_Controller:
- DAC960_PD_DisableInterrupts(Controller->BaseAddress);
- break;
- }
- iounmap(Controller->MemoryMappedAddress);
- }
- if (Controller->IRQ_Channel)
- free_irq(Controller->IRQ_Channel, Controller);
- if (Controller->IO_Address)
- release_region(Controller->IO_Address, 0x80);
- pci_disable_device(Controller->PCIDevice);
- for (i = 0; (i < DAC960_MaxLogicalDrives) && Controller->disks[i]; i++)
- put_disk(Controller->disks[i]);
- DAC960_Controllers[Controller->ControllerNumber] = NULL;
- kfree(Controller);
-}
-
-
-/*
- DAC960_DetectController detects Mylex DAC960/AcceleRAID/eXtremeRAID
- PCI RAID Controllers by interrogating the PCI Configuration Space for
- Controller Type.
-*/
-
-static DAC960_Controller_T *
-DAC960_DetectController(struct pci_dev *PCI_Device,
- const struct pci_device_id *entry)
-{
- struct DAC960_privdata *privdata =
- (struct DAC960_privdata *)entry->driver_data;
- irq_handler_t InterruptHandler = privdata->InterruptHandler;
- unsigned int MemoryWindowSize = privdata->MemoryWindowSize;
- DAC960_Controller_T *Controller = NULL;
- unsigned char DeviceFunction = PCI_Device->devfn;
- unsigned char ErrorStatus, Parameter0, Parameter1;
- unsigned int IRQ_Channel;
- void __iomem *BaseAddress;
- int i;
-
- Controller = kzalloc(sizeof(DAC960_Controller_T), GFP_ATOMIC);
- if (Controller == NULL) {
- DAC960_Error("Unable to allocate Controller structure for "
- "Controller at\n", NULL);
- return NULL;
- }
- Controller->ControllerNumber = DAC960_ControllerCount;
- DAC960_Controllers[DAC960_ControllerCount++] = Controller;
- Controller->Bus = PCI_Device->bus->number;
- Controller->FirmwareType = privdata->FirmwareType;
- Controller->HardwareType = privdata->HardwareType;
- Controller->Device = DeviceFunction >> 3;
- Controller->Function = DeviceFunction & 0x7;
- Controller->PCIDevice = PCI_Device;
- strcpy(Controller->FullModelName, "DAC960");
-
- if (pci_enable_device(PCI_Device))
- goto Failure;
-
- switch (Controller->HardwareType)
- {
- case DAC960_GEM_Controller:
- Controller->PCI_Address = pci_resource_start(PCI_Device, 0);
- break;
- case DAC960_BA_Controller:
- Controller->PCI_Address = pci_resource_start(PCI_Device, 0);
- break;
- case DAC960_LP_Controller:
- Controller->PCI_Address = pci_resource_start(PCI_Device, 0);
- break;
- case DAC960_LA_Controller:
- Controller->PCI_Address = pci_resource_start(PCI_Device, 0);
- break;
- case DAC960_PG_Controller:
- Controller->PCI_Address = pci_resource_start(PCI_Device, 0);
- break;
- case DAC960_PD_Controller:
- Controller->IO_Address = pci_resource_start(PCI_Device, 0);
- Controller->PCI_Address = pci_resource_start(PCI_Device, 1);
- break;
- case DAC960_P_Controller:
- Controller->IO_Address = pci_resource_start(PCI_Device, 0);
- Controller->PCI_Address = pci_resource_start(PCI_Device, 1);
- break;
- }
-
- pci_set_drvdata(PCI_Device, (void *)((long)Controller->ControllerNumber));
- for (i = 0; i < DAC960_MaxLogicalDrives; i++) {
- Controller->disks[i] = alloc_disk(1<<DAC960_MaxPartitionsBits);
- if (!Controller->disks[i])
- goto Failure;
- Controller->disks[i]->private_data = (void *)((long)i);
- }
- init_waitqueue_head(&Controller->CommandWaitQueue);
- init_waitqueue_head(&Controller->HealthStatusWaitQueue);
- spin_lock_init(&Controller->queue_lock);
- DAC960_AnnounceDriver(Controller);
- /*
- Map the Controller Register Window.
- */
- if (MemoryWindowSize < PAGE_SIZE)
- MemoryWindowSize = PAGE_SIZE;
- Controller->MemoryMappedAddress =
- ioremap_nocache(Controller->PCI_Address & PAGE_MASK, MemoryWindowSize);
- Controller->BaseAddress =
- Controller->MemoryMappedAddress + (Controller->PCI_Address & ~PAGE_MASK);
- if (Controller->MemoryMappedAddress == NULL)
- {
- DAC960_Error("Unable to map Controller Register Window for "
- "Controller at\n", Controller);
- goto Failure;
- }
- BaseAddress = Controller->BaseAddress;
- switch (Controller->HardwareType)
- {
- case DAC960_GEM_Controller:
- DAC960_GEM_DisableInterrupts(BaseAddress);
- DAC960_GEM_AcknowledgeHardwareMailboxStatus(BaseAddress);
- udelay(1000);
- while (DAC960_GEM_InitializationInProgressP(BaseAddress))
- {
- if (DAC960_GEM_ReadErrorStatus(BaseAddress, &ErrorStatus,
- &Parameter0, &Parameter1) &&
- DAC960_ReportErrorStatus(Controller, ErrorStatus,
- Parameter0, Parameter1))
- goto Failure;
- udelay(10);
- }
- if (!DAC960_V2_EnableMemoryMailboxInterface(Controller))
- {
- DAC960_Error("Unable to Enable Memory Mailbox Interface "
- "for Controller at\n", Controller);
- goto Failure;
- }
- DAC960_GEM_EnableInterrupts(BaseAddress);
- Controller->QueueCommand = DAC960_GEM_QueueCommand;
- Controller->ReadControllerConfiguration =
- DAC960_V2_ReadControllerConfiguration;
- Controller->ReadDeviceConfiguration =
- DAC960_V2_ReadDeviceConfiguration;
- Controller->ReportDeviceConfiguration =
- DAC960_V2_ReportDeviceConfiguration;
- Controller->QueueReadWriteCommand =
- DAC960_V2_QueueReadWriteCommand;
- break;
- case DAC960_BA_Controller:
- DAC960_BA_DisableInterrupts(BaseAddress);
- DAC960_BA_AcknowledgeHardwareMailboxStatus(BaseAddress);
- udelay(1000);
- while (DAC960_BA_InitializationInProgressP(BaseAddress))
- {
- if (DAC960_BA_ReadErrorStatus(BaseAddress, &ErrorStatus,
- &Parameter0, &Parameter1) &&
- DAC960_ReportErrorStatus(Controller, ErrorStatus,
- Parameter0, Parameter1))
- goto Failure;
- udelay(10);
- }
- if (!DAC960_V2_EnableMemoryMailboxInterface(Controller))
- {
- DAC960_Error("Unable to Enable Memory Mailbox Interface "
- "for Controller at\n", Controller);
- goto Failure;
- }
- DAC960_BA_EnableInterrupts(BaseAddress);
- Controller->QueueCommand = DAC960_BA_QueueCommand;
- Controller->ReadControllerConfiguration =
- DAC960_V2_ReadControllerConfiguration;
- Controller->ReadDeviceConfiguration =
- DAC960_V2_ReadDeviceConfiguration;
- Controller->ReportDeviceConfiguration =
- DAC960_V2_ReportDeviceConfiguration;
- Controller->QueueReadWriteCommand =
- DAC960_V2_QueueReadWriteCommand;
- break;
- case DAC960_LP_Controller:
- DAC960_LP_DisableInterrupts(BaseAddress);
- DAC960_LP_AcknowledgeHardwareMailboxStatus(BaseAddress);
- udelay(1000);
- while (DAC960_LP_InitializationInProgressP(BaseAddress))
- {
- if (DAC960_LP_ReadErrorStatus(BaseAddress, &ErrorStatus,
- &Parameter0, &Parameter1) &&
- DAC960_ReportErrorStatus(Controller, ErrorStatus,
- Parameter0, Parameter1))
- goto Failure;
- udelay(10);
- }
- if (!DAC960_V2_EnableMemoryMailboxInterface(Controller))
- {
- DAC960_Error("Unable to Enable Memory Mailbox Interface "
- "for Controller at\n", Controller);
- goto Failure;
- }
- DAC960_LP_EnableInterrupts(BaseAddress);
- Controller->QueueCommand = DAC960_LP_QueueCommand;
- Controller->ReadControllerConfiguration =
- DAC960_V2_ReadControllerConfiguration;
- Controller->ReadDeviceConfiguration =
- DAC960_V2_ReadDeviceConfiguration;
- Controller->ReportDeviceConfiguration =
- DAC960_V2_ReportDeviceConfiguration;
- Controller->QueueReadWriteCommand =
- DAC960_V2_QueueReadWriteCommand;
- break;
- case DAC960_LA_Controller:
- DAC960_LA_DisableInterrupts(BaseAddress);
- DAC960_LA_AcknowledgeHardwareMailboxStatus(BaseAddress);
- udelay(1000);
- while (DAC960_LA_InitializationInProgressP(BaseAddress))
- {
- if (DAC960_LA_ReadErrorStatus(BaseAddress, &ErrorStatus,
- &Parameter0, &Parameter1) &&
- DAC960_ReportErrorStatus(Controller, ErrorStatus,
- Parameter0, Parameter1))
- goto Failure;
- udelay(10);
- }
- if (!DAC960_V1_EnableMemoryMailboxInterface(Controller))
- {
- DAC960_Error("Unable to Enable Memory Mailbox Interface "
- "for Controller at\n", Controller);
- goto Failure;
- }
- DAC960_LA_EnableInterrupts(BaseAddress);
- if (Controller->V1.DualModeMemoryMailboxInterface)
- Controller->QueueCommand = DAC960_LA_QueueCommandDualMode;
- else Controller->QueueCommand = DAC960_LA_QueueCommandSingleMode;
- Controller->ReadControllerConfiguration =
- DAC960_V1_ReadControllerConfiguration;
- Controller->ReadDeviceConfiguration =
- DAC960_V1_ReadDeviceConfiguration;
- Controller->ReportDeviceConfiguration =
- DAC960_V1_ReportDeviceConfiguration;
- Controller->QueueReadWriteCommand =
- DAC960_V1_QueueReadWriteCommand;
- break;
- case DAC960_PG_Controller:
- DAC960_PG_DisableInterrupts(BaseAddress);
- DAC960_PG_AcknowledgeHardwareMailboxStatus(BaseAddress);
- udelay(1000);
- while (DAC960_PG_InitializationInProgressP(BaseAddress))
- {
- if (DAC960_PG_ReadErrorStatus(BaseAddress, &ErrorStatus,
- &Parameter0, &Parameter1) &&
- DAC960_ReportErrorStatus(Controller, ErrorStatus,
- Parameter0, Parameter1))
- goto Failure;
- udelay(10);
- }
- if (!DAC960_V1_EnableMemoryMailboxInterface(Controller))
- {
- DAC960_Error("Unable to Enable Memory Mailbox Interface "
- "for Controller at\n", Controller);
- goto Failure;
- }
- DAC960_PG_EnableInterrupts(BaseAddress);
- if (Controller->V1.DualModeMemoryMailboxInterface)
- Controller->QueueCommand = DAC960_PG_QueueCommandDualMode;
- else Controller->QueueCommand = DAC960_PG_QueueCommandSingleMode;
- Controller->ReadControllerConfiguration =
- DAC960_V1_ReadControllerConfiguration;
- Controller->ReadDeviceConfiguration =
- DAC960_V1_ReadDeviceConfiguration;
- Controller->ReportDeviceConfiguration =
- DAC960_V1_ReportDeviceConfiguration;
- Controller->QueueReadWriteCommand =
- DAC960_V1_QueueReadWriteCommand;
- break;
- case DAC960_PD_Controller:
- if (!request_region(Controller->IO_Address, 0x80,
- Controller->FullModelName)) {
- DAC960_Error("IO port 0x%lx busy for Controller at\n",
- Controller, Controller->IO_Address);
- goto Failure;
- }
- DAC960_PD_DisableInterrupts(BaseAddress);
- DAC960_PD_AcknowledgeStatus(BaseAddress);
- udelay(1000);
- while (DAC960_PD_InitializationInProgressP(BaseAddress))
- {
- if (DAC960_PD_ReadErrorStatus(BaseAddress, &ErrorStatus,
- &Parameter0, &Parameter1) &&
- DAC960_ReportErrorStatus(Controller, ErrorStatus,
- Parameter0, Parameter1))
- goto Failure;
- udelay(10);
- }
- if (!DAC960_V1_EnableMemoryMailboxInterface(Controller))
- {
- DAC960_Error("Unable to allocate DMA mapped memory "
- "for Controller at\n", Controller);
- goto Failure;
- }
- DAC960_PD_EnableInterrupts(BaseAddress);
- Controller->QueueCommand = DAC960_PD_QueueCommand;
- Controller->ReadControllerConfiguration =
- DAC960_V1_ReadControllerConfiguration;
- Controller->ReadDeviceConfiguration =
- DAC960_V1_ReadDeviceConfiguration;
- Controller->ReportDeviceConfiguration =
- DAC960_V1_ReportDeviceConfiguration;
- Controller->QueueReadWriteCommand =
- DAC960_V1_QueueReadWriteCommand;
- break;
- case DAC960_P_Controller:
- if (!request_region(Controller->IO_Address, 0x80,
- Controller->FullModelName)){
- DAC960_Error("IO port 0x%lx busy for Controller at\n",
- Controller, Controller->IO_Address);
- goto Failure;
- }
- DAC960_PD_DisableInterrupts(BaseAddress);
- DAC960_PD_AcknowledgeStatus(BaseAddress);
- udelay(1000);
- while (DAC960_PD_InitializationInProgressP(BaseAddress))
- {
- if (DAC960_PD_ReadErrorStatus(BaseAddress, &ErrorStatus,
- &Parameter0, &Parameter1) &&
- DAC960_ReportErrorStatus(Controller, ErrorStatus,
- Parameter0, Parameter1))
- goto Failure;
- udelay(10);
- }
- if (!DAC960_V1_EnableMemoryMailboxInterface(Controller))
- {
- DAC960_Error("Unable to allocate DMA mapped memory"
- "for Controller at\n", Controller);
- goto Failure;
- }
- DAC960_PD_EnableInterrupts(BaseAddress);
- Controller->QueueCommand = DAC960_P_QueueCommand;
- Controller->ReadControllerConfiguration =
- DAC960_V1_ReadControllerConfiguration;
- Controller->ReadDeviceConfiguration =
- DAC960_V1_ReadDeviceConfiguration;
- Controller->ReportDeviceConfiguration =
- DAC960_V1_ReportDeviceConfiguration;
- Controller->QueueReadWriteCommand =
- DAC960_V1_QueueReadWriteCommand;
- break;
- }
- /*
- Acquire shared access to the IRQ Channel.
- */
- IRQ_Channel = PCI_Device->irq;
- if (request_irq(IRQ_Channel, InterruptHandler, IRQF_SHARED,
- Controller->FullModelName, Controller) < 0)
- {
- DAC960_Error("Unable to acquire IRQ Channel %d for Controller at\n",
- Controller, Controller->IRQ_Channel);
- goto Failure;
- }
- Controller->IRQ_Channel = IRQ_Channel;
- Controller->InitialCommand.CommandIdentifier = 1;
- Controller->InitialCommand.Controller = Controller;
- Controller->Commands[0] = &Controller->InitialCommand;
- Controller->FreeCommands = &Controller->InitialCommand;
- return Controller;
-
-Failure:
- if (Controller->IO_Address == 0)
- DAC960_Error("PCI Bus %d Device %d Function %d I/O Address N/A "
- "PCI Address 0x%X\n", Controller,
- Controller->Bus, Controller->Device,
- Controller->Function, Controller->PCI_Address);
- else
- DAC960_Error("PCI Bus %d Device %d Function %d I/O Address "
- "0x%X PCI Address 0x%X\n", Controller,
- Controller->Bus, Controller->Device,
- Controller->Function, Controller->IO_Address,
- Controller->PCI_Address);
- DAC960_DetectCleanup(Controller);
- DAC960_ControllerCount--;
- return NULL;
-}
-
-/*
- DAC960_InitializeController initializes Controller.
-*/
-
-static bool
-DAC960_InitializeController(DAC960_Controller_T *Controller)
-{
- if (DAC960_ReadControllerConfiguration(Controller) &&
- DAC960_ReportControllerConfiguration(Controller) &&
- DAC960_CreateAuxiliaryStructures(Controller) &&
- DAC960_ReadDeviceConfiguration(Controller) &&
- DAC960_ReportDeviceConfiguration(Controller) &&
- DAC960_RegisterBlockDevice(Controller))
- {
- /*
- Initialize the Monitoring Timer.
- */
- timer_setup(&Controller->MonitoringTimer,
- DAC960_MonitoringTimerFunction, 0);
- Controller->MonitoringTimer.expires =
- jiffies + DAC960_MonitoringTimerInterval;
- add_timer(&Controller->MonitoringTimer);
- Controller->ControllerInitialized = true;
- return true;
- }
- return false;
-}
-
-
-/*
- DAC960_FinalizeController finalizes Controller.
-*/
-
-static void DAC960_FinalizeController(DAC960_Controller_T *Controller)
-{
- if (Controller->ControllerInitialized)
- {
- unsigned long flags;
-
- /*
- * Acquiring and releasing lock here eliminates
- * a very low probability race.
- *
- * The code below allocates controller command structures
- * from the free list without holding the controller lock.
- * This is safe assuming there is no other activity on
- * the controller at the time.
- *
- * But, there might be a monitoring command still
- * in progress. Setting the Shutdown flag while holding
- * the lock ensures that there is no monitoring command
- * in the interrupt handler currently, and any monitoring
- * commands that complete from this time on will NOT return
- * their command structure to the free list.
- */
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- Controller->ShutdownMonitoringTimer = 1;
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
-
- del_timer_sync(&Controller->MonitoringTimer);
- if (Controller->FirmwareType == DAC960_V1_Controller)
- {
- DAC960_Notice("Flushing Cache...", Controller);
- DAC960_V1_ExecuteType3(Controller, DAC960_V1_Flush, 0);
- DAC960_Notice("done\n", Controller);
-
- if (Controller->HardwareType == DAC960_PD_Controller)
- release_region(Controller->IO_Address, 0x80);
- }
- else
- {
- DAC960_Notice("Flushing Cache...", Controller);
- DAC960_V2_DeviceOperation(Controller, DAC960_V2_PauseDevice,
- DAC960_V2_RAID_Controller);
- DAC960_Notice("done\n", Controller);
- }
- }
- DAC960_UnregisterBlockDevice(Controller);
- DAC960_DestroyAuxiliaryStructures(Controller);
- DAC960_DestroyProcEntries(Controller);
- DAC960_DetectCleanup(Controller);
-}
-
-
-/*
-  DAC960_Probe verifies a controller's existence and
-  initializes the DAC960 Driver for that controller.
-*/
-
-static int
-DAC960_Probe(struct pci_dev *dev, const struct pci_device_id *entry)
-{
- int disk;
- DAC960_Controller_T *Controller;
-
- if (DAC960_ControllerCount == DAC960_MaxControllers)
- {
- DAC960_Error("More than %d DAC960 Controllers detected - "
- "ignoring from Controller at\n",
- NULL, DAC960_MaxControllers);
- return -ENODEV;
- }
-
- Controller = DAC960_DetectController(dev, entry);
- if (!Controller)
- return -ENODEV;
-
- if (!DAC960_InitializeController(Controller)) {
- DAC960_FinalizeController(Controller);
- return -ENODEV;
- }
-
- for (disk = 0; disk < DAC960_MaxLogicalDrives; disk++) {
- set_capacity(Controller->disks[disk], disk_size(Controller, disk));
- add_disk(Controller->disks[disk]);
- }
- DAC960_CreateProcEntries(Controller);
- return 0;
-}
-
-
-/*
-  DAC960_Remove finalizes the Controller associated with PCI_Device when
-  the device is removed.
-*/
-
-static void DAC960_Remove(struct pci_dev *PCI_Device)
-{
- int Controller_Number = (long)pci_get_drvdata(PCI_Device);
- DAC960_Controller_T *Controller = DAC960_Controllers[Controller_Number];
- if (Controller != NULL)
- DAC960_FinalizeController(Controller);
-}
-
-
-/*
- DAC960_V1_QueueReadWriteCommand prepares and queues a Read/Write Command for
- DAC960 V1 Firmware Controllers.
-*/
-
-static void DAC960_V1_QueueReadWriteCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- DAC960_V1_ScatterGatherSegment_T *ScatterGatherList =
- Command->V1.ScatterGatherList;
- struct scatterlist *ScatterList = Command->V1.ScatterList;
-
- DAC960_V1_ClearCommand(Command);
-
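-  /*
-    A single-segment transfer uses the plain Read/Write opcodes with the
-    segment's bus address placed directly in the mailbox; multi-segment
-    transfers use the scatter/gather opcodes and point the mailbox at the
-    DMA-mapped segment list instead.
-  */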
- if (Command->SegmentCount == 1)
- {
- if (Command->DmaDirection == PCI_DMA_FROMDEVICE)
- CommandMailbox->Type5.CommandOpcode = DAC960_V1_Read;
- else
- CommandMailbox->Type5.CommandOpcode = DAC960_V1_Write;
-
- CommandMailbox->Type5.LD.TransferLength = Command->BlockCount;
- CommandMailbox->Type5.LD.LogicalDriveNumber = Command->LogicalDriveNumber;
- CommandMailbox->Type5.LogicalBlockAddress = Command->BlockNumber;
- CommandMailbox->Type5.BusAddress =
- (DAC960_BusAddress32_T)sg_dma_address(ScatterList);
- }
- else
- {
- int i;
-
- if (Command->DmaDirection == PCI_DMA_FROMDEVICE)
- CommandMailbox->Type5.CommandOpcode = DAC960_V1_ReadWithScatterGather;
- else
- CommandMailbox->Type5.CommandOpcode = DAC960_V1_WriteWithScatterGather;
-
- CommandMailbox->Type5.LD.TransferLength = Command->BlockCount;
- CommandMailbox->Type5.LD.LogicalDriveNumber = Command->LogicalDriveNumber;
- CommandMailbox->Type5.LogicalBlockAddress = Command->BlockNumber;
- CommandMailbox->Type5.BusAddress = Command->V1.ScatterGatherListDMA;
-
- CommandMailbox->Type5.ScatterGatherCount = Command->SegmentCount;
-
- for (i = 0; i < Command->SegmentCount; i++, ScatterList++, ScatterGatherList++) {
- ScatterGatherList->SegmentDataPointer =
- (DAC960_BusAddress32_T)sg_dma_address(ScatterList);
- ScatterGatherList->SegmentByteCount =
- (DAC960_ByteCount32_T)sg_dma_len(ScatterList);
- }
- }
- DAC960_QueueCommand(Command);
-}
-
-
-/*
- DAC960_V2_QueueReadWriteCommand prepares and queues a Read/Write Command for
- DAC960 V2 Firmware Controllers.
-*/
-
-static void DAC960_V2_QueueReadWriteCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- struct scatterlist *ScatterList = Command->V2.ScatterList;
-
- DAC960_V2_ClearCommand(Command);
-
- CommandMailbox->SCSI_10.CommandOpcode = DAC960_V2_SCSI_10;
- CommandMailbox->SCSI_10.CommandControlBits.DataTransferControllerToHost =
- (Command->DmaDirection == PCI_DMA_FROMDEVICE);
- CommandMailbox->SCSI_10.DataTransferSize =
- Command->BlockCount << DAC960_BlockSizeBits;
- CommandMailbox->SCSI_10.RequestSenseBusAddress = Command->V2.RequestSenseDMA;
- CommandMailbox->SCSI_10.PhysicalDevice =
- Controller->V2.LogicalDriveToVirtualDevice[Command->LogicalDriveNumber];
- CommandMailbox->SCSI_10.RequestSenseSize = sizeof(DAC960_SCSI_RequestSense_T);
- CommandMailbox->SCSI_10.CDBLength = 10;
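-  /*
-    Build the 10-byte READ(10)/WRITE(10) CDB by hand: opcode 0x28 or 0x2A,
-    the 32-bit logical block address big-endian in bytes 2..5, and the
-    16-bit transfer length big-endian in bytes 7..8.
-  */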
- CommandMailbox->SCSI_10.SCSI_CDB[0] =
- (Command->DmaDirection == PCI_DMA_FROMDEVICE ? 0x28 : 0x2A);
- CommandMailbox->SCSI_10.SCSI_CDB[2] = Command->BlockNumber >> 24;
- CommandMailbox->SCSI_10.SCSI_CDB[3] = Command->BlockNumber >> 16;
- CommandMailbox->SCSI_10.SCSI_CDB[4] = Command->BlockNumber >> 8;
- CommandMailbox->SCSI_10.SCSI_CDB[5] = Command->BlockNumber;
- CommandMailbox->SCSI_10.SCSI_CDB[7] = Command->BlockCount >> 8;
- CommandMailbox->SCSI_10.SCSI_CDB[8] = Command->BlockCount;
-
- if (Command->SegmentCount == 1)
- {
- CommandMailbox->SCSI_10.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- (DAC960_BusAddress64_T)sg_dma_address(ScatterList);
- CommandMailbox->SCSI_10.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->SCSI_10.DataTransferSize;
- }
- else
- {
- DAC960_V2_ScatterGatherSegment_T *ScatterGatherList;
- int i;
-
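-      /*
-	The mailbox itself holds only two inline scatter/gather segments;
-	longer lists live in the separately DMA-mapped scatter/gather list
-	and are referenced through the ExtendedScatterGather form.
-      */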
- if (Command->SegmentCount > 2)
- {
- ScatterGatherList = Command->V2.ScatterGatherList;
- CommandMailbox->SCSI_10.CommandControlBits
- .AdditionalScatterGatherListMemory = true;
- CommandMailbox->SCSI_10.DataTransferMemoryAddress
- .ExtendedScatterGather.ScatterGatherList0Length = Command->SegmentCount;
- CommandMailbox->SCSI_10.DataTransferMemoryAddress
- .ExtendedScatterGather.ScatterGatherList0Address =
- Command->V2.ScatterGatherListDMA;
- }
- else
- ScatterGatherList = CommandMailbox->SCSI_10.DataTransferMemoryAddress
- .ScatterGatherSegments;
-
- for (i = 0; i < Command->SegmentCount; i++, ScatterList++, ScatterGatherList++) {
- ScatterGatherList->SegmentDataPointer =
- (DAC960_BusAddress64_T)sg_dma_address(ScatterList);
- ScatterGatherList->SegmentByteCount =
- (DAC960_ByteCount64_T)sg_dma_len(ScatterList);
- }
- }
- DAC960_QueueCommand(Command);
-}
-
-
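-/*
-  DAC960_process_queue pulls requests off the given request queue and maps
-  each one onto a free Command. It returns 1 once the queue is empty, or 0
-  if it stalls because no free Command is available.
-*/
-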
-static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_queue *req_q)
-{
- struct request *Request;
- DAC960_Command_T *Command;
-
- while(1) {
- Request = blk_peek_request(req_q);
- if (!Request)
- return 1;
-
- Command = DAC960_AllocateCommand(Controller);
- if (Command == NULL)
- return 0;
-
- if (rq_data_dir(Request) == READ) {
- Command->DmaDirection = PCI_DMA_FROMDEVICE;
- Command->CommandType = DAC960_ReadCommand;
- } else {
- Command->DmaDirection = PCI_DMA_TODEVICE;
- Command->CommandType = DAC960_WriteCommand;
- }
- Command->Completion = Request->end_io_data;
- Command->LogicalDriveNumber = (long)Request->rq_disk->private_data;
- Command->BlockNumber = blk_rq_pos(Request);
- Command->BlockCount = blk_rq_sectors(Request);
- Command->Request = Request;
- blk_start_request(Request);
- Command->SegmentCount = blk_rq_map_sg(req_q,
- Command->Request, Command->cmd_sglist);
-		/* pci_map_sg MAY change the value of SegmentCount */
- Command->SegmentCount = pci_map_sg(Controller->PCIDevice, Command->cmd_sglist,
- Command->SegmentCount, Command->DmaDirection);
-
- DAC960_QueueReadWriteCommand(Command);
- }
-}
-
-/*
-  DAC960_ProcessRequest scans the per-logical-drive Request Queues, starting
-  with the queue at req_q_index and wrapping around to the beginning, and
-  queues I/O Requests to the Controller until a queue stalls for lack of a
-  free Command, remembering where to resume on the next call.
-*/
-static void DAC960_ProcessRequest(DAC960_Controller_T *controller)
-{
- int i;
-
- if (!controller->ControllerInitialized)
- return;
-
- /* Do this better later! */
- for (i = controller->req_q_index; i < DAC960_MaxLogicalDrives; i++) {
- struct request_queue *req_q = controller->RequestQueue[i];
-
- if (req_q == NULL)
- continue;
-
- if (!DAC960_process_queue(controller, req_q)) {
- controller->req_q_index = i;
- return;
- }
- }
-
- if (controller->req_q_index == 0)
- return;
-
- for (i = 0; i < controller->req_q_index; i++) {
- struct request_queue *req_q = controller->RequestQueue[i];
-
- if (req_q == NULL)
- continue;
-
- if (!DAC960_process_queue(controller, req_q)) {
- controller->req_q_index = i;
- return;
- }
- }
-}
-
-
-/*
-  DAC960_queue_partial_rw extracts one bio from the request already
-  associated with the argument Command, constructs a new Command to retry
-  I/O on that bio alone, and queues that Command to the Controller.
-
-  Since this function re-uses a previously-allocated Command, there is no
-  failure mode from trying to allocate a command.
-*/
-
-static void DAC960_queue_partial_rw(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- struct request *Request = Command->Request;
- struct request_queue *req_q = Controller->RequestQueue[Command->LogicalDriveNumber];
-
- if (Command->DmaDirection == PCI_DMA_FROMDEVICE)
- Command->CommandType = DAC960_ReadRetryCommand;
- else
- Command->CommandType = DAC960_WriteRetryCommand;
-
- /*
- * We could be more efficient with these mapping requests
- * and map only the portions that we need. But since this
- * code should almost never be called, just go with a
- * simple coding.
- */
- (void)blk_rq_map_sg(req_q, Command->Request, Command->cmd_sglist);
-
- (void)pci_map_sg(Controller->PCIDevice, Command->cmd_sglist, 1, Command->DmaDirection);
- /*
- * Resubmitting the request sector at a time is really tedious.
- * But, this should almost never happen. So, we're willing to pay
- * this price so that in the end, as much of the transfer is completed
- * successfully as possible.
- */
- Command->SegmentCount = 1;
- Command->BlockNumber = blk_rq_pos(Request);
- Command->BlockCount = 1;
- DAC960_QueueReadWriteCommand(Command);
- return;
-}
-
-/*
- DAC960_RequestFunction is the I/O Request Function for DAC960 Controllers.
-*/
-
-static void DAC960_RequestFunction(struct request_queue *RequestQueue)
-{
- DAC960_ProcessRequest(RequestQueue->queuedata);
-}
-
-/*
-  DAC960_ProcessCompletedRequest performs completion processing for an
-  individual Request.
-*/
-
-static inline bool DAC960_ProcessCompletedRequest(DAC960_Command_T *Command,
- bool SuccessfulIO)
-{
- struct request *Request = Command->Request;
- blk_status_t Error = SuccessfulIO ? BLK_STS_OK : BLK_STS_IOERR;
-
- pci_unmap_sg(Command->Controller->PCIDevice, Command->cmd_sglist,
- Command->SegmentCount, Command->DmaDirection);
-
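-  /* BlockCount is in 512-byte sectors; the shift by 9 converts it to the
-     byte count that __blk_end_request expects. */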
- if (!__blk_end_request(Request, Error, Command->BlockCount << 9)) {
- if (Command->Completion) {
- complete(Command->Completion);
- Command->Completion = NULL;
- }
- return true;
- }
- return false;
-}
-
-/*
- DAC960_V1_ReadWriteError prints an appropriate error message for Command
- when an error occurs on a Read or Write operation.
-*/
-
-static void DAC960_V1_ReadWriteError(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- unsigned char *CommandName = "UNKNOWN";
- switch (Command->CommandType)
- {
- case DAC960_ReadCommand:
- case DAC960_ReadRetryCommand:
- CommandName = "READ";
- break;
- case DAC960_WriteCommand:
- case DAC960_WriteRetryCommand:
- CommandName = "WRITE";
- break;
- case DAC960_MonitoringCommand:
- case DAC960_ImmediateCommand:
- case DAC960_QueuedCommand:
- break;
- }
- switch (Command->V1.CommandStatus)
- {
- case DAC960_V1_IrrecoverableDataError:
- DAC960_Error("Irrecoverable Data Error on %s:\n",
- Controller, CommandName);
- break;
- case DAC960_V1_LogicalDriveNonexistentOrOffline:
- DAC960_Error("Logical Drive Nonexistent or Offline on %s:\n",
- Controller, CommandName);
- break;
- case DAC960_V1_AccessBeyondEndOfLogicalDrive:
- DAC960_Error("Attempt to Access Beyond End of Logical Drive "
- "on %s:\n", Controller, CommandName);
- break;
- case DAC960_V1_BadDataEncountered:
- DAC960_Error("Bad Data Encountered on %s:\n", Controller, CommandName);
- break;
- default:
- DAC960_Error("Unexpected Error Status %04X on %s:\n",
- Controller, Command->V1.CommandStatus, CommandName);
- break;
- }
- DAC960_Error(" /dev/rd/c%dd%d: absolute blocks %u..%u\n",
- Controller, Controller->ControllerNumber,
- Command->LogicalDriveNumber, Command->BlockNumber,
- Command->BlockNumber + Command->BlockCount - 1);
-}
-
-
-/*
- DAC960_V1_ProcessCompletedCommand performs completion processing for Command
- for DAC960 V1 Firmware Controllers.
-*/
-
-static void DAC960_V1_ProcessCompletedCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- DAC960_CommandType_T CommandType = Command->CommandType;
- DAC960_V1_CommandOpcode_T CommandOpcode =
- Command->V1.CommandMailbox.Common.CommandOpcode;
- DAC960_V1_CommandStatus_T CommandStatus = Command->V1.CommandStatus;
-
- if (CommandType == DAC960_ReadCommand ||
- CommandType == DAC960_WriteCommand)
- {
-
-#ifdef FORCE_RETRY_DEBUG
- CommandStatus = DAC960_V1_IrrecoverableDataError;
-#endif
-
- if (CommandStatus == DAC960_V1_NormalCompletion) {
-
- if (!DAC960_ProcessCompletedRequest(Command, true))
- BUG();
-
- } else if (CommandStatus == DAC960_V1_IrrecoverableDataError ||
- CommandStatus == DAC960_V1_BadDataEncountered)
- {
- /*
- * break the command down into pieces and resubmit each
- * piece, hoping that some of them will succeed.
- */
- DAC960_queue_partial_rw(Command);
- return;
- }
- else
- {
- if (CommandStatus != DAC960_V1_LogicalDriveNonexistentOrOffline)
- DAC960_V1_ReadWriteError(Command);
-
- if (!DAC960_ProcessCompletedRequest(Command, false))
- BUG();
- }
- }
- else if (CommandType == DAC960_ReadRetryCommand ||
- CommandType == DAC960_WriteRetryCommand)
- {
- bool normal_completion;
-#ifdef FORCE_RETRY_FAILURE_DEBUG
- static int retry_count = 1;
-#endif
- /*
- Perform completion processing for the portion that was
- retried, and submit the next portion, if any.
- */
- normal_completion = true;
- if (CommandStatus != DAC960_V1_NormalCompletion) {
- normal_completion = false;
- if (CommandStatus != DAC960_V1_LogicalDriveNonexistentOrOffline)
- DAC960_V1_ReadWriteError(Command);
- }
-
-#ifdef FORCE_RETRY_FAILURE_DEBUG
- if (!(++retry_count % 10000)) {
- printk("V1 error retry failure test\n");
- normal_completion = false;
- DAC960_V1_ReadWriteError(Command);
- }
-#endif
-
- if (!DAC960_ProcessCompletedRequest(Command, normal_completion)) {
- DAC960_queue_partial_rw(Command);
- return;
- }
- }
-
- else if (CommandType == DAC960_MonitoringCommand)
- {
- if (Controller->ShutdownMonitoringTimer)
- return;
- if (CommandOpcode == DAC960_V1_Enquiry)
- {
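-	  /*
-	    Compare the freshly fetched Enquiry against the cached copy to
-	    detect logical drives appearing or disappearing, deferred write
-	    error changes, and rebuild or consistency check transitions, and
-	    to decide which follow-up monitoring commands are needed.
-	  */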
- DAC960_V1_Enquiry_T *OldEnquiry = &Controller->V1.Enquiry;
- DAC960_V1_Enquiry_T *NewEnquiry = Controller->V1.NewEnquiry;
- unsigned int OldCriticalLogicalDriveCount =
- OldEnquiry->CriticalLogicalDriveCount;
- unsigned int NewCriticalLogicalDriveCount =
- NewEnquiry->CriticalLogicalDriveCount;
- if (NewEnquiry->NumberOfLogicalDrives > Controller->LogicalDriveCount)
- {
- int LogicalDriveNumber = Controller->LogicalDriveCount - 1;
- while (++LogicalDriveNumber < NewEnquiry->NumberOfLogicalDrives)
- DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
- "Now Exists\n", Controller,
- LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber);
- Controller->LogicalDriveCount = NewEnquiry->NumberOfLogicalDrives;
- DAC960_ComputeGenericDiskInfo(Controller);
- }
- if (NewEnquiry->NumberOfLogicalDrives < Controller->LogicalDriveCount)
- {
- int LogicalDriveNumber = NewEnquiry->NumberOfLogicalDrives - 1;
- while (++LogicalDriveNumber < Controller->LogicalDriveCount)
- DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
- "No Longer Exists\n", Controller,
- LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber);
- Controller->LogicalDriveCount = NewEnquiry->NumberOfLogicalDrives;
- DAC960_ComputeGenericDiskInfo(Controller);
- }
- if (NewEnquiry->StatusFlags.DeferredWriteError !=
- OldEnquiry->StatusFlags.DeferredWriteError)
- DAC960_Critical("Deferred Write Error Flag is now %s\n", Controller,
- (NewEnquiry->StatusFlags.DeferredWriteError
- ? "TRUE" : "FALSE"));
- if ((NewCriticalLogicalDriveCount > 0 ||
- NewCriticalLogicalDriveCount != OldCriticalLogicalDriveCount) ||
- (NewEnquiry->OfflineLogicalDriveCount > 0 ||
- NewEnquiry->OfflineLogicalDriveCount !=
- OldEnquiry->OfflineLogicalDriveCount) ||
- (NewEnquiry->DeadDriveCount > 0 ||
- NewEnquiry->DeadDriveCount !=
- OldEnquiry->DeadDriveCount) ||
- (NewEnquiry->EventLogSequenceNumber !=
- OldEnquiry->EventLogSequenceNumber) ||
- Controller->MonitoringTimerCount == 0 ||
- time_after_eq(jiffies, Controller->SecondaryMonitoringTime
- + DAC960_SecondaryMonitoringInterval))
- {
- Controller->V1.NeedLogicalDriveInformation = true;
- Controller->V1.NewEventLogSequenceNumber =
- NewEnquiry->EventLogSequenceNumber;
- Controller->V1.NeedErrorTableInformation = true;
- Controller->V1.NeedDeviceStateInformation = true;
- Controller->V1.StartDeviceStateScan = true;
- Controller->V1.NeedBackgroundInitializationStatus =
- Controller->V1.BackgroundInitializationStatusSupported;
- Controller->SecondaryMonitoringTime = jiffies;
- }
- if (NewEnquiry->RebuildFlag == DAC960_V1_StandbyRebuildInProgress ||
- NewEnquiry->RebuildFlag
- == DAC960_V1_BackgroundRebuildInProgress ||
- OldEnquiry->RebuildFlag == DAC960_V1_StandbyRebuildInProgress ||
- OldEnquiry->RebuildFlag == DAC960_V1_BackgroundRebuildInProgress)
- {
- Controller->V1.NeedRebuildProgress = true;
- Controller->V1.RebuildProgressFirst =
- (NewEnquiry->CriticalLogicalDriveCount <
- OldEnquiry->CriticalLogicalDriveCount);
- }
- if (OldEnquiry->RebuildFlag == DAC960_V1_BackgroundCheckInProgress)
- switch (NewEnquiry->RebuildFlag)
- {
- case DAC960_V1_NoStandbyRebuildOrCheckInProgress:
- DAC960_Progress("Consistency Check Completed Successfully\n",
- Controller);
- break;
- case DAC960_V1_StandbyRebuildInProgress:
- case DAC960_V1_BackgroundRebuildInProgress:
- break;
- case DAC960_V1_BackgroundCheckInProgress:
- Controller->V1.NeedConsistencyCheckProgress = true;
- break;
- case DAC960_V1_StandbyRebuildCompletedWithError:
- DAC960_Progress("Consistency Check Completed with Error\n",
- Controller);
- break;
- case DAC960_V1_BackgroundRebuildOrCheckFailed_DriveFailed:
- DAC960_Progress("Consistency Check Failed - "
- "Physical Device Failed\n", Controller);
- break;
- case DAC960_V1_BackgroundRebuildOrCheckFailed_LogicalDriveFailed:
- DAC960_Progress("Consistency Check Failed - "
- "Logical Drive Failed\n", Controller);
- break;
- case DAC960_V1_BackgroundRebuildOrCheckFailed_OtherCauses:
- DAC960_Progress("Consistency Check Failed - Other Causes\n",
- Controller);
- break;
- case DAC960_V1_BackgroundRebuildOrCheckSuccessfullyTerminated:
- DAC960_Progress("Consistency Check Successfully Terminated\n",
- Controller);
- break;
- }
- else if (NewEnquiry->RebuildFlag
- == DAC960_V1_BackgroundCheckInProgress)
- Controller->V1.NeedConsistencyCheckProgress = true;
- Controller->MonitoringAlertMode =
- (NewEnquiry->CriticalLogicalDriveCount > 0 ||
- NewEnquiry->OfflineLogicalDriveCount > 0 ||
- NewEnquiry->DeadDriveCount > 0);
- if (NewEnquiry->RebuildFlag > DAC960_V1_BackgroundCheckInProgress)
- {
- Controller->V1.PendingRebuildFlag = NewEnquiry->RebuildFlag;
- Controller->V1.RebuildFlagPending = true;
- }
- memcpy(&Controller->V1.Enquiry, &Controller->V1.NewEnquiry,
- sizeof(DAC960_V1_Enquiry_T));
- }
- else if (CommandOpcode == DAC960_V1_PerformEventLogOperation)
- {
- static char
- *DAC960_EventMessages[] =
- { "killed because write recovery failed",
- "killed because of SCSI bus reset failure",
- "killed because of double check condition",
- "killed because it was removed",
- "killed because of gross error on SCSI chip",
- "killed because of bad tag returned from drive",
- "killed because of timeout on SCSI command",
- "killed because of reset SCSI command issued from system",
- "killed because busy or parity error count exceeded limit",
- "killed because of 'kill drive' command from system",
- "killed because of selection timeout",
- "killed due to SCSI phase sequence error",
- "killed due to unknown status" };
- DAC960_V1_EventLogEntry_T *EventLogEntry =
- Controller->V1.EventLogEntry;
- if (EventLogEntry->SequenceNumber ==
- Controller->V1.OldEventLogSequenceNumber)
- {
- unsigned char SenseKey = EventLogEntry->SenseKey;
- unsigned char AdditionalSenseCode =
- EventLogEntry->AdditionalSenseCode;
- unsigned char AdditionalSenseCodeQualifier =
- EventLogEntry->AdditionalSenseCodeQualifier;
- if (SenseKey == DAC960_SenseKey_VendorSpecific &&
- AdditionalSenseCode == 0x80 &&
- AdditionalSenseCodeQualifier <
- ARRAY_SIZE(DAC960_EventMessages))
- DAC960_Critical("Physical Device %d:%d %s\n", Controller,
- EventLogEntry->Channel,
- EventLogEntry->TargetID,
- DAC960_EventMessages[
- AdditionalSenseCodeQualifier]);
- else if (SenseKey == DAC960_SenseKey_UnitAttention &&
- AdditionalSenseCode == 0x29)
- {
- if (Controller->MonitoringTimerCount > 0)
- Controller->V1.DeviceResetCount[EventLogEntry->Channel]
- [EventLogEntry->TargetID]++;
- }
- else if (!(SenseKey == DAC960_SenseKey_NoSense ||
- (SenseKey == DAC960_SenseKey_NotReady &&
- AdditionalSenseCode == 0x04 &&
- (AdditionalSenseCodeQualifier == 0x01 ||
- AdditionalSenseCodeQualifier == 0x02))))
- {
- DAC960_Critical("Physical Device %d:%d Error Log: "
- "Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
- Controller,
- EventLogEntry->Channel,
- EventLogEntry->TargetID,
- SenseKey,
- AdditionalSenseCode,
- AdditionalSenseCodeQualifier);
- DAC960_Critical("Physical Device %d:%d Error Log: "
- "Information = %02X%02X%02X%02X "
- "%02X%02X%02X%02X\n",
- Controller,
- EventLogEntry->Channel,
- EventLogEntry->TargetID,
- EventLogEntry->Information[0],
- EventLogEntry->Information[1],
- EventLogEntry->Information[2],
- EventLogEntry->Information[3],
- EventLogEntry->CommandSpecificInformation[0],
- EventLogEntry->CommandSpecificInformation[1],
- EventLogEntry->CommandSpecificInformation[2],
- EventLogEntry->CommandSpecificInformation[3]);
- }
- }
- Controller->V1.OldEventLogSequenceNumber++;
- }
- else if (CommandOpcode == DAC960_V1_GetErrorTable)
- {
- DAC960_V1_ErrorTable_T *OldErrorTable = &Controller->V1.ErrorTable;
- DAC960_V1_ErrorTable_T *NewErrorTable = Controller->V1.NewErrorTable;
- int Channel, TargetID;
- for (Channel = 0; Channel < Controller->Channels; Channel++)
- for (TargetID = 0; TargetID < Controller->Targets; TargetID++)
- {
- DAC960_V1_ErrorTableEntry_T *NewErrorEntry =
- &NewErrorTable->ErrorTableEntries[Channel][TargetID];
- DAC960_V1_ErrorTableEntry_T *OldErrorEntry =
- &OldErrorTable->ErrorTableEntries[Channel][TargetID];
- if ((NewErrorEntry->ParityErrorCount !=
- OldErrorEntry->ParityErrorCount) ||
- (NewErrorEntry->SoftErrorCount !=
- OldErrorEntry->SoftErrorCount) ||
- (NewErrorEntry->HardErrorCount !=
- OldErrorEntry->HardErrorCount) ||
- (NewErrorEntry->MiscErrorCount !=
- OldErrorEntry->MiscErrorCount))
- DAC960_Critical("Physical Device %d:%d Errors: "
- "Parity = %d, Soft = %d, "
- "Hard = %d, Misc = %d\n",
- Controller, Channel, TargetID,
- NewErrorEntry->ParityErrorCount,
- NewErrorEntry->SoftErrorCount,
- NewErrorEntry->HardErrorCount,
- NewErrorEntry->MiscErrorCount);
- }
- memcpy(&Controller->V1.ErrorTable, Controller->V1.NewErrorTable,
- sizeof(DAC960_V1_ErrorTable_T));
- }
- else if (CommandOpcode == DAC960_V1_GetDeviceState)
- {
- DAC960_V1_DeviceState_T *OldDeviceState =
- &Controller->V1.DeviceState[Controller->V1.DeviceStateChannel]
- [Controller->V1.DeviceStateTargetID];
- DAC960_V1_DeviceState_T *NewDeviceState =
- Controller->V1.NewDeviceState;
- if (NewDeviceState->DeviceState != OldDeviceState->DeviceState)
- DAC960_Critical("Physical Device %d:%d is now %s\n", Controller,
- Controller->V1.DeviceStateChannel,
- Controller->V1.DeviceStateTargetID,
- (NewDeviceState->DeviceState
- == DAC960_V1_Device_Dead
- ? "DEAD"
- : NewDeviceState->DeviceState
- == DAC960_V1_Device_WriteOnly
- ? "WRITE-ONLY"
- : NewDeviceState->DeviceState
- == DAC960_V1_Device_Online
- ? "ONLINE" : "STANDBY"));
- if (OldDeviceState->DeviceState == DAC960_V1_Device_Dead &&
- NewDeviceState->DeviceState != DAC960_V1_Device_Dead)
- {
- Controller->V1.NeedDeviceInquiryInformation = true;
- Controller->V1.NeedDeviceSerialNumberInformation = true;
- Controller->V1.DeviceResetCount
- [Controller->V1.DeviceStateChannel]
- [Controller->V1.DeviceStateTargetID] = 0;
- }
- memcpy(OldDeviceState, NewDeviceState,
- sizeof(DAC960_V1_DeviceState_T));
- }
- else if (CommandOpcode == DAC960_V1_GetLogicalDriveInformation)
- {
- int LogicalDriveNumber;
- for (LogicalDriveNumber = 0;
- LogicalDriveNumber < Controller->LogicalDriveCount;
- LogicalDriveNumber++)
- {
- DAC960_V1_LogicalDriveInformation_T *OldLogicalDriveInformation =
- &Controller->V1.LogicalDriveInformation[LogicalDriveNumber];
- DAC960_V1_LogicalDriveInformation_T *NewLogicalDriveInformation =
- &(*Controller->V1.NewLogicalDriveInformation)[LogicalDriveNumber];
- if (NewLogicalDriveInformation->LogicalDriveState !=
- OldLogicalDriveInformation->LogicalDriveState)
- DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
- "is now %s\n", Controller,
- LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber,
- (NewLogicalDriveInformation->LogicalDriveState
- == DAC960_V1_LogicalDrive_Online
- ? "ONLINE"
- : NewLogicalDriveInformation->LogicalDriveState
- == DAC960_V1_LogicalDrive_Critical
- ? "CRITICAL" : "OFFLINE"));
- if (NewLogicalDriveInformation->WriteBack !=
- OldLogicalDriveInformation->WriteBack)
- DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
- "is now %s\n", Controller,
- LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber,
- (NewLogicalDriveInformation->WriteBack
- ? "WRITE BACK" : "WRITE THRU"));
- }
- memcpy(&Controller->V1.LogicalDriveInformation,
- Controller->V1.NewLogicalDriveInformation,
- sizeof(DAC960_V1_LogicalDriveInformationArray_T));
- }
- else if (CommandOpcode == DAC960_V1_GetRebuildProgress)
- {
- unsigned int LogicalDriveNumber =
- Controller->V1.RebuildProgress->LogicalDriveNumber;
- unsigned int LogicalDriveSize =
- Controller->V1.RebuildProgress->LogicalDriveSize;
- unsigned int BlocksCompleted =
- LogicalDriveSize - Controller->V1.RebuildProgress->RemainingBlocks;
- if (CommandStatus == DAC960_V1_NoRebuildOrCheckInProgress &&
- Controller->V1.LastRebuildStatus == DAC960_V1_NormalCompletion)
- CommandStatus = DAC960_V1_RebuildSuccessful;
- switch (CommandStatus)
- {
- case DAC960_V1_NormalCompletion:
- Controller->EphemeralProgressMessage = true;
- DAC960_Progress("Rebuild in Progress: "
- "Logical Drive %d (/dev/rd/c%dd%d) "
- "%d%% completed\n",
- Controller, LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber,
- (100 * (BlocksCompleted >> 7))
- / (LogicalDriveSize >> 7));
- Controller->EphemeralProgressMessage = false;
- break;
- case DAC960_V1_RebuildFailed_LogicalDriveFailure:
- DAC960_Progress("Rebuild Failed due to "
- "Logical Drive Failure\n", Controller);
- break;
- case DAC960_V1_RebuildFailed_BadBlocksOnOther:
- DAC960_Progress("Rebuild Failed due to "
- "Bad Blocks on Other Drives\n", Controller);
- break;
- case DAC960_V1_RebuildFailed_NewDriveFailed:
- DAC960_Progress("Rebuild Failed due to "
- "Failure of Drive Being Rebuilt\n", Controller);
- break;
- case DAC960_V1_NoRebuildOrCheckInProgress:
- break;
- case DAC960_V1_RebuildSuccessful:
- DAC960_Progress("Rebuild Completed Successfully\n", Controller);
- break;
- case DAC960_V1_RebuildSuccessfullyTerminated:
- DAC960_Progress("Rebuild Successfully Terminated\n", Controller);
- break;
- }
- Controller->V1.LastRebuildStatus = CommandStatus;
- if (CommandType != DAC960_MonitoringCommand &&
- Controller->V1.RebuildStatusPending)
- {
- Command->V1.CommandStatus = Controller->V1.PendingRebuildStatus;
- Controller->V1.RebuildStatusPending = false;
- }
- else if (CommandType == DAC960_MonitoringCommand &&
- CommandStatus != DAC960_V1_NormalCompletion &&
- CommandStatus != DAC960_V1_NoRebuildOrCheckInProgress)
- {
- Controller->V1.PendingRebuildStatus = CommandStatus;
- Controller->V1.RebuildStatusPending = true;
- }
- }
- else if (CommandOpcode == DAC960_V1_RebuildStat)
- {
- unsigned int LogicalDriveNumber =
- Controller->V1.RebuildProgress->LogicalDriveNumber;
- unsigned int LogicalDriveSize =
- Controller->V1.RebuildProgress->LogicalDriveSize;
- unsigned int BlocksCompleted =
- LogicalDriveSize - Controller->V1.RebuildProgress->RemainingBlocks;
- if (CommandStatus == DAC960_V1_NormalCompletion)
- {
- Controller->EphemeralProgressMessage = true;
- DAC960_Progress("Consistency Check in Progress: "
- "Logical Drive %d (/dev/rd/c%dd%d) "
- "%d%% completed\n",
- Controller, LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber,
- (100 * (BlocksCompleted >> 7))
- / (LogicalDriveSize >> 7));
- Controller->EphemeralProgressMessage = false;
- }
- }
- else if (CommandOpcode == DAC960_V1_BackgroundInitializationControl)
- {
- unsigned int LogicalDriveNumber =
- Controller->V1.BackgroundInitializationStatus->LogicalDriveNumber;
- unsigned int LogicalDriveSize =
- Controller->V1.BackgroundInitializationStatus->LogicalDriveSize;
- unsigned int BlocksCompleted =
- Controller->V1.BackgroundInitializationStatus->BlocksCompleted;
- switch (CommandStatus)
- {
- case DAC960_V1_NormalCompletion:
- switch (Controller->V1.BackgroundInitializationStatus->Status)
- {
- case DAC960_V1_BackgroundInitializationInvalid:
- break;
- case DAC960_V1_BackgroundInitializationStarted:
- DAC960_Progress("Background Initialization Started\n",
- Controller);
- break;
- case DAC960_V1_BackgroundInitializationInProgress:
- if (BlocksCompleted ==
- Controller->V1.LastBackgroundInitializationStatus.
- BlocksCompleted &&
- LogicalDriveNumber ==
- Controller->V1.LastBackgroundInitializationStatus.
- LogicalDriveNumber)
- break;
- Controller->EphemeralProgressMessage = true;
- DAC960_Progress("Background Initialization in Progress: "
- "Logical Drive %d (/dev/rd/c%dd%d) "
- "%d%% completed\n",
- Controller, LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber,
- (100 * (BlocksCompleted >> 7))
- / (LogicalDriveSize >> 7));
- Controller->EphemeralProgressMessage = false;
- break;
- case DAC960_V1_BackgroundInitializationSuspended:
- DAC960_Progress("Background Initialization Suspended\n",
- Controller);
- break;
- case DAC960_V1_BackgroundInitializationCancelled:
- DAC960_Progress("Background Initialization Cancelled\n",
- Controller);
- break;
- }
- memcpy(&Controller->V1.LastBackgroundInitializationStatus,
- Controller->V1.BackgroundInitializationStatus,
- sizeof(DAC960_V1_BackgroundInitializationStatus_T));
- break;
- case DAC960_V1_BackgroundInitSuccessful:
- if (Controller->V1.BackgroundInitializationStatus->Status ==
- DAC960_V1_BackgroundInitializationInProgress)
- DAC960_Progress("Background Initialization "
- "Completed Successfully\n", Controller);
- Controller->V1.BackgroundInitializationStatus->Status =
- DAC960_V1_BackgroundInitializationInvalid;
- break;
- case DAC960_V1_BackgroundInitAborted:
- if (Controller->V1.BackgroundInitializationStatus->Status ==
- DAC960_V1_BackgroundInitializationInProgress)
- DAC960_Progress("Background Initialization Aborted\n",
- Controller);
- Controller->V1.BackgroundInitializationStatus->Status =
- DAC960_V1_BackgroundInitializationInvalid;
- break;
- case DAC960_V1_NoBackgroundInitInProgress:
- break;
- }
- }
- else if (CommandOpcode == DAC960_V1_DCDB)
- {
-	  /*
-	    This is a bit ugly.
-
-	    The InquiryStandardData and the InquiryUnitSerialNumber
-	    information retrieval operations BOTH use DAC960_V1_DCDB
-	    commands, so the test above cannot distinguish between the
-	    two cases.
-
-	    Instead, we rely on the order of code later in this
-	    function to ensure that DeviceInquiryInformation commands
-	    are submitted before DeviceSerialNumber commands.
-	  */
- if (Controller->V1.NeedDeviceInquiryInformation)
- {
- DAC960_SCSI_Inquiry_T *InquiryStandardData =
- &Controller->V1.InquiryStandardData
- [Controller->V1.DeviceStateChannel]
- [Controller->V1.DeviceStateTargetID];
- if (CommandStatus != DAC960_V1_NormalCompletion)
- {
- memset(InquiryStandardData, 0,
- sizeof(DAC960_SCSI_Inquiry_T));
- InquiryStandardData->PeripheralDeviceType = 0x1F;
- }
- else
- memcpy(InquiryStandardData,
- Controller->V1.NewInquiryStandardData,
- sizeof(DAC960_SCSI_Inquiry_T));
- Controller->V1.NeedDeviceInquiryInformation = false;
- }
- else if (Controller->V1.NeedDeviceSerialNumberInformation)
- {
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
- &Controller->V1.InquiryUnitSerialNumber
- [Controller->V1.DeviceStateChannel]
- [Controller->V1.DeviceStateTargetID];
- if (CommandStatus != DAC960_V1_NormalCompletion)
- {
- memset(InquiryUnitSerialNumber, 0,
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
- InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
- }
- else
- memcpy(InquiryUnitSerialNumber,
- Controller->V1.NewInquiryUnitSerialNumber,
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
- Controller->V1.NeedDeviceSerialNumberInformation = false;
- }
- }
-      /*
-	Begin submitting new monitoring commands, in priority order: pending
-	event log entries, the error table, rebuild progress (when it is to
-	be reported first), the device state scan (including standard Inquiry
-	and Unit Serial Number data), logical drive information, rebuild
-	progress, consistency check progress, and background initialization
-	status. Each branch queues a single command and returns, so the chain
-	advances one step per completed monitoring command.
-      */
- if (Controller->V1.NewEventLogSequenceNumber
- - Controller->V1.OldEventLogSequenceNumber > 0)
- {
- Command->V1.CommandMailbox.Type3E.CommandOpcode =
- DAC960_V1_PerformEventLogOperation;
- Command->V1.CommandMailbox.Type3E.OperationType =
- DAC960_V1_GetEventLogEntry;
- Command->V1.CommandMailbox.Type3E.OperationQualifier = 1;
- Command->V1.CommandMailbox.Type3E.SequenceNumber =
- Controller->V1.OldEventLogSequenceNumber;
- Command->V1.CommandMailbox.Type3E.BusAddress =
- Controller->V1.EventLogEntryDMA;
- DAC960_QueueCommand(Command);
- return;
- }
- if (Controller->V1.NeedErrorTableInformation)
- {
- Controller->V1.NeedErrorTableInformation = false;
- Command->V1.CommandMailbox.Type3.CommandOpcode =
- DAC960_V1_GetErrorTable;
- Command->V1.CommandMailbox.Type3.BusAddress =
- Controller->V1.NewErrorTableDMA;
- DAC960_QueueCommand(Command);
- return;
- }
- if (Controller->V1.NeedRebuildProgress &&
- Controller->V1.RebuildProgressFirst)
- {
- Controller->V1.NeedRebuildProgress = false;
- Command->V1.CommandMailbox.Type3.CommandOpcode =
- DAC960_V1_GetRebuildProgress;
- Command->V1.CommandMailbox.Type3.BusAddress =
- Controller->V1.RebuildProgressDMA;
- DAC960_QueueCommand(Command);
- return;
- }
- if (Controller->V1.NeedDeviceStateInformation)
- {
- if (Controller->V1.NeedDeviceInquiryInformation)
- {
- DAC960_V1_DCDB_T *DCDB = Controller->V1.MonitoringDCDB;
- dma_addr_t DCDB_DMA = Controller->V1.MonitoringDCDB_DMA;
-
- dma_addr_t NewInquiryStandardDataDMA =
- Controller->V1.NewInquiryStandardDataDMA;
-
- Command->V1.CommandMailbox.Type3.CommandOpcode = DAC960_V1_DCDB;
- Command->V1.CommandMailbox.Type3.BusAddress = DCDB_DMA;
- DCDB->Channel = Controller->V1.DeviceStateChannel;
- DCDB->TargetID = Controller->V1.DeviceStateTargetID;
- DCDB->Direction = DAC960_V1_DCDB_DataTransferDeviceToSystem;
- DCDB->EarlyStatus = false;
- DCDB->Timeout = DAC960_V1_DCDB_Timeout_10_seconds;
- DCDB->NoAutomaticRequestSense = false;
- DCDB->DisconnectPermitted = true;
- DCDB->TransferLength = sizeof(DAC960_SCSI_Inquiry_T);
- DCDB->BusAddress = NewInquiryStandardDataDMA;
- DCDB->CDBLength = 6;
- DCDB->TransferLengthHigh4 = 0;
- DCDB->SenseLength = sizeof(DCDB->SenseData);
- DCDB->CDB[0] = 0x12; /* INQUIRY */
- DCDB->CDB[1] = 0; /* EVPD = 0 */
- DCDB->CDB[2] = 0; /* Page Code */
- DCDB->CDB[3] = 0; /* Reserved */
- DCDB->CDB[4] = sizeof(DAC960_SCSI_Inquiry_T);
- DCDB->CDB[5] = 0; /* Control */
- DAC960_QueueCommand(Command);
- return;
- }
- if (Controller->V1.NeedDeviceSerialNumberInformation)
- {
- DAC960_V1_DCDB_T *DCDB = Controller->V1.MonitoringDCDB;
- dma_addr_t DCDB_DMA = Controller->V1.MonitoringDCDB_DMA;
- dma_addr_t NewInquiryUnitSerialNumberDMA =
- Controller->V1.NewInquiryUnitSerialNumberDMA;
-
- Command->V1.CommandMailbox.Type3.CommandOpcode = DAC960_V1_DCDB;
- Command->V1.CommandMailbox.Type3.BusAddress = DCDB_DMA;
- DCDB->Channel = Controller->V1.DeviceStateChannel;
- DCDB->TargetID = Controller->V1.DeviceStateTargetID;
- DCDB->Direction = DAC960_V1_DCDB_DataTransferDeviceToSystem;
- DCDB->EarlyStatus = false;
- DCDB->Timeout = DAC960_V1_DCDB_Timeout_10_seconds;
- DCDB->NoAutomaticRequestSense = false;
- DCDB->DisconnectPermitted = true;
- DCDB->TransferLength =
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
- DCDB->BusAddress = NewInquiryUnitSerialNumberDMA;
- DCDB->CDBLength = 6;
- DCDB->TransferLengthHigh4 = 0;
- DCDB->SenseLength = sizeof(DCDB->SenseData);
- DCDB->CDB[0] = 0x12; /* INQUIRY */
- DCDB->CDB[1] = 1; /* EVPD = 1 */
- DCDB->CDB[2] = 0x80; /* Page Code */
- DCDB->CDB[3] = 0; /* Reserved */
- DCDB->CDB[4] = sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
- DCDB->CDB[5] = 0; /* Control */
- DAC960_QueueCommand(Command);
- return;
- }
- if (Controller->V1.StartDeviceStateScan)
- {
- Controller->V1.DeviceStateChannel = 0;
- Controller->V1.DeviceStateTargetID = 0;
- Controller->V1.StartDeviceStateScan = false;
- }
- else if (++Controller->V1.DeviceStateTargetID == Controller->Targets)
- {
- Controller->V1.DeviceStateChannel++;
- Controller->V1.DeviceStateTargetID = 0;
- }
- if (Controller->V1.DeviceStateChannel < Controller->Channels)
- {
- Controller->V1.NewDeviceState->DeviceState =
- DAC960_V1_Device_Dead;
- Command->V1.CommandMailbox.Type3D.CommandOpcode =
- DAC960_V1_GetDeviceState;
- Command->V1.CommandMailbox.Type3D.Channel =
- Controller->V1.DeviceStateChannel;
- Command->V1.CommandMailbox.Type3D.TargetID =
- Controller->V1.DeviceStateTargetID;
- Command->V1.CommandMailbox.Type3D.BusAddress =
- Controller->V1.NewDeviceStateDMA;
- DAC960_QueueCommand(Command);
- return;
- }
- Controller->V1.NeedDeviceStateInformation = false;
- }
- if (Controller->V1.NeedLogicalDriveInformation)
- {
- Controller->V1.NeedLogicalDriveInformation = false;
- Command->V1.CommandMailbox.Type3.CommandOpcode =
- DAC960_V1_GetLogicalDriveInformation;
- Command->V1.CommandMailbox.Type3.BusAddress =
- Controller->V1.NewLogicalDriveInformationDMA;
- DAC960_QueueCommand(Command);
- return;
- }
- if (Controller->V1.NeedRebuildProgress)
- {
- Controller->V1.NeedRebuildProgress = false;
- Command->V1.CommandMailbox.Type3.CommandOpcode =
- DAC960_V1_GetRebuildProgress;
- Command->V1.CommandMailbox.Type3.BusAddress =
- Controller->V1.RebuildProgressDMA;
- DAC960_QueueCommand(Command);
- return;
- }
- if (Controller->V1.NeedConsistencyCheckProgress)
- {
- Controller->V1.NeedConsistencyCheckProgress = false;
- Command->V1.CommandMailbox.Type3.CommandOpcode =
- DAC960_V1_RebuildStat;
- Command->V1.CommandMailbox.Type3.BusAddress =
- Controller->V1.RebuildProgressDMA;
- DAC960_QueueCommand(Command);
- return;
- }
- if (Controller->V1.NeedBackgroundInitializationStatus)
- {
- Controller->V1.NeedBackgroundInitializationStatus = false;
- Command->V1.CommandMailbox.Type3B.CommandOpcode =
- DAC960_V1_BackgroundInitializationControl;
- Command->V1.CommandMailbox.Type3B.CommandOpcode2 = 0x20;
- Command->V1.CommandMailbox.Type3B.BusAddress =
- Controller->V1.BackgroundInitializationStatusDMA;
- DAC960_QueueCommand(Command);
- return;
- }
- Controller->MonitoringTimerCount++;
- Controller->MonitoringTimer.expires =
- jiffies + DAC960_MonitoringTimerInterval;
- add_timer(&Controller->MonitoringTimer);
- }
- if (CommandType == DAC960_ImmediateCommand)
- {
- complete(Command->Completion);
- Command->Completion = NULL;
- return;
- }
- if (CommandType == DAC960_QueuedCommand)
- {
- DAC960_V1_KernelCommand_T *KernelCommand = Command->V1.KernelCommand;
- KernelCommand->CommandStatus = Command->V1.CommandStatus;
- Command->V1.KernelCommand = NULL;
- if (CommandOpcode == DAC960_V1_DCDB)
- Controller->V1.DirectCommandActive[KernelCommand->DCDB->Channel]
- [KernelCommand->DCDB->TargetID] =
- false;
- DAC960_DeallocateCommand(Command);
- KernelCommand->CompletionFunction(KernelCommand);
- return;
- }
- /*
- Queue a Status Monitoring Command to the Controller using the just
- completed Command if one was deferred previously due to lack of a
- free Command when the Monitoring Timer Function was called.
- */
- if (Controller->MonitoringCommandDeferred)
- {
- Controller->MonitoringCommandDeferred = false;
- DAC960_V1_QueueMonitoringCommand(Command);
- return;
- }
- /*
- Deallocate the Command.
- */
- DAC960_DeallocateCommand(Command);
- /*
- Wake up any processes waiting on a free Command.
- */
- wake_up(&Controller->CommandWaitQueue);
-}
-
-
-/*
- DAC960_V2_ReadWriteError prints an appropriate error message for Command
- when an error occurs on a Read or Write operation.
-*/
-
-static void DAC960_V2_ReadWriteError(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- static const unsigned char *SenseErrors[] = {
- "NO SENSE", "RECOVERED ERROR",
- "NOT READY", "MEDIUM ERROR",
- "HARDWARE ERROR", "ILLEGAL REQUEST",
- "UNIT ATTENTION", "DATA PROTECT",
- "BLANK CHECK", "VENDOR-SPECIFIC",
- "COPY ABORTED", "ABORTED COMMAND",
- "EQUAL", "VOLUME OVERFLOW",
- "MISCOMPARE", "RESERVED"
- };
- unsigned char *CommandName = "UNKNOWN";
- switch (Command->CommandType)
- {
- case DAC960_ReadCommand:
- case DAC960_ReadRetryCommand:
- CommandName = "READ";
- break;
- case DAC960_WriteCommand:
- case DAC960_WriteRetryCommand:
- CommandName = "WRITE";
- break;
- case DAC960_MonitoringCommand:
- case DAC960_ImmediateCommand:
- case DAC960_QueuedCommand:
- break;
- }
- DAC960_Error("Error Condition %s on %s:\n", Controller,
- SenseErrors[Command->V2.RequestSense->SenseKey], CommandName);
- DAC960_Error(" /dev/rd/c%dd%d: absolute blocks %u..%u\n",
- Controller, Controller->ControllerNumber,
- Command->LogicalDriveNumber, Command->BlockNumber,
- Command->BlockNumber + Command->BlockCount - 1);
-}
-
-
-/*
- DAC960_V2_ReportEvent prints an appropriate message when a Controller Event
- occurs.
-*/
-
-static void DAC960_V2_ReportEvent(DAC960_Controller_T *Controller,
- DAC960_V2_Event_T *Event)
-{
- DAC960_SCSI_RequestSense_T *RequestSense =
- (DAC960_SCSI_RequestSense_T *) &Event->RequestSenseData;
- unsigned char MessageBuffer[DAC960_LineBufferSize];
- static struct { int EventCode; unsigned char *EventMessage; } EventList[] =
- { /* Physical Device Events (0x0000 - 0x007F) */
- { 0x0001, "P Online" },
- { 0x0002, "P Standby" },
- { 0x0005, "P Automatic Rebuild Started" },
- { 0x0006, "P Manual Rebuild Started" },
- { 0x0007, "P Rebuild Completed" },
- { 0x0008, "P Rebuild Cancelled" },
- { 0x0009, "P Rebuild Failed for Unknown Reasons" },
- { 0x000A, "P Rebuild Failed due to New Physical Device" },
- { 0x000B, "P Rebuild Failed due to Logical Drive Failure" },
- { 0x000C, "S Offline" },
- { 0x000D, "P Found" },
- { 0x000E, "P Removed" },
- { 0x000F, "P Unconfigured" },
- { 0x0010, "P Expand Capacity Started" },
- { 0x0011, "P Expand Capacity Completed" },
- { 0x0012, "P Expand Capacity Failed" },
- { 0x0013, "P Command Timed Out" },
- { 0x0014, "P Command Aborted" },
- { 0x0015, "P Command Retried" },
- { 0x0016, "P Parity Error" },
- { 0x0017, "P Soft Error" },
- { 0x0018, "P Miscellaneous Error" },
- { 0x0019, "P Reset" },
- { 0x001A, "P Active Spare Found" },
- { 0x001B, "P Warm Spare Found" },
- { 0x001C, "S Sense Data Received" },
- { 0x001D, "P Initialization Started" },
- { 0x001E, "P Initialization Completed" },
- { 0x001F, "P Initialization Failed" },
- { 0x0020, "P Initialization Cancelled" },
- { 0x0021, "P Failed because Write Recovery Failed" },
- { 0x0022, "P Failed because SCSI Bus Reset Failed" },
- { 0x0023, "P Failed because of Double Check Condition" },
- { 0x0024, "P Failed because Device Cannot Be Accessed" },
- { 0x0025, "P Failed because of Gross Error on SCSI Processor" },
- { 0x0026, "P Failed because of Bad Tag from Device" },
- { 0x0027, "P Failed because of Command Timeout" },
- { 0x0028, "P Failed because of System Reset" },
- { 0x0029, "P Failed because of Busy Status or Parity Error" },
- { 0x002A, "P Failed because Host Set Device to Failed State" },
- { 0x002B, "P Failed because of Selection Timeout" },
- { 0x002C, "P Failed because of SCSI Bus Phase Error" },
- { 0x002D, "P Failed because Device Returned Unknown Status" },
- { 0x002E, "P Failed because Device Not Ready" },
- { 0x002F, "P Failed because Device Not Found at Startup" },
- { 0x0030, "P Failed because COD Write Operation Failed" },
- { 0x0031, "P Failed because BDT Write Operation Failed" },
- { 0x0039, "P Missing at Startup" },
- { 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" },
- { 0x003C, "P Temporarily Offline Device Automatically Made Online" },
- { 0x003D, "P Standby Rebuild Started" },
- /* Logical Device Events (0x0080 - 0x00FF) */
- { 0x0080, "M Consistency Check Started" },
- { 0x0081, "M Consistency Check Completed" },
- { 0x0082, "M Consistency Check Cancelled" },
- { 0x0083, "M Consistency Check Completed With Errors" },
- { 0x0084, "M Consistency Check Failed due to Logical Drive Failure" },
- { 0x0085, "M Consistency Check Failed due to Physical Device Failure" },
- { 0x0086, "L Offline" },
- { 0x0087, "L Critical" },
- { 0x0088, "L Online" },
- { 0x0089, "M Automatic Rebuild Started" },
- { 0x008A, "M Manual Rebuild Started" },
- { 0x008B, "M Rebuild Completed" },
- { 0x008C, "M Rebuild Cancelled" },
- { 0x008D, "M Rebuild Failed for Unknown Reasons" },
- { 0x008E, "M Rebuild Failed due to New Physical Device" },
- { 0x008F, "M Rebuild Failed due to Logical Drive Failure" },
- { 0x0090, "M Initialization Started" },
- { 0x0091, "M Initialization Completed" },
- { 0x0092, "M Initialization Cancelled" },
- { 0x0093, "M Initialization Failed" },
- { 0x0094, "L Found" },
- { 0x0095, "L Deleted" },
- { 0x0096, "M Expand Capacity Started" },
- { 0x0097, "M Expand Capacity Completed" },
- { 0x0098, "M Expand Capacity Failed" },
- { 0x0099, "L Bad Block Found" },
- { 0x009A, "L Size Changed" },
- { 0x009B, "L Type Changed" },
- { 0x009C, "L Bad Data Block Found" },
- { 0x009E, "L Read of Data Block in BDT" },
- { 0x009F, "L Write Back Data for Disk Block Lost" },
- { 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" },
- { 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" },
- { 0x00A2, "L Standby Rebuild Started" },
- /* Fault Management Events (0x0100 - 0x017F) */
- { 0x0140, "E Fan %d Failed" },
- { 0x0141, "E Fan %d OK" },
- { 0x0142, "E Fan %d Not Present" },
- { 0x0143, "E Power Supply %d Failed" },
- { 0x0144, "E Power Supply %d OK" },
- { 0x0145, "E Power Supply %d Not Present" },
- { 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" },
- { 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" },
- { 0x0148, "E Temperature Sensor %d Temperature Normal" },
- { 0x0149, "E Temperature Sensor %d Not Present" },
- { 0x014A, "E Enclosure Management Unit %d Access Critical" },
- { 0x014B, "E Enclosure Management Unit %d Access OK" },
- { 0x014C, "E Enclosure Management Unit %d Access Offline" },
- /* Controller Events (0x0180 - 0x01FF) */
- { 0x0181, "C Cache Write Back Error" },
- { 0x0188, "C Battery Backup Unit Found" },
- { 0x0189, "C Battery Backup Unit Charge Level Low" },
- { 0x018A, "C Battery Backup Unit Charge Level OK" },
- { 0x0193, "C Installation Aborted" },
- { 0x0195, "C Battery Backup Unit Physically Removed" },
- { 0x0196, "C Memory Error During Warm Boot" },
- { 0x019E, "C Memory Soft ECC Error Corrected" },
- { 0x019F, "C Memory Hard ECC Error Corrected" },
- { 0x01A2, "C Battery Backup Unit Failed" },
- { 0x01AB, "C Mirror Race Recovery Failed" },
- { 0x01AC, "C Mirror Race on Critical Drive" },
- /* Controller Internal Processor Events */
- { 0x0380, "C Internal Controller Hung" },
- { 0x0381, "C Internal Controller Firmware Breakpoint" },
- { 0x0390, "C Internal Controller i960 Processor Specific Error" },
- { 0x03A0, "C Internal Controller StrongARM Processor Specific Error" },
- { 0, "" } };
- int EventListIndex = 0, EventCode;
- unsigned char EventType, *EventMessage;
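-  /*
-    A physical device "Sense Data Received" event (0x001C) carrying
-    vendor-specific sense data (ASC 0x80 or 0x81) is remapped to a more
-    specific event code taken from the sense bytes before the table lookup.
-  */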
- if (Event->EventCode == 0x1C &&
- RequestSense->SenseKey == DAC960_SenseKey_VendorSpecific &&
- (RequestSense->AdditionalSenseCode == 0x80 ||
- RequestSense->AdditionalSenseCode == 0x81))
- Event->EventCode = ((RequestSense->AdditionalSenseCode - 0x80) << 8) |
- RequestSense->AdditionalSenseCodeQualifier;
- while (true)
- {
- EventCode = EventList[EventListIndex].EventCode;
- if (EventCode == Event->EventCode || EventCode == 0) break;
- EventListIndex++;
- }
-  if (EventCode == 0)
-    {
-      DAC960_Critical("Unknown Controller Event Code %04X\n",
-		      Controller, Event->EventCode);
-      return;
-    }
-  /* Only decode the message once the event is known, so that the empty
-     terminator entry is never indexed past its first character. */
-  EventType = EventList[EventListIndex].EventMessage[0];
-  EventMessage = &EventList[EventListIndex].EventMessage[2];
- switch (EventType)
- {
- case 'P':
- DAC960_Critical("Physical Device %d:%d %s\n", Controller,
- Event->Channel, Event->TargetID, EventMessage);
- break;
- case 'L':
- DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) %s\n", Controller,
- Event->LogicalUnit, Controller->ControllerNumber,
- Event->LogicalUnit, EventMessage);
- break;
- case 'M':
- DAC960_Progress("Logical Drive %d (/dev/rd/c%dd%d) %s\n", Controller,
- Event->LogicalUnit, Controller->ControllerNumber,
- Event->LogicalUnit, EventMessage);
- break;
- case 'S':
- if (RequestSense->SenseKey == DAC960_SenseKey_NoSense ||
- (RequestSense->SenseKey == DAC960_SenseKey_NotReady &&
- RequestSense->AdditionalSenseCode == 0x04 &&
- (RequestSense->AdditionalSenseCodeQualifier == 0x01 ||
- RequestSense->AdditionalSenseCodeQualifier == 0x02)))
- break;
- DAC960_Critical("Physical Device %d:%d %s\n", Controller,
- Event->Channel, Event->TargetID, EventMessage);
- DAC960_Critical("Physical Device %d:%d Request Sense: "
- "Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
- Controller,
- Event->Channel,
- Event->TargetID,
- RequestSense->SenseKey,
- RequestSense->AdditionalSenseCode,
- RequestSense->AdditionalSenseCodeQualifier);
- DAC960_Critical("Physical Device %d:%d Request Sense: "
- "Information = %02X%02X%02X%02X "
- "%02X%02X%02X%02X\n",
- Controller,
- Event->Channel,
- Event->TargetID,
- RequestSense->Information[0],
- RequestSense->Information[1],
- RequestSense->Information[2],
- RequestSense->Information[3],
- RequestSense->CommandSpecificInformation[0],
- RequestSense->CommandSpecificInformation[1],
- RequestSense->CommandSpecificInformation[2],
- RequestSense->CommandSpecificInformation[3]);
- break;
- case 'E':
- if (Controller->SuppressEnclosureMessages) break;
- sprintf(MessageBuffer, EventMessage, Event->LogicalUnit);
- DAC960_Critical("Enclosure %d %s\n", Controller,
- Event->TargetID, MessageBuffer);
- break;
- case 'C':
- DAC960_Critical("Controller %s\n", Controller, EventMessage);
- break;
- default:
- DAC960_Critical("Unknown Controller Event Code %04X\n",
- Controller, Event->EventCode);
- break;
- }
-}
-
-
-/*
- DAC960_V2_ReportProgress prints an appropriate progress message for
- Logical Device Long Operations.
-*/
-
-static void DAC960_V2_ReportProgress(DAC960_Controller_T *Controller,
- unsigned char *MessageString,
- unsigned int LogicalDeviceNumber,
- unsigned long BlocksCompleted,
- unsigned long LogicalDeviceSize)
-{
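-  /*
-    Both operands are scaled down by 128 before the multiply, presumably so
-    that 100 * BlocksCompleted cannot overflow on large logical devices.
-  */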
- Controller->EphemeralProgressMessage = true;
- DAC960_Progress("%s in Progress: Logical Drive %d (/dev/rd/c%dd%d) "
- "%d%% completed\n", Controller,
- MessageString,
- LogicalDeviceNumber,
- Controller->ControllerNumber,
- LogicalDeviceNumber,
- (100 * (BlocksCompleted >> 7)) / (LogicalDeviceSize >> 7));
- Controller->EphemeralProgressMessage = false;
-}
-
-
-/*
- DAC960_V2_ProcessCompletedCommand performs completion processing for Command
- for DAC960 V2 Firmware Controllers.
-*/
-
-static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- DAC960_CommandType_T CommandType = Command->CommandType;
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_IOCTL_Opcode_T IOCTLOpcode = CommandMailbox->Common.IOCTL_Opcode;
- DAC960_V2_CommandOpcode_T CommandOpcode = CommandMailbox->SCSI_10.CommandOpcode;
- DAC960_V2_CommandStatus_T CommandStatus = Command->V2.CommandStatus;
-
- if (CommandType == DAC960_ReadCommand ||
- CommandType == DAC960_WriteCommand)
- {
-
-#ifdef FORCE_RETRY_DEBUG
-      CommandStatus = DAC960_V2_AbormalCompletion;
-      Command->V2.RequestSense->SenseKey = DAC960_SenseKey_MediumError;
-#endif
-
- if (CommandStatus == DAC960_V2_NormalCompletion) {
-
- if (!DAC960_ProcessCompletedRequest(Command, true))
- BUG();
-
- } else if (Command->V2.RequestSense->SenseKey == DAC960_SenseKey_MediumError)
- {
- /*
- * break the command down into pieces and resubmit each
- * piece, hoping that some of them will succeed.
- */
- DAC960_queue_partial_rw(Command);
- return;
- }
- else
- {
- if (Command->V2.RequestSense->SenseKey != DAC960_SenseKey_NotReady)
- DAC960_V2_ReadWriteError(Command);
- /*
- Perform completion processing for all buffers in this I/O Request.
- */
- (void)DAC960_ProcessCompletedRequest(Command, false);
- }
- }
- else if (CommandType == DAC960_ReadRetryCommand ||
- CommandType == DAC960_WriteRetryCommand)
- {
- bool normal_completion;
-
-#ifdef FORCE_RETRY_FAILURE_DEBUG
- static int retry_count = 1;
-#endif
- /*
- Perform completion processing for the portion that was
- retried, and submit the next portion, if any.
- */
- normal_completion = true;
- if (CommandStatus != DAC960_V2_NormalCompletion) {
- normal_completion = false;
- if (Command->V2.RequestSense->SenseKey != DAC960_SenseKey_NotReady)
- DAC960_V2_ReadWriteError(Command);
- }
-
-#ifdef FORCE_RETRY_FAILURE_DEBUG
- if (!(++retry_count % 10000)) {
- printk("V2 error retry failure test\n");
- normal_completion = false;
- DAC960_V2_ReadWriteError(Command);
- }
-#endif
-
- if (!DAC960_ProcessCompletedRequest(Command, normal_completion)) {
- DAC960_queue_partial_rw(Command);
- return;
- }
- }
- else if (CommandType == DAC960_MonitoringCommand)
- {
- if (Controller->ShutdownMonitoringTimer)
- return;
- if (IOCTLOpcode == DAC960_V2_GetControllerInfo)
- {
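-	  /*
-	    A fresh controller snapshot triggers full logical and physical
-	    device information scans and recomputes the alert mode from the
-	    critical and offline device counts.
-	  */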
- DAC960_V2_ControllerInfo_T *NewControllerInfo =
- Controller->V2.NewControllerInformation;
- DAC960_V2_ControllerInfo_T *ControllerInfo =
- &Controller->V2.ControllerInformation;
- Controller->LogicalDriveCount =
- NewControllerInfo->LogicalDevicesPresent;
- Controller->V2.NeedLogicalDeviceInformation = true;
- Controller->V2.NeedPhysicalDeviceInformation = true;
- Controller->V2.StartLogicalDeviceInformationScan = true;
- Controller->V2.StartPhysicalDeviceInformationScan = true;
- Controller->MonitoringAlertMode =
- (NewControllerInfo->LogicalDevicesCritical > 0 ||
- NewControllerInfo->LogicalDevicesOffline > 0 ||
- NewControllerInfo->PhysicalDisksCritical > 0 ||
- NewControllerInfo->PhysicalDisksOffline > 0);
- memcpy(ControllerInfo, NewControllerInfo,
- sizeof(DAC960_V2_ControllerInfo_T));
- }
- else if (IOCTLOpcode == DAC960_V2_GetEvent)
- {
- if (CommandStatus == DAC960_V2_NormalCompletion) {
- DAC960_V2_ReportEvent(Controller, Controller->V2.Event);
- }
- Controller->V2.NextEventSequenceNumber++;
- }
- else if (IOCTLOpcode == DAC960_V2_GetPhysicalDeviceInfoValid &&
- CommandStatus == DAC960_V2_NormalCompletion)
- {
- DAC960_V2_PhysicalDeviceInfo_T *NewPhysicalDeviceInfo =
- Controller->V2.NewPhysicalDeviceInformation;
- unsigned int PhysicalDeviceIndex = Controller->V2.PhysicalDeviceIndex;
- DAC960_V2_PhysicalDeviceInfo_T *PhysicalDeviceInfo =
- Controller->V2.PhysicalDeviceInformation[PhysicalDeviceIndex];
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
- Controller->V2.InquiryUnitSerialNumber[PhysicalDeviceIndex];
- unsigned int DeviceIndex;
- while (PhysicalDeviceInfo != NULL &&
- (NewPhysicalDeviceInfo->Channel >
- PhysicalDeviceInfo->Channel ||
- (NewPhysicalDeviceInfo->Channel ==
- PhysicalDeviceInfo->Channel &&
- (NewPhysicalDeviceInfo->TargetID >
- PhysicalDeviceInfo->TargetID ||
- (NewPhysicalDeviceInfo->TargetID ==
- PhysicalDeviceInfo->TargetID &&
- NewPhysicalDeviceInfo->LogicalUnit >
- PhysicalDeviceInfo->LogicalUnit)))))
- {
- DAC960_Critical("Physical Device %d:%d No Longer Exists\n",
- Controller,
- PhysicalDeviceInfo->Channel,
- PhysicalDeviceInfo->TargetID);
- Controller->V2.PhysicalDeviceInformation
- [PhysicalDeviceIndex] = NULL;
- Controller->V2.InquiryUnitSerialNumber
- [PhysicalDeviceIndex] = NULL;
- kfree(PhysicalDeviceInfo);
- kfree(InquiryUnitSerialNumber);
- for (DeviceIndex = PhysicalDeviceIndex;
- DeviceIndex < DAC960_V2_MaxPhysicalDevices - 1;
- DeviceIndex++)
- {
- Controller->V2.PhysicalDeviceInformation[DeviceIndex] =
- Controller->V2.PhysicalDeviceInformation[DeviceIndex+1];
- Controller->V2.InquiryUnitSerialNumber[DeviceIndex] =
- Controller->V2.InquiryUnitSerialNumber[DeviceIndex+1];
- }
- Controller->V2.PhysicalDeviceInformation
- [DAC960_V2_MaxPhysicalDevices-1] = NULL;
- Controller->V2.InquiryUnitSerialNumber
- [DAC960_V2_MaxPhysicalDevices-1] = NULL;
- PhysicalDeviceInfo =
- Controller->V2.PhysicalDeviceInformation[PhysicalDeviceIndex];
- InquiryUnitSerialNumber =
- Controller->V2.InquiryUnitSerialNumber[PhysicalDeviceIndex];
- }
- if (PhysicalDeviceInfo == NULL ||
- (NewPhysicalDeviceInfo->Channel !=
- PhysicalDeviceInfo->Channel) ||
- (NewPhysicalDeviceInfo->TargetID !=
- PhysicalDeviceInfo->TargetID) ||
- (NewPhysicalDeviceInfo->LogicalUnit !=
- PhysicalDeviceInfo->LogicalUnit))
- {
- PhysicalDeviceInfo =
- kmalloc(sizeof(DAC960_V2_PhysicalDeviceInfo_T), GFP_ATOMIC);
- InquiryUnitSerialNumber =
- kmalloc(sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T),
- GFP_ATOMIC);
- if (InquiryUnitSerialNumber == NULL ||
- PhysicalDeviceInfo == NULL)
- {
- kfree(InquiryUnitSerialNumber);
- InquiryUnitSerialNumber = NULL;
- kfree(PhysicalDeviceInfo);
- PhysicalDeviceInfo = NULL;
- }
- DAC960_Critical("Physical Device %d:%d Now Exists%s\n",
- Controller,
- NewPhysicalDeviceInfo->Channel,
- NewPhysicalDeviceInfo->TargetID,
- (PhysicalDeviceInfo != NULL
- ? "" : " - Allocation Failed"));
- if (PhysicalDeviceInfo != NULL)
- {
- memset(PhysicalDeviceInfo, 0,
- sizeof(DAC960_V2_PhysicalDeviceInfo_T));
- PhysicalDeviceInfo->PhysicalDeviceState =
- DAC960_V2_Device_InvalidState;
- memset(InquiryUnitSerialNumber, 0,
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
- InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
- for (DeviceIndex = DAC960_V2_MaxPhysicalDevices - 1;
- DeviceIndex > PhysicalDeviceIndex;
- DeviceIndex--)
- {
- Controller->V2.PhysicalDeviceInformation[DeviceIndex] =
- Controller->V2.PhysicalDeviceInformation[DeviceIndex-1];
- Controller->V2.InquiryUnitSerialNumber[DeviceIndex] =
- Controller->V2.InquiryUnitSerialNumber[DeviceIndex-1];
- }
- Controller->V2.PhysicalDeviceInformation
- [PhysicalDeviceIndex] =
- PhysicalDeviceInfo;
- Controller->V2.InquiryUnitSerialNumber
- [PhysicalDeviceIndex] =
- InquiryUnitSerialNumber;
- Controller->V2.NeedDeviceSerialNumberInformation = true;
- }
- }
- if (PhysicalDeviceInfo != NULL)
- {
- if (NewPhysicalDeviceInfo->PhysicalDeviceState !=
- PhysicalDeviceInfo->PhysicalDeviceState)
- DAC960_Critical(
- "Physical Device %d:%d is now %s\n", Controller,
- NewPhysicalDeviceInfo->Channel,
- NewPhysicalDeviceInfo->TargetID,
- (NewPhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Online
- ? "ONLINE"
- : NewPhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Rebuild
- ? "REBUILD"
- : NewPhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Missing
- ? "MISSING"
- : NewPhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Critical
- ? "CRITICAL"
- : NewPhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Dead
- ? "DEAD"
- : NewPhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_SuspectedDead
- ? "SUSPECTED-DEAD"
- : NewPhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_CommandedOffline
- ? "COMMANDED-OFFLINE"
- : NewPhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Standby
- ? "STANDBY" : "UNKNOWN"));
- if ((NewPhysicalDeviceInfo->ParityErrors !=
- PhysicalDeviceInfo->ParityErrors) ||
- (NewPhysicalDeviceInfo->SoftErrors !=
- PhysicalDeviceInfo->SoftErrors) ||
- (NewPhysicalDeviceInfo->HardErrors !=
- PhysicalDeviceInfo->HardErrors) ||
- (NewPhysicalDeviceInfo->MiscellaneousErrors !=
- PhysicalDeviceInfo->MiscellaneousErrors) ||
- (NewPhysicalDeviceInfo->CommandTimeouts !=
- PhysicalDeviceInfo->CommandTimeouts) ||
- (NewPhysicalDeviceInfo->Retries !=
- PhysicalDeviceInfo->Retries) ||
- (NewPhysicalDeviceInfo->Aborts !=
- PhysicalDeviceInfo->Aborts) ||
- (NewPhysicalDeviceInfo->PredictedFailuresDetected !=
- PhysicalDeviceInfo->PredictedFailuresDetected))
- {
- DAC960_Critical("Physical Device %d:%d Errors: "
- "Parity = %d, Soft = %d, "
- "Hard = %d, Misc = %d\n",
- Controller,
- NewPhysicalDeviceInfo->Channel,
- NewPhysicalDeviceInfo->TargetID,
- NewPhysicalDeviceInfo->ParityErrors,
- NewPhysicalDeviceInfo->SoftErrors,
- NewPhysicalDeviceInfo->HardErrors,
- NewPhysicalDeviceInfo->MiscellaneousErrors);
- DAC960_Critical("Physical Device %d:%d Errors: "
- "Timeouts = %d, Retries = %d, "
- "Aborts = %d, Predicted = %d\n",
- Controller,
- NewPhysicalDeviceInfo->Channel,
- NewPhysicalDeviceInfo->TargetID,
- NewPhysicalDeviceInfo->CommandTimeouts,
- NewPhysicalDeviceInfo->Retries,
- NewPhysicalDeviceInfo->Aborts,
- NewPhysicalDeviceInfo
- ->PredictedFailuresDetected);
- }
- if ((PhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_Dead ||
- PhysicalDeviceInfo->PhysicalDeviceState
- == DAC960_V2_Device_InvalidState) &&
- NewPhysicalDeviceInfo->PhysicalDeviceState
- != DAC960_V2_Device_Dead)
- Controller->V2.NeedDeviceSerialNumberInformation = true;
- memcpy(PhysicalDeviceInfo, NewPhysicalDeviceInfo,
- sizeof(DAC960_V2_PhysicalDeviceInfo_T));
- }
- NewPhysicalDeviceInfo->LogicalUnit++;
- Controller->V2.PhysicalDeviceIndex++;
- }
- else if (IOCTLOpcode == DAC960_V2_GetPhysicalDeviceInfoValid)
- {
- unsigned int DeviceIndex;
- for (DeviceIndex = Controller->V2.PhysicalDeviceIndex;
- DeviceIndex < DAC960_V2_MaxPhysicalDevices;
- DeviceIndex++)
- {
- DAC960_V2_PhysicalDeviceInfo_T *PhysicalDeviceInfo =
- Controller->V2.PhysicalDeviceInformation[DeviceIndex];
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
- Controller->V2.InquiryUnitSerialNumber[DeviceIndex];
- if (PhysicalDeviceInfo == NULL) break;
- DAC960_Critical("Physical Device %d:%d No Longer Exists\n",
- Controller,
- PhysicalDeviceInfo->Channel,
- PhysicalDeviceInfo->TargetID);
- Controller->V2.PhysicalDeviceInformation[DeviceIndex] = NULL;
- Controller->V2.InquiryUnitSerialNumber[DeviceIndex] = NULL;
- kfree(PhysicalDeviceInfo);
- kfree(InquiryUnitSerialNumber);
- }
- Controller->V2.NeedPhysicalDeviceInformation = false;
- }
- else if (IOCTLOpcode == DAC960_V2_GetLogicalDeviceInfoValid &&
- CommandStatus == DAC960_V2_NormalCompletion)
- {
- DAC960_V2_LogicalDeviceInfo_T *NewLogicalDeviceInfo =
- Controller->V2.NewLogicalDeviceInformation;
- unsigned short LogicalDeviceNumber =
- NewLogicalDeviceInfo->LogicalDeviceNumber;
- DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo =
- Controller->V2.LogicalDeviceInformation[LogicalDeviceNumber];
- if (LogicalDeviceInfo == NULL)
- {
- DAC960_V2_PhysicalDevice_T PhysicalDevice;
- PhysicalDevice.Controller = 0;
- PhysicalDevice.Channel = NewLogicalDeviceInfo->Channel;
- PhysicalDevice.TargetID = NewLogicalDeviceInfo->TargetID;
- PhysicalDevice.LogicalUnit = NewLogicalDeviceInfo->LogicalUnit;
- Controller->V2.LogicalDriveToVirtualDevice[LogicalDeviceNumber] =
- PhysicalDevice;
- LogicalDeviceInfo = kmalloc(sizeof(DAC960_V2_LogicalDeviceInfo_T),
- GFP_ATOMIC);
- Controller->V2.LogicalDeviceInformation[LogicalDeviceNumber] =
- LogicalDeviceInfo;
- DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
- "Now Exists%s\n", Controller,
- LogicalDeviceNumber,
- Controller->ControllerNumber,
- LogicalDeviceNumber,
- (LogicalDeviceInfo != NULL
- ? "" : " - Allocation Failed"));
- if (LogicalDeviceInfo != NULL)
- {
- memset(LogicalDeviceInfo, 0,
- sizeof(DAC960_V2_LogicalDeviceInfo_T));
- DAC960_ComputeGenericDiskInfo(Controller);
- }
- }
- if (LogicalDeviceInfo != NULL)
- {
- unsigned long LogicalDeviceSize =
- NewLogicalDeviceInfo->ConfigurableDeviceSize;
- if (NewLogicalDeviceInfo->LogicalDeviceState !=
- LogicalDeviceInfo->LogicalDeviceState)
- DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
- "is now %s\n", Controller,
- LogicalDeviceNumber,
- Controller->ControllerNumber,
- LogicalDeviceNumber,
- (NewLogicalDeviceInfo->LogicalDeviceState
- == DAC960_V2_LogicalDevice_Online
- ? "ONLINE"
- : NewLogicalDeviceInfo->LogicalDeviceState
- == DAC960_V2_LogicalDevice_Critical
- ? "CRITICAL" : "OFFLINE"));
- if ((NewLogicalDeviceInfo->SoftErrors !=
- LogicalDeviceInfo->SoftErrors) ||
- (NewLogicalDeviceInfo->CommandsFailed !=
- LogicalDeviceInfo->CommandsFailed) ||
- (NewLogicalDeviceInfo->DeferredWriteErrors !=
- LogicalDeviceInfo->DeferredWriteErrors))
- DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) Errors: "
- "Soft = %d, Failed = %d, Deferred Write = %d\n",
- Controller, LogicalDeviceNumber,
- Controller->ControllerNumber,
- LogicalDeviceNumber,
- NewLogicalDeviceInfo->SoftErrors,
- NewLogicalDeviceInfo->CommandsFailed,
- NewLogicalDeviceInfo->DeferredWriteErrors);
- if (NewLogicalDeviceInfo->ConsistencyCheckInProgress)
- DAC960_V2_ReportProgress(Controller,
- "Consistency Check",
- LogicalDeviceNumber,
- NewLogicalDeviceInfo
- ->ConsistencyCheckBlockNumber,
- LogicalDeviceSize);
- else if (NewLogicalDeviceInfo->RebuildInProgress)
- DAC960_V2_ReportProgress(Controller,
- "Rebuild",
- LogicalDeviceNumber,
- NewLogicalDeviceInfo
- ->RebuildBlockNumber,
- LogicalDeviceSize);
- else if (NewLogicalDeviceInfo->BackgroundInitializationInProgress)
- DAC960_V2_ReportProgress(Controller,
- "Background Initialization",
- LogicalDeviceNumber,
- NewLogicalDeviceInfo
- ->BackgroundInitializationBlockNumber,
- LogicalDeviceSize);
- else if (NewLogicalDeviceInfo->ForegroundInitializationInProgress)
- DAC960_V2_ReportProgress(Controller,
- "Foreground Initialization",
- LogicalDeviceNumber,
- NewLogicalDeviceInfo
- ->ForegroundInitializationBlockNumber,
- LogicalDeviceSize);
- else if (NewLogicalDeviceInfo->DataMigrationInProgress)
- DAC960_V2_ReportProgress(Controller,
- "Data Migration",
- LogicalDeviceNumber,
- NewLogicalDeviceInfo
- ->DataMigrationBlockNumber,
- LogicalDeviceSize);
- else if (NewLogicalDeviceInfo->PatrolOperationInProgress)
- DAC960_V2_ReportProgress(Controller,
- "Patrol Operation",
- LogicalDeviceNumber,
- NewLogicalDeviceInfo
- ->PatrolOperationBlockNumber,
- LogicalDeviceSize);
- if (LogicalDeviceInfo->BackgroundInitializationInProgress &&
- !NewLogicalDeviceInfo->BackgroundInitializationInProgress)
- DAC960_Progress("Logical Drive %d (/dev/rd/c%dd%d) "
- "Background Initialization %s\n",
- Controller,
- LogicalDeviceNumber,
- Controller->ControllerNumber,
- LogicalDeviceNumber,
- (NewLogicalDeviceInfo->LogicalDeviceControl
- .LogicalDeviceInitialized
- ? "Completed" : "Failed"));
- memcpy(LogicalDeviceInfo, NewLogicalDeviceInfo,
- sizeof(DAC960_V2_LogicalDeviceInfo_T));
- }
- Controller->V2.LogicalDriveFoundDuringScan
- [LogicalDeviceNumber] = true;
- NewLogicalDeviceInfo->LogicalDeviceNumber++;
- }
- else if (IOCTLOpcode == DAC960_V2_GetLogicalDeviceInfoValid)
- {
- int LogicalDriveNumber;
- for (LogicalDriveNumber = 0;
- LogicalDriveNumber < DAC960_MaxLogicalDrives;
- LogicalDriveNumber++)
- {
- DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo =
- Controller->V2.LogicalDeviceInformation[LogicalDriveNumber];
- if (LogicalDeviceInfo == NULL ||
- Controller->V2.LogicalDriveFoundDuringScan
- [LogicalDriveNumber])
- continue;
- DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
- "No Longer Exists\n", Controller,
- LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber);
- Controller->V2.LogicalDeviceInformation
- [LogicalDriveNumber] = NULL;
- kfree(LogicalDeviceInfo);
- Controller->LogicalDriveInitiallyAccessible
- [LogicalDriveNumber] = false;
- DAC960_ComputeGenericDiskInfo(Controller);
- }
- Controller->V2.NeedLogicalDeviceInformation = false;
- }
- else if (CommandOpcode == DAC960_V2_SCSI_10_Passthru)
- {
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
- Controller->V2.InquiryUnitSerialNumber[Controller->V2.PhysicalDeviceIndex - 1];
-
- if (CommandStatus != DAC960_V2_NormalCompletion) {
- memset(InquiryUnitSerialNumber,
- 0, sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
- InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
- } else
- memcpy(InquiryUnitSerialNumber,
- Controller->V2.NewInquiryUnitSerialNumber,
- sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
-
- Controller->V2.NeedDeviceSerialNumberInformation = false;
- }
-
- if (Controller->V2.HealthStatusBuffer->NextEventSequenceNumber
- - Controller->V2.NextEventSequenceNumber > 0)
- {
- CommandMailbox->GetEvent.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->GetEvent.DataTransferSize = sizeof(DAC960_V2_Event_T);
- CommandMailbox->GetEvent.EventSequenceNumberHigh16 =
- Controller->V2.NextEventSequenceNumber >> 16;
- CommandMailbox->GetEvent.ControllerNumber = 0;
- CommandMailbox->GetEvent.IOCTL_Opcode =
- DAC960_V2_GetEvent;
- CommandMailbox->GetEvent.EventSequenceNumberLow16 =
- Controller->V2.NextEventSequenceNumber & 0xFFFF;
- CommandMailbox->GetEvent.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- Controller->V2.EventDMA;
- CommandMailbox->GetEvent.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->GetEvent.DataTransferSize;
- DAC960_QueueCommand(Command);
- return;
- }
- if (Controller->V2.NeedPhysicalDeviceInformation)
- {
- if (Controller->V2.NeedDeviceSerialNumberInformation)
- {
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
- Controller->V2.NewInquiryUnitSerialNumber;
- InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
-
- DAC960_V2_ConstructNewUnitSerialNumber(Controller, CommandMailbox,
- Controller->V2.NewPhysicalDeviceInformation->Channel,
- Controller->V2.NewPhysicalDeviceInformation->TargetID,
- Controller->V2.NewPhysicalDeviceInformation->LogicalUnit - 1);
-
-
- DAC960_QueueCommand(Command);
- return;
- }
- if (Controller->V2.StartPhysicalDeviceInformationScan)
- {
- Controller->V2.PhysicalDeviceIndex = 0;
- Controller->V2.NewPhysicalDeviceInformation->Channel = 0;
- Controller->V2.NewPhysicalDeviceInformation->TargetID = 0;
- Controller->V2.NewPhysicalDeviceInformation->LogicalUnit = 0;
- Controller->V2.StartPhysicalDeviceInformationScan = false;
- }
- CommandMailbox->PhysicalDeviceInfo.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->PhysicalDeviceInfo.DataTransferSize =
- sizeof(DAC960_V2_PhysicalDeviceInfo_T);
- CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.LogicalUnit =
- Controller->V2.NewPhysicalDeviceInformation->LogicalUnit;
- CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.TargetID =
- Controller->V2.NewPhysicalDeviceInformation->TargetID;
- CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.Channel =
- Controller->V2.NewPhysicalDeviceInformation->Channel;
- CommandMailbox->PhysicalDeviceInfo.IOCTL_Opcode =
- DAC960_V2_GetPhysicalDeviceInfoValid;
- CommandMailbox->PhysicalDeviceInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- Controller->V2.NewPhysicalDeviceInformationDMA;
- CommandMailbox->PhysicalDeviceInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->PhysicalDeviceInfo.DataTransferSize;
- DAC960_QueueCommand(Command);
- return;
- }
- if (Controller->V2.NeedLogicalDeviceInformation)
- {
- if (Controller->V2.StartLogicalDeviceInformationScan)
- {
- int LogicalDriveNumber;
- for (LogicalDriveNumber = 0;
- LogicalDriveNumber < DAC960_MaxLogicalDrives;
- LogicalDriveNumber++)
- Controller->V2.LogicalDriveFoundDuringScan
- [LogicalDriveNumber] = false;
- Controller->V2.NewLogicalDeviceInformation->LogicalDeviceNumber = 0;
- Controller->V2.StartLogicalDeviceInformationScan = false;
- }
- CommandMailbox->LogicalDeviceInfo.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->LogicalDeviceInfo.DataTransferSize =
- sizeof(DAC960_V2_LogicalDeviceInfo_T);
- CommandMailbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber =
- Controller->V2.NewLogicalDeviceInformation->LogicalDeviceNumber;
- CommandMailbox->LogicalDeviceInfo.IOCTL_Opcode =
- DAC960_V2_GetLogicalDeviceInfoValid;
- CommandMailbox->LogicalDeviceInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- Controller->V2.NewLogicalDeviceInformationDMA;
- CommandMailbox->LogicalDeviceInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->LogicalDeviceInfo.DataTransferSize;
- DAC960_QueueCommand(Command);
- return;
- }
- Controller->MonitoringTimerCount++;
- Controller->MonitoringTimer.expires =
- jiffies + DAC960_HealthStatusMonitoringInterval;
- add_timer(&Controller->MonitoringTimer);
- }
- if (CommandType == DAC960_ImmediateCommand)
- {
- complete(Command->Completion);
- Command->Completion = NULL;
- return;
- }
- if (CommandType == DAC960_QueuedCommand)
- {
- DAC960_V2_KernelCommand_T *KernelCommand = Command->V2.KernelCommand;
- KernelCommand->CommandStatus = CommandStatus;
- KernelCommand->RequestSenseLength = Command->V2.RequestSenseLength;
- KernelCommand->DataTransferLength = Command->V2.DataTransferResidue;
- Command->V2.KernelCommand = NULL;
- DAC960_DeallocateCommand(Command);
- KernelCommand->CompletionFunction(KernelCommand);
- return;
- }
- /*
- Queue a Status Monitoring Command to the Controller using the just
- completed Command if one was deferred previously due to lack of a
- free Command when the Monitoring Timer Function was called.
- */
- if (Controller->MonitoringCommandDeferred)
- {
- Controller->MonitoringCommandDeferred = false;
- DAC960_V2_QueueMonitoringCommand(Command);
- return;
- }
- /*
- Deallocate the Command.
- */
- DAC960_DeallocateCommand(Command);
- /*
- Wake up any processes waiting on a free Command.
- */
- wake_up(&Controller->CommandWaitQueue);
-}
-
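[editor's note] The tail of DAC960_V2_ProcessCompletedCommand above is effectively a state machine: each monitoring completion decides the next query, requeues the same Command, and returns, so a single command slot walks the whole scan (events, then physical devices, then logical devices) before the timer is re-armed. Reduced to its shape (a sketch; the helper names are hypothetical, the fields are the driver's):

        static void monitoring_step(DAC960_Command_T *Command)
        {
                DAC960_Controller_T *c = Command->Controller;

                if (events_pending(c)) {        /* sequence numbers differ */
                        queue_get_event(Command);
                        return;
                }
                if (c->V2.NeedPhysicalDeviceInformation) {
                        queue_physical_device_info(Command);
                        return;
                }
                if (c->V2.NeedLogicalDeviceInformation) {
                        queue_logical_device_info(Command);
                        return;
                }
                rearm_monitoring_timer(c);      /* scan complete */
        }
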
-/*
- DAC960_GEM_InterruptHandler handles hardware interrupts from DAC960 GEM Series
- Controllers.
-*/
-
-static irqreturn_t DAC960_GEM_InterruptHandler(int IRQ_Channel,
- void *DeviceIdentifier)
-{
- DAC960_Controller_T *Controller = DeviceIdentifier;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V2_StatusMailbox_T *NextStatusMailbox;
- unsigned long flags;
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_GEM_AcknowledgeInterrupt(ControllerBaseAddress);
- NextStatusMailbox = Controller->V2.NextStatusMailbox;
- while (NextStatusMailbox->Fields.CommandIdentifier > 0)
- {
- DAC960_V2_CommandIdentifier_T CommandIdentifier =
- NextStatusMailbox->Fields.CommandIdentifier;
- DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
- Command->V2.CommandStatus = NextStatusMailbox->Fields.CommandStatus;
- Command->V2.RequestSenseLength =
- NextStatusMailbox->Fields.RequestSenseLength;
- Command->V2.DataTransferResidue =
- NextStatusMailbox->Fields.DataTransferResidue;
- NextStatusMailbox->Words[0] = 0;
- if (++NextStatusMailbox > Controller->V2.LastStatusMailbox)
- NextStatusMailbox = Controller->V2.FirstStatusMailbox;
- DAC960_V2_ProcessCompletedCommand(Command);
- }
- Controller->V2.NextStatusMailbox = NextStatusMailbox;
- /*
- Attempt to remove additional I/O Requests from the Controller's
- I/O Request Queue and queue them to the Controller.
- */
- DAC960_ProcessRequest(Controller);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- return IRQ_HANDLED;
-}
-
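[editor's note] The GEM, BA and LP handlers below differ only in which acknowledge register they poke; the status-mailbox loop is identical. Its invariant: a CommandIdentifier of zero marks an empty slot, so clearing Words[0] both frees the slot and terminates the loop once the handler catches up with the hardware. The shared loop in isolation (real driver types and fields; the sense-length and residue copies are abbreviated away):

        static void drain_status_ring(DAC960_Controller_T *ctrl)
        {
                DAC960_V2_StatusMailbox_T *mbox = ctrl->V2.NextStatusMailbox;

                while (mbox->Fields.CommandIdentifier > 0) {
                        DAC960_Command_T *cmd =
                                ctrl->Commands[mbox->Fields.CommandIdentifier - 1];

                        cmd->V2.CommandStatus = mbox->Fields.CommandStatus;
                        mbox->Words[0] = 0;        /* mark the slot empty */
                        if (++mbox > ctrl->V2.LastStatusMailbox)
                                mbox = ctrl->V2.FirstStatusMailbox; /* wrap */
                        DAC960_V2_ProcessCompletedCommand(cmd);
                }
                ctrl->V2.NextStatusMailbox = mbox; /* resume point */
        }
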
-/*
- DAC960_BA_InterruptHandler handles hardware interrupts from DAC960 BA Series
- Controllers.
-*/
-
-static irqreturn_t DAC960_BA_InterruptHandler(int IRQ_Channel,
- void *DeviceIdentifier)
-{
- DAC960_Controller_T *Controller = DeviceIdentifier;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V2_StatusMailbox_T *NextStatusMailbox;
- unsigned long flags;
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_BA_AcknowledgeInterrupt(ControllerBaseAddress);
- NextStatusMailbox = Controller->V2.NextStatusMailbox;
- while (NextStatusMailbox->Fields.CommandIdentifier > 0)
- {
- DAC960_V2_CommandIdentifier_T CommandIdentifier =
- NextStatusMailbox->Fields.CommandIdentifier;
- DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
- Command->V2.CommandStatus = NextStatusMailbox->Fields.CommandStatus;
- Command->V2.RequestSenseLength =
- NextStatusMailbox->Fields.RequestSenseLength;
- Command->V2.DataTransferResidue =
- NextStatusMailbox->Fields.DataTransferResidue;
- NextStatusMailbox->Words[0] = 0;
- if (++NextStatusMailbox > Controller->V2.LastStatusMailbox)
- NextStatusMailbox = Controller->V2.FirstStatusMailbox;
- DAC960_V2_ProcessCompletedCommand(Command);
- }
- Controller->V2.NextStatusMailbox = NextStatusMailbox;
- /*
- Attempt to remove additional I/O Requests from the Controller's
- I/O Request Queue and queue them to the Controller.
- */
- DAC960_ProcessRequest(Controller);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- return IRQ_HANDLED;
-}
-
-
-/*
- DAC960_LP_InterruptHandler handles hardware interrupts from DAC960 LP Series
- Controllers.
-*/
-
-static irqreturn_t DAC960_LP_InterruptHandler(int IRQ_Channel,
- void *DeviceIdentifier)
-{
- DAC960_Controller_T *Controller = DeviceIdentifier;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V2_StatusMailbox_T *NextStatusMailbox;
- unsigned long flags;
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_LP_AcknowledgeInterrupt(ControllerBaseAddress);
- NextStatusMailbox = Controller->V2.NextStatusMailbox;
- while (NextStatusMailbox->Fields.CommandIdentifier > 0)
- {
- DAC960_V2_CommandIdentifier_T CommandIdentifier =
- NextStatusMailbox->Fields.CommandIdentifier;
- DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
- Command->V2.CommandStatus = NextStatusMailbox->Fields.CommandStatus;
- Command->V2.RequestSenseLength =
- NextStatusMailbox->Fields.RequestSenseLength;
- Command->V2.DataTransferResidue =
- NextStatusMailbox->Fields.DataTransferResidue;
- NextStatusMailbox->Words[0] = 0;
- if (++NextStatusMailbox > Controller->V2.LastStatusMailbox)
- NextStatusMailbox = Controller->V2.FirstStatusMailbox;
- DAC960_V2_ProcessCompletedCommand(Command);
- }
- Controller->V2.NextStatusMailbox = NextStatusMailbox;
- /*
- Attempt to remove additional I/O Requests from the Controller's
- I/O Request Queue and queue them to the Controller.
- */
- DAC960_ProcessRequest(Controller);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- return IRQ_HANDLED;
-}
-
-
-/*
- DAC960_LA_InterruptHandler handles hardware interrupts from DAC960 LA Series
- Controllers.
-*/
-
-static irqreturn_t DAC960_LA_InterruptHandler(int IRQ_Channel,
- void *DeviceIdentifier)
-{
- DAC960_Controller_T *Controller = DeviceIdentifier;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V1_StatusMailbox_T *NextStatusMailbox;
- unsigned long flags;
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_LA_AcknowledgeInterrupt(ControllerBaseAddress);
- NextStatusMailbox = Controller->V1.NextStatusMailbox;
- while (NextStatusMailbox->Fields.Valid)
- {
- DAC960_V1_CommandIdentifier_T CommandIdentifier =
- NextStatusMailbox->Fields.CommandIdentifier;
- DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
- Command->V1.CommandStatus = NextStatusMailbox->Fields.CommandStatus;
- NextStatusMailbox->Word = 0;
- if (++NextStatusMailbox > Controller->V1.LastStatusMailbox)
- NextStatusMailbox = Controller->V1.FirstStatusMailbox;
- DAC960_V1_ProcessCompletedCommand(Command);
- }
- Controller->V1.NextStatusMailbox = NextStatusMailbox;
- /*
- Attempt to remove additional I/O Requests from the Controller's
- I/O Request Queue and queue them to the Controller.
- */
- DAC960_ProcessRequest(Controller);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- return IRQ_HANDLED;
-}
-
-
-/*
- DAC960_PG_InterruptHandler handles hardware interrupts from DAC960 PG Series
- Controllers.
-*/
-
-static irqreturn_t DAC960_PG_InterruptHandler(int IRQ_Channel,
- void *DeviceIdentifier)
-{
- DAC960_Controller_T *Controller = DeviceIdentifier;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- DAC960_V1_StatusMailbox_T *NextStatusMailbox;
- unsigned long flags;
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_PG_AcknowledgeInterrupt(ControllerBaseAddress);
- NextStatusMailbox = Controller->V1.NextStatusMailbox;
- while (NextStatusMailbox->Fields.Valid)
- {
- DAC960_V1_CommandIdentifier_T CommandIdentifier =
- NextStatusMailbox->Fields.CommandIdentifier;
- DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
- Command->V1.CommandStatus = NextStatusMailbox->Fields.CommandStatus;
- NextStatusMailbox->Word = 0;
- if (++NextStatusMailbox > Controller->V1.LastStatusMailbox)
- NextStatusMailbox = Controller->V1.FirstStatusMailbox;
- DAC960_V1_ProcessCompletedCommand(Command);
- }
- Controller->V1.NextStatusMailbox = NextStatusMailbox;
- /*
- Attempt to remove additional I/O Requests from the Controller's
- I/O Request Queue and queue them to the Controller.
- */
- DAC960_ProcessRequest(Controller);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- return IRQ_HANDLED;
-}
-
-
-/*
- DAC960_PD_InterruptHandler handles hardware interrupts from DAC960 PD Series
- Controllers.
-*/
-
-static irqreturn_t DAC960_PD_InterruptHandler(int IRQ_Channel,
- void *DeviceIdentifier)
-{
- DAC960_Controller_T *Controller = DeviceIdentifier;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- unsigned long flags;
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- while (DAC960_PD_StatusAvailableP(ControllerBaseAddress))
- {
- DAC960_V1_CommandIdentifier_T CommandIdentifier =
- DAC960_PD_ReadStatusCommandIdentifier(ControllerBaseAddress);
- DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
- Command->V1.CommandStatus =
- DAC960_PD_ReadStatusRegister(ControllerBaseAddress);
- DAC960_PD_AcknowledgeInterrupt(ControllerBaseAddress);
- DAC960_PD_AcknowledgeStatus(ControllerBaseAddress);
- DAC960_V1_ProcessCompletedCommand(Command);
- }
- /*
- Attempt to remove additional I/O Requests from the Controller's
- I/O Request Queue and queue them to the Controller.
- */
- DAC960_ProcessRequest(Controller);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- return IRQ_HANDLED;
-}
-
-
-/*
- DAC960_P_InterruptHandler handles hardware interrupts from DAC960 P Series
- Controllers.
-
- Translations of DAC960_V1_Enquiry and DAC960_V1_GetDeviceState rely
- on the data having been placed into DAC960_Controller_T, rather than
- an arbitrary buffer.
-*/
-
-static irqreturn_t DAC960_P_InterruptHandler(int IRQ_Channel,
- void *DeviceIdentifier)
-{
- DAC960_Controller_T *Controller = DeviceIdentifier;
- void __iomem *ControllerBaseAddress = Controller->BaseAddress;
- unsigned long flags;
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- while (DAC960_PD_StatusAvailableP(ControllerBaseAddress))
- {
- DAC960_V1_CommandIdentifier_T CommandIdentifier =
- DAC960_PD_ReadStatusCommandIdentifier(ControllerBaseAddress);
- DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- DAC960_V1_CommandOpcode_T CommandOpcode =
- CommandMailbox->Common.CommandOpcode;
- Command->V1.CommandStatus =
- DAC960_PD_ReadStatusRegister(ControllerBaseAddress);
- DAC960_PD_AcknowledgeInterrupt(ControllerBaseAddress);
- DAC960_PD_AcknowledgeStatus(ControllerBaseAddress);
- switch (CommandOpcode)
- {
- case DAC960_V1_Enquiry_Old:
- Command->V1.CommandMailbox.Common.CommandOpcode = DAC960_V1_Enquiry;
- DAC960_P_To_PD_TranslateEnquiry(Controller->V1.NewEnquiry);
- break;
- case DAC960_V1_GetDeviceState_Old:
- Command->V1.CommandMailbox.Common.CommandOpcode =
- DAC960_V1_GetDeviceState;
- DAC960_P_To_PD_TranslateDeviceState(Controller->V1.NewDeviceState);
- break;
- case DAC960_V1_Read_Old:
- Command->V1.CommandMailbox.Common.CommandOpcode = DAC960_V1_Read;
- DAC960_P_To_PD_TranslateReadWriteCommand(CommandMailbox);
- break;
- case DAC960_V1_Write_Old:
- Command->V1.CommandMailbox.Common.CommandOpcode = DAC960_V1_Write;
- DAC960_P_To_PD_TranslateReadWriteCommand(CommandMailbox);
- break;
- case DAC960_V1_ReadWithScatterGather_Old:
- Command->V1.CommandMailbox.Common.CommandOpcode =
- DAC960_V1_ReadWithScatterGather;
- DAC960_P_To_PD_TranslateReadWriteCommand(CommandMailbox);
- break;
- case DAC960_V1_WriteWithScatterGather_Old:
- Command->V1.CommandMailbox.Common.CommandOpcode =
- DAC960_V1_WriteWithScatterGather;
- DAC960_P_To_PD_TranslateReadWriteCommand(CommandMailbox);
- break;
- default:
- break;
- }
- DAC960_V1_ProcessCompletedCommand(Command);
- }
- /*
- Attempt to remove additional I/O Requests from the Controller's
- I/O Request Queue and queue them to the Controller.
- */
- DAC960_ProcessRequest(Controller);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- return IRQ_HANDLED;
-}
-
-
-/*
- DAC960_V1_QueueMonitoringCommand queues a Monitoring Command to DAC960 V1
- Firmware Controllers.
-*/
-
-static void DAC960_V1_QueueMonitoringCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- DAC960_V1_ClearCommand(Command);
- Command->CommandType = DAC960_MonitoringCommand;
- CommandMailbox->Type3.CommandOpcode = DAC960_V1_Enquiry;
- CommandMailbox->Type3.BusAddress = Controller->V1.NewEnquiryDMA;
- DAC960_QueueCommand(Command);
-}
-
-
-/*
- DAC960_V2_QueueMonitoringCommand queues a Monitoring Command to DAC960 V2
- Firmware Controllers.
-*/
-
-static void DAC960_V2_QueueMonitoringCommand(DAC960_Command_T *Command)
-{
- DAC960_Controller_T *Controller = Command->Controller;
- DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_ClearCommand(Command);
- Command->CommandType = DAC960_MonitoringCommand;
- CommandMailbox->ControllerInfo.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->ControllerInfo.CommandControlBits
- .DataTransferControllerToHost = true;
- CommandMailbox->ControllerInfo.CommandControlBits
- .NoAutoRequestSense = true;
- CommandMailbox->ControllerInfo.DataTransferSize =
- sizeof(DAC960_V2_ControllerInfo_T);
- CommandMailbox->ControllerInfo.ControllerNumber = 0;
- CommandMailbox->ControllerInfo.IOCTL_Opcode = DAC960_V2_GetControllerInfo;
- CommandMailbox->ControllerInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- Controller->V2.NewControllerInformationDMA;
- CommandMailbox->ControllerInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->ControllerInfo.DataTransferSize;
- DAC960_QueueCommand(Command);
-}
-
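[editor's note] The scatter/gather setup in DAC960_V2_QueueMonitoringCommand above is the same two-step idiom repeated throughout the driver: every IOCTL-style transfer uses exactly one segment, pointed at a coherent DMA buffer and sized to the structure being fetched. Isolated (a sketch; sg stands for ScatterGatherSegments[0] of whichever mailbox variant is in use):

        /* One-segment data transfer: segment 0 carries the whole structure. */
        sg->SegmentDataPointer = dma_handle;    /* bus address of the buffer */
        sg->SegmentByteCount   = transfer_size; /* == DataTransferSize above */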
-
-/*
- DAC960_MonitoringTimerFunction is the timer function for monitoring
- the status of DAC960 Controllers.
-*/
-
-static void DAC960_MonitoringTimerFunction(struct timer_list *t)
-{
- DAC960_Controller_T *Controller = from_timer(Controller, t, MonitoringTimer);
- DAC960_Command_T *Command;
- unsigned long flags;
-
- if (Controller->FirmwareType == DAC960_V1_Controller)
- {
- spin_lock_irqsave(&Controller->queue_lock, flags);
- /*
- Queue a Status Monitoring Command to Controller.
- */
- Command = DAC960_AllocateCommand(Controller);
- if (Command != NULL)
- DAC960_V1_QueueMonitoringCommand(Command);
- else Controller->MonitoringCommandDeferred = true;
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- }
- else
- {
- DAC960_V2_ControllerInfo_T *ControllerInfo =
- &Controller->V2.ControllerInformation;
- unsigned int StatusChangeCounter =
- Controller->V2.HealthStatusBuffer->StatusChangeCounter;
- bool ForceMonitoringCommand = false;
- if (time_after(jiffies, Controller->SecondaryMonitoringTime
- + DAC960_SecondaryMonitoringInterval))
- {
- int LogicalDriveNumber;
- for (LogicalDriveNumber = 0;
- LogicalDriveNumber < DAC960_MaxLogicalDrives;
- LogicalDriveNumber++)
- {
- DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo =
- Controller->V2.LogicalDeviceInformation[LogicalDriveNumber];
- if (LogicalDeviceInfo == NULL) continue;
- if (!LogicalDeviceInfo->LogicalDeviceControl
- .LogicalDeviceInitialized)
- {
- ForceMonitoringCommand = true;
- break;
- }
- }
- Controller->SecondaryMonitoringTime = jiffies;
- }
- if (StatusChangeCounter == Controller->V2.StatusChangeCounter &&
- Controller->V2.HealthStatusBuffer->NextEventSequenceNumber
- == Controller->V2.NextEventSequenceNumber &&
- (ControllerInfo->BackgroundInitializationsActive +
- ControllerInfo->LogicalDeviceInitializationsActive +
- ControllerInfo->PhysicalDeviceInitializationsActive +
- ControllerInfo->ConsistencyChecksActive +
- ControllerInfo->RebuildsActive +
- ControllerInfo->OnlineExpansionsActive == 0 ||
- time_before(jiffies, Controller->PrimaryMonitoringTime
- + DAC960_MonitoringTimerInterval)) &&
- !ForceMonitoringCommand)
- {
- Controller->MonitoringTimer.expires =
- jiffies + DAC960_HealthStatusMonitoringInterval;
- add_timer(&Controller->MonitoringTimer);
- return;
- }
- Controller->V2.StatusChangeCounter = StatusChangeCounter;
- Controller->PrimaryMonitoringTime = jiffies;
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- /*
- Queue a Status Monitoring Command to Controller.
- */
- Command = DAC960_AllocateCommand(Controller);
- if (Command != NULL)
- DAC960_V2_QueueMonitoringCommand(Command);
- else Controller->MonitoringCommandDeferred = true;
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- /*
- Wake up any processes waiting on a Health Status Buffer change.
- */
- wake_up(&Controller->HealthStatusWaitQueue);
- }
-}
-
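[editor's note] DAC960_MonitoringTimerFunction above uses the modern kernel timer API: from_timer() recovers the enclosing DAC960_Controller_T from the struct timer_list pointer, and the function re-arms itself by setting .expires and calling add_timer(). A minimal self-contained version of the same pattern (everything except the timer API names is mine):

        #include <linux/timer.h>
        #include <linux/jiffies.h>

        struct my_ctrl {
                struct timer_list MonitoringTimer;
        };

        static void my_timer_fn(struct timer_list *t)
        {
                struct my_ctrl *c = from_timer(c, t, MonitoringTimer);

                /* ... inspect controller state ... */
                mod_timer(&c->MonitoringTimer, jiffies + HZ); /* re-arm in 1s */
        }

        /* at init: timer_setup(&c->MonitoringTimer, my_timer_fn, 0); */
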
-/*
- DAC960_CheckStatusBuffer verifies that there is room to hold ByteCount
- additional bytes in the Combined Status Buffer and grows the buffer if
- necessary. It returns true if there is enough room and false otherwise.
-*/
-
-static bool DAC960_CheckStatusBuffer(DAC960_Controller_T *Controller,
- unsigned int ByteCount)
-{
- unsigned char *NewStatusBuffer;
- if (Controller->InitialStatusLength + 1 +
- Controller->CurrentStatusLength + ByteCount + 1 <=
- Controller->CombinedStatusBufferLength)
- return true;
- if (Controller->CombinedStatusBufferLength == 0)
- {
- unsigned int NewStatusBufferLength = DAC960_InitialStatusBufferSize;
- while (NewStatusBufferLength < ByteCount)
- NewStatusBufferLength *= 2;
- Controller->CombinedStatusBuffer = kmalloc(NewStatusBufferLength,
- GFP_ATOMIC);
- if (Controller->CombinedStatusBuffer == NULL) return false;
- Controller->CombinedStatusBufferLength = NewStatusBufferLength;
- return true;
- }
- NewStatusBuffer = kmalloc_array(2, Controller->CombinedStatusBufferLength,
- GFP_ATOMIC);
- if (NewStatusBuffer == NULL)
- {
- DAC960_Warning("Unable to expand Combined Status Buffer - Truncating\n",
- Controller);
- return false;
- }
- memcpy(NewStatusBuffer, Controller->CombinedStatusBuffer,
- Controller->CombinedStatusBufferLength);
- kfree(Controller->CombinedStatusBuffer);
- Controller->CombinedStatusBuffer = NewStatusBuffer;
- Controller->CombinedStatusBufferLength *= 2;
- Controller->CurrentStatusBuffer =
- &NewStatusBuffer[Controller->InitialStatusLength + 1];
- return true;
-}
-
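[editor's note] DAC960_CheckStatusBuffer grows the combined buffer geometrically, so repeated appends cost amortized O(1) copying; on failure it returns false and the caller simply truncates. A condensed equivalent using krealloc(), which subsumes the kmalloc/memcpy/kfree dance (names are mine; the original also has to refresh CurrentStatusBuffer, omitted here):

        static bool ensure_room(char **buf, unsigned int *cap,
                                unsigned int used, unsigned int need)
        {
                unsigned int newcap = *cap ? *cap : 64;
                char *p;

                if (used + need <= *cap)
                        return true;
                while (newcap < used + need)
                        newcap *= 2;            /* geometric growth */
                p = krealloc(*buf, newcap, GFP_ATOMIC);
                if (!p)
                        return false;           /* caller truncates */
                *buf = p;
                *cap = newcap;
                return true;
        }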
-
-/*
- DAC960_Message prints Driver Messages.
-*/
-
-static void DAC960_Message(DAC960_MessageLevel_T MessageLevel,
- unsigned char *Format,
- DAC960_Controller_T *Controller,
- ...)
-{
- static unsigned char Buffer[DAC960_LineBufferSize];
- static bool BeginningOfLine = true;
- va_list Arguments;
- int Length = 0;
- va_start(Arguments, Controller);
- Length = vsprintf(Buffer, Format, Arguments);
- va_end(Arguments);
- if (Controller == NULL)
- printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel],
- DAC960_ControllerCount, Buffer);
- else if (MessageLevel == DAC960_AnnounceLevel ||
- MessageLevel == DAC960_InfoLevel)
- {
- if (!Controller->ControllerInitialized)
- {
- if (DAC960_CheckStatusBuffer(Controller, Length))
- {
- strcpy(&Controller->CombinedStatusBuffer
- [Controller->InitialStatusLength],
- Buffer);
- Controller->InitialStatusLength += Length;
- Controller->CurrentStatusBuffer =
- &Controller->CombinedStatusBuffer
- [Controller->InitialStatusLength + 1];
- }
- if (MessageLevel == DAC960_AnnounceLevel)
- {
- static int AnnouncementLines = 0;
- if (++AnnouncementLines <= 2)
- printk("%sDAC960: %s", DAC960_MessageLevelMap[MessageLevel],
- Buffer);
- }
- else
- {
- if (BeginningOfLine)
- {
- if (Buffer[0] != '\n' || Length > 1)
- printk("%sDAC960#%d: %s",
- DAC960_MessageLevelMap[MessageLevel],
- Controller->ControllerNumber, Buffer);
- }
- else printk("%s", Buffer);
- }
- }
- else if (DAC960_CheckStatusBuffer(Controller, Length))
- {
- strcpy(&Controller->CurrentStatusBuffer[
- Controller->CurrentStatusLength], Buffer);
- Controller->CurrentStatusLength += Length;
- }
- }
- else if (MessageLevel == DAC960_ProgressLevel)
- {
- strcpy(Controller->ProgressBuffer, Buffer);
- Controller->ProgressBufferLength = Length;
- if (Controller->EphemeralProgressMessage)
- {
- if (time_after_eq(jiffies, Controller->LastProgressReportTime
- + DAC960_ProgressReportingInterval))
- {
- printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel],
- Controller->ControllerNumber, Buffer);
- Controller->LastProgressReportTime = jiffies;
- }
- }
- else printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel],
- Controller->ControllerNumber, Buffer);
- }
- else if (MessageLevel == DAC960_UserCriticalLevel)
- {
- strcpy(&Controller->UserStatusBuffer[Controller->UserStatusLength],
- Buffer);
- Controller->UserStatusLength += Length;
- if (Buffer[0] != '\n' || Length > 1)
- printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel],
- Controller->ControllerNumber, Buffer);
- }
- else
- {
- if (BeginningOfLine)
- printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel],
- Controller->ControllerNumber, Buffer);
- else printk("%s", Buffer);
- }
- BeginningOfLine = (Buffer[Length-1] == '\n');
-}
-
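[editor's note] DAC960_Message threads a single static BeginningOfLine flag through every printk so that multi-part messages, written without a trailing newline, continue the same console line without repeating the "DAC960#N:" prefix. The core of that bookkeeping, extracted as a sketch:

        static bool begin_of_line = true;

        static void emit(int ctlr, const char *s, int len)
        {
                if (begin_of_line)
                        printk("DAC960#%d: %s", ctlr, s); /* new line: prefix */
                else
                        printk("%s", s);                  /* continuation */
                begin_of_line = (len > 0 && s[len - 1] == '\n');
        }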
-
-/*
- DAC960_ParsePhysicalDevice parses spaces followed by a Physical Device
- Channel:TargetID specification from a User Command string. It updates
- Channel and TargetID and returns true on success and false on failure.
-*/
-
-static bool DAC960_ParsePhysicalDevice(DAC960_Controller_T *Controller,
- char *UserCommandString,
- unsigned char *Channel,
- unsigned char *TargetID)
-{
- char *NewUserCommandString = UserCommandString;
- unsigned long XChannel, XTargetID;
- while (*UserCommandString == ' ') UserCommandString++;
- if (UserCommandString == NewUserCommandString)
- return false;
- XChannel = simple_strtoul(UserCommandString, &NewUserCommandString, 10);
- if (NewUserCommandString == UserCommandString ||
- *NewUserCommandString != ':' ||
- XChannel >= Controller->Channels)
- return false;
- UserCommandString = ++NewUserCommandString;
- XTargetID = simple_strtoul(UserCommandString, &NewUserCommandString, 10);
- if (NewUserCommandString == UserCommandString ||
- *NewUserCommandString != '\0' ||
- XTargetID >= Controller->Targets)
- return false;
- *Channel = XChannel;
- *TargetID = XTargetID;
- return true;
-}
-
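[editor's note] DAC960_ParsePhysicalDevice accepts the tail of commands like "kill 1:4": it insists on at least one leading space, parses the channel, requires ':', parses the target, requires end-of-string, and range-checks both against the controller. A userspace rendition of the same parse using strtoul (simple_strtoul is the legacy in-kernel equivalent):

        #include <stdio.h>
        #include <stdlib.h>

        int main(void)
        {
                const char *s = " 1:4";        /* tail of "kill 1:4" */
                char *end;
                unsigned long channel, target;

                while (*s == ' ')
                        s++;
                channel = strtoul(s, &end, 10);
                if (end == s || *end != ':')
                        return 1;              /* malformed */
                s = end + 1;
                target = strtoul(s, &end, 10);
                if (end == s || *end != '\0')
                        return 1;
                printf("channel %lu, target %lu\n", channel, target);
                return 0;
        }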
-
-/*
- DAC960_ParseLogicalDrive parses spaces followed by a Logical Drive Number
- specification from a User Command string. It updates LogicalDriveNumber and
- returns true on success and false on failure.
-*/
-
-static bool DAC960_ParseLogicalDrive(DAC960_Controller_T *Controller,
- char *UserCommandString,
- unsigned char *LogicalDriveNumber)
-{
- char *NewUserCommandString = UserCommandString;
- unsigned long XLogicalDriveNumber;
- while (*UserCommandString == ' ') UserCommandString++;
- if (UserCommandString == NewUserCommandString)
- return false;
- XLogicalDriveNumber =
- simple_strtoul(UserCommandString, &NewUserCommandString, 10);
- if (NewUserCommandString == UserCommandString ||
- *NewUserCommandString != '\0' ||
- XLogicalDriveNumber > DAC960_MaxLogicalDrives - 1)
- return false;
- *LogicalDriveNumber = XLogicalDriveNumber;
- return true;
-}
-
-
-/*
- DAC960_V1_SetDeviceState sets the Device State for a Physical Device for
- DAC960 V1 Firmware Controllers.
-*/
-
-static void DAC960_V1_SetDeviceState(DAC960_Controller_T *Controller,
- DAC960_Command_T *Command,
- unsigned char Channel,
- unsigned char TargetID,
- DAC960_V1_PhysicalDeviceState_T
- DeviceState,
- const unsigned char *DeviceStateString)
-{
- DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
- CommandMailbox->Type3D.CommandOpcode = DAC960_V1_StartDevice;
- CommandMailbox->Type3D.Channel = Channel;
- CommandMailbox->Type3D.TargetID = TargetID;
- CommandMailbox->Type3D.DeviceState = DeviceState;
- CommandMailbox->Type3D.Modifier = 0;
- DAC960_ExecuteCommand(Command);
- switch (Command->V1.CommandStatus)
- {
- case DAC960_V1_NormalCompletion:
- DAC960_UserCritical("%s of Physical Device %d:%d Succeeded\n", Controller,
- DeviceStateString, Channel, TargetID);
- break;
- case DAC960_V1_UnableToStartDevice:
- DAC960_UserCritical("%s of Physical Device %d:%d Failed - "
- "Unable to Start Device\n", Controller,
- DeviceStateString, Channel, TargetID);
- break;
- case DAC960_V1_NoDeviceAtAddress:
- DAC960_UserCritical("%s of Physical Device %d:%d Failed - "
- "No Device at Address\n", Controller,
- DeviceStateString, Channel, TargetID);
- break;
- case DAC960_V1_InvalidChannelOrTargetOrModifier:
- DAC960_UserCritical("%s of Physical Device %d:%d Failed - "
- "Invalid Channel or Target or Modifier\n",
- Controller, DeviceStateString, Channel, TargetID);
- break;
- case DAC960_V1_ChannelBusy:
- DAC960_UserCritical("%s of Physical Device %d:%d Failed - "
- "Channel Busy\n", Controller,
- DeviceStateString, Channel, TargetID);
- break;
- default:
- DAC960_UserCritical("%s of Physical Device %d:%d Failed - "
- "Unexpected Status %04X\n", Controller,
- DeviceStateString, Channel, TargetID,
- Command->V1.CommandStatus);
- break;
- }
-}
-
-
-/*
- DAC960_V1_ExecuteUserCommand executes a User Command for DAC960 V1 Firmware
- Controllers.
-*/
-
-static bool DAC960_V1_ExecuteUserCommand(DAC960_Controller_T *Controller,
- unsigned char *UserCommand)
-{
- DAC960_Command_T *Command;
- DAC960_V1_CommandMailbox_T *CommandMailbox;
- unsigned long flags;
- unsigned char Channel, TargetID, LogicalDriveNumber;
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
- DAC960_WaitForCommand(Controller);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- Controller->UserStatusLength = 0;
- DAC960_V1_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- CommandMailbox = &Command->V1.CommandMailbox;
- if (strcmp(UserCommand, "flush-cache") == 0)
- {
- CommandMailbox->Type3.CommandOpcode = DAC960_V1_Flush;
- DAC960_ExecuteCommand(Command);
- DAC960_UserCritical("Cache Flush Completed\n", Controller);
- }
- else if (strncmp(UserCommand, "kill", 4) == 0 &&
- DAC960_ParsePhysicalDevice(Controller, &UserCommand[4],
- &Channel, &TargetID))
- {
- DAC960_V1_DeviceState_T *DeviceState =
- &Controller->V1.DeviceState[Channel][TargetID];
- if (DeviceState->Present &&
- DeviceState->DeviceType == DAC960_V1_DiskType &&
- DeviceState->DeviceState != DAC960_V1_Device_Dead)
- DAC960_V1_SetDeviceState(Controller, Command, Channel, TargetID,
- DAC960_V1_Device_Dead, "Kill");
- else DAC960_UserCritical("Kill of Physical Device %d:%d Illegal\n",
- Controller, Channel, TargetID);
- }
- else if (strncmp(UserCommand, "make-online", 11) == 0 &&
- DAC960_ParsePhysicalDevice(Controller, &UserCommand[11],
- &Channel, &TargetID))
- {
- DAC960_V1_DeviceState_T *DeviceState =
- &Controller->V1.DeviceState[Channel][TargetID];
- if (DeviceState->Present &&
- DeviceState->DeviceType == DAC960_V1_DiskType &&
- DeviceState->DeviceState == DAC960_V1_Device_Dead)
- DAC960_V1_SetDeviceState(Controller, Command, Channel, TargetID,
- DAC960_V1_Device_Online, "Make Online");
- else DAC960_UserCritical("Make Online of Physical Device %d:%d Illegal\n",
- Controller, Channel, TargetID);
-
- }
- else if (strncmp(UserCommand, "make-standby", 12) == 0 &&
- DAC960_ParsePhysicalDevice(Controller, &UserCommand[12],
- &Channel, &TargetID))
- {
- DAC960_V1_DeviceState_T *DeviceState =
- &Controller->V1.DeviceState[Channel][TargetID];
- if (DeviceState->Present &&
- DeviceState->DeviceType == DAC960_V1_DiskType &&
- DeviceState->DeviceState == DAC960_V1_Device_Dead)
- DAC960_V1_SetDeviceState(Controller, Command, Channel, TargetID,
- DAC960_V1_Device_Standby, "Make Standby");
- else DAC960_UserCritical("Make Standby of Physical "
- "Device %d:%d Illegal\n",
- Controller, Channel, TargetID);
- }
- else if (strncmp(UserCommand, "rebuild", 7) == 0 &&
- DAC960_ParsePhysicalDevice(Controller, &UserCommand[7],
- &Channel, &TargetID))
- {
- CommandMailbox->Type3D.CommandOpcode = DAC960_V1_RebuildAsync;
- CommandMailbox->Type3D.Channel = Channel;
- CommandMailbox->Type3D.TargetID = TargetID;
- DAC960_ExecuteCommand(Command);
- switch (Command->V1.CommandStatus)
- {
- case DAC960_V1_NormalCompletion:
- DAC960_UserCritical("Rebuild of Physical Device %d:%d Initiated\n",
- Controller, Channel, TargetID);
- break;
- case DAC960_V1_AttemptToRebuildOnlineDrive:
- DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - "
- "Attempt to Rebuild Online or "
- "Unresponsive Drive\n",
- Controller, Channel, TargetID);
- break;
- case DAC960_V1_NewDiskFailedDuringRebuild:
- DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - "
- "New Disk Failed During Rebuild\n",
- Controller, Channel, TargetID);
- break;
- case DAC960_V1_InvalidDeviceAddress:
- DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - "
- "Invalid Device Address\n",
- Controller, Channel, TargetID);
- break;
- case DAC960_V1_RebuildOrCheckAlreadyInProgress:
- DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - "
- "Rebuild or Consistency Check Already "
- "in Progress\n", Controller, Channel, TargetID);
- break;
- default:
- DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - "
- "Unexpected Status %04X\n", Controller,
- Channel, TargetID, Command->V1.CommandStatus);
- break;
- }
- }
- else if (strncmp(UserCommand, "check-consistency", 17) == 0 &&
- DAC960_ParseLogicalDrive(Controller, &UserCommand[17],
- &LogicalDriveNumber))
- {
- CommandMailbox->Type3C.CommandOpcode = DAC960_V1_CheckConsistencyAsync;
- CommandMailbox->Type3C.LogicalDriveNumber = LogicalDriveNumber;
- CommandMailbox->Type3C.AutoRestore = true;
- DAC960_ExecuteCommand(Command);
- switch (Command->V1.CommandStatus)
- {
- case DAC960_V1_NormalCompletion:
- DAC960_UserCritical("Consistency Check of Logical Drive %d "
- "(/dev/rd/c%dd%d) Initiated\n",
- Controller, LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber);
- break;
- case DAC960_V1_DependentDiskIsDead:
- DAC960_UserCritical("Consistency Check of Logical Drive %d "
- "(/dev/rd/c%dd%d) Failed - "
- "Dependent Physical Device is DEAD\n",
- Controller, LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber);
- break;
- case DAC960_V1_InvalidOrNonredundantLogicalDrive:
- DAC960_UserCritical("Consistency Check of Logical Drive %d "
- "(/dev/rd/c%dd%d) Failed - "
- "Invalid or Nonredundant Logical Drive\n",
- Controller, LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber);
- break;
- case DAC960_V1_RebuildOrCheckAlreadyInProgress:
- DAC960_UserCritical("Consistency Check of Logical Drive %d "
- "(/dev/rd/c%dd%d) Failed - Rebuild or "
- "Consistency Check Already in Progress\n",
- Controller, LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber);
- break;
- default:
- DAC960_UserCritical("Consistency Check of Logical Drive %d "
- "(/dev/rd/c%dd%d) Failed - "
- "Unexpected Status %04X\n",
- Controller, LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber, Command->V1.CommandStatus);
- break;
- }
- }
- else if (strcmp(UserCommand, "cancel-rebuild") == 0 ||
- strcmp(UserCommand, "cancel-consistency-check") == 0)
- {
- /*
- the OldRebuildRateConstant is never actually used
- once its value is retrieved from the controller.
- */
- unsigned char *OldRebuildRateConstant;
- dma_addr_t OldRebuildRateConstantDMA;
-
- OldRebuildRateConstant = pci_alloc_consistent( Controller->PCIDevice,
- sizeof(char), &OldRebuildRateConstantDMA);
- if (OldRebuildRateConstant == NULL) {
- DAC960_UserCritical("Cancellation of Rebuild or "
- "Consistency Check Failed - "
- "Out of Memory",
- Controller);
- goto failure;
- }
- CommandMailbox->Type3R.CommandOpcode = DAC960_V1_RebuildControl;
- CommandMailbox->Type3R.RebuildRateConstant = 0xFF;
- CommandMailbox->Type3R.BusAddress = OldRebuildRateConstantDMA;
- DAC960_ExecuteCommand(Command);
- switch (Command->V1.CommandStatus)
- {
- case DAC960_V1_NormalCompletion:
- DAC960_UserCritical("Rebuild or Consistency Check Cancelled\n",
- Controller);
- break;
- default:
- DAC960_UserCritical("Cancellation of Rebuild or "
- "Consistency Check Failed - "
- "Unexpected Status %04X\n",
- Controller, Command->V1.CommandStatus);
- break;
- }
-failure:
- if (OldRebuildRateConstant != NULL)
- pci_free_consistent(Controller->PCIDevice, sizeof(char),
- OldRebuildRateConstant, OldRebuildRateConstantDMA);
- }
- else DAC960_UserCritical("Illegal User Command: '%s'\n",
- Controller, UserCommand);
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_DeallocateCommand(Command);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- return true;
-}
-
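[editor's note] These user commands reach DAC960_V1_ExecuteUserCommand through the driver's proc interface; per README.DAC960, each controller exposes /proc/rd/cN/user_command. A small userspace example issuing one of the commands parsed above (the path is taken from that README):

        #include <fcntl.h>
        #include <string.h>
        #include <unistd.h>

        int main(void)
        {
                const char cmd[] = "check-consistency 0";
                int fd = open("/proc/rd/c0/user_command", O_WRONLY);

                if (fd < 0)
                        return 1;
                if (write(fd, cmd, strlen(cmd)) < 0) {
                        close(fd);
                        return 1;
                }
                close(fd);
                return 0;
        }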
-
-/*
- DAC960_V2_TranslatePhysicalDevice translates a Physical Device Channel and
- TargetID into a Logical Device. It returns true on success and false
- on failure.
-*/
-
-static bool DAC960_V2_TranslatePhysicalDevice(DAC960_Command_T *Command,
- unsigned char Channel,
- unsigned char TargetID,
- unsigned short
- *LogicalDeviceNumber)
-{
- DAC960_V2_CommandMailbox_T SavedCommandMailbox, *CommandMailbox;
- DAC960_Controller_T *Controller = Command->Controller;
-
- CommandMailbox = &Command->V2.CommandMailbox;
- memcpy(&SavedCommandMailbox, CommandMailbox,
- sizeof(DAC960_V2_CommandMailbox_T));
-
- CommandMailbox->PhysicalDeviceInfo.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->PhysicalDeviceInfo.CommandControlBits
- .DataTransferControllerToHost = true;
- CommandMailbox->PhysicalDeviceInfo.CommandControlBits
- .NoAutoRequestSense = true;
- CommandMailbox->PhysicalDeviceInfo.DataTransferSize =
- sizeof(DAC960_V2_PhysicalToLogicalDevice_T);
- CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.TargetID = TargetID;
- CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.Channel = Channel;
- CommandMailbox->PhysicalDeviceInfo.IOCTL_Opcode =
- DAC960_V2_TranslatePhysicalToLogicalDevice;
- CommandMailbox->Common.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- Controller->V2.PhysicalToLogicalDeviceDMA;
- CommandMailbox->Common.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->Common.DataTransferSize;
-
- DAC960_ExecuteCommand(Command);
- *LogicalDeviceNumber = Controller->V2.PhysicalToLogicalDevice->LogicalDeviceNumber;
-
- memcpy(CommandMailbox, &SavedCommandMailbox,
- sizeof(DAC960_V2_CommandMailbox_T));
- return (Command->V2.CommandStatus == DAC960_V2_NormalCompletion);
-}
-
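[editor's note] DAC960_V2_TranslatePhysicalDevice above borrows the caller's live command for a side query, so it snapshots the mailbox first and restores it afterwards; only CommandStatus and the DMA'd PhysicalToLogicalDevice structure survive as results. The idiom, reduced (the build helper is hypothetical):

        DAC960_V2_CommandMailbox_T saved = *CommandMailbox;  /* struct copy */

        build_translate_query(CommandMailbox, Channel, TargetID);
        DAC960_ExecuteCommand(Command);                      /* synchronous */
        *CommandMailbox = saved;                             /* restore */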
-
-/*
- DAC960_V2_ExecuteUserCommand executes a User Command for DAC960 V2 Firmware
- Controllers.
-*/
-
-static bool DAC960_V2_ExecuteUserCommand(DAC960_Controller_T *Controller,
- unsigned char *UserCommand)
-{
- DAC960_Command_T *Command;
- DAC960_V2_CommandMailbox_T *CommandMailbox;
- unsigned long flags;
- unsigned char Channel, TargetID, LogicalDriveNumber;
- unsigned short LogicalDeviceNumber;
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
- DAC960_WaitForCommand(Controller);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- Controller->UserStatusLength = 0;
- DAC960_V2_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- CommandMailbox = &Command->V2.CommandMailbox;
- CommandMailbox->Common.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->Common.CommandControlBits.DataTransferControllerToHost = true;
- CommandMailbox->Common.CommandControlBits.NoAutoRequestSense = true;
- if (strcmp(UserCommand, "flush-cache") == 0)
- {
- CommandMailbox->DeviceOperation.IOCTL_Opcode = DAC960_V2_PauseDevice;
- CommandMailbox->DeviceOperation.OperationDevice =
- DAC960_V2_RAID_Controller;
- DAC960_ExecuteCommand(Command);
- DAC960_UserCritical("Cache Flush Completed\n", Controller);
- }
- else if (strncmp(UserCommand, "kill", 4) == 0 &&
- DAC960_ParsePhysicalDevice(Controller, &UserCommand[4],
- &Channel, &TargetID) &&
- DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID,
- &LogicalDeviceNumber))
- {
- CommandMailbox->SetDeviceState.LogicalDevice.LogicalDeviceNumber =
- LogicalDeviceNumber;
- CommandMailbox->SetDeviceState.IOCTL_Opcode =
- DAC960_V2_SetDeviceState;
- CommandMailbox->SetDeviceState.DeviceState.PhysicalDeviceState =
- DAC960_V2_Device_Dead;
- DAC960_ExecuteCommand(Command);
- DAC960_UserCritical("Kill of Physical Device %d:%d %s\n",
- Controller, Channel, TargetID,
- (Command->V2.CommandStatus
- == DAC960_V2_NormalCompletion
- ? "Succeeded" : "Failed"));
- }
- else if (strncmp(UserCommand, "make-online", 11) == 0 &&
- DAC960_ParsePhysicalDevice(Controller, &UserCommand[11],
- &Channel, &TargetID) &&
- DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID,
- &LogicalDeviceNumber))
- {
- CommandMailbox->SetDeviceState.LogicalDevice.LogicalDeviceNumber =
- LogicalDeviceNumber;
- CommandMailbox->SetDeviceState.IOCTL_Opcode =
- DAC960_V2_SetDeviceState;
- CommandMailbox->SetDeviceState.DeviceState.PhysicalDeviceState =
- DAC960_V2_Device_Online;
- DAC960_ExecuteCommand(Command);
- DAC960_UserCritical("Make Online of Physical Device %d:%d %s\n",
- Controller, Channel, TargetID,
- (Command->V2.CommandStatus
- == DAC960_V2_NormalCompletion
- ? "Succeeded" : "Failed"));
- }
- else if (strncmp(UserCommand, "make-standby", 12) == 0 &&
- DAC960_ParsePhysicalDevice(Controller, &UserCommand[12],
- &Channel, &TargetID) &&
- DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID,
- &LogicalDeviceNumber))
- {
- CommandMailbox->SetDeviceState.LogicalDevice.LogicalDeviceNumber =
- LogicalDeviceNumber;
- CommandMailbox->SetDeviceState.IOCTL_Opcode =
- DAC960_V2_SetDeviceState;
- CommandMailbox->SetDeviceState.DeviceState.PhysicalDeviceState =
- DAC960_V2_Device_Standby;
- DAC960_ExecuteCommand(Command);
- DAC960_UserCritical("Make Standby of Physical Device %d:%d %s\n",
- Controller, Channel, TargetID,
- (Command->V2.CommandStatus
- == DAC960_V2_NormalCompletion
- ? "Succeeded" : "Failed"));
- }
- else if (strncmp(UserCommand, "rebuild", 7) == 0 &&
- DAC960_ParsePhysicalDevice(Controller, &UserCommand[7],
- &Channel, &TargetID) &&
- DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID,
- &LogicalDeviceNumber))
- {
- CommandMailbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber =
- LogicalDeviceNumber;
- CommandMailbox->LogicalDeviceInfo.IOCTL_Opcode =
- DAC960_V2_RebuildDeviceStart;
- DAC960_ExecuteCommand(Command);
- DAC960_UserCritical("Rebuild of Physical Device %d:%d %s\n",
- Controller, Channel, TargetID,
- (Command->V2.CommandStatus
- == DAC960_V2_NormalCompletion
- ? "Initiated" : "Not Initiated"));
- }
- else if (strncmp(UserCommand, "cancel-rebuild", 14) == 0 &&
- DAC960_ParsePhysicalDevice(Controller, &UserCommand[14],
- &Channel, &TargetID) &&
- DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID,
- &LogicalDeviceNumber))
- {
- CommandMailbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber =
- LogicalDeviceNumber;
- CommandMailbox->LogicalDeviceInfo.IOCTL_Opcode =
- DAC960_V2_RebuildDeviceStop;
- DAC960_ExecuteCommand(Command);
- DAC960_UserCritical("Rebuild of Physical Device %d:%d %s\n",
- Controller, Channel, TargetID,
- (Command->V2.CommandStatus
- == DAC960_V2_NormalCompletion
- ? "Cancelled" : "Not Cancelled"));
- }
- else if (strncmp(UserCommand, "check-consistency", 17) == 0 &&
- DAC960_ParseLogicalDrive(Controller, &UserCommand[17],
- &LogicalDriveNumber))
- {
- CommandMailbox->ConsistencyCheck.LogicalDevice.LogicalDeviceNumber =
- LogicalDriveNumber;
- CommandMailbox->ConsistencyCheck.IOCTL_Opcode =
- DAC960_V2_ConsistencyCheckStart;
- CommandMailbox->ConsistencyCheck.RestoreConsistency = true;
- CommandMailbox->ConsistencyCheck.InitializedAreaOnly = false;
- DAC960_ExecuteCommand(Command);
- DAC960_UserCritical("Consistency Check of Logical Drive %d "
- "(/dev/rd/c%dd%d) %s\n",
- Controller, LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber,
- (Command->V2.CommandStatus
- == DAC960_V2_NormalCompletion
- ? "Initiated" : "Not Initiated"));
- }
- else if (strncmp(UserCommand, "cancel-consistency-check", 24) == 0 &&
- DAC960_ParseLogicalDrive(Controller, &UserCommand[24],
- &LogicalDriveNumber))
- {
- CommandMailbox->ConsistencyCheck.LogicalDevice.LogicalDeviceNumber =
- LogicalDriveNumber;
- CommandMailbox->ConsistencyCheck.IOCTL_Opcode =
- DAC960_V2_ConsistencyCheckStop;
- DAC960_ExecuteCommand(Command);
- DAC960_UserCritical("Consistency Check of Logical Drive %d "
- "(/dev/rd/c%dd%d) %s\n",
- Controller, LogicalDriveNumber,
- Controller->ControllerNumber,
- LogicalDriveNumber,
- (Command->V2.CommandStatus
- == DAC960_V2_NormalCompletion
- ? "Cancelled" : "Not Cancelled"));
- }
- else if (strcmp(UserCommand, "perform-discovery") == 0)
- {
- CommandMailbox->Common.IOCTL_Opcode = DAC960_V2_StartDiscovery;
- DAC960_ExecuteCommand(Command);
- DAC960_UserCritical("Discovery %s\n", Controller,
- (Command->V2.CommandStatus
- == DAC960_V2_NormalCompletion
- ? "Initiated" : "Not Initiated"));
- if (Command->V2.CommandStatus == DAC960_V2_NormalCompletion)
- {
- CommandMailbox->ControllerInfo.CommandOpcode = DAC960_V2_IOCTL;
- CommandMailbox->ControllerInfo.CommandControlBits
- .DataTransferControllerToHost = true;
- CommandMailbox->ControllerInfo.CommandControlBits
- .NoAutoRequestSense = true;
- CommandMailbox->ControllerInfo.DataTransferSize =
- sizeof(DAC960_V2_ControllerInfo_T);
- CommandMailbox->ControllerInfo.ControllerNumber = 0;
- CommandMailbox->ControllerInfo.IOCTL_Opcode =
- DAC960_V2_GetControllerInfo;
- /*
- * How does this NOT race with the queued Monitoring
- * usage of this structure?
- */
- CommandMailbox->ControllerInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer =
- Controller->V2.NewControllerInformationDMA;
- CommandMailbox->ControllerInfo.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->ControllerInfo.DataTransferSize;
- while (1) {
- DAC960_ExecuteCommand(Command);
- if (!Controller->V2.NewControllerInformation->PhysicalScanActive)
- break;
- msleep(1000);
- }
- DAC960_UserCritical("Discovery Completed\n", Controller);
- }
- }
- else if (strcmp(UserCommand, "suppress-enclosure-messages") == 0)
- Controller->SuppressEnclosureMessages = true;
- else DAC960_UserCritical("Illegal User Command: '%s'\n",
- Controller, UserCommand);
-
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_DeallocateCommand(Command);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- return true;
-}
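Taken together, the dispatch chain above is the complete V2 user-command
vocabulary (each string is written to the controller's user_command proc
entry; device and drive arguments are parsed by DAC960_ParsePhysicalDevice()
and DAC960_ParseLogicalDrive() respectively):

	flush-cache                       flush the controller's cache
	kill <device>                     mark a physical device dead
	make-online <device>              bring a physical device online
	make-standby <device>             place a physical device in standby
	rebuild <device>                  start rebuilding a physical device
	cancel-rebuild <device>           stop a rebuild in progress
	check-consistency <drive>         start a consistency check
	cancel-consistency-check <drive>  stop a consistency check
	perform-discovery                 scan for new devices (polls until
	                                  PhysicalScanActive clears)
	suppress-enclosure-messages       silence enclosure event reporting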
-
-static int __maybe_unused dac960_proc_show(struct seq_file *m, void *v)
-{
- unsigned char *StatusMessage = "OK\n";
- int ControllerNumber;
- for (ControllerNumber = 0;
- ControllerNumber < DAC960_ControllerCount;
- ControllerNumber++)
- {
- DAC960_Controller_T *Controller = DAC960_Controllers[ControllerNumber];
- if (Controller == NULL) continue;
- if (Controller->MonitoringAlertMode)
- {
- StatusMessage = "ALERT\n";
- break;
- }
- }
- seq_puts(m, StatusMessage);
- return 0;
-}
-
-static int __maybe_unused dac960_initial_status_proc_show(struct seq_file *m,
- void *v)
-{
- DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private;
- seq_printf(m, "%.*s", Controller->InitialStatusLength, Controller->CombinedStatusBuffer);
- return 0;
-}
-
-static int __maybe_unused dac960_current_status_proc_show(struct seq_file *m,
- void *v)
-{
- DAC960_Controller_T *Controller = (DAC960_Controller_T *) m->private;
- unsigned char *StatusMessage =
- "No Rebuild or Consistency Check in Progress\n";
- int ProgressMessageLength = strlen(StatusMessage);
- if (jiffies != Controller->LastCurrentStatusTime)
- {
- Controller->CurrentStatusLength = 0;
- DAC960_AnnounceDriver(Controller);
- DAC960_ReportControllerConfiguration(Controller);
- DAC960_ReportDeviceConfiguration(Controller);
- if (Controller->ProgressBufferLength > 0)
- ProgressMessageLength = Controller->ProgressBufferLength;
- if (DAC960_CheckStatusBuffer(Controller, 2 + ProgressMessageLength))
- {
- unsigned char *CurrentStatusBuffer = Controller->CurrentStatusBuffer;
- CurrentStatusBuffer[Controller->CurrentStatusLength++] = ' ';
- CurrentStatusBuffer[Controller->CurrentStatusLength++] = ' ';
- if (Controller->ProgressBufferLength > 0)
- strcpy(&CurrentStatusBuffer[Controller->CurrentStatusLength],
- Controller->ProgressBuffer);
- else
- strcpy(&CurrentStatusBuffer[Controller->CurrentStatusLength],
- StatusMessage);
- Controller->CurrentStatusLength += ProgressMessageLength;
- }
- Controller->LastCurrentStatusTime = jiffies;
- }
- seq_printf(m, "%.*s", Controller->CurrentStatusLength, Controller->CurrentStatusBuffer);
- return 0;
-}
-
-static int dac960_user_command_proc_show(struct seq_file *m, void *v)
-{
- DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private;
-
- seq_printf(m, "%.*s", Controller->UserStatusLength, Controller->UserStatusBuffer);
- return 0;
-}
-
-static int dac960_user_command_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, dac960_user_command_proc_show, PDE_DATA(inode));
-}
-
-static ssize_t dac960_user_command_proc_write(struct file *file,
- const char __user *Buffer,
- size_t Count, loff_t *pos)
-{
- DAC960_Controller_T *Controller = PDE_DATA(file_inode(file));
- unsigned char CommandBuffer[80];
- int Length;
- if (Count > sizeof(CommandBuffer)-1) return -EINVAL;
- if (copy_from_user(CommandBuffer, Buffer, Count)) return -EFAULT;
- CommandBuffer[Count] = '\0';
- Length = strlen(CommandBuffer);
- if (Length > 0 && CommandBuffer[Length-1] == '\n')
- CommandBuffer[--Length] = '\0';
- if (Controller->FirmwareType == DAC960_V1_Controller)
- return (DAC960_V1_ExecuteUserCommand(Controller, CommandBuffer)
- ? Count : -EBUSY);
- else
- return (DAC960_V2_ExecuteUserCommand(Controller, CommandBuffer)
- ? Count : -EBUSY);
-}
-
-static const struct file_operations dac960_user_command_proc_fops = {
- .owner = THIS_MODULE,
- .open = dac960_user_command_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = dac960_user_command_proc_write,
-};
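The write handler above implements a small text protocol: at most 79
characters are accepted, a single trailing newline is stripped, and the
resulting string is handed to the V1 or V2 command executor. A minimal
userspace sketch (illustration only; the /proc path follows from
DAC960_CreateProcEntries() below, and error handling is abbreviated):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		const char *node = "/proc/rd/c0/user_command";
		char status[256];
		ssize_t n;
		int fd = open(node, O_RDWR);

		if (fd < 0) {
			perror(node);
			return 1;
		}
		/* At most 79 characters; a trailing newline is stripped. */
		if (write(fd, "flush-cache\n", strlen("flush-cache\n")) < 0)
			perror("write");
		/* Rewind, then read back the driver's status text. */
		lseek(fd, 0, SEEK_SET);
		n = read(fd, status, sizeof(status) - 1);
		if (n > 0) {
			status[n] = '\0';
			fputs(status, stdout);
		}
		close(fd);
		return 0;
	}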
-
-/*
- DAC960_CreateProcEntries creates the /proc/rd/... entries for the
- DAC960 Driver.
-*/
-
-static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller)
-{
- struct proc_dir_entry *ControllerProcEntry;
-
- if (DAC960_ProcDirectoryEntry == NULL) {
- DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL);
- proc_create_single("status", 0, DAC960_ProcDirectoryEntry,
- dac960_proc_show);
- }
-
- snprintf(Controller->ControllerName, sizeof(Controller->ControllerName),
- "c%d", Controller->ControllerNumber);
- ControllerProcEntry = proc_mkdir(Controller->ControllerName,
- DAC960_ProcDirectoryEntry);
- proc_create_single_data("initial_status", 0, ControllerProcEntry,
- dac960_initial_status_proc_show, Controller);
- proc_create_single_data("current_status", 0, ControllerProcEntry,
- dac960_current_status_proc_show, Controller);
- proc_create_data("user_command", 0600, ControllerProcEntry, &dac960_user_command_proc_fops, Controller);
- Controller->ControllerProcEntry = ControllerProcEntry;
-}
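For reference, the calls above produce the following proc tree (names taken
verbatim from the code; <N> is the controller number):

	/proc/rd/status                   driver-wide "OK"/"ALERT" summary
	/proc/rd/c<N>/initial_status      status messages captured at startup
	/proc/rd/c<N>/current_status      rebuild/consistency-check progress
	/proc/rd/c<N>/user_command        command input and result (mode 0600)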
-
-
-/*
- DAC960_DestroyProcEntries destroys the /proc/rd/... entries for the
- DAC960 Driver.
-*/
-
-static void DAC960_DestroyProcEntries(DAC960_Controller_T *Controller)
-{
- if (Controller->ControllerProcEntry == NULL)
- return;
- remove_proc_entry("initial_status", Controller->ControllerProcEntry);
- remove_proc_entry("current_status", Controller->ControllerProcEntry);
- remove_proc_entry("user_command", Controller->ControllerProcEntry);
- remove_proc_entry(Controller->ControllerName, DAC960_ProcDirectoryEntry);
- Controller->ControllerProcEntry = NULL;
-}
-
-#ifdef DAC960_GAM_MINOR
-
-static long DAC960_gam_get_controller_info(DAC960_ControllerInfo_T __user *UserSpaceControllerInfo)
-{
- DAC960_ControllerInfo_T ControllerInfo;
- DAC960_Controller_T *Controller;
- int ControllerNumber;
- long ErrorCode;
-
- if (UserSpaceControllerInfo == NULL)
- ErrorCode = -EINVAL;
- else ErrorCode = get_user(ControllerNumber,
- &UserSpaceControllerInfo->ControllerNumber);
- if (ErrorCode != 0)
- goto out;
- ErrorCode = -ENXIO;
- if (ControllerNumber < 0 ||
- ControllerNumber > DAC960_ControllerCount - 1) {
- goto out;
- }
- Controller = DAC960_Controllers[ControllerNumber];
- if (Controller == NULL)
- goto out;
- memset(&ControllerInfo, 0, sizeof(DAC960_ControllerInfo_T));
- ControllerInfo.ControllerNumber = ControllerNumber;
- ControllerInfo.FirmwareType = Controller->FirmwareType;
- ControllerInfo.Channels = Controller->Channels;
- ControllerInfo.Targets = Controller->Targets;
- ControllerInfo.PCI_Bus = Controller->Bus;
- ControllerInfo.PCI_Device = Controller->Device;
- ControllerInfo.PCI_Function = Controller->Function;
- ControllerInfo.IRQ_Channel = Controller->IRQ_Channel;
- ControllerInfo.PCI_Address = Controller->PCI_Address;
- strcpy(ControllerInfo.ModelName, Controller->ModelName);
- strcpy(ControllerInfo.FirmwareVersion, Controller->FirmwareVersion);
- ErrorCode = (copy_to_user(UserSpaceControllerInfo, &ControllerInfo,
- sizeof(DAC960_ControllerInfo_T)) ? -EFAULT : 0);
-out:
- return ErrorCode;
-}
-
-static long DAC960_gam_v1_execute_command(DAC960_V1_UserCommand_T __user *UserSpaceUserCommand)
-{
- DAC960_V1_UserCommand_T UserCommand;
- DAC960_Controller_T *Controller;
- DAC960_Command_T *Command = NULL;
- DAC960_V1_CommandOpcode_T CommandOpcode;
- DAC960_V1_CommandStatus_T CommandStatus;
- DAC960_V1_DCDB_T DCDB;
- DAC960_V1_DCDB_T *DCDB_IOBUF = NULL;
- dma_addr_t DCDB_IOBUFDMA;
- unsigned long flags;
- int ControllerNumber, DataTransferLength;
- unsigned char *DataTransferBuffer = NULL;
- dma_addr_t DataTransferBufferDMA;
- long ErrorCode;
-
- if (UserSpaceUserCommand == NULL) {
- ErrorCode = -EINVAL;
- goto out;
- }
- if (copy_from_user(&UserCommand, UserSpaceUserCommand,
- sizeof(DAC960_V1_UserCommand_T))) {
- ErrorCode = -EFAULT;
- goto out;
- }
- ControllerNumber = UserCommand.ControllerNumber;
- ErrorCode = -ENXIO;
- if (ControllerNumber < 0 ||
- ControllerNumber > DAC960_ControllerCount - 1)
- goto out;
- Controller = DAC960_Controllers[ControllerNumber];
- if (Controller == NULL)
- goto out;
- ErrorCode = -EINVAL;
- if (Controller->FirmwareType != DAC960_V1_Controller)
- goto out;
- CommandOpcode = UserCommand.CommandMailbox.Common.CommandOpcode;
- DataTransferLength = UserCommand.DataTransferLength;
- if (CommandOpcode & 0x80)
- goto out;
- if (CommandOpcode == DAC960_V1_DCDB)
- {
- if (copy_from_user(&DCDB, UserCommand.DCDB,
- sizeof(DAC960_V1_DCDB_T))) {
- ErrorCode = -EFAULT;
- goto out;
- }
- if (DCDB.Channel >= DAC960_V1_MaxChannels)
- goto out;
- if (!((DataTransferLength == 0 &&
- DCDB.Direction
- == DAC960_V1_DCDB_NoDataTransfer) ||
- (DataTransferLength > 0 &&
- DCDB.Direction
- == DAC960_V1_DCDB_DataTransferDeviceToSystem) ||
- (DataTransferLength < 0 &&
- DCDB.Direction
- == DAC960_V1_DCDB_DataTransferSystemToDevice)))
- goto out;
- if (((DCDB.TransferLengthHigh4 << 16) | DCDB.TransferLength)
- != abs(DataTransferLength))
- goto out;
- DCDB_IOBUF = pci_alloc_consistent(Controller->PCIDevice,
- sizeof(DAC960_V1_DCDB_T), &DCDB_IOBUFDMA);
- if (DCDB_IOBUF == NULL) {
- ErrorCode = -ENOMEM;
- goto out;
- }
- }
- ErrorCode = -ENOMEM;
- if (DataTransferLength > 0)
- {
- DataTransferBuffer = pci_zalloc_consistent(Controller->PCIDevice,
- DataTransferLength,
- &DataTransferBufferDMA);
- if (DataTransferBuffer == NULL)
- goto out;
- }
- else if (DataTransferLength < 0)
- {
- DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
- -DataTransferLength, &DataTransferBufferDMA);
- if (DataTransferBuffer == NULL)
- goto out;
- if (copy_from_user(DataTransferBuffer,
- UserCommand.DataTransferBuffer,
- -DataTransferLength)) {
- ErrorCode = -EFAULT;
- goto out;
- }
- }
- if (CommandOpcode == DAC960_V1_DCDB)
- {
- spin_lock_irqsave(&Controller->queue_lock, flags);
- while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
- DAC960_WaitForCommand(Controller);
- while (Controller->V1.DirectCommandActive[DCDB.Channel]
- [DCDB.TargetID])
- {
- spin_unlock_irq(&Controller->queue_lock);
- __wait_event(Controller->CommandWaitQueue,
- !Controller->V1.DirectCommandActive
- [DCDB.Channel][DCDB.TargetID]);
- spin_lock_irq(&Controller->queue_lock);
- }
- Controller->V1.DirectCommandActive[DCDB.Channel]
- [DCDB.TargetID] = true;
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- DAC960_V1_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- memcpy(&Command->V1.CommandMailbox, &UserCommand.CommandMailbox,
- sizeof(DAC960_V1_CommandMailbox_T));
- Command->V1.CommandMailbox.Type3.BusAddress = DCDB_IOBUFDMA;
- DCDB.BusAddress = DataTransferBufferDMA;
- memcpy(DCDB_IOBUF, &DCDB, sizeof(DAC960_V1_DCDB_T));
- }
- else
- {
- spin_lock_irqsave(&Controller->queue_lock, flags);
- while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
- DAC960_WaitForCommand(Controller);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- DAC960_V1_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- memcpy(&Command->V1.CommandMailbox, &UserCommand.CommandMailbox,
- sizeof(DAC960_V1_CommandMailbox_T));
- if (DataTransferBuffer != NULL)
- Command->V1.CommandMailbox.Type3.BusAddress =
- DataTransferBufferDMA;
- }
- DAC960_ExecuteCommand(Command);
- CommandStatus = Command->V1.CommandStatus;
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_DeallocateCommand(Command);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- if (DataTransferLength > 0)
- {
- if (copy_to_user(UserCommand.DataTransferBuffer,
- DataTransferBuffer, DataTransferLength)) {
- ErrorCode = -EFAULT;
- goto Failure1;
- }
- }
- if (CommandOpcode == DAC960_V1_DCDB)
- {
- /*
- I don't believe Target or Channel in the DCDB_IOBUF
- should be any different from the contents of DCDB.
- */
- Controller->V1.DirectCommandActive[DCDB.Channel]
- [DCDB.TargetID] = false;
- if (copy_to_user(UserCommand.DCDB, DCDB_IOBUF,
- sizeof(DAC960_V1_DCDB_T))) {
- ErrorCode = -EFAULT;
- goto Failure1;
- }
- }
- ErrorCode = CommandStatus;
- Failure1:
- if (DataTransferBuffer != NULL)
- pci_free_consistent(Controller->PCIDevice, abs(DataTransferLength),
- DataTransferBuffer, DataTransferBufferDMA);
- if (DCDB_IOBUF != NULL)
- pci_free_consistent(Controller->PCIDevice, sizeof(DAC960_V1_DCDB_T),
- DCDB_IOBUF, DCDB_IOBUFDMA);
- out:
- return ErrorCode;
-}
-
-static long DAC960_gam_v2_execute_command(DAC960_V2_UserCommand_T __user *UserSpaceUserCommand)
-{
- DAC960_V2_UserCommand_T UserCommand;
- DAC960_Controller_T *Controller;
- DAC960_Command_T *Command = NULL;
- DAC960_V2_CommandMailbox_T *CommandMailbox;
- DAC960_V2_CommandStatus_T CommandStatus;
- unsigned long flags;
- int ControllerNumber, DataTransferLength;
- int DataTransferResidue, RequestSenseLength;
- unsigned char *DataTransferBuffer = NULL;
- dma_addr_t DataTransferBufferDMA;
- unsigned char *RequestSenseBuffer = NULL;
- dma_addr_t RequestSenseBufferDMA;
- long ErrorCode = -EINVAL;
-
- if (UserSpaceUserCommand == NULL)
- goto out;
- if (copy_from_user(&UserCommand, UserSpaceUserCommand,
- sizeof(DAC960_V2_UserCommand_T))) {
- ErrorCode = -EFAULT;
- goto out;
- }
- ErrorCode = -ENXIO;
- ControllerNumber = UserCommand.ControllerNumber;
- if (ControllerNumber < 0 ||
- ControllerNumber > DAC960_ControllerCount - 1)
- goto out;
- Controller = DAC960_Controllers[ControllerNumber];
- if (Controller == NULL)
- goto out;
- if (Controller->FirmwareType != DAC960_V2_Controller){
- ErrorCode = -EINVAL;
- goto out;
- }
- DataTransferLength = UserCommand.DataTransferLength;
- ErrorCode = -ENOMEM;
- if (DataTransferLength > 0)
- {
- DataTransferBuffer = pci_zalloc_consistent(Controller->PCIDevice,
- DataTransferLength,
- &DataTransferBufferDMA);
- if (DataTransferBuffer == NULL)
- goto out;
- }
- else if (DataTransferLength < 0)
- {
- DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
- -DataTransferLength, &DataTransferBufferDMA);
- if (DataTransferBuffer == NULL)
- goto out;
- if (copy_from_user(DataTransferBuffer,
- UserCommand.DataTransferBuffer,
- -DataTransferLength)) {
- ErrorCode = -EFAULT;
- goto Failure2;
- }
- }
- RequestSenseLength = UserCommand.RequestSenseLength;
- if (RequestSenseLength > 0)
- {
- RequestSenseBuffer = pci_zalloc_consistent(Controller->PCIDevice,
- RequestSenseLength,
- &RequestSenseBufferDMA);
- if (RequestSenseBuffer == NULL)
- {
- ErrorCode = -ENOMEM;
- goto Failure2;
- }
- }
- spin_lock_irqsave(&Controller->queue_lock, flags);
- while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
- DAC960_WaitForCommand(Controller);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- DAC960_V2_ClearCommand(Command);
- Command->CommandType = DAC960_ImmediateCommand;
- CommandMailbox = &Command->V2.CommandMailbox;
- memcpy(CommandMailbox, &UserCommand.CommandMailbox,
- sizeof(DAC960_V2_CommandMailbox_T));
- CommandMailbox->Common.CommandControlBits
- .AdditionalScatterGatherListMemory = false;
- CommandMailbox->Common.CommandControlBits
- .NoAutoRequestSense = true;
- CommandMailbox->Common.DataTransferSize = 0;
- CommandMailbox->Common.DataTransferPageNumber = 0;
- memset(&CommandMailbox->Common.DataTransferMemoryAddress, 0,
- sizeof(DAC960_V2_DataTransferMemoryAddress_T));
- if (DataTransferLength != 0)
- {
- if (DataTransferLength > 0)
- {
- CommandMailbox->Common.CommandControlBits
- .DataTransferControllerToHost = true;
- CommandMailbox->Common.DataTransferSize = DataTransferLength;
- }
- else
- {
- CommandMailbox->Common.CommandControlBits
- .DataTransferControllerToHost = false;
- CommandMailbox->Common.DataTransferSize = -DataTransferLength;
- }
- CommandMailbox->Common.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentDataPointer = DataTransferBufferDMA;
- CommandMailbox->Common.DataTransferMemoryAddress
- .ScatterGatherSegments[0]
- .SegmentByteCount =
- CommandMailbox->Common.DataTransferSize;
- }
- if (RequestSenseLength > 0)
- {
- CommandMailbox->Common.CommandControlBits
- .NoAutoRequestSense = false;
- CommandMailbox->Common.RequestSenseSize = RequestSenseLength;
- CommandMailbox->Common.RequestSenseBusAddress =
- RequestSenseBufferDMA;
- }
- DAC960_ExecuteCommand(Command);
- CommandStatus = Command->V2.CommandStatus;
- RequestSenseLength = Command->V2.RequestSenseLength;
- DataTransferResidue = Command->V2.DataTransferResidue;
- spin_lock_irqsave(&Controller->queue_lock, flags);
- DAC960_DeallocateCommand(Command);
- spin_unlock_irqrestore(&Controller->queue_lock, flags);
- if (RequestSenseLength > UserCommand.RequestSenseLength)
- RequestSenseLength = UserCommand.RequestSenseLength;
- if (copy_to_user(&UserSpaceUserCommand->DataTransferLength,
- &DataTransferResidue,
- sizeof(DataTransferResidue))) {
- ErrorCode = -EFAULT;
- goto Failure2;
- }
- if (copy_to_user(&UserSpaceUserCommand->RequestSenseLength,
- &RequestSenseLength, sizeof(RequestSenseLength))) {
- ErrorCode = -EFAULT;
- goto Failure2;
- }
- if (DataTransferLength > 0)
- {
- if (copy_to_user(UserCommand.DataTransferBuffer,
- DataTransferBuffer, DataTransferLength)) {
- ErrorCode = -EFAULT;
- goto Failure2;
- }
- }
- if (RequestSenseLength > 0)
- {
- if (copy_to_user(UserCommand.RequestSenseBuffer,
- RequestSenseBuffer, RequestSenseLength)) {
- ErrorCode = -EFAULT;
- goto Failure2;
- }
- }
- ErrorCode = CommandStatus;
- Failure2:
- pci_free_consistent(Controller->PCIDevice, abs(DataTransferLength),
- DataTransferBuffer, DataTransferBufferDMA);
- if (RequestSenseBuffer != NULL)
- pci_free_consistent(Controller->PCIDevice, RequestSenseLength,
- RequestSenseBuffer, RequestSenseBufferDMA);
-out:
- return ErrorCode;
-}
-
-static long DAC960_gam_v2_get_health_status(DAC960_V2_GetHealthStatus_T __user *UserSpaceGetHealthStatus)
-{
- DAC960_V2_GetHealthStatus_T GetHealthStatus;
- DAC960_V2_HealthStatusBuffer_T HealthStatusBuffer;
- DAC960_Controller_T *Controller;
- int ControllerNumber;
- long ErrorCode;
-
- if (UserSpaceGetHealthStatus == NULL) {
- ErrorCode = -EINVAL;
- goto out;
- }
- if (copy_from_user(&GetHealthStatus, UserSpaceGetHealthStatus,
- sizeof(DAC960_V2_GetHealthStatus_T))) {
- ErrorCode = -EFAULT;
- goto out;
- }
- ErrorCode = -ENXIO;
- ControllerNumber = GetHealthStatus.ControllerNumber;
- if (ControllerNumber < 0 ||
- ControllerNumber > DAC960_ControllerCount - 1)
- goto out;
- Controller = DAC960_Controllers[ControllerNumber];
- if (Controller == NULL)
- goto out;
- if (Controller->FirmwareType != DAC960_V2_Controller) {
- ErrorCode = -EINVAL;
- goto out;
- }
- if (copy_from_user(&HealthStatusBuffer,
- GetHealthStatus.HealthStatusBuffer,
- sizeof(DAC960_V2_HealthStatusBuffer_T))) {
- ErrorCode = -EFAULT;
- goto out;
- }
- ErrorCode = wait_event_interruptible_timeout(Controller->HealthStatusWaitQueue,
- !(Controller->V2.HealthStatusBuffer->StatusChangeCounter
- == HealthStatusBuffer.StatusChangeCounter &&
- Controller->V2.HealthStatusBuffer->NextEventSequenceNumber
- == HealthStatusBuffer.NextEventSequenceNumber),
- DAC960_MonitoringTimerInterval);
- if (ErrorCode == -ERESTARTSYS) {
- ErrorCode = -EINTR;
- goto out;
- }
- if (copy_to_user(GetHealthStatus.HealthStatusBuffer,
- Controller->V2.HealthStatusBuffer,
- sizeof(DAC960_V2_HealthStatusBuffer_T)))
- ErrorCode = -EFAULT;
- else
- ErrorCode = 0;
-
-out:
- return ErrorCode;
-}
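The wait above implements a simple change-notification protocol: userspace
passes in the StatusChangeCounter and NextEventSequenceNumber it last
observed, and the ioctl sleeps until either counter moves or one monitoring
interval elapses (returning -EINTR if interrupted), then copies back the
current buffer. A hypothetical userspace sketch (type and ioctl names are
taken from the driver's header; the open() of the GAM device is assumed to
happen elsewhere):

	#include <sys/ioctl.h>
	#include "DAC960.h"

	static int wait_for_health_change(int gam_fd, int controller,
					  DAC960_V2_HealthStatusBuffer_T *seen)
	{
		DAC960_V2_GetHealthStatus_T ghs;

		ghs.ControllerNumber = controller;
		/* *seen holds the counters from the previous call. */
		ghs.HealthStatusBuffer = seen;
		return ioctl(gam_fd, DAC960_IOCTL_V2_GET_HEALTH_STATUS, &ghs);
	}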
-
-/*
- * DAC960_gam_ioctl is the ioctl function for performing RAID operations.
-*/
-
-static long DAC960_gam_ioctl(struct file *file, unsigned int Request,
- unsigned long Argument)
-{
- long ErrorCode = 0;
- void __user *argp = (void __user *)Argument;
- if (!capable(CAP_SYS_ADMIN)) return -EACCES;
-
- mutex_lock(&DAC960_mutex);
- switch (Request)
- {
- case DAC960_IOCTL_GET_CONTROLLER_COUNT:
- ErrorCode = DAC960_ControllerCount;
- break;
- case DAC960_IOCTL_GET_CONTROLLER_INFO:
- ErrorCode = DAC960_gam_get_controller_info(argp);
- break;
- case DAC960_IOCTL_V1_EXECUTE_COMMAND:
- ErrorCode = DAC960_gam_v1_execute_command(argp);
- break;
- case DAC960_IOCTL_V2_EXECUTE_COMMAND:
- ErrorCode = DAC960_gam_v2_execute_command(argp);
- break;
- case DAC960_IOCTL_V2_GET_HEALTH_STATUS:
- ErrorCode = DAC960_gam_v2_get_health_status(argp);
- break;
- default:
- ErrorCode = -ENOTTY;
- }
- mutex_unlock(&DAC960_mutex);
- return ErrorCode;
-}
-
-static const struct file_operations DAC960_gam_fops = {
- .owner = THIS_MODULE,
- .unlocked_ioctl = DAC960_gam_ioctl,
- .llseek = noop_llseek,
-};
-
-static struct miscdevice DAC960_gam_dev = {
- DAC960_GAM_MINOR,
- "dac960_gam",
- &DAC960_gam_fops
-};
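A hypothetical userspace sketch of the GAM interface (the /dev node name is
assumed to follow the miscdevice name above; the ioctl request codes come
from the driver's header, and CAP_SYS_ADMIN is required by the dispatch
function):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include "DAC960.h"

	int main(void)
	{
		int count;
		int fd = open("/dev/dac960_gam", O_RDONLY);

		if (fd < 0) {
			perror("/dev/dac960_gam");
			return 1;
		}
		/* The controller count is returned as the ioctl value itself. */
		count = ioctl(fd, DAC960_IOCTL_GET_CONTROLLER_COUNT, 0);
		printf("%d DAC960 controller(s)\n", count);
		close(fd);
		return 0;
	}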
-
-static int DAC960_gam_init(void)
-{
- int ret;
-
- ret = misc_register(&DAC960_gam_dev);
- if (ret)
- printk(KERN_ERR "DAC960_gam: can't misc_register on minor %d\n", DAC960_GAM_MINOR);
- return ret;
-}
-
-static void DAC960_gam_cleanup(void)
-{
- misc_deregister(&DAC960_gam_dev);
-}
-
-#endif /* DAC960_GAM_MINOR */
-
-static struct DAC960_privdata DAC960_GEM_privdata = {
- .HardwareType = DAC960_GEM_Controller,
- .FirmwareType = DAC960_V2_Controller,
- .InterruptHandler = DAC960_GEM_InterruptHandler,
- .MemoryWindowSize = DAC960_GEM_RegisterWindowSize,
-};
-
-
-static struct DAC960_privdata DAC960_BA_privdata = {
- .HardwareType = DAC960_BA_Controller,
- .FirmwareType = DAC960_V2_Controller,
- .InterruptHandler = DAC960_BA_InterruptHandler,
- .MemoryWindowSize = DAC960_BA_RegisterWindowSize,
-};
-
-static struct DAC960_privdata DAC960_LP_privdata = {
- .HardwareType = DAC960_LP_Controller,
- .FirmwareType = DAC960_V2_Controller,
- .InterruptHandler = DAC960_LP_InterruptHandler,
- .MemoryWindowSize = DAC960_LP_RegisterWindowSize,
-};
-
-static struct DAC960_privdata DAC960_LA_privdata = {
- .HardwareType = DAC960_LA_Controller,
- .FirmwareType = DAC960_V1_Controller,
- .InterruptHandler = DAC960_LA_InterruptHandler,
- .MemoryWindowSize = DAC960_LA_RegisterWindowSize,
-};
-
-static struct DAC960_privdata DAC960_PG_privdata = {
- .HardwareType = DAC960_PG_Controller,
- .FirmwareType = DAC960_V1_Controller,
- .InterruptHandler = DAC960_PG_InterruptHandler,
- .MemoryWindowSize = DAC960_PG_RegisterWindowSize,
-};
-
-static struct DAC960_privdata DAC960_PD_privdata = {
- .HardwareType = DAC960_PD_Controller,
- .FirmwareType = DAC960_V1_Controller,
- .InterruptHandler = DAC960_PD_InterruptHandler,
- .MemoryWindowSize = DAC960_PD_RegisterWindowSize,
-};
-
-static struct DAC960_privdata DAC960_P_privdata = {
- .HardwareType = DAC960_P_Controller,
- .FirmwareType = DAC960_V1_Controller,
- .InterruptHandler = DAC960_P_InterruptHandler,
- .MemoryWindowSize = DAC960_PD_RegisterWindowSize,
-};
-
-static const struct pci_device_id DAC960_id_table[] = {
- {
- .vendor = PCI_VENDOR_ID_MYLEX,
- .device = PCI_DEVICE_ID_MYLEX_DAC960_GEM,
- .subvendor = PCI_VENDOR_ID_MYLEX,
- .subdevice = PCI_ANY_ID,
- .driver_data = (unsigned long) &DAC960_GEM_privdata,
- },
- {
- .vendor = PCI_VENDOR_ID_MYLEX,
- .device = PCI_DEVICE_ID_MYLEX_DAC960_BA,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = (unsigned long) &DAC960_BA_privdata,
- },
- {
- .vendor = PCI_VENDOR_ID_MYLEX,
- .device = PCI_DEVICE_ID_MYLEX_DAC960_LP,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = (unsigned long) &DAC960_LP_privdata,
- },
- {
- .vendor = PCI_VENDOR_ID_DEC,
- .device = PCI_DEVICE_ID_DEC_21285,
- .subvendor = PCI_VENDOR_ID_MYLEX,
- .subdevice = PCI_DEVICE_ID_MYLEX_DAC960_LA,
- .driver_data = (unsigned long) &DAC960_LA_privdata,
- },
- {
- .vendor = PCI_VENDOR_ID_MYLEX,
- .device = PCI_DEVICE_ID_MYLEX_DAC960_PG,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = (unsigned long) &DAC960_PG_privdata,
- },
- {
- .vendor = PCI_VENDOR_ID_MYLEX,
- .device = PCI_DEVICE_ID_MYLEX_DAC960_PD,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = (unsigned long) &DAC960_PD_privdata,
- },
- {
- .vendor = PCI_VENDOR_ID_MYLEX,
- .device = PCI_DEVICE_ID_MYLEX_DAC960_P,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .driver_data = (unsigned long) &DAC960_P_privdata,
- },
- {0, },
-};
-
-MODULE_DEVICE_TABLE(pci, DAC960_id_table);
-
-static struct pci_driver DAC960_pci_driver = {
- .name = "DAC960",
- .id_table = DAC960_id_table,
- .probe = DAC960_Probe,
- .remove = DAC960_Remove,
-};
-
-static int __init DAC960_init_module(void)
-{
- int ret;
-
- ret = pci_register_driver(&DAC960_pci_driver);
-#ifdef DAC960_GAM_MINOR
- if (!ret)
- DAC960_gam_init();
-#endif
- return ret;
-}
-
-static void __exit DAC960_cleanup_module(void)
-{
- int i;
-
-#ifdef DAC960_GAM_MINOR
- DAC960_gam_cleanup();
-#endif
-
- for (i = 0; i < DAC960_ControllerCount; i++) {
- DAC960_Controller_T *Controller = DAC960_Controllers[i];
- if (Controller == NULL)
- continue;
- DAC960_FinalizeController(Controller);
- }
- if (DAC960_ProcDirectoryEntry != NULL) {
- remove_proc_entry("rd/status", NULL);
- remove_proc_entry("rd", NULL);
- }
- DAC960_ControllerCount = 0;
- pci_unregister_driver(&DAC960_pci_driver);
-}
-
-module_init(DAC960_init_module);
-module_exit(DAC960_cleanup_module);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/block/DAC960.h b/drivers/block/DAC960.h
deleted file mode 100644
index 1439e651928b..000000000000
--- a/drivers/block/DAC960.h
+++ /dev/null
@@ -1,4414 +0,0 @@
-/*
-
- Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
-
- Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
-
- This program is free software; you may redistribute and/or modify it under
- the terms of the GNU General Public License Version 2 as published by the
- Free Software Foundation.
-
- This program is distributed in the hope that it will be useful, but
- WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY
- or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- for complete details.
-
- The author respectfully requests that any modifications to this software be
- sent directly to him for evaluation and testing.
-
-*/
-
-
-/*
- Define the maximum number of DAC960 Controllers supported by this driver.
-*/
-
-#define DAC960_MaxControllers 8
-
-
-/*
- Define the maximum number of Controller Channels supported by DAC960
- V1 and V2 Firmware Controllers.
-*/
-
-#define DAC960_V1_MaxChannels 3
-#define DAC960_V2_MaxChannels 4
-
-
-/*
- Define the maximum number of Targets per Channel supported by DAC960
- V1 and V2 Firmware Controllers.
-*/
-
-#define DAC960_V1_MaxTargets 16
-#define DAC960_V2_MaxTargets 128
-
-
-/*
- Define the maximum number of Logical Drives supported by DAC960
- V1 and V2 Firmware Controllers.
-*/
-
-#define DAC960_MaxLogicalDrives 32
-
-
-/*
- Define the maximum number of Physical Devices supported by DAC960
- V1 and V2 Firmware Controllers.
-*/
-
-#define DAC960_V1_MaxPhysicalDevices 45
-#define DAC960_V2_MaxPhysicalDevices 272
-
-/*
- Define a 32/64 bit I/O Address data type.
-*/
-
-typedef unsigned long DAC960_IO_Address_T;
-
-
-/*
- Define a 32/64 bit PCI Bus Address data type.
-*/
-
-typedef unsigned long DAC960_PCI_Address_T;
-
-
-/*
- Define a 32 bit Bus Address data type.
-*/
-
-typedef unsigned int DAC960_BusAddress32_T;
-
-
-/*
- Define a 64 bit Bus Address data type.
-*/
-
-typedef unsigned long long DAC960_BusAddress64_T;
-
-
-/*
- Define a 32 bit Byte Count data type.
-*/
-
-typedef unsigned int DAC960_ByteCount32_T;
-
-
-/*
- Define a 64 bit Byte Count data type.
-*/
-
-typedef unsigned long long DAC960_ByteCount64_T;
-
-
-/*
- dma_loaf is used by helper routines to divide a region of
- DMA-mapped memory into smaller pieces, where those pieces
- are not of uniform size.
- */
-
-struct dma_loaf {
- void *cpu_base;
- dma_addr_t dma_base;
- size_t length;
- void *cpu_free;
- dma_addr_t dma_free;
-};
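The driver's own loaf helpers are defined earlier in DAC960.c; as an
illustration of the intended use, a sub-allocation is carved out by
advancing the CPU and DMA cursors in step (a sketch, not the driver's
implementation):

	#include <linux/types.h>

	static void *slice_loaf_sketch(struct dma_loaf *loaf, size_t len,
				       dma_addr_t *dma_handle)
	{
		void *cpu_addr = loaf->cpu_free;

		if (len > loaf->length - (size_t)(loaf->cpu_free - loaf->cpu_base))
			return NULL;	/* loaf exhausted */
		*dma_handle = loaf->dma_free;
		loaf->cpu_free += len;
		loaf->dma_free += len;
		return cpu_addr;
	}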
-
-/*
- Define the SCSI INQUIRY Standard Data structure.
-*/
-
-typedef struct DAC960_SCSI_Inquiry
-{
- unsigned char PeripheralDeviceType:5; /* Byte 0 Bits 0-4 */
- unsigned char PeripheralQualifier:3; /* Byte 0 Bits 5-7 */
- unsigned char DeviceTypeModifier:7; /* Byte 1 Bits 0-6 */
- bool RMB:1; /* Byte 1 Bit 7 */
- unsigned char ANSI_ApprovedVersion:3; /* Byte 2 Bits 0-2 */
- unsigned char ECMA_Version:3; /* Byte 2 Bits 3-5 */
- unsigned char ISO_Version:2; /* Byte 2 Bits 6-7 */
- unsigned char ResponseDataFormat:4; /* Byte 3 Bits 0-3 */
- unsigned char :2; /* Byte 3 Bits 4-5 */
- bool TrmIOP:1; /* Byte 3 Bit 6 */
- bool AENC:1; /* Byte 3 Bit 7 */
- unsigned char AdditionalLength; /* Byte 4 */
- unsigned char :8; /* Byte 5 */
- unsigned char :8; /* Byte 6 */
- bool SftRe:1; /* Byte 7 Bit 0 */
- bool CmdQue:1; /* Byte 7 Bit 1 */
- bool :1; /* Byte 7 Bit 2 */
- bool Linked:1; /* Byte 7 Bit 3 */
- bool Sync:1; /* Byte 7 Bit 4 */
- bool WBus16:1; /* Byte 7 Bit 5 */
- bool WBus32:1; /* Byte 7 Bit 6 */
- bool RelAdr:1; /* Byte 7 Bit 7 */
- unsigned char VendorIdentification[8]; /* Bytes 8-15 */
- unsigned char ProductIdentification[16]; /* Bytes 16-31 */
- unsigned char ProductRevisionLevel[4]; /* Bytes 32-35 */
-}
-DAC960_SCSI_Inquiry_T;
-
-
-/*
- Define the SCSI INQUIRY Unit Serial Number structure.
-*/
-
-typedef struct DAC960_SCSI_Inquiry_UnitSerialNumber
-{
- unsigned char PeripheralDeviceType:5; /* Byte 0 Bits 0-4 */
- unsigned char PeripheralQualifier:3; /* Byte 0 Bits 5-7 */
- unsigned char PageCode; /* Byte 1 */
- unsigned char :8; /* Byte 2 */
- unsigned char PageLength; /* Byte 3 */
- unsigned char ProductSerialNumber[28]; /* Bytes 4-31 */
-}
-DAC960_SCSI_Inquiry_UnitSerialNumber_T;
-
-
-/*
- Define the SCSI REQUEST SENSE Sense Key type.
-*/
-
-typedef enum
-{
- DAC960_SenseKey_NoSense = 0x0,
- DAC960_SenseKey_RecoveredError = 0x1,
- DAC960_SenseKey_NotReady = 0x2,
- DAC960_SenseKey_MediumError = 0x3,
- DAC960_SenseKey_HardwareError = 0x4,
- DAC960_SenseKey_IllegalRequest = 0x5,
- DAC960_SenseKey_UnitAttention = 0x6,
- DAC960_SenseKey_DataProtect = 0x7,
- DAC960_SenseKey_BlankCheck = 0x8,
- DAC960_SenseKey_VendorSpecific = 0x9,
- DAC960_SenseKey_CopyAborted = 0xA,
- DAC960_SenseKey_AbortedCommand = 0xB,
- DAC960_SenseKey_Equal = 0xC,
- DAC960_SenseKey_VolumeOverflow = 0xD,
- DAC960_SenseKey_Miscompare = 0xE,
- DAC960_SenseKey_Reserved = 0xF
-}
-__attribute__ ((packed))
-DAC960_SCSI_RequestSenseKey_T;
-
-
-/*
- Define the SCSI REQUEST SENSE structure.
-*/
-
-typedef struct DAC960_SCSI_RequestSense
-{
- unsigned char ErrorCode:7; /* Byte 0 Bits 0-6 */
- bool Valid:1; /* Byte 0 Bit 7 */
- unsigned char SegmentNumber; /* Byte 1 */
- DAC960_SCSI_RequestSenseKey_T SenseKey:4; /* Byte 2 Bits 0-3 */
- unsigned char :1; /* Byte 2 Bit 4 */
- bool ILI:1; /* Byte 2 Bit 5 */
- bool EOM:1; /* Byte 2 Bit 6 */
- bool Filemark:1; /* Byte 2 Bit 7 */
- unsigned char Information[4]; /* Bytes 3-6 */
- unsigned char AdditionalSenseLength; /* Byte 7 */
- unsigned char CommandSpecificInformation[4]; /* Bytes 8-11 */
- unsigned char AdditionalSenseCode; /* Byte 12 */
- unsigned char AdditionalSenseCodeQualifier; /* Byte 13 */
-}
-DAC960_SCSI_RequestSense_T;
-
-
-/*
- Define the DAC960 V1 Firmware Command Opcodes.
-*/
-
-typedef enum
-{
- /* I/O Commands */
- DAC960_V1_ReadExtended = 0x33,
- DAC960_V1_WriteExtended = 0x34,
- DAC960_V1_ReadAheadExtended = 0x35,
- DAC960_V1_ReadExtendedWithScatterGather = 0xB3,
- DAC960_V1_WriteExtendedWithScatterGather = 0xB4,
- DAC960_V1_Read = 0x36,
- DAC960_V1_ReadWithScatterGather = 0xB6,
- DAC960_V1_Write = 0x37,
- DAC960_V1_WriteWithScatterGather = 0xB7,
- DAC960_V1_DCDB = 0x04,
- DAC960_V1_DCDBWithScatterGather = 0x84,
- DAC960_V1_Flush = 0x0A,
- /* Controller Status Related Commands */
- DAC960_V1_Enquiry = 0x53,
- DAC960_V1_Enquiry2 = 0x1C,
- DAC960_V1_GetLogicalDriveElement = 0x55,
- DAC960_V1_GetLogicalDriveInformation = 0x19,
- DAC960_V1_IOPortRead = 0x39,
- DAC960_V1_IOPortWrite = 0x3A,
- DAC960_V1_GetSDStats = 0x3E,
- DAC960_V1_GetPDStats = 0x3F,
- DAC960_V1_PerformEventLogOperation = 0x72,
- /* Device Related Commands */
- DAC960_V1_StartDevice = 0x10,
- DAC960_V1_GetDeviceState = 0x50,
- DAC960_V1_StopChannel = 0x13,
- DAC960_V1_StartChannel = 0x12,
- DAC960_V1_ResetChannel = 0x1A,
- /* Commands Associated with Data Consistency and Errors */
- DAC960_V1_Rebuild = 0x09,
- DAC960_V1_RebuildAsync = 0x16,
- DAC960_V1_CheckConsistency = 0x0F,
- DAC960_V1_CheckConsistencyAsync = 0x1E,
- DAC960_V1_RebuildStat = 0x0C,
- DAC960_V1_GetRebuildProgress = 0x27,
- DAC960_V1_RebuildControl = 0x1F,
- DAC960_V1_ReadBadBlockTable = 0x0B,
- DAC960_V1_ReadBadDataTable = 0x25,
- DAC960_V1_ClearBadDataTable = 0x26,
- DAC960_V1_GetErrorTable = 0x17,
- DAC960_V1_AddCapacityAsync = 0x2A,
- DAC960_V1_BackgroundInitializationControl = 0x2B,
- /* Configuration Related Commands */
- DAC960_V1_ReadConfig2 = 0x3D,
- DAC960_V1_WriteConfig2 = 0x3C,
- DAC960_V1_ReadConfigurationOnDisk = 0x4A,
- DAC960_V1_WriteConfigurationOnDisk = 0x4B,
- DAC960_V1_ReadConfiguration = 0x4E,
- DAC960_V1_ReadBackupConfiguration = 0x4D,
- DAC960_V1_WriteConfiguration = 0x4F,
- DAC960_V1_AddConfiguration = 0x4C,
- DAC960_V1_ReadConfigurationLabel = 0x48,
- DAC960_V1_WriteConfigurationLabel = 0x49,
- /* Firmware Upgrade Related Commands */
- DAC960_V1_LoadImage = 0x20,
- DAC960_V1_StoreImage = 0x21,
- DAC960_V1_ProgramImage = 0x22,
- /* Diagnostic Commands */
- DAC960_V1_SetDiagnosticMode = 0x31,
- DAC960_V1_RunDiagnostic = 0x32,
- /* Subsystem Service Commands */
- DAC960_V1_GetSubsystemData = 0x70,
- DAC960_V1_SetSubsystemParameters = 0x71,
- /* Version 2.xx Firmware Commands */
- DAC960_V1_Enquiry_Old = 0x05,
- DAC960_V1_GetDeviceState_Old = 0x14,
- DAC960_V1_Read_Old = 0x02,
- DAC960_V1_Write_Old = 0x03,
- DAC960_V1_ReadWithScatterGather_Old = 0x82,
- DAC960_V1_WriteWithScatterGather_Old = 0x83
-}
-__attribute__ ((packed))
-DAC960_V1_CommandOpcode_T;
-
-
-/*
- Define the DAC960 V1 Firmware Command Identifier type.
-*/
-
-typedef unsigned char DAC960_V1_CommandIdentifier_T;
-
-
-/*
- Define the DAC960 V1 Firmware Command Status Codes.
-*/
-
-#define DAC960_V1_NormalCompletion 0x0000 /* Common */
-#define DAC960_V1_CheckConditionReceived 0x0002 /* Common */
-#define DAC960_V1_NoDeviceAtAddress 0x0102 /* Common */
-#define DAC960_V1_InvalidDeviceAddress 0x0105 /* Common */
-#define DAC960_V1_InvalidParameter 0x0105 /* Common */
-#define DAC960_V1_IrrecoverableDataError 0x0001 /* I/O */
-#define DAC960_V1_LogicalDriveNonexistentOrOffline 0x0002 /* I/O */
-#define DAC960_V1_AccessBeyondEndOfLogicalDrive 0x0105 /* I/O */
-#define DAC960_V1_BadDataEncountered 0x010C /* I/O */
-#define DAC960_V1_DeviceBusy 0x0008 /* DCDB */
-#define DAC960_V1_DeviceNonresponsive 0x000E /* DCDB */
-#define DAC960_V1_CommandTerminatedAbnormally 0x000F /* DCDB */
-#define DAC960_V1_UnableToStartDevice 0x0002 /* Device */
-#define DAC960_V1_InvalidChannelOrTargetOrModifier 0x0105 /* Device */
-#define DAC960_V1_ChannelBusy 0x0106 /* Device */
-#define DAC960_V1_ChannelNotStopped 0x0002 /* Device */
-#define DAC960_V1_AttemptToRebuildOnlineDrive 0x0002 /* Consistency */
-#define DAC960_V1_RebuildBadBlocksEncountered 0x0003 /* Consistency */
-#define DAC960_V1_NewDiskFailedDuringRebuild 0x0004 /* Consistency */
-#define DAC960_V1_RebuildOrCheckAlreadyInProgress 0x0106 /* Consistency */
-#define DAC960_V1_DependentDiskIsDead 0x0002 /* Consistency */
-#define DAC960_V1_InconsistentBlocksFound 0x0003 /* Consistency */
-#define DAC960_V1_InvalidOrNonredundantLogicalDrive 0x0105 /* Consistency */
-#define DAC960_V1_NoRebuildOrCheckInProgress 0x0105 /* Consistency */
-#define DAC960_V1_RebuildInProgress_DataValid 0x0000 /* Consistency */
-#define DAC960_V1_RebuildFailed_LogicalDriveFailure 0x0002 /* Consistency */
-#define DAC960_V1_RebuildFailed_BadBlocksOnOther 0x0003 /* Consistency */
-#define DAC960_V1_RebuildFailed_NewDriveFailed 0x0004 /* Consistency */
-#define DAC960_V1_RebuildSuccessful 0x0100 /* Consistency */
-#define DAC960_V1_RebuildSuccessfullyTerminated 0x0107 /* Consistency */
-#define DAC960_V1_BackgroundInitSuccessful 0x0100 /* Consistency */
-#define DAC960_V1_BackgroundInitAborted 0x0005 /* Consistency */
-#define DAC960_V1_NoBackgroundInitInProgress 0x0105 /* Consistency */
-#define DAC960_V1_AddCapacityInProgress 0x0004 /* Consistency */
-#define DAC960_V1_AddCapacityFailedOrSuspended 0x00F4 /* Consistency */
-#define DAC960_V1_Config2ChecksumError 0x0002 /* Configuration */
-#define DAC960_V1_ConfigurationSuspended 0x0106 /* Configuration */
-#define DAC960_V1_FailedToConfigureNVRAM 0x0105 /* Configuration */
-#define DAC960_V1_ConfigurationNotSavedStateChange 0x0106 /* Configuration */
-#define DAC960_V1_SubsystemNotInstalled 0x0001 /* Subsystem */
-#define DAC960_V1_SubsystemFailed 0x0002 /* Subsystem */
-#define DAC960_V1_SubsystemBusy 0x0106 /* Subsystem */
-
-typedef unsigned short DAC960_V1_CommandStatus_T;
-
-
-/*
- Define the DAC960 V1 Firmware Enquiry Command reply structure.
-*/
-
-typedef struct DAC960_V1_Enquiry
-{
- unsigned char NumberOfLogicalDrives; /* Byte 0 */
- unsigned int :24; /* Bytes 1-3 */
- unsigned int LogicalDriveSizes[32]; /* Bytes 4-131 */
- unsigned short FlashAge; /* Bytes 132-133 */
- struct {
- bool DeferredWriteError:1; /* Byte 134 Bit 0 */
- bool BatteryLow:1; /* Byte 134 Bit 1 */
- unsigned char :6; /* Byte 134 Bits 2-7 */
- } StatusFlags;
- unsigned char :8; /* Byte 135 */
- unsigned char MinorFirmwareVersion; /* Byte 136 */
- unsigned char MajorFirmwareVersion; /* Byte 137 */
- enum {
- DAC960_V1_NoStandbyRebuildOrCheckInProgress = 0x00,
- DAC960_V1_StandbyRebuildInProgress = 0x01,
- DAC960_V1_BackgroundRebuildInProgress = 0x02,
- DAC960_V1_BackgroundCheckInProgress = 0x03,
- DAC960_V1_StandbyRebuildCompletedWithError = 0xFF,
- DAC960_V1_BackgroundRebuildOrCheckFailed_DriveFailed = 0xF0,
- DAC960_V1_BackgroundRebuildOrCheckFailed_LogicalDriveFailed = 0xF1,
- DAC960_V1_BackgroundRebuildOrCheckFailed_OtherCauses = 0xF2,
- DAC960_V1_BackgroundRebuildOrCheckSuccessfullyTerminated = 0xF3
- } __attribute__ ((packed)) RebuildFlag; /* Byte 138 */
- unsigned char MaxCommands; /* Byte 139 */
- unsigned char OfflineLogicalDriveCount; /* Byte 140 */
- unsigned char :8; /* Byte 141 */
- unsigned short EventLogSequenceNumber; /* Bytes 142-143 */
- unsigned char CriticalLogicalDriveCount; /* Byte 144 */
- unsigned int :24; /* Bytes 145-147 */
- unsigned char DeadDriveCount; /* Byte 148 */
- unsigned char :8; /* Byte 149 */
- unsigned char RebuildCount; /* Byte 150 */
- struct {
- unsigned char :3; /* Byte 151 Bits 0-2 */
- bool BatteryBackupUnitPresent:1; /* Byte 151 Bit 3 */
- unsigned char :3; /* Byte 151 Bits 4-6 */
- unsigned char :1; /* Byte 151 Bit 7 */
- } MiscFlags;
- struct {
- unsigned char TargetID;
- unsigned char Channel;
- } DeadDrives[21]; /* Bytes 152-194 */
- unsigned char Reserved[62]; /* Bytes 195-255 */
-}
-__attribute__ ((packed))
-DAC960_V1_Enquiry_T;
-
-
-/*
- Define the DAC960 V1 Firmware Enquiry2 Command reply structure.
-*/
-
-typedef struct DAC960_V1_Enquiry2
-{
- struct {
- enum {
- DAC960_V1_P_PD_PU = 0x01,
- DAC960_V1_PL = 0x02,
- DAC960_V1_PG = 0x10,
- DAC960_V1_PJ = 0x11,
- DAC960_V1_PR = 0x12,
- DAC960_V1_PT = 0x13,
- DAC960_V1_PTL0 = 0x14,
- DAC960_V1_PRL = 0x15,
- DAC960_V1_PTL1 = 0x16,
- DAC960_V1_1164P = 0x20
- } __attribute__ ((packed)) SubModel; /* Byte 0 */
- unsigned char ActualChannels; /* Byte 1 */
- enum {
- DAC960_V1_FiveChannelBoard = 0x01,
- DAC960_V1_ThreeChannelBoard = 0x02,
- DAC960_V1_TwoChannelBoard = 0x03,
- DAC960_V1_ThreeChannelASIC_DAC = 0x04
- } __attribute__ ((packed)) Model; /* Byte 2 */
- enum {
- DAC960_V1_EISA_Controller = 0x01,
- DAC960_V1_MicroChannel_Controller = 0x02,
- DAC960_V1_PCI_Controller = 0x03,
- DAC960_V1_SCSItoSCSI_Controller = 0x08
- } __attribute__ ((packed)) ProductFamily; /* Byte 3 */
- } HardwareID; /* Bytes 0-3 */
- /* MajorVersion.MinorVersion-FirmwareType-TurnID */
- struct {
- unsigned char MajorVersion; /* Byte 4 */
- unsigned char MinorVersion; /* Byte 5 */
- unsigned char TurnID; /* Byte 6 */
- char FirmwareType; /* Byte 7 */
- } FirmwareID; /* Bytes 4-7 */
- unsigned char :8; /* Byte 8 */
- unsigned int :24; /* Bytes 9-11 */
- unsigned char ConfiguredChannels; /* Byte 12 */
- unsigned char ActualChannels; /* Byte 13 */
- unsigned char MaxTargets; /* Byte 14 */
- unsigned char MaxTags; /* Byte 15 */
- unsigned char MaxLogicalDrives; /* Byte 16 */
- unsigned char MaxArms; /* Byte 17 */
- unsigned char MaxSpans; /* Byte 18 */
- unsigned char :8; /* Byte 19 */
- unsigned int :32; /* Bytes 20-23 */
- unsigned int MemorySize; /* Bytes 24-27 */
- unsigned int CacheSize; /* Bytes 28-31 */
- unsigned int FlashMemorySize; /* Bytes 32-35 */
- unsigned int NonVolatileMemorySize; /* Bytes 36-39 */
- struct {
- enum {
- DAC960_V1_RamType_DRAM = 0x0,
- DAC960_V1_RamType_EDO = 0x1,
- DAC960_V1_RamType_SDRAM = 0x2,
- DAC960_V1_RamType_Last = 0x7
- } __attribute__ ((packed)) RamType:3; /* Byte 40 Bits 0-2 */
- enum {
- DAC960_V1_ErrorCorrection_None = 0x0,
- DAC960_V1_ErrorCorrection_Parity = 0x1,
- DAC960_V1_ErrorCorrection_ECC = 0x2,
- DAC960_V1_ErrorCorrection_Last = 0x7
- } __attribute__ ((packed)) ErrorCorrection:3; /* Byte 40 Bits 3-5 */
- bool FastPageMode:1; /* Byte 40 Bit 6 */
- bool LowPowerMemory:1; /* Byte 40 Bit 7 */
- unsigned char :8; /* Byte 41 */
- } MemoryType;
- unsigned short ClockSpeed; /* Bytes 42-43 */
- unsigned short MemorySpeed; /* Bytes 44-45 */
- unsigned short HardwareSpeed; /* Bytes 46-47 */
- unsigned int :32; /* Bytes 48-51 */
- unsigned int :32; /* Bytes 52-55 */
- unsigned char :8; /* Byte 56 */
- unsigned char :8; /* Byte 57 */
- unsigned short :16; /* Bytes 58-59 */
- unsigned short MaxCommands; /* Bytes 60-61 */
- unsigned short MaxScatterGatherEntries; /* Bytes 62-63 */
- unsigned short MaxDriveCommands; /* Bytes 64-65 */
- unsigned short MaxIODescriptors; /* Bytes 66-67 */
- unsigned short MaxCombinedSectors; /* Bytes 68-69 */
- unsigned char Latency; /* Byte 70 */
- unsigned char :8; /* Byte 71 */
- unsigned char SCSITimeout; /* Byte 72 */
- unsigned char :8; /* Byte 73 */
- unsigned short MinFreeLines; /* Bytes 74-75 */
- unsigned int :32; /* Bytes 76-79 */
- unsigned int :32; /* Bytes 80-83 */
- unsigned char RebuildRateConstant; /* Byte 84 */
- unsigned char :8; /* Byte 85 */
- unsigned char :8; /* Byte 86 */
- unsigned char :8; /* Byte 87 */
- unsigned int :32; /* Bytes 88-91 */
- unsigned int :32; /* Bytes 92-95 */
- unsigned short PhysicalDriveBlockSize; /* Bytes 96-97 */
- unsigned short LogicalDriveBlockSize; /* Bytes 98-99 */
- unsigned short MaxBlocksPerCommand; /* Bytes 100-101 */
- unsigned short BlockFactor; /* Bytes 102-103 */
- unsigned short CacheLineSize; /* Bytes 104-105 */
- struct {
- enum {
- DAC960_V1_Narrow_8bit = 0x0,
- DAC960_V1_Wide_16bit = 0x1,
- DAC960_V1_Wide_32bit = 0x2
- } __attribute__ ((packed)) BusWidth:2; /* Byte 106 Bits 0-1 */
- enum {
- DAC960_V1_Fast = 0x0,
- DAC960_V1_Ultra = 0x1,
- DAC960_V1_Ultra2 = 0x2
- } __attribute__ ((packed)) BusSpeed:2; /* Byte 106 Bits 2-3 */
- bool Differential:1; /* Byte 106 Bit 4 */
- unsigned char :3; /* Byte 106 Bits 5-7 */
- } SCSICapability;
- unsigned char :8; /* Byte 107 */
- unsigned int :32; /* Bytes 108-111 */
- unsigned short FirmwareBuildNumber; /* Bytes 112-113 */
- enum {
- DAC960_V1_AEMI = 0x01,
- DAC960_V1_OEM1 = 0x02,
- DAC960_V1_OEM2 = 0x04,
- DAC960_V1_OEM3 = 0x08,
- DAC960_V1_Conner = 0x10,
- DAC960_V1_SAFTE = 0x20
- } __attribute__ ((packed)) FaultManagementType; /* Byte 114 */
- unsigned char :8; /* Byte 115 */
- struct {
- bool Clustering:1; /* Byte 116 Bit 0 */
- bool MylexOnlineRAIDExpansion:1; /* Byte 116 Bit 1 */
- bool ReadAhead:1; /* Byte 116 Bit 2 */
- bool BackgroundInitialization:1; /* Byte 116 Bit 3 */
- unsigned int :28; /* Bytes 116-119 */
- } FirmwareFeatures;
- unsigned int :32; /* Bytes 120-123 */
- unsigned int :32; /* Bytes 124-127 */
-}
-DAC960_V1_Enquiry2_T;
-
-
-/*
- Define the DAC960 V1 Firmware Logical Drive State type.
-*/
-
-typedef enum
-{
- DAC960_V1_LogicalDrive_Online = 0x03,
- DAC960_V1_LogicalDrive_Critical = 0x04,
- DAC960_V1_LogicalDrive_Offline = 0xFF
-}
-__attribute__ ((packed))
-DAC960_V1_LogicalDriveState_T;
-
-
-/*
- Define the DAC960 V1 Firmware Logical Drive Information structure.
-*/
-
-typedef struct DAC960_V1_LogicalDriveInformation
-{
- unsigned int LogicalDriveSize; /* Bytes 0-3 */
- DAC960_V1_LogicalDriveState_T LogicalDriveState; /* Byte 4 */
- unsigned char RAIDLevel:7; /* Byte 5 Bits 0-6 */
- bool WriteBack:1; /* Byte 5 Bit 7 */
- unsigned short :16; /* Bytes 6-7 */
-}
-DAC960_V1_LogicalDriveInformation_T;
-
-
-/*
- Define the DAC960 V1 Firmware Get Logical Drive Information Command
- reply structure.
-*/
-
-typedef DAC960_V1_LogicalDriveInformation_T
- DAC960_V1_LogicalDriveInformationArray_T[DAC960_MaxLogicalDrives];
-
-
-/*
- Define the DAC960 V1 Firmware Perform Event Log Operation Types.
-*/
-
-typedef enum
-{
- DAC960_V1_GetEventLogEntry = 0x00
-}
-__attribute__ ((packed))
-DAC960_V1_PerformEventLogOpType_T;
-
-
-/*
- Define the DAC960 V1 Firmware Get Event Log Entry Command reply structure.
-*/
-
-typedef struct DAC960_V1_EventLogEntry
-{
- unsigned char MessageType; /* Byte 0 */
- unsigned char MessageLength; /* Byte 1 */
- unsigned char TargetID:5; /* Byte 2 Bits 0-4 */
- unsigned char Channel:3; /* Byte 2 Bits 5-7 */
- unsigned char LogicalUnit:6; /* Byte 3 Bits 0-5 */
- unsigned char :2; /* Byte 3 Bits 6-7 */
- unsigned short SequenceNumber; /* Bytes 4-5 */
- unsigned char ErrorCode:7; /* Byte 6 Bits 0-6 */
- bool Valid:1; /* Byte 6 Bit 7 */
- unsigned char SegmentNumber; /* Byte 7 */
- DAC960_SCSI_RequestSenseKey_T SenseKey:4; /* Byte 8 Bits 0-3 */
- unsigned char :1; /* Byte 8 Bit 4 */
- bool ILI:1; /* Byte 8 Bit 5 */
- bool EOM:1; /* Byte 8 Bit 6 */
- bool Filemark:1; /* Byte 8 Bit 7 */
- unsigned char Information[4]; /* Bytes 9-12 */
- unsigned char AdditionalSenseLength; /* Byte 13 */
- unsigned char CommandSpecificInformation[4]; /* Bytes 14-17 */
- unsigned char AdditionalSenseCode; /* Byte 18 */
- unsigned char AdditionalSenseCodeQualifier; /* Byte 19 */
- unsigned char Dummy[12]; /* Bytes 20-31 */
-}
-DAC960_V1_EventLogEntry_T;
-
-
-/*
- Define the DAC960 V1 Firmware Physical Device State type.
-*/
-
-typedef enum
-{
- DAC960_V1_Device_Dead = 0x00,
- DAC960_V1_Device_WriteOnly = 0x02,
- DAC960_V1_Device_Online = 0x03,
- DAC960_V1_Device_Standby = 0x10
-}
-__attribute__ ((packed))
-DAC960_V1_PhysicalDeviceState_T;
-
-
-/*
- Define the DAC960 V1 Firmware Get Device State Command reply structure.
- The structure is padded by 2 bytes for compatibility with Version 2.xx
- Firmware.
-*/
-
-typedef struct DAC960_V1_DeviceState
-{
- bool Present:1; /* Byte 0 Bit 0 */
- unsigned char :7; /* Byte 0 Bits 1-7 */
- enum {
- DAC960_V1_OtherType = 0x0,
- DAC960_V1_DiskType = 0x1,
- DAC960_V1_SequentialType = 0x2,
- DAC960_V1_CDROM_or_WORM_Type = 0x3
- } __attribute__ ((packed)) DeviceType:2; /* Byte 1 Bits 0-1 */
- bool :1; /* Byte 1 Bit 2 */
- bool Fast20:1; /* Byte 1 Bit 3 */
- bool Sync:1; /* Byte 1 Bit 4 */
- bool Fast:1; /* Byte 1 Bit 5 */
- bool Wide:1; /* Byte 1 Bit 6 */
- bool TaggedQueuingSupported:1; /* Byte 1 Bit 7 */
- DAC960_V1_PhysicalDeviceState_T DeviceState; /* Byte 2 */
- unsigned char :8; /* Byte 3 */
- unsigned char SynchronousMultiplier; /* Byte 4 */
- unsigned char SynchronousOffset:5; /* Byte 5 Bits 0-4 */
- unsigned char :3; /* Byte 5 Bits 5-7 */
- unsigned int DiskSize __attribute__ ((packed)); /* Bytes 6-9 */
- unsigned short :16; /* Bytes 10-11 */
-}
-DAC960_V1_DeviceState_T;
-
-
-/*
- Define the DAC960 V1 Firmware Get Rebuild Progress Command reply structure.
-*/
-
-typedef struct DAC960_V1_RebuildProgress
-{
- unsigned int LogicalDriveNumber; /* Bytes 0-3 */
- unsigned int LogicalDriveSize; /* Bytes 4-7 */
- unsigned int RemainingBlocks; /* Bytes 8-11 */
-}
-DAC960_V1_RebuildProgress_T;
-
-
-/*
- Define the DAC960 V1 Firmware Background Initialization Status Command
- reply structure.
-*/
-
-typedef struct DAC960_V1_BackgroundInitializationStatus
-{
- unsigned int LogicalDriveSize; /* Bytes 0-3 */
- unsigned int BlocksCompleted; /* Bytes 4-7 */
- unsigned char Reserved1[12]; /* Bytes 8-19 */
- unsigned int LogicalDriveNumber; /* Bytes 20-23 */
- unsigned char RAIDLevel; /* Byte 24 */
- enum {
- DAC960_V1_BackgroundInitializationInvalid = 0x00,
- DAC960_V1_BackgroundInitializationStarted = 0x02,
- DAC960_V1_BackgroundInitializationInProgress = 0x04,
- DAC960_V1_BackgroundInitializationSuspended = 0x05,
- DAC960_V1_BackgroundInitializationCancelled = 0x06
- } __attribute__ ((packed)) Status; /* Byte 25 */
- unsigned char Reserved2[6]; /* Bytes 26-31 */
-}
-DAC960_V1_BackgroundInitializationStatus_T;
-
-
-/*
- Define the DAC960 V1 Firmware Error Table Entry structure.
-*/
-
-typedef struct DAC960_V1_ErrorTableEntry
-{
- unsigned char ParityErrorCount; /* Byte 0 */
- unsigned char SoftErrorCount; /* Byte 1 */
- unsigned char HardErrorCount; /* Byte 2 */
- unsigned char MiscErrorCount; /* Byte 3 */
-}
-DAC960_V1_ErrorTableEntry_T;
-
-
-/*
- Define the DAC960 V1 Firmware Get Error Table Command reply structure.
-*/
-
-typedef struct DAC960_V1_ErrorTable
-{
- DAC960_V1_ErrorTableEntry_T
- ErrorTableEntries[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
-}
-DAC960_V1_ErrorTable_T;
-
-
-/*
- Define the DAC960 V1 Firmware Read Config2 Command reply structure.
-*/
-
-typedef struct DAC960_V1_Config2
-{
- unsigned char :1; /* Byte 0 Bit 0 */
- bool ActiveNegationEnabled:1; /* Byte 0 Bit 1 */
- unsigned char :5; /* Byte 0 Bits 2-6 */
- bool NoRescanIfResetReceivedDuringScan:1; /* Byte 0 Bit 7 */
- bool StorageWorksSupportEnabled:1; /* Byte 1 Bit 0 */
- bool HewlettPackardSupportEnabled:1; /* Byte 1 Bit 1 */
- bool NoDisconnectOnFirstCommand:1; /* Byte 1 Bit 2 */
- unsigned char :2; /* Byte 1 Bits 3-4 */
- bool AEMI_ARM:1; /* Byte 1 Bit 5 */
- bool AEMI_OFM:1; /* Byte 1 Bit 6 */
- unsigned char :1; /* Byte 1 Bit 7 */
- enum {
- DAC960_V1_OEMID_Mylex = 0x00,
- DAC960_V1_OEMID_IBM = 0x08,
- DAC960_V1_OEMID_HP = 0x0A,
- DAC960_V1_OEMID_DEC = 0x0C,
- DAC960_V1_OEMID_Siemens = 0x10,
- DAC960_V1_OEMID_Intel = 0x12
- } __attribute__ ((packed)) OEMID; /* Byte 2 */
- unsigned char OEMModelNumber; /* Byte 3 */
- unsigned char PhysicalSector; /* Byte 4 */
- unsigned char LogicalSector; /* Byte 5 */
- unsigned char BlockFactor; /* Byte 6 */
- bool ReadAheadEnabled:1; /* Byte 7 Bit 0 */
- bool LowBIOSDelay:1; /* Byte 7 Bit 1 */
- unsigned char :2; /* Byte 7 Bits 2-3 */
- bool ReassignRestrictedToOneSector:1; /* Byte 7 Bit 4 */
- unsigned char :1; /* Byte 7 Bit 5 */
- bool ForceUnitAccessDuringWriteRecovery:1; /* Byte 7 Bit 6 */
- bool EnableLeftSymmetricRAID5Algorithm:1; /* Byte 7 Bit 7 */
- unsigned char DefaultRebuildRate; /* Byte 8 */
- unsigned char :8; /* Byte 9 */
- unsigned char BlocksPerCacheLine; /* Byte 10 */
- unsigned char BlocksPerStripe; /* Byte 11 */
- struct {
- enum {
- DAC960_V1_Async = 0x0,
- DAC960_V1_Sync_8MHz = 0x1,
- DAC960_V1_Sync_5MHz = 0x2,
- DAC960_V1_Sync_10or20MHz = 0x3
- } __attribute__ ((packed)) Speed:2; /* Byte 11 Bits 0-1 */
- bool Force8Bit:1; /* Byte 11 Bit 2 */
- bool DisableFast20:1; /* Byte 11 Bit 3 */
- unsigned char :3; /* Byte 11 Bits 4-6 */
- bool EnableTaggedQueuing:1; /* Byte 11 Bit 7 */
- } __attribute__ ((packed)) ChannelParameters[6]; /* Bytes 12-17 */
- unsigned char SCSIInitiatorID; /* Byte 18 */
- unsigned char :8; /* Byte 19 */
- enum {
- DAC960_V1_StartupMode_ControllerSpinUp = 0x00,
- DAC960_V1_StartupMode_PowerOnSpinUp = 0x01
- } __attribute__ ((packed)) StartupMode; /* Byte 20 */
- unsigned char SimultaneousDeviceSpinUpCount; /* Byte 21 */
- unsigned char SecondsDelayBetweenSpinUps; /* Byte 22 */
- unsigned char Reserved1[29]; /* Bytes 23-51 */
- bool BIOSDisabled:1; /* Byte 52 Bit 0 */
- bool CDROMBootEnabled:1; /* Byte 52 Bit 1 */
- unsigned char :3; /* Byte 52 Bits 2-4 */
- enum {
- DAC960_V1_Geometry_128_32 = 0x0,
- DAC960_V1_Geometry_255_63 = 0x1,
- DAC960_V1_Geometry_Reserved1 = 0x2,
- DAC960_V1_Geometry_Reserved2 = 0x3
- } __attribute__ ((packed)) DriveGeometry:2; /* Byte 52 Bits 5-6 */
- unsigned char :1; /* Byte 52 Bit 7 */
- unsigned char Reserved2[9]; /* Bytes 53-61 */
- unsigned short Checksum; /* Bytes 62-63 */
-}
-DAC960_V1_Config2_T;
-
-
-/*
- Define the DAC960 V1 Firmware DCDB request structure.
-*/
-
-typedef struct DAC960_V1_DCDB
-{
- unsigned char TargetID:4; /* Byte 0 Bits 0-3 */
- unsigned char Channel:4; /* Byte 0 Bits 4-7 */
- enum {
- DAC960_V1_DCDB_NoDataTransfer = 0,
- DAC960_V1_DCDB_DataTransferDeviceToSystem = 1,
- DAC960_V1_DCDB_DataTransferSystemToDevice = 2,
- DAC960_V1_DCDB_IllegalDataTransfer = 3
- } __attribute__ ((packed)) Direction:2; /* Byte 1 Bits 0-1 */
- bool EarlyStatus:1; /* Byte 1 Bit 2 */
- unsigned char :1; /* Byte 1 Bit 3 */
- enum {
- DAC960_V1_DCDB_Timeout_24_hours = 0,
- DAC960_V1_DCDB_Timeout_10_seconds = 1,
- DAC960_V1_DCDB_Timeout_60_seconds = 2,
- DAC960_V1_DCDB_Timeout_10_minutes = 3
- } __attribute__ ((packed)) Timeout:2; /* Byte 1 Bits 4-5 */
- bool NoAutomaticRequestSense:1; /* Byte 1 Bit 6 */
- bool DisconnectPermitted:1; /* Byte 1 Bit 7 */
- unsigned short TransferLength; /* Bytes 2-3 */
- DAC960_BusAddress32_T BusAddress; /* Bytes 4-7 */
- unsigned char CDBLength:4; /* Byte 8 Bits 0-3 */
- unsigned char TransferLengthHigh4:4; /* Byte 8 Bits 4-7 */
- unsigned char SenseLength; /* Byte 9 */
- unsigned char CDB[12]; /* Bytes 10-21 */
- unsigned char SenseData[64]; /* Bytes 22-85 */
- unsigned char Status; /* Byte 86 */
- unsigned char :8; /* Byte 87 */
-}
-DAC960_V1_DCDB_T;
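The full DCDB transfer length is 20 bits, split between TransferLength and
TransferLengthHigh4; DAC960_gam_v1_execute_command() above validates exactly
this combination against the user's requested length. An illustrative helper
(not part of the driver):

	static inline unsigned int dcdb_transfer_length(const DAC960_V1_DCDB_T *dcdb)
	{
		/* High 4 bits above the 16-bit TransferLength field. */
		return (dcdb->TransferLengthHigh4 << 16) | dcdb->TransferLength;
	}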
-
-
-/*
- Define the DAC960 V1 Firmware Scatter/Gather List Type 1 32 Bit Address
- 32 Bit Byte Count structure.
-*/
-
-typedef struct DAC960_V1_ScatterGatherSegment
-{
- DAC960_BusAddress32_T SegmentDataPointer; /* Bytes 0-3 */
- DAC960_ByteCount32_T SegmentByteCount; /* Bytes 4-7 */
-}
-DAC960_V1_ScatterGatherSegment_T;
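-
-
-/*
-  An illustrative sketch (assuming <linux/scatterlist.h>) of translating
-  a DMA-mapped struct scatterlist into Type 1 Scatter/Gather segments;
-  NumberOfSegments is assumed to be the count returned by dma_map_sg().
-*/
-
-static void DAC960_V1_ExampleFillScatterGatherList(
-	DAC960_V1_ScatterGatherSegment_T *ScatterGatherList,
-	struct scatterlist *ScatterList, int NumberOfSegments)
-{
-  struct scatterlist *ScatterListEntry;
-  int i;
-
-  for_each_sg(ScatterList, ScatterListEntry, NumberOfSegments, i) {
-    ScatterGatherList[i].SegmentDataPointer =
-      (DAC960_BusAddress32_T)sg_dma_address(ScatterListEntry);
-    ScatterGatherList[i].SegmentByteCount =
-      (DAC960_ByteCount32_T)sg_dma_len(ScatterListEntry);
-  }
-}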
-
-
-/*
- Define the 13 Byte DAC960 V1 Firmware Command Mailbox structure. Bytes 13-15
- are not used. The Command Mailbox structure is padded to 16 bytes for
- efficient access.
-*/
-
-typedef union DAC960_V1_CommandMailbox
-{
- unsigned int Words[4]; /* Words 0-3 */
- unsigned char Bytes[16]; /* Bytes 0-15 */
- struct {
- DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
- DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
- unsigned char Dummy[14]; /* Bytes 2-15 */
- } __attribute__ ((packed)) Common;
- struct {
- DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
- DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
- unsigned char Dummy1[6]; /* Bytes 2-7 */
- DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */
- unsigned char Dummy2[4]; /* Bytes 12-15 */
- } __attribute__ ((packed)) Type3;
- struct {
- DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
- DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
- unsigned char CommandOpcode2; /* Byte 2 */
- unsigned char Dummy1[5]; /* Bytes 3-7 */
- DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */
- unsigned char Dummy2[4]; /* Bytes 12-15 */
- } __attribute__ ((packed)) Type3B;
- struct {
- DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
- DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
- unsigned char Dummy1[5]; /* Bytes 2-6 */
- unsigned char LogicalDriveNumber:6; /* Byte 7 Bits 0-6 */
- bool AutoRestore:1; /* Byte 7 Bit 7 */
- unsigned char Dummy2[8]; /* Bytes 8-15 */
- } __attribute__ ((packed)) Type3C;
- struct {
- DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
- DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
- unsigned char Channel; /* Byte 2 */
- unsigned char TargetID; /* Byte 3 */
- DAC960_V1_PhysicalDeviceState_T DeviceState:5; /* Byte 4 Bits 0-4 */
- unsigned char Modifier:3; /* Byte 4 Bits 5-7 */
- unsigned char Dummy1[3]; /* Bytes 5-7 */
- DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */
- unsigned char Dummy2[4]; /* Bytes 12-15 */
- } __attribute__ ((packed)) Type3D;
- struct {
- DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
- DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
- DAC960_V1_PerformEventLogOpType_T OperationType; /* Byte 2 */
- unsigned char OperationQualifier; /* Byte 3 */
- unsigned short SequenceNumber; /* Bytes 4-5 */
- unsigned char Dummy1[2]; /* Bytes 6-7 */
- DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */
- unsigned char Dummy2[4]; /* Bytes 12-15 */
- } __attribute__ ((packed)) Type3E;
- struct {
- DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
- DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
- unsigned char Dummy1[2]; /* Bytes 2-3 */
- unsigned char RebuildRateConstant; /* Byte 4 */
- unsigned char Dummy2[3]; /* Bytes 5-7 */
- DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */
- unsigned char Dummy3[4]; /* Bytes 12-15 */
- } __attribute__ ((packed)) Type3R;
- struct {
- DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
- DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
- unsigned short TransferLength; /* Bytes 2-3 */
- unsigned int LogicalBlockAddress; /* Bytes 4-7 */
- DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */
- unsigned char LogicalDriveNumber; /* Byte 12 */
- unsigned char Dummy[3]; /* Bytes 13-15 */
- } __attribute__ ((packed)) Type4;
- struct {
- DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
- DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
- struct {
- unsigned short TransferLength:11; /* Bytes 2-3 */
- unsigned char LogicalDriveNumber:5; /* Byte 3 Bits 3-7 */
- } __attribute__ ((packed)) LD;
- unsigned int LogicalBlockAddress; /* Bytes 4-7 */
- DAC960_BusAddress32_T BusAddress; /* Bytes 8-11 */
- unsigned char ScatterGatherCount:6; /* Byte 12 Bits 0-5 */
- enum {
- DAC960_V1_ScatterGather_32BitAddress_32BitByteCount = 0x0,
- DAC960_V1_ScatterGather_32BitAddress_16BitByteCount = 0x1,
- DAC960_V1_ScatterGather_32BitByteCount_32BitAddress = 0x2,
- DAC960_V1_ScatterGather_16BitByteCount_32BitAddress = 0x3
- } __attribute__ ((packed)) ScatterGatherType:2; /* Byte 12 Bits 6-7 */
- unsigned char Dummy[3]; /* Bytes 13-15 */
- } __attribute__ ((packed)) Type5;
- struct {
- DAC960_V1_CommandOpcode_T CommandOpcode; /* Byte 0 */
- DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 1 */
- unsigned char CommandOpcode2; /* Byte 2 */
- unsigned char :8; /* Byte 3 */
- DAC960_BusAddress32_T CommandMailboxesBusAddress; /* Bytes 4-7 */
- DAC960_BusAddress32_T StatusMailboxesBusAddress; /* Bytes 8-11 */
- unsigned char Dummy[4]; /* Bytes 12-15 */
- } __attribute__ ((packed)) TypeX;
-}
-DAC960_V1_CommandMailbox_T;
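-
-
-/*
-  A minimal sketch, assuming the caller owns a free command, of how a
-  Type3 format command (for example an Enquiry) is built: clear the
-  whole union, then fill in only the fields the chosen format uses.
-*/
-
-static void DAC960_V1_ExampleBuildType3Command(
-	DAC960_V1_CommandMailbox_T *CommandMailbox,
-	DAC960_V1_CommandOpcode_T CommandOpcode,
-	DAC960_V1_CommandIdentifier_T CommandIdentifier,
-	DAC960_BusAddress32_T DataDMA)
-{
-  /* Clearing all four words zeroes every format of the union. */
-  CommandMailbox->Words[0] = 0;
-  CommandMailbox->Words[1] = 0;
-  CommandMailbox->Words[2] = 0;
-  CommandMailbox->Words[3] = 0;
-  CommandMailbox->Type3.CommandOpcode = CommandOpcode;
-  CommandMailbox->Type3.CommandIdentifier = CommandIdentifier;
-  CommandMailbox->Type3.BusAddress = DataDMA;
-}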
-
-
-/*
- Define the DAC960 V2 Firmware Command Opcodes.
-*/
-
-typedef enum
-{
- DAC960_V2_MemCopy = 0x01,
- DAC960_V2_SCSI_10_Passthru = 0x02,
- DAC960_V2_SCSI_255_Passthru = 0x03,
- DAC960_V2_SCSI_10 = 0x04,
- DAC960_V2_SCSI_256 = 0x05,
- DAC960_V2_IOCTL = 0x20
-}
-__attribute__ ((packed))
-DAC960_V2_CommandOpcode_T;
-
-
-/*
- Define the DAC960 V2 Firmware IOCTL Opcodes.
-*/
-
-typedef enum
-{
- DAC960_V2_GetControllerInfo = 0x01,
- DAC960_V2_GetLogicalDeviceInfoValid = 0x03,
- DAC960_V2_GetPhysicalDeviceInfoValid = 0x05,
- DAC960_V2_GetHealthStatus = 0x11,
- DAC960_V2_GetEvent = 0x15,
- DAC960_V2_StartDiscovery = 0x81,
- DAC960_V2_SetDeviceState = 0x82,
- DAC960_V2_RebuildDeviceStart = 0x88,
- DAC960_V2_RebuildDeviceStop = 0x89,
- DAC960_V2_ConsistencyCheckStart = 0x8C,
- DAC960_V2_ConsistencyCheckStop = 0x8D,
- DAC960_V2_SetMemoryMailbox = 0x8E,
- DAC960_V2_PauseDevice = 0x92,
- DAC960_V2_TranslatePhysicalToLogicalDevice = 0xC5
-}
-__attribute__ ((packed))
-DAC960_V2_IOCTL_Opcode_T;
-
-
-/*
- Define the DAC960 V2 Firmware Command Identifier type.
-*/
-
-typedef unsigned short DAC960_V2_CommandIdentifier_T;
-
-
-/*
- Define the DAC960 V2 Firmware Command Status Codes.
-*/
-
-#define DAC960_V2_NormalCompletion 0x00
-#define DAC960_V2_AbnormalCompletion 0x02
-#define DAC960_V2_DeviceBusy 0x08
-#define DAC960_V2_DeviceNonresponsive 0x0E
-#define DAC960_V2_DeviceNonresponsive2 0x0F
-#define DAC960_V2_DeviceReservationConflict 0x18
-
-typedef unsigned char DAC960_V2_CommandStatus_T;
-
-
-/*
- Define the DAC960 V2 Firmware Memory Type structure.
-*/
-
-typedef struct DAC960_V2_MemoryType
-{
- enum {
- DAC960_V2_MemoryType_Reserved = 0x00,
- DAC960_V2_MemoryType_DRAM = 0x01,
- DAC960_V2_MemoryType_EDRAM = 0x02,
- DAC960_V2_MemoryType_EDO = 0x03,
- DAC960_V2_MemoryType_SDRAM = 0x04,
- DAC960_V2_MemoryType_Last = 0x1F
- } __attribute__ ((packed)) MemoryType:5; /* Byte 0 Bits 0-4 */
- bool :1; /* Byte 0 Bit 5 */
- bool MemoryParity:1; /* Byte 0 Bit 6 */
- bool MemoryECC:1; /* Byte 0 Bit 7 */
-}
-DAC960_V2_MemoryType_T;
-
-
-/*
- Define the DAC960 V2 Firmware Processor Type.
-*/
-
-typedef enum
-{
- DAC960_V2_ProcessorType_i960CA = 0x01,
- DAC960_V2_ProcessorType_i960RD = 0x02,
- DAC960_V2_ProcessorType_i960RN = 0x03,
- DAC960_V2_ProcessorType_i960RP = 0x04,
- DAC960_V2_ProcessorType_NorthBay = 0x05,
- DAC960_V2_ProcessorType_StrongArm = 0x06,
- DAC960_V2_ProcessorType_i960RM = 0x07
-}
-__attribute__ ((packed))
-DAC960_V2_ProcessorType_T;
-
-
-/*
- Define the DAC960 V2 Firmware Get Controller Info reply structure.
-*/
-
-typedef struct DAC960_V2_ControllerInfo
-{
- unsigned char :8; /* Byte 0 */
- enum {
- DAC960_V2_SCSI_Bus = 0x00,
- DAC960_V2_Fibre_Bus = 0x01,
- DAC960_V2_PCI_Bus = 0x03
- } __attribute__ ((packed)) BusInterfaceType; /* Byte 1 */
- enum {
- DAC960_V2_DAC960E = 0x01,
- DAC960_V2_DAC960M = 0x08,
- DAC960_V2_DAC960PD = 0x10,
- DAC960_V2_DAC960PL = 0x11,
- DAC960_V2_DAC960PU = 0x12,
- DAC960_V2_DAC960PE = 0x13,
- DAC960_V2_DAC960PG = 0x14,
- DAC960_V2_DAC960PJ = 0x15,
- DAC960_V2_DAC960PTL0 = 0x16,
- DAC960_V2_DAC960PR = 0x17,
- DAC960_V2_DAC960PRL = 0x18,
- DAC960_V2_DAC960PT = 0x19,
- DAC960_V2_DAC1164P = 0x1A,
- DAC960_V2_DAC960PTL1 = 0x1B,
- DAC960_V2_EXR2000P = 0x1C,
- DAC960_V2_EXR3000P = 0x1D,
- DAC960_V2_AcceleRAID352 = 0x1E,
- DAC960_V2_AcceleRAID170 = 0x1F,
- DAC960_V2_AcceleRAID160 = 0x20,
- DAC960_V2_DAC960S = 0x60,
- DAC960_V2_DAC960SU = 0x61,
- DAC960_V2_DAC960SX = 0x62,
- DAC960_V2_DAC960SF = 0x63,
- DAC960_V2_DAC960SS = 0x64,
- DAC960_V2_DAC960FL = 0x65,
- DAC960_V2_DAC960LL = 0x66,
- DAC960_V2_DAC960FF = 0x67,
- DAC960_V2_DAC960HP = 0x68,
- DAC960_V2_RAIDBRICK = 0x69,
- DAC960_V2_METEOR_FL = 0x6A,
- DAC960_V2_METEOR_FF = 0x6B
- } __attribute__ ((packed)) ControllerType; /* Byte 2 */
- unsigned char :8; /* Byte 3 */
- unsigned short BusInterfaceSpeedMHz; /* Bytes 4-5 */
- unsigned char BusWidthBits; /* Byte 6 */
- unsigned char FlashCodeTypeOrProductID; /* Byte 7 */
- unsigned char NumberOfHostPortsPresent; /* Byte 8 */
- unsigned char Reserved1[7]; /* Bytes 9-15 */
- unsigned char BusInterfaceName[16]; /* Bytes 16-31 */
- unsigned char ControllerName[16]; /* Bytes 32-47 */
- unsigned char Reserved2[16]; /* Bytes 48-63 */
- /* Firmware Release Information */
- unsigned char FirmwareMajorVersion; /* Byte 64 */
- unsigned char FirmwareMinorVersion; /* Byte 65 */
- unsigned char FirmwareTurnNumber; /* Byte 66 */
- unsigned char FirmwareBuildNumber; /* Byte 67 */
- unsigned char FirmwareReleaseDay; /* Byte 68 */
- unsigned char FirmwareReleaseMonth; /* Byte 69 */
- unsigned char FirmwareReleaseYearHigh2Digits; /* Byte 70 */
- unsigned char FirmwareReleaseYearLow2Digits; /* Byte 71 */
- /* Hardware Release Information */
- unsigned char HardwareRevision; /* Byte 72 */
- unsigned int :24; /* Bytes 73-75 */
- unsigned char HardwareReleaseDay; /* Byte 76 */
- unsigned char HardwareReleaseMonth; /* Byte 77 */
- unsigned char HardwareReleaseYearHigh2Digits; /* Byte 78 */
- unsigned char HardwareReleaseYearLow2Digits; /* Byte 79 */
- /* Hardware Manufacturing Information */
- unsigned char ManufacturingBatchNumber; /* Byte 80 */
- unsigned char :8; /* Byte 81 */
- unsigned char ManufacturingPlantNumber; /* Byte 82 */
- unsigned char :8; /* Byte 83 */
- unsigned char HardwareManufacturingDay; /* Byte 84 */
- unsigned char HardwareManufacturingMonth; /* Byte 85 */
- unsigned char HardwareManufacturingYearHigh2Digits; /* Byte 86 */
- unsigned char HardwareManufacturingYearLow2Digits; /* Byte 87 */
- unsigned char MaximumNumberOfPDDperXLD; /* Byte 88 */
- unsigned char MaximumNumberOfILDperXLD; /* Byte 89 */
- unsigned short NonvolatileMemorySizeKB; /* Bytes 90-91 */
- unsigned char MaximumNumberOfXLD; /* Byte 92 */
- unsigned int :24; /* Bytes 93-95 */
- /* Unique Information per Controller */
- unsigned char ControllerSerialNumber[16]; /* Bytes 96-111 */
- unsigned char Reserved3[16]; /* Bytes 112-127 */
- /* Vendor Information */
- unsigned int :24; /* Bytes 128-130 */
- unsigned char OEM_Code; /* Byte 131 */
- unsigned char VendorName[16]; /* Bytes 132-147 */
- /* Other Physical/Controller/Operation Information */
- bool BBU_Present:1; /* Byte 148 Bit 0 */
- bool ActiveActiveClusteringMode:1; /* Byte 148 Bit 1 */
- unsigned char :6; /* Byte 148 Bits 2-7 */
- unsigned char :8; /* Byte 149 */
- unsigned short :16; /* Bytes 150-151 */
- /* Physical Device Scan Information */
- bool PhysicalScanActive:1; /* Byte 152 Bit 0 */
- unsigned char :7; /* Byte 152 Bits 1-7 */
- unsigned char PhysicalDeviceChannelNumber; /* Byte 153 */
- unsigned char PhysicalDeviceTargetID; /* Byte 154 */
- unsigned char PhysicalDeviceLogicalUnit; /* Byte 155 */
- /* Maximum Command Data Transfer Sizes */
- unsigned short MaximumDataTransferSizeInBlocks; /* Bytes 156-157 */
- unsigned short MaximumScatterGatherEntries; /* Bytes 158-159 */
- /* Logical/Physical Device Counts */
- unsigned short LogicalDevicesPresent; /* Bytes 160-161 */
- unsigned short LogicalDevicesCritical; /* Bytes 162-163 */
- unsigned short LogicalDevicesOffline; /* Bytes 164-165 */
- unsigned short PhysicalDevicesPresent; /* Bytes 166-167 */
- unsigned short PhysicalDisksPresent; /* Bytes 168-169 */
- unsigned short PhysicalDisksCritical; /* Bytes 170-171 */
- unsigned short PhysicalDisksOffline; /* Bytes 172-173 */
- unsigned short MaximumParallelCommands; /* Bytes 174-175 */
- /* Channel and Target ID Information */
- unsigned char NumberOfPhysicalChannelsPresent; /* Byte 176 */
- unsigned char NumberOfVirtualChannelsPresent; /* Byte 177 */
- unsigned char NumberOfPhysicalChannelsPossible; /* Byte 178 */
- unsigned char NumberOfVirtualChannelsPossible; /* Byte 179 */
- unsigned char MaximumTargetsPerChannel[16]; /* Bytes 180-195 */
- unsigned char Reserved4[12]; /* Bytes 196-207 */
- /* Memory/Cache Information */
- unsigned short MemorySizeMB; /* Bytes 208-209 */
- unsigned short CacheSizeMB; /* Bytes 210-211 */
- unsigned int ValidCacheSizeInBytes; /* Bytes 212-215 */
- unsigned int DirtyCacheSizeInBytes; /* Bytes 216-219 */
- unsigned short MemorySpeedMHz; /* Bytes 220-221 */
- unsigned char MemoryDataWidthBits; /* Byte 222 */
- DAC960_V2_MemoryType_T MemoryType; /* Byte 223 */
- unsigned char CacheMemoryTypeName[16]; /* Bytes 224-239 */
- /* Execution Memory Information */
- unsigned short ExecutionMemorySizeMB; /* Bytes 240-241 */
- unsigned short ExecutionL2CacheSizeMB; /* Bytes 242-243 */
- unsigned char Reserved5[8]; /* Bytes 244-251 */
- unsigned short ExecutionMemorySpeedMHz; /* Bytes 252-253 */
- unsigned char ExecutionMemoryDataWidthBits; /* Byte 254 */
- DAC960_V2_MemoryType_T ExecutionMemoryType; /* Byte 255 */
- unsigned char ExecutionMemoryTypeName[16]; /* Bytes 256-271 */
- /* First CPU Type Information */
- unsigned short FirstProcessorSpeedMHz; /* Bytes 272-273 */
- DAC960_V2_ProcessorType_T FirstProcessorType; /* Byte 274 */
- unsigned char FirstProcessorCount; /* Byte 275 */
- unsigned char Reserved6[12]; /* Bytes 276-287 */
- unsigned char FirstProcessorName[16]; /* Bytes 288-303 */
- /* Second CPU Type Information */
- unsigned short SecondProcessorSpeedMHz; /* Bytes 304-305 */
- DAC960_V2_ProcessorType_T SecondProcessorType; /* Byte 306 */
- unsigned char SecondProcessorCount; /* Byte 307 */
- unsigned char Reserved7[12]; /* Bytes 308-319 */
- unsigned char SecondProcessorName[16]; /* Bytes 320-335 */
- /* Debugging/Profiling/Command Time Tracing Information */
- unsigned short CurrentProfilingDataPageNumber; /* Bytes 336-337 */
- unsigned short ProgramsAwaitingProfilingData; /* Bytes 338-339 */
- unsigned short CurrentCommandTimeTraceDataPageNumber; /* Bytes 340-341 */
- unsigned short ProgramsAwaitingCommandTimeTraceData; /* Bytes 342-343 */
- unsigned char Reserved8[8]; /* Bytes 344-351 */
- /* Error Counters on Physical Devices */
- unsigned short PhysicalDeviceBusResets; /* Bytes 352-353 */
- unsigned short PhysicalDeviceParityErrors; /* Bytes 354-355 */
- unsigned short PhysicalDeviceSoftErrors; /* Bytes 356-357 */
- unsigned short PhysicalDeviceCommandsFailed; /* Bytes 358-359 */
- unsigned short PhysicalDeviceMiscellaneousErrors; /* Bytes 360-361 */
- unsigned short PhysicalDeviceCommandTimeouts; /* Bytes 362-363 */
- unsigned short PhysicalDeviceSelectionTimeouts; /* Bytes 364-365 */
- unsigned short PhysicalDeviceRetriesDone; /* Bytes 366-367 */
- unsigned short PhysicalDeviceAbortsDone; /* Bytes 368-369 */
- unsigned short PhysicalDeviceHostCommandAbortsDone; /* Bytes 370-371 */
- unsigned short PhysicalDevicePredictedFailuresDetected; /* Bytes 372-373 */
- unsigned short PhysicalDeviceHostCommandsFailed; /* Bytes 374-375 */
- unsigned short PhysicalDeviceHardErrors; /* Bytes 376-377 */
- unsigned char Reserved9[6]; /* Bytes 378-383 */
- /* Error Counters on Logical Devices */
- unsigned short LogicalDeviceSoftErrors; /* Bytes 384-385 */
- unsigned short LogicalDeviceCommandsFailed; /* Bytes 386-387 */
- unsigned short LogicalDeviceHostCommandAbortsDone; /* Bytes 388-389 */
- unsigned short :16; /* Bytes 390-391 */
- /* Error Counters on Controller */
- unsigned short ControllerMemoryErrors; /* Bytes 392-393 */
- unsigned short ControllerHostCommandAbortsDone; /* Bytes 394-395 */
- unsigned int :32; /* Bytes 396-399 */
- /* Long Duration Activity Information */
- unsigned short BackgroundInitializationsActive; /* Bytes 400-401 */
- unsigned short LogicalDeviceInitializationsActive; /* Bytes 402-403 */
- unsigned short PhysicalDeviceInitializationsActive; /* Bytes 404-405 */
- unsigned short ConsistencyChecksActive; /* Bytes 406-407 */
- unsigned short RebuildsActive; /* Bytes 408-409 */
- unsigned short OnlineExpansionsActive; /* Bytes 410-411 */
- unsigned short PatrolActivitiesActive; /* Bytes 412-413 */
- unsigned short :16; /* Bytes 414-415 */
- /* Flash ROM Information */
- unsigned char FlashType; /* Byte 416 */
- unsigned char :8; /* Byte 417 */
- unsigned short FlashSizeMB; /* Bytes 418-419 */
- unsigned int FlashLimit; /* Bytes 420-423 */
- unsigned int FlashCount; /* Bytes 424-427 */
- unsigned int :32; /* Bytes 428-431 */
- unsigned char FlashTypeName[16]; /* Bytes 432-447 */
- /* Firmware Run Time Information */
- unsigned char RebuildRate; /* Byte 448 */
- unsigned char BackgroundInitializationRate; /* Byte 449 */
- unsigned char ForegroundInitializationRate; /* Byte 450 */
- unsigned char ConsistencyCheckRate; /* Byte 451 */
- unsigned int :32; /* Bytes 452-455 */
- unsigned int MaximumDP; /* Bytes 456-459 */
- unsigned int FreeDP; /* Bytes 460-463 */
- unsigned int MaximumIOP; /* Bytes 464-467 */
- unsigned int FreeIOP; /* Bytes 468-471 */
- unsigned short MaximumCombLengthInBlocks; /* Bytes 472-473 */
- unsigned short NumberOfConfigurationGroups; /* Bytes 474-475 */
- bool InstallationAbortStatus:1; /* Byte 476 Bit 0 */
- bool MaintenanceModeStatus:1; /* Byte 476 Bit 1 */
- unsigned int :24; /* Bytes 476-479 */
- unsigned char Reserved10[32]; /* Bytes 480-511 */
- unsigned char Reserved11[512]; /* Bytes 512-1023 */
-}
-DAC960_V2_ControllerInfo_T;
-
-
-/*
- Define the DAC960 V2 Firmware Logical Device State type.
-*/
-
-typedef enum
-{
- DAC960_V2_LogicalDevice_Online = 0x01,
- DAC960_V2_LogicalDevice_Offline = 0x08,
- DAC960_V2_LogicalDevice_Critical = 0x09
-}
-__attribute__ ((packed))
-DAC960_V2_LogicalDeviceState_T;
-
-
-/*
- Define the DAC960 V2 Firmware Get Logical Device Info reply structure.
-*/
-
-typedef struct DAC960_V2_LogicalDeviceInfo
-{
- unsigned char :8; /* Byte 0 */
- unsigned char Channel; /* Byte 1 */
- unsigned char TargetID; /* Byte 2 */
- unsigned char LogicalUnit; /* Byte 3 */
- DAC960_V2_LogicalDeviceState_T LogicalDeviceState; /* Byte 4 */
- unsigned char RAIDLevel; /* Byte 5 */
- unsigned char StripeSize; /* Byte 6 */
- unsigned char CacheLineSize; /* Byte 7 */
- struct {
- enum {
- DAC960_V2_ReadCacheDisabled = 0x0,
- DAC960_V2_ReadCacheEnabled = 0x1,
- DAC960_V2_ReadAheadEnabled = 0x2,
- DAC960_V2_IntelligentReadAheadEnabled = 0x3,
- DAC960_V2_ReadCache_Last = 0x7
- } __attribute__ ((packed)) ReadCache:3; /* Byte 8 Bits 0-2 */
- enum {
- DAC960_V2_WriteCacheDisabled = 0x0,
- DAC960_V2_LogicalDeviceReadOnly = 0x1,
- DAC960_V2_WriteCacheEnabled = 0x2,
- DAC960_V2_IntelligentWriteCacheEnabled = 0x3,
- DAC960_V2_WriteCache_Last = 0x7
- } __attribute__ ((packed)) WriteCache:3; /* Byte 8 Bits 3-5 */
- bool :1; /* Byte 8 Bit 6 */
- bool LogicalDeviceInitialized:1; /* Byte 8 Bit 7 */
- } LogicalDeviceControl; /* Byte 8 */
- /* Logical Device Operations Status */
- bool ConsistencyCheckInProgress:1; /* Byte 9 Bit 0 */
- bool RebuildInProgress:1; /* Byte 9 Bit 1 */
- bool BackgroundInitializationInProgress:1; /* Byte 9 Bit 2 */
- bool ForegroundInitializationInProgress:1; /* Byte 9 Bit 3 */
- bool DataMigrationInProgress:1; /* Byte 9 Bit 4 */
- bool PatrolOperationInProgress:1; /* Byte 9 Bit 5 */
- unsigned char :2; /* Byte 9 Bits 6-7 */
- unsigned char RAID5WriteUpdate; /* Byte 10 */
- unsigned char RAID5Algorithm; /* Byte 11 */
- unsigned short LogicalDeviceNumber; /* Bytes 12-13 */
- /* BIOS Info */
- bool BIOSDisabled:1; /* Byte 14 Bit 0 */
- bool CDROMBootEnabled:1; /* Byte 14 Bit 1 */
- bool DriveCoercionEnabled:1; /* Byte 14 Bit 2 */
- bool WriteSameDisabled:1; /* Byte 14 Bit 3 */
- bool HBA_ModeEnabled:1; /* Byte 14 Bit 4 */
- enum {
- DAC960_V2_Geometry_128_32 = 0x0,
- DAC960_V2_Geometry_255_63 = 0x1,
- DAC960_V2_Geometry_Reserved1 = 0x2,
- DAC960_V2_Geometry_Reserved2 = 0x3
- } __attribute__ ((packed)) DriveGeometry:2; /* Byte 14 Bits 5-6 */
- bool SuperReadAheadEnabled:1; /* Byte 14 Bit 7 */
- unsigned char :8; /* Byte 15 */
- /* Error Counters */
- unsigned short SoftErrors; /* Bytes 16-17 */
- unsigned short CommandsFailed; /* Bytes 18-19 */
- unsigned short HostCommandAbortsDone; /* Bytes 20-21 */
- unsigned short DeferredWriteErrors; /* Bytes 22-23 */
- unsigned int :32; /* Bytes 24-27 */
- unsigned int :32; /* Bytes 28-31 */
- /* Device Size Information */
- unsigned short :16; /* Bytes 32-33 */
- unsigned short DeviceBlockSizeInBytes; /* Bytes 34-35 */
- unsigned int OriginalDeviceSize; /* Bytes 36-39 */
- unsigned int ConfigurableDeviceSize; /* Bytes 40-43 */
- unsigned int :32; /* Bytes 44-47 */
- unsigned char LogicalDeviceName[32]; /* Bytes 48-79 */
- unsigned char SCSI_InquiryData[36]; /* Bytes 80-115 */
- unsigned char Reserved1[12]; /* Bytes 116-127 */
- DAC960_ByteCount64_T LastReadBlockNumber; /* Bytes 128-135 */
- DAC960_ByteCount64_T LastWrittenBlockNumber; /* Bytes 136-143 */
- DAC960_ByteCount64_T ConsistencyCheckBlockNumber; /* Bytes 144-151 */
- DAC960_ByteCount64_T RebuildBlockNumber; /* Bytes 152-159 */
- DAC960_ByteCount64_T BackgroundInitializationBlockNumber; /* Bytes 160-167 */
- DAC960_ByteCount64_T ForegroundInitializationBlockNumber; /* Bytes 168-175 */
- DAC960_ByteCount64_T DataMigrationBlockNumber; /* Bytes 176-183 */
- DAC960_ByteCount64_T PatrolOperationBlockNumber; /* Bytes 184-191 */
- unsigned char Reserved2[64]; /* Bytes 192-255 */
-}
-DAC960_V2_LogicalDeviceInfo_T;
-
-
-/*
- Define the DAC960 V2 Firmware Physical Device State type.
-*/
-
-typedef enum
-{
- DAC960_V2_Device_Unconfigured = 0x00,
- DAC960_V2_Device_Online = 0x01,
- DAC960_V2_Device_Rebuild = 0x03,
- DAC960_V2_Device_Missing = 0x04,
- DAC960_V2_Device_Critical = 0x05,
- DAC960_V2_Device_Dead = 0x08,
- DAC960_V2_Device_SuspectedDead = 0x0C,
- DAC960_V2_Device_CommandedOffline = 0x10,
- DAC960_V2_Device_Standby = 0x21,
- DAC960_V2_Device_InvalidState = 0xFF
-}
-__attribute__ ((packed))
-DAC960_V2_PhysicalDeviceState_T;
-
-
-/*
- Define the DAC960 V2 Firmware Get Physical Device Info reply structure.
-*/
-
-typedef struct DAC960_V2_PhysicalDeviceInfo
-{
- unsigned char :8; /* Byte 0 */
- unsigned char Channel; /* Byte 1 */
- unsigned char TargetID; /* Byte 2 */
- unsigned char LogicalUnit; /* Byte 3 */
- /* Configuration Status Bits */
- bool PhysicalDeviceFaultTolerant:1; /* Byte 4 Bit 0 */
- bool PhysicalDeviceConnected:1; /* Byte 4 Bit 1 */
- bool PhysicalDeviceLocalToController:1; /* Byte 4 Bit 2 */
- unsigned char :5; /* Byte 4 Bits 3-7 */
- /* Multiple Host/Controller Status Bits */
- bool RemoteHostSystemDead:1; /* Byte 5 Bit 0 */
- bool RemoteControllerDead:1; /* Byte 5 Bit 1 */
- unsigned char :6; /* Byte 5 Bits 2-7 */
- DAC960_V2_PhysicalDeviceState_T PhysicalDeviceState; /* Byte 6 */
- unsigned char NegotiatedDataWidthBits; /* Byte 7 */
- unsigned short NegotiatedSynchronousMegaTransfers; /* Bytes 8-9 */
- /* Multiported Physical Device Information */
- unsigned char NumberOfPortConnections; /* Byte 10 */
- unsigned char DriveAccessibilityBitmap; /* Byte 11 */
- unsigned int :32; /* Bytes 12-15 */
- unsigned char NetworkAddress[16]; /* Bytes 16-31 */
- unsigned short MaximumTags; /* Bytes 32-33 */
- /* Physical Device Operations Status */
- bool ConsistencyCheckInProgress:1; /* Byte 34 Bit 0 */
- bool RebuildInProgress:1; /* Byte 34 Bit 1 */
- bool MakingDataConsistentInProgress:1; /* Byte 34 Bit 2 */
- bool PhysicalDeviceInitializationInProgress:1; /* Byte 34 Bit 3 */
- bool DataMigrationInProgress:1; /* Byte 34 Bit 4 */
- bool PatrolOperationInProgress:1; /* Byte 34 Bit 5 */
- unsigned char :2; /* Byte 34 Bits 6-7 */
- unsigned char LongOperationStatus; /* Byte 35 */
- unsigned char ParityErrors; /* Byte 36 */
- unsigned char SoftErrors; /* Byte 37 */
- unsigned char HardErrors; /* Byte 38 */
- unsigned char MiscellaneousErrors; /* Byte 39 */
- unsigned char CommandTimeouts; /* Byte 40 */
- unsigned char Retries; /* Byte 41 */
- unsigned char Aborts; /* Byte 42 */
- unsigned char PredictedFailuresDetected; /* Byte 43 */
- unsigned int :32; /* Bytes 44-47 */
- unsigned short :16; /* Bytes 48-49 */
- unsigned short DeviceBlockSizeInBytes; /* Bytes 50-51 */
- unsigned int OriginalDeviceSize; /* Bytes 52-55 */
- unsigned int ConfigurableDeviceSize; /* Bytes 56-59 */
- unsigned int :32; /* Bytes 60-63 */
- unsigned char PhysicalDeviceName[16]; /* Bytes 64-79 */
- unsigned char Reserved1[16]; /* Bytes 80-95 */
- unsigned char Reserved2[32]; /* Bytes 96-127 */
- unsigned char SCSI_InquiryData[36]; /* Bytes 128-163 */
- unsigned char Reserved3[20]; /* Bytes 164-183 */
- unsigned char Reserved4[8]; /* Bytes 184-191 */
- DAC960_ByteCount64_T LastReadBlockNumber; /* Bytes 192-199 */
- DAC960_ByteCount64_T LastWrittenBlockNumber; /* Bytes 200-207 */
- DAC960_ByteCount64_T ConsistencyCheckBlockNumber; /* Bytes 208-215 */
- DAC960_ByteCount64_T RebuildBlockNumber; /* Bytes 216-223 */
- DAC960_ByteCount64_T MakingDataConsistentBlockNumber; /* Bytes 224-231 */
- DAC960_ByteCount64_T DeviceInitializationBlockNumber; /* Bytes 232-239 */
- DAC960_ByteCount64_T DataMigrationBlockNumber; /* Bytes 240-247 */
- DAC960_ByteCount64_T PatrolOperationBlockNumber; /* Bytes 248-255 */
- unsigned char Reserved5[256]; /* Bytes 256-511 */
-}
-DAC960_V2_PhysicalDeviceInfo_T;
-
-
-/*
- Define the DAC960 V2 Firmware Health Status Buffer structure.
-*/
-
-typedef struct DAC960_V2_HealthStatusBuffer
-{
- unsigned int MicrosecondsFromControllerStartTime; /* Bytes 0-3 */
- unsigned int MillisecondsFromControllerStartTime; /* Bytes 4-7 */
- unsigned int SecondsFrom1January1970; /* Bytes 8-11 */
- unsigned int :32; /* Bytes 12-15 */
- unsigned int StatusChangeCounter; /* Bytes 16-19 */
- unsigned int :32; /* Bytes 20-23 */
- unsigned int DebugOutputMessageBufferIndex; /* Bytes 24-27 */
- unsigned int CodedMessageBufferIndex; /* Bytes 28-31 */
- unsigned int CurrentTimeTracePageNumber; /* Bytes 32-35 */
- unsigned int CurrentProfilerPageNumber; /* Bytes 36-39 */
- unsigned int NextEventSequenceNumber; /* Bytes 40-43 */
- unsigned int :32; /* Bytes 44-47 */
- unsigned char Reserved1[16]; /* Bytes 48-63 */
- unsigned char Reserved2[64]; /* Bytes 64-127 */
-}
-DAC960_V2_HealthStatusBuffer_T;
-
-
-/*
- Define the DAC960 V2 Firmware Get Event reply structure.
-*/
-
-typedef struct DAC960_V2_Event
-{
- unsigned int EventSequenceNumber; /* Bytes 0-3 */
- unsigned int EventTime; /* Bytes 4-7 */
- unsigned int EventCode; /* Bytes 8-11 */
- unsigned char :8; /* Byte 12 */
- unsigned char Channel; /* Byte 13 */
- unsigned char TargetID; /* Byte 14 */
- unsigned char LogicalUnit; /* Byte 15 */
- unsigned int :32; /* Bytes 16-19 */
- unsigned int EventSpecificParameter; /* Bytes 20-23 */
- unsigned char RequestSenseData[40]; /* Bytes 24-63 */
-}
-DAC960_V2_Event_T;
-
-
-/*
- Define the DAC960 V2 Firmware Command Control Bits structure.
-*/
-
-typedef struct DAC960_V2_CommandControlBits
-{
- bool ForceUnitAccess:1; /* Byte 0 Bit 0 */
- bool DisablePageOut:1; /* Byte 0 Bit 1 */
- bool :1; /* Byte 0 Bit 2 */
- bool AdditionalScatterGatherListMemory:1; /* Byte 0 Bit 3 */
- bool DataTransferControllerToHost:1; /* Byte 0 Bit 4 */
- bool :1; /* Byte 0 Bit 5 */
- bool NoAutoRequestSense:1; /* Byte 0 Bit 6 */
- bool DisconnectProhibited:1; /* Byte 0 Bit 7 */
-}
-DAC960_V2_CommandControlBits_T;
-
-
-/*
- Define the DAC960 V2 Firmware Command Timeout structure.
-*/
-
-typedef struct DAC960_V2_CommandTimeout
-{
- unsigned char TimeoutValue:6; /* Byte 0 Bits 0-5 */
- enum {
- DAC960_V2_TimeoutScale_Seconds = 0,
- DAC960_V2_TimeoutScale_Minutes = 1,
- DAC960_V2_TimeoutScale_Hours = 2,
- DAC960_V2_TimeoutScale_Reserved = 3
- } __attribute__ ((packed)) TimeoutScale:2; /* Byte 0 Bits 6-7 */
-}
-DAC960_V2_CommandTimeout_T;
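-
-
-/*
-  An illustrative helper (an assumption, not driver code) showing how a
-  timeout in seconds maps onto the 6-bit value / 2-bit scale encoding.
-*/
-
-static void DAC960_V2_ExampleSetCommandTimeout(
-	DAC960_V2_CommandTimeout_T *CommandTimeout, unsigned int Seconds)
-{
-  if (Seconds < 60) {
-    CommandTimeout->TimeoutScale = DAC960_V2_TimeoutScale_Seconds;
-    CommandTimeout->TimeoutValue = Seconds;
-  } else if (Seconds < 60 * 60) {
-    CommandTimeout->TimeoutScale = DAC960_V2_TimeoutScale_Minutes;
-    CommandTimeout->TimeoutValue = Seconds / 60;
-  } else {
-    /* The 6-bit value caps the longest expressible timeout at 63 hours. */
-    CommandTimeout->TimeoutScale = DAC960_V2_TimeoutScale_Hours;
-    CommandTimeout->TimeoutValue = Seconds / (60 * 60);
-  }
-}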
-
-
-/*
- Define the DAC960 V2 Firmware Physical Device structure.
-*/
-
-typedef struct DAC960_V2_PhysicalDevice
-{
- unsigned char LogicalUnit; /* Byte 0 */
- unsigned char TargetID; /* Byte 1 */
- unsigned char Channel:3; /* Byte 2 Bits 0-2 */
- unsigned char Controller:5; /* Byte 2 Bits 3-7 */
-}
-__attribute__ ((packed))
-DAC960_V2_PhysicalDevice_T;
-
-
-/*
- Define the DAC960 V2 Firmware Logical Device structure.
-*/
-
-typedef struct DAC960_V2_LogicalDevice
-{
- unsigned short LogicalDeviceNumber; /* Bytes 0-1 */
- unsigned char :3; /* Byte 2 Bits 0-2 */
- unsigned char Controller:5; /* Byte 2 Bits 3-7 */
-}
-__attribute__ ((packed))
-DAC960_V2_LogicalDevice_T;
-
-
-/*
- Define the DAC960 V2 Firmware Operation Device type.
-*/
-
-typedef enum
-{
- DAC960_V2_Physical_Device = 0x00,
- DAC960_V2_RAID_Device = 0x01,
- DAC960_V2_Physical_Channel = 0x02,
- DAC960_V2_RAID_Channel = 0x03,
- DAC960_V2_Physical_Controller = 0x04,
- DAC960_V2_RAID_Controller = 0x05,
- DAC960_V2_Configuration_Group = 0x10,
- DAC960_V2_Enclosure = 0x11
-}
-__attribute__ ((packed))
-DAC960_V2_OperationDevice_T;
-
-
-/*
- Define the DAC960 V2 Firmware Translate Physical To Logical Device structure.
-*/
-
-typedef struct DAC960_V2_PhysicalToLogicalDevice
-{
- unsigned short LogicalDeviceNumber; /* Bytes 0-1 */
- unsigned short :16; /* Bytes 2-3 */
- unsigned char PreviousBootController; /* Byte 4 */
- unsigned char PreviousBootChannel; /* Byte 5 */
- unsigned char PreviousBootTargetID; /* Byte 6 */
- unsigned char PreviousBootLogicalUnit; /* Byte 7 */
-}
-DAC960_V2_PhysicalToLogicalDevice_T;
-
-
-/*
- Define the DAC960 V2 Firmware Scatter/Gather List Entry structure.
-*/
-
-typedef struct DAC960_V2_ScatterGatherSegment
-{
- DAC960_BusAddress64_T SegmentDataPointer; /* Bytes 0-7 */
- DAC960_ByteCount64_T SegmentByteCount; /* Bytes 8-15 */
-}
-DAC960_V2_ScatterGatherSegment_T;
-
-
-/*
- Define the DAC960 V2 Firmware Data Transfer Memory Address structure.
-*/
-
-typedef union DAC960_V2_DataTransferMemoryAddress
-{
- DAC960_V2_ScatterGatherSegment_T ScatterGatherSegments[2]; /* Bytes 0-31 */
- struct {
- unsigned short ScatterGatherList0Length; /* Bytes 0-1 */
- unsigned short ScatterGatherList1Length; /* Bytes 2-3 */
- unsigned short ScatterGatherList2Length; /* Bytes 4-5 */
- unsigned short :16; /* Bytes 6-7 */
- DAC960_BusAddress64_T ScatterGatherList0Address; /* Bytes 8-15 */
- DAC960_BusAddress64_T ScatterGatherList1Address; /* Bytes 16-23 */
- DAC960_BusAddress64_T ScatterGatherList2Address; /* Bytes 24-31 */
- } ExtendedScatterGather;
-}
-DAC960_V2_DataTransferMemoryAddress_T;
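-
-
-/*
-  A hedged sketch of how the two layouts are chosen: a small transfer
-  fits in the inline segments, while a larger one sets the
-  AdditionalScatterGatherListMemory control bit and points List0 at an
-  external segment array (the two-segment cutoff is an assumption).
-*/
-
-static void DAC960_V2_ExampleSetDataTransfer(
-	DAC960_V2_CommandControlBits_T *CommandControlBits,
-	DAC960_V2_DataTransferMemoryAddress_T *MemoryAddress,
-	DAC960_V2_ScatterGatherSegment_T *ScatterGatherList,
-	dma_addr_t ScatterGatherListDMA, int SegmentCount)
-{
-  if (SegmentCount <= 2) {
-    memcpy(MemoryAddress->ScatterGatherSegments, ScatterGatherList,
-	   SegmentCount * sizeof(DAC960_V2_ScatterGatherSegment_T));
-  } else {
-    CommandControlBits->AdditionalScatterGatherListMemory = true;
-    MemoryAddress->ExtendedScatterGather.ScatterGatherList0Length =
-      SegmentCount;
-    MemoryAddress->ExtendedScatterGather.ScatterGatherList0Address =
-      ScatterGatherListDMA;
-  }
-}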
-
-
-/*
- Define the 64 Byte DAC960 V2 Firmware Command Mailbox structure.
-*/
-
-typedef union DAC960_V2_CommandMailbox
-{
- unsigned int Words[16]; /* Words 0-15 */
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
- DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
- DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
- unsigned char DataTransferPageNumber; /* Byte 7 */
- DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
- unsigned int :24; /* Bytes 16-18 */
- DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
- unsigned char RequestSenseSize; /* Byte 20 */
- unsigned char IOCTL_Opcode; /* Byte 21 */
- unsigned char Reserved[10]; /* Bytes 22-31 */
- DAC960_V2_DataTransferMemoryAddress_T
- DataTransferMemoryAddress; /* Bytes 32-63 */
- } Common;
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
- DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
- DAC960_ByteCount32_T DataTransferSize; /* Bytes 4-7 */
- DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
- DAC960_V2_PhysicalDevice_T PhysicalDevice; /* Bytes 16-18 */
- DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
- unsigned char RequestSenseSize; /* Byte 20 */
- unsigned char CDBLength; /* Byte 21 */
- unsigned char SCSI_CDB[10]; /* Bytes 22-31 */
- DAC960_V2_DataTransferMemoryAddress_T
- DataTransferMemoryAddress; /* Bytes 32-63 */
- } SCSI_10;
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
- DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
- DAC960_ByteCount32_T DataTransferSize; /* Bytes 4-7 */
- DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
- DAC960_V2_PhysicalDevice_T PhysicalDevice; /* Bytes 16-18 */
- DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
- unsigned char RequestSenseSize; /* Byte 20 */
- unsigned char CDBLength; /* Byte 21 */
- unsigned short :16; /* Bytes 22-23 */
- DAC960_BusAddress64_T SCSI_CDB_BusAddress; /* Bytes 24-31 */
- DAC960_V2_DataTransferMemoryAddress_T
- DataTransferMemoryAddress; /* Bytes 32-63 */
- } SCSI_255;
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
- DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
- DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
- unsigned char DataTransferPageNumber; /* Byte 7 */
- DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
- unsigned short :16; /* Bytes 16-17 */
- unsigned char ControllerNumber; /* Byte 18 */
- DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
- unsigned char RequestSenseSize; /* Byte 20 */
- unsigned char IOCTL_Opcode; /* Byte 21 */
- unsigned char Reserved[10]; /* Bytes 22-31 */
- DAC960_V2_DataTransferMemoryAddress_T
- DataTransferMemoryAddress; /* Bytes 32-63 */
- } ControllerInfo;
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
- DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
- DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
- unsigned char DataTransferPageNumber; /* Byte 7 */
- DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
- DAC960_V2_LogicalDevice_T LogicalDevice; /* Bytes 16-18 */
- DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
- unsigned char RequestSenseSize; /* Byte 20 */
- unsigned char IOCTL_Opcode; /* Byte 21 */
- unsigned char Reserved[10]; /* Bytes 22-31 */
- DAC960_V2_DataTransferMemoryAddress_T
- DataTransferMemoryAddress; /* Bytes 32-63 */
- } LogicalDeviceInfo;
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
- DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
- DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
- unsigned char DataTransferPageNumber; /* Byte 7 */
- DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
- DAC960_V2_PhysicalDevice_T PhysicalDevice; /* Bytes 16-18 */
- DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
- unsigned char RequestSenseSize; /* Byte 20 */
- unsigned char IOCTL_Opcode; /* Byte 21 */
- unsigned char Reserved[10]; /* Bytes 22-31 */
- DAC960_V2_DataTransferMemoryAddress_T
- DataTransferMemoryAddress; /* Bytes 32-63 */
- } PhysicalDeviceInfo;
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
- DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
- DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
- unsigned char DataTransferPageNumber; /* Byte 7 */
- DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
- unsigned short EventSequenceNumberHigh16; /* Bytes 16-17 */
- unsigned char ControllerNumber; /* Byte 18 */
- DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
- unsigned char RequestSenseSize; /* Byte 20 */
- unsigned char IOCTL_Opcode; /* Byte 21 */
- unsigned short EventSequenceNumberLow16; /* Bytes 22-23 */
- unsigned char Reserved[8]; /* Bytes 24-31 */
- DAC960_V2_DataTransferMemoryAddress_T
- DataTransferMemoryAddress; /* Bytes 32-63 */
- } GetEvent;
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
- DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
- DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
- unsigned char DataTransferPageNumber; /* Byte 7 */
- DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
- DAC960_V2_LogicalDevice_T LogicalDevice; /* Bytes 16-18 */
- DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
- unsigned char RequestSenseSize; /* Byte 20 */
- unsigned char IOCTL_Opcode; /* Byte 21 */
- union {
- DAC960_V2_LogicalDeviceState_T LogicalDeviceState;
- DAC960_V2_PhysicalDeviceState_T PhysicalDeviceState;
- } DeviceState; /* Byte 22 */
- unsigned char Reserved[9]; /* Bytes 23-31 */
- DAC960_V2_DataTransferMemoryAddress_T
- DataTransferMemoryAddress; /* Bytes 32-63 */
- } SetDeviceState;
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
- DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
- DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
- unsigned char DataTransferPageNumber; /* Byte 7 */
- DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
- DAC960_V2_LogicalDevice_T LogicalDevice; /* Bytes 16-18 */
- DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
- unsigned char RequestSenseSize; /* Byte 20 */
- unsigned char IOCTL_Opcode; /* Byte 21 */
- bool RestoreConsistency:1; /* Byte 22 Bit 0 */
- bool InitializedAreaOnly:1; /* Byte 22 Bit 1 */
- unsigned char :6; /* Byte 22 Bits 2-7 */
- unsigned char Reserved[9]; /* Bytes 23-31 */
- DAC960_V2_DataTransferMemoryAddress_T
- DataTransferMemoryAddress; /* Bytes 32-63 */
- } ConsistencyCheck;
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
- DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
- unsigned char FirstCommandMailboxSizeKB; /* Byte 4 */
- unsigned char FirstStatusMailboxSizeKB; /* Byte 5 */
- unsigned char SecondCommandMailboxSizeKB; /* Byte 6 */
- unsigned char SecondStatusMailboxSizeKB; /* Byte 7 */
- DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
- unsigned int :24; /* Bytes 16-18 */
- DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
- unsigned char RequestSenseSize; /* Byte 20 */
- unsigned char IOCTL_Opcode; /* Byte 21 */
- unsigned char HealthStatusBufferSizeKB; /* Byte 22 */
- unsigned char :8; /* Byte 23 */
- DAC960_BusAddress64_T HealthStatusBufferBusAddress; /* Bytes 24-31 */
- DAC960_BusAddress64_T FirstCommandMailboxBusAddress; /* Bytes 32-39 */
- DAC960_BusAddress64_T FirstStatusMailboxBusAddress; /* Bytes 40-47 */
- DAC960_BusAddress64_T SecondCommandMailboxBusAddress; /* Bytes 48-55 */
- DAC960_BusAddress64_T SecondStatusMailboxBusAddress; /* Bytes 56-63 */
- } SetMemoryMailbox;
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandOpcode_T CommandOpcode; /* Byte 2 */
- DAC960_V2_CommandControlBits_T CommandControlBits; /* Byte 3 */
- DAC960_ByteCount32_T DataTransferSize:24; /* Bytes 4-6 */
- unsigned char DataTransferPageNumber; /* Byte 7 */
- DAC960_BusAddress64_T RequestSenseBusAddress; /* Bytes 8-15 */
- DAC960_V2_PhysicalDevice_T PhysicalDevice; /* Bytes 16-18 */
- DAC960_V2_CommandTimeout_T CommandTimeout; /* Byte 19 */
- unsigned char RequestSenseSize; /* Byte 20 */
- unsigned char IOCTL_Opcode; /* Byte 21 */
- DAC960_V2_OperationDevice_T OperationDevice; /* Byte 22 */
- unsigned char Reserved[9]; /* Bytes 23-31 */
- DAC960_V2_DataTransferMemoryAddress_T
- DataTransferMemoryAddress; /* Bytes 32-63 */
- } DeviceOperation;
-}
-DAC960_V2_CommandMailbox_T;
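-
-
-/*
-  A minimal sketch of filling in the ControllerInfo mailbox format for a
-  DAC960_V2_GetControllerInfo IOCTL; the reply buffer is assumed to be a
-  DMA-coherent DAC960_V2_ControllerInfo_T.
-*/
-
-static void DAC960_V2_ExampleBuildControllerInfoCommand(
-	DAC960_V2_CommandMailbox_T *CommandMailbox,
-	DAC960_V2_CommandIdentifier_T CommandIdentifier,
-	DAC960_BusAddress64_T ReplyDMA)
-{
-  memset(CommandMailbox, 0, sizeof(DAC960_V2_CommandMailbox_T));
-  CommandMailbox->ControllerInfo.CommandIdentifier = CommandIdentifier;
-  CommandMailbox->ControllerInfo.CommandOpcode = DAC960_V2_IOCTL;
-  CommandMailbox->ControllerInfo.CommandControlBits
-				.DataTransferControllerToHost = true;
-  CommandMailbox->ControllerInfo.CommandControlBits
-				.NoAutoRequestSense = true;
-  CommandMailbox->ControllerInfo.DataTransferSize =
-    sizeof(DAC960_V2_ControllerInfo_T);
-  CommandMailbox->ControllerInfo.ControllerNumber = 0;
-  CommandMailbox->ControllerInfo.IOCTL_Opcode = DAC960_V2_GetControllerInfo;
-  CommandMailbox->ControllerInfo.DataTransferMemoryAddress
-				.ScatterGatherSegments[0]
-				.SegmentDataPointer = ReplyDMA;
-  CommandMailbox->ControllerInfo.DataTransferMemoryAddress
-				.ScatterGatherSegments[0]
-				.SegmentByteCount =
-    sizeof(DAC960_V2_ControllerInfo_T);
-}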
-
-
-/*
- Define the DAC960 Driver IOCTL requests.
-*/
-
-#define DAC960_IOCTL_GET_CONTROLLER_COUNT 0xDAC001
-#define DAC960_IOCTL_GET_CONTROLLER_INFO 0xDAC002
-#define DAC960_IOCTL_V1_EXECUTE_COMMAND 0xDAC003
-#define DAC960_IOCTL_V2_EXECUTE_COMMAND 0xDAC004
-#define DAC960_IOCTL_V2_GET_HEALTH_STATUS 0xDAC005
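-
-
-/*
-  From user space these requests are issued with ioctl(2) against the
-  driver's management character device; a sketch (the device node name
-  below is an assumption for illustration):
-
-	int fd = open("/dev/dac960_gam", O_RDONLY);
-	int Count = ioctl(fd, DAC960_IOCTL_GET_CONTROLLER_COUNT, 0);
-*/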
-
-
-/*
- Define the DAC960_IOCTL_GET_CONTROLLER_INFO reply structure.
-*/
-
-typedef struct DAC960_ControllerInfo
-{
- unsigned char ControllerNumber;
- unsigned char FirmwareType;
- unsigned char Channels;
- unsigned char Targets;
- unsigned char PCI_Bus;
- unsigned char PCI_Device;
- unsigned char PCI_Function;
- unsigned char IRQ_Channel;
- DAC960_PCI_Address_T PCI_Address;
- unsigned char ModelName[20];
- unsigned char FirmwareVersion[12];
-}
-DAC960_ControllerInfo_T;
-
-
-/*
- Define the User Mode DAC960_IOCTL_V1_EXECUTE_COMMAND request structure.
-*/
-
-typedef struct DAC960_V1_UserCommand
-{
- unsigned char ControllerNumber;
- DAC960_V1_CommandMailbox_T CommandMailbox;
- int DataTransferLength;
- void __user *DataTransferBuffer;
- DAC960_V1_DCDB_T __user *DCDB;
-}
-DAC960_V1_UserCommand_T;
-
-
-/*
- Define the Kernel Mode DAC960_IOCTL_V1_EXECUTE_COMMAND request structure.
-*/
-
-typedef struct DAC960_V1_KernelCommand
-{
- unsigned char ControllerNumber;
- DAC960_V1_CommandMailbox_T CommandMailbox;
- int DataTransferLength;
- void *DataTransferBuffer;
- DAC960_V1_DCDB_T *DCDB;
- DAC960_V1_CommandStatus_T CommandStatus;
- void (*CompletionFunction)(struct DAC960_V1_KernelCommand *);
- void *CompletionData;
-}
-DAC960_V1_KernelCommand_T;
-
-
-/*
- Define the User Mode DAC960_IOCTL_V2_EXECUTE_COMMAND request structure.
-*/
-
-typedef struct DAC960_V2_UserCommand
-{
- unsigned char ControllerNumber;
- DAC960_V2_CommandMailbox_T CommandMailbox;
- int DataTransferLength;
- int RequestSenseLength;
- void __user *DataTransferBuffer;
- void __user *RequestSenseBuffer;
-}
-DAC960_V2_UserCommand_T;
-
-
-/*
- Define the Kernel Mode DAC960_IOCTL_V2_EXECUTE_COMMAND request structure.
-*/
-
-typedef struct DAC960_V2_KernelCommand
-{
- unsigned char ControllerNumber;
- DAC960_V2_CommandMailbox_T CommandMailbox;
- int DataTransferLength;
- int RequestSenseLength;
- void *DataTransferBuffer;
- void *RequestSenseBuffer;
- DAC960_V2_CommandStatus_T CommandStatus;
- void (*CompletionFunction)(struct DAC960_V2_KernelCommand *);
- void *CompletionData;
-}
-DAC960_V2_KernelCommand_T;
-
-
-/*
- Define the User Mode DAC960_IOCTL_V2_GET_HEALTH_STATUS request structure.
-*/
-
-typedef struct DAC960_V2_GetHealthStatus
-{
- unsigned char ControllerNumber;
- DAC960_V2_HealthStatusBuffer_T __user *HealthStatusBuffer;
-}
-DAC960_V2_GetHealthStatus_T;
-
-
-/*
- Import the Kernel Mode IOCTL interface.
-*/
-
-extern int DAC960_KernelIOCTL(unsigned int Request, void *Argument);
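-
-
-/*
-  A hedged sketch of kernel-mode use: another driver fills in a V2
-  kernel command and is called back on completion.  The names below are
-  illustrative assumptions, not part of the interface.
-*/
-
-static void ExampleCompletionFunction(
-	struct DAC960_V2_KernelCommand *KernelCommand)
-{
-  /* KernelCommand->CommandStatus now holds the controller's status. */
-}
-
-static int ExampleSubmitV2Command(DAC960_V2_KernelCommand_T *KernelCommand)
-{
-  KernelCommand->CompletionFunction = ExampleCompletionFunction;
-  return DAC960_KernelIOCTL(DAC960_IOCTL_V2_EXECUTE_COMMAND, KernelCommand);
-}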
-
-
-/*
- DAC960_DriverVersion protects the private portion of this file.
-*/
-
-#ifdef DAC960_DriverVersion
-
-
-/*
- Define the maximum Driver Queue Depth and Controller Queue Depth supported
- by DAC960 V1 and V2 Firmware Controllers.
-*/
-
-#define DAC960_MaxDriverQueueDepth 511
-#define DAC960_MaxControllerQueueDepth 512
-
-
-/*
- Define the maximum number of Scatter/Gather Segments supported for any
- DAC960 V1 and V2 Firmware controller.
-*/
-
-#define DAC960_V1_ScatterGatherLimit 33
-#define DAC960_V2_ScatterGatherLimit 128
-
-
-/*
- Define the number of Command Mailboxes and Status Mailboxes used by the
- DAC960 V1 and V2 Firmware Memory Mailbox Interface.
-*/
-
-#define DAC960_V1_CommandMailboxCount 256
-#define DAC960_V1_StatusMailboxCount 1024
-#define DAC960_V2_CommandMailboxCount 512
-#define DAC960_V2_StatusMailboxCount 512
-
-
-/*
- Define the DAC960 Controller Monitoring Timer Interval.
-*/
-
-#define DAC960_MonitoringTimerInterval (10 * HZ)
-
-
-/*
- Define the DAC960 Controller Secondary Monitoring Interval.
-*/
-
-#define DAC960_SecondaryMonitoringInterval (60 * HZ)
-
-
-/*
- Define the DAC960 Controller Health Status Monitoring Interval.
-*/
-
-#define DAC960_HealthStatusMonitoringInterval (1 * HZ)
-
-
-/*
- Define the DAC960 Controller Progress Reporting Interval.
-*/
-
-#define DAC960_ProgressReportingInterval (60 * HZ)
-
-
-/*
- Define the maximum number of Partitions allowed for each Logical Drive.
-*/
-
-#define DAC960_MaxPartitions 8
-#define DAC960_MaxPartitionsBits 3
-
-
-/*
- Define the DAC960 Controller fixed Block Size and Block Size Bits.
-*/
-
-#define DAC960_BlockSize 512
-#define DAC960_BlockSizeBits 9
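-
-
-/*
-  Since the block size is fixed, block/byte conversions reduce to plain
-  shifts, e.g. (illustrative only):
-
-	ByteCount  = BlockCount << DAC960_BlockSizeBits;
-	BlockCount = ByteCount  >> DAC960_BlockSizeBits;
-*/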
-
-
-/*
- Define the number of Command structures that should be allocated as a
- group to optimize kernel memory allocation.
-*/
-
-#define DAC960_V1_CommandAllocationGroupSize 11
-#define DAC960_V2_CommandAllocationGroupSize 29
-
-
-/*
- Define the Controller Line Buffer, Progress Buffer, User Message, and
- Initial Status Buffer sizes.
-*/
-
-#define DAC960_LineBufferSize 100
-#define DAC960_ProgressBufferSize 200
-#define DAC960_UserMessageSize 200
-#define DAC960_InitialStatusBufferSize (8192-32)
-
-
-/*
- Define the DAC960 Controller Firmware Types.
-*/
-
-typedef enum
-{
- DAC960_V1_Controller = 1,
- DAC960_V2_Controller = 2
-}
-DAC960_FirmwareType_T;
-
-
-/*
- Define the DAC960 Controller Hardware Types.
-*/
-
-typedef enum
-{
- DAC960_BA_Controller = 1, /* eXtremeRAID 2000 */
- DAC960_LP_Controller = 2, /* AcceleRAID 352 */
- DAC960_LA_Controller = 3, /* DAC1164P */
- DAC960_PG_Controller = 4, /* DAC960PTL/PJ/PG */
- DAC960_PD_Controller = 5, /* DAC960PU/PD/PL/P */
- DAC960_P_Controller = 6, /* DAC960PU/PD/PL/P */
- DAC960_GEM_Controller = 7, /* AcceleRAID 4/5/600 */
-}
-DAC960_HardwareType_T;
-
-
-/*
- Define the Driver Message Levels.
-*/
-
-typedef enum DAC960_MessageLevel
-{
- DAC960_AnnounceLevel = 0,
- DAC960_InfoLevel = 1,
- DAC960_NoticeLevel = 2,
- DAC960_WarningLevel = 3,
- DAC960_ErrorLevel = 4,
- DAC960_ProgressLevel = 5,
- DAC960_CriticalLevel = 6,
- DAC960_UserCriticalLevel = 7
-}
-DAC960_MessageLevel_T;
-
-static char
- *DAC960_MessageLevelMap[] =
- { KERN_NOTICE, KERN_NOTICE, KERN_NOTICE, KERN_WARNING,
- KERN_ERR, KERN_CRIT, KERN_CRIT, KERN_CRIT };
-
-
-/*
- Define Driver Message macros.
-*/
-
-#define DAC960_Announce(Format, Arguments...) \
- DAC960_Message(DAC960_AnnounceLevel, Format, ##Arguments)
-
-#define DAC960_Info(Format, Arguments...) \
- DAC960_Message(DAC960_InfoLevel, Format, ##Arguments)
-
-#define DAC960_Notice(Format, Arguments...) \
- DAC960_Message(DAC960_NoticeLevel, Format, ##Arguments)
-
-#define DAC960_Warning(Format, Arguments...) \
- DAC960_Message(DAC960_WarningLevel, Format, ##Arguments)
-
-#define DAC960_Error(Format, Arguments...) \
- DAC960_Message(DAC960_ErrorLevel, Format, ##Arguments)
-
-#define DAC960_Progress(Format, Arguments...) \
- DAC960_Message(DAC960_ProgressLevel, Format, ##Arguments)
-
-#define DAC960_Critical(Format, Arguments...) \
- DAC960_Message(DAC960_CriticalLevel, Format, ##Arguments)
-
-#define DAC960_UserCritical(Format, Arguments...) \
- DAC960_Message(DAC960_UserCriticalLevel, Format, ##Arguments)
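-
-
-/*
-  Usage sketch: each macro forwards to DAC960_Message() (defined in the
-  driver source), which maps the level through DAC960_MessageLevelMap
-  and prefixes the controller name, e.g.
-
-	DAC960_Error("Unable to allocate DMA mapped memory\n", Controller);
-*/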
-
-
-struct DAC960_privdata {
- DAC960_HardwareType_T HardwareType;
- DAC960_FirmwareType_T FirmwareType;
- irq_handler_t InterruptHandler;
- unsigned int MemoryWindowSize;
-};
-
-
-/*
- Define the DAC960 V1 Firmware Controller Status Mailbox structure.
-*/
-
-typedef union DAC960_V1_StatusMailbox
-{
- unsigned int Word; /* Word 0 */
- struct {
- DAC960_V1_CommandIdentifier_T CommandIdentifier; /* Byte 0 */
- unsigned char :7; /* Byte 1 Bits 0-6 */
- bool Valid:1; /* Byte 1 Bit 7 */
- DAC960_V1_CommandStatus_T CommandStatus; /* Bytes 2-3 */
- } Fields;
-}
-DAC960_V1_StatusMailbox_T;
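-
-
-/*
-  A minimal sketch, assuming the mailbox bookkeeping kept in the
-  Controller structure below, of how completed statuses are consumed:
-  scan forward while Valid is set, then clear each slot so the
-  controller can reuse it.
-*/
-
-static void DAC960_V1_ExampleDrainStatusMailboxes(
-	DAC960_V1_StatusMailbox_T **NextStatusMailboxPointer,
-	DAC960_V1_StatusMailbox_T *FirstStatusMailbox,
-	DAC960_V1_StatusMailbox_T *LastStatusMailbox)
-{
-  DAC960_V1_StatusMailbox_T *NextStatusMailbox = *NextStatusMailboxPointer;
-  while (NextStatusMailbox->Fields.Valid) {
-    /* ... look up the command by CommandIdentifier and complete it ... */
-    NextStatusMailbox->Word = 0;	/* release the slot */
-    if (++NextStatusMailbox > LastStatusMailbox)
-      NextStatusMailbox = FirstStatusMailbox;
-  }
-  *NextStatusMailboxPointer = NextStatusMailbox;
-}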
-
-
-/*
- Define the DAC960 V2 Firmware Controller Status Mailbox structure.
-*/
-
-typedef union DAC960_V2_StatusMailbox
-{
- unsigned int Words[2]; /* Words 0-1 */
- struct {
- DAC960_V2_CommandIdentifier_T CommandIdentifier; /* Bytes 0-1 */
- DAC960_V2_CommandStatus_T CommandStatus; /* Byte 2 */
- unsigned char RequestSenseLength; /* Byte 3 */
- int DataTransferResidue; /* Bytes 4-7 */
- } Fields;
-}
-DAC960_V2_StatusMailbox_T;
-
-
-/*
- Define the DAC960 Driver Command Types.
-*/
-
-typedef enum
-{
- DAC960_ReadCommand = 1,
- DAC960_WriteCommand = 2,
- DAC960_ReadRetryCommand = 3,
- DAC960_WriteRetryCommand = 4,
- DAC960_MonitoringCommand = 5,
- DAC960_ImmediateCommand = 6,
- DAC960_QueuedCommand = 7
-}
-DAC960_CommandType_T;
-
-
-/*
- Define the DAC960 Driver Command structure.
-*/
-
-typedef struct DAC960_Command
-{
- int CommandIdentifier;
- DAC960_CommandType_T CommandType;
- struct DAC960_Controller *Controller;
- struct DAC960_Command *Next;
- struct completion *Completion;
- unsigned int LogicalDriveNumber;
- unsigned int BlockNumber;
- unsigned int BlockCount;
- unsigned int SegmentCount;
- int DmaDirection;
- struct scatterlist *cmd_sglist;
- struct request *Request;
- union {
- struct {
- DAC960_V1_CommandMailbox_T CommandMailbox;
- DAC960_V1_KernelCommand_T *KernelCommand;
- DAC960_V1_CommandStatus_T CommandStatus;
- DAC960_V1_ScatterGatherSegment_T *ScatterGatherList;
- dma_addr_t ScatterGatherListDMA;
- struct scatterlist ScatterList[DAC960_V1_ScatterGatherLimit];
- unsigned int EndMarker[0];
- } V1;
- struct {
- DAC960_V2_CommandMailbox_T CommandMailbox;
- DAC960_V2_KernelCommand_T *KernelCommand;
- DAC960_V2_CommandStatus_T CommandStatus;
- unsigned char RequestSenseLength;
- int DataTransferResidue;
- DAC960_V2_ScatterGatherSegment_T *ScatterGatherList;
- dma_addr_t ScatterGatherListDMA;
- DAC960_SCSI_RequestSense_T *RequestSense;
- dma_addr_t RequestSenseDMA;
- struct scatterlist ScatterList[DAC960_V2_ScatterGatherLimit];
- unsigned int EndMarker[0];
- } V2;
- } FW;
-}
-DAC960_Command_T;
-
-
-/*
- Define the DAC960 Driver Controller structure.
-*/
-
-typedef struct DAC960_Controller
-{
- void __iomem *BaseAddress;
- void __iomem *MemoryMappedAddress;
- DAC960_FirmwareType_T FirmwareType;
- DAC960_HardwareType_T HardwareType;
- DAC960_IO_Address_T IO_Address;
- DAC960_PCI_Address_T PCI_Address;
- struct pci_dev *PCIDevice;
- unsigned char ControllerNumber;
- unsigned char ControllerName[4];
- unsigned char ModelName[20];
- unsigned char FullModelName[28];
- unsigned char FirmwareVersion[12];
- unsigned char Bus;
- unsigned char Device;
- unsigned char Function;
- unsigned char IRQ_Channel;
- unsigned char Channels;
- unsigned char Targets;
- unsigned char MemorySize;
- unsigned char LogicalDriveCount;
- unsigned short CommandAllocationGroupSize;
- unsigned short ControllerQueueDepth;
- unsigned short DriverQueueDepth;
- unsigned short MaxBlocksPerCommand;
- unsigned short ControllerScatterGatherLimit;
- unsigned short DriverScatterGatherLimit;
- unsigned int CombinedStatusBufferLength;
- unsigned int InitialStatusLength;
- unsigned int CurrentStatusLength;
- unsigned int ProgressBufferLength;
- unsigned int UserStatusLength;
- struct dma_loaf DmaPages;
- unsigned long MonitoringTimerCount;
- unsigned long PrimaryMonitoringTime;
- unsigned long SecondaryMonitoringTime;
- unsigned long ShutdownMonitoringTimer;
- unsigned long LastProgressReportTime;
- unsigned long LastCurrentStatusTime;
- bool ControllerInitialized;
- bool MonitoringCommandDeferred;
- bool EphemeralProgressMessage;
- bool DriveSpinUpMessageDisplayed;
- bool MonitoringAlertMode;
- bool SuppressEnclosureMessages;
- struct timer_list MonitoringTimer;
- struct gendisk *disks[DAC960_MaxLogicalDrives];
- struct dma_pool *ScatterGatherPool;
- DAC960_Command_T *FreeCommands;
- unsigned char *CombinedStatusBuffer;
- unsigned char *CurrentStatusBuffer;
- struct request_queue *RequestQueue[DAC960_MaxLogicalDrives];
- int req_q_index;
- spinlock_t queue_lock;
- wait_queue_head_t CommandWaitQueue;
- wait_queue_head_t HealthStatusWaitQueue;
- DAC960_Command_T InitialCommand;
- DAC960_Command_T *Commands[DAC960_MaxDriverQueueDepth];
- struct proc_dir_entry *ControllerProcEntry;
- bool LogicalDriveInitiallyAccessible[DAC960_MaxLogicalDrives];
- void (*QueueCommand)(DAC960_Command_T *Command);
- bool (*ReadControllerConfiguration)(struct DAC960_Controller *);
- bool (*ReadDeviceConfiguration)(struct DAC960_Controller *);
- bool (*ReportDeviceConfiguration)(struct DAC960_Controller *);
- void (*QueueReadWriteCommand)(DAC960_Command_T *Command);
- union {
- struct {
- unsigned char GeometryTranslationHeads;
- unsigned char GeometryTranslationSectors;
- unsigned char PendingRebuildFlag;
- unsigned short StripeSize;
- unsigned short SegmentSize;
- unsigned short NewEventLogSequenceNumber;
- unsigned short OldEventLogSequenceNumber;
- unsigned short DeviceStateChannel;
- unsigned short DeviceStateTargetID;
- bool DualModeMemoryMailboxInterface;
- bool BackgroundInitializationStatusSupported;
- bool SAFTE_EnclosureManagementEnabled;
- bool NeedLogicalDriveInformation;
- bool NeedErrorTableInformation;
- bool NeedDeviceStateInformation;
- bool NeedDeviceInquiryInformation;
- bool NeedDeviceSerialNumberInformation;
- bool NeedRebuildProgress;
- bool NeedConsistencyCheckProgress;
- bool NeedBackgroundInitializationStatus;
- bool StartDeviceStateScan;
- bool RebuildProgressFirst;
- bool RebuildFlagPending;
- bool RebuildStatusPending;
-
- dma_addr_t FirstCommandMailboxDMA;
- DAC960_V1_CommandMailbox_T *FirstCommandMailbox;
- DAC960_V1_CommandMailbox_T *LastCommandMailbox;
- DAC960_V1_CommandMailbox_T *NextCommandMailbox;
- DAC960_V1_CommandMailbox_T *PreviousCommandMailbox1;
- DAC960_V1_CommandMailbox_T *PreviousCommandMailbox2;
-
- dma_addr_t FirstStatusMailboxDMA;
- DAC960_V1_StatusMailbox_T *FirstStatusMailbox;
- DAC960_V1_StatusMailbox_T *LastStatusMailbox;
- DAC960_V1_StatusMailbox_T *NextStatusMailbox;
-
- DAC960_V1_DCDB_T *MonitoringDCDB;
- dma_addr_t MonitoringDCDB_DMA;
-
- DAC960_V1_Enquiry_T Enquiry;
- DAC960_V1_Enquiry_T *NewEnquiry;
- dma_addr_t NewEnquiryDMA;
-
- DAC960_V1_ErrorTable_T ErrorTable;
- DAC960_V1_ErrorTable_T *NewErrorTable;
- dma_addr_t NewErrorTableDMA;
-
- DAC960_V1_EventLogEntry_T *EventLogEntry;
- dma_addr_t EventLogEntryDMA;
-
- DAC960_V1_RebuildProgress_T *RebuildProgress;
- dma_addr_t RebuildProgressDMA;
- DAC960_V1_CommandStatus_T LastRebuildStatus;
- DAC960_V1_CommandStatus_T PendingRebuildStatus;
-
- DAC960_V1_LogicalDriveInformationArray_T LogicalDriveInformation;
- DAC960_V1_LogicalDriveInformationArray_T *NewLogicalDriveInformation;
- dma_addr_t NewLogicalDriveInformationDMA;
-
- DAC960_V1_BackgroundInitializationStatus_T
- *BackgroundInitializationStatus;
- dma_addr_t BackgroundInitializationStatusDMA;
- DAC960_V1_BackgroundInitializationStatus_T
- LastBackgroundInitializationStatus;
-
- DAC960_V1_DeviceState_T
- DeviceState[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
- DAC960_V1_DeviceState_T *NewDeviceState;
- dma_addr_t NewDeviceStateDMA;
-
- DAC960_SCSI_Inquiry_T
- InquiryStandardData[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
- DAC960_SCSI_Inquiry_T *NewInquiryStandardData;
- dma_addr_t NewInquiryStandardDataDMA;
-
- DAC960_SCSI_Inquiry_UnitSerialNumber_T
- InquiryUnitSerialNumber[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *NewInquiryUnitSerialNumber;
- dma_addr_t NewInquiryUnitSerialNumberDMA;
-
- int DeviceResetCount[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
- bool DirectCommandActive[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
- } V1;
- struct {
- unsigned int StatusChangeCounter;
- unsigned int NextEventSequenceNumber;
- unsigned int PhysicalDeviceIndex;
- bool NeedLogicalDeviceInformation;
- bool NeedPhysicalDeviceInformation;
- bool NeedDeviceSerialNumberInformation;
- bool StartLogicalDeviceInformationScan;
- bool StartPhysicalDeviceInformationScan;
- struct dma_pool *RequestSensePool;
-
- dma_addr_t FirstCommandMailboxDMA;
- DAC960_V2_CommandMailbox_T *FirstCommandMailbox;
- DAC960_V2_CommandMailbox_T *LastCommandMailbox;
- DAC960_V2_CommandMailbox_T *NextCommandMailbox;
- DAC960_V2_CommandMailbox_T *PreviousCommandMailbox1;
- DAC960_V2_CommandMailbox_T *PreviousCommandMailbox2;
-
- dma_addr_t FirstStatusMailboxDMA;
- DAC960_V2_StatusMailbox_T *FirstStatusMailbox;
- DAC960_V2_StatusMailbox_T *LastStatusMailbox;
- DAC960_V2_StatusMailbox_T *NextStatusMailbox;
-
- dma_addr_t HealthStatusBufferDMA;
- DAC960_V2_HealthStatusBuffer_T *HealthStatusBuffer;
-
- DAC960_V2_ControllerInfo_T ControllerInformation;
- DAC960_V2_ControllerInfo_T *NewControllerInformation;
- dma_addr_t NewControllerInformationDMA;
-
- DAC960_V2_LogicalDeviceInfo_T
- *LogicalDeviceInformation[DAC960_MaxLogicalDrives];
- DAC960_V2_LogicalDeviceInfo_T *NewLogicalDeviceInformation;
- dma_addr_t NewLogicalDeviceInformationDMA;
-
- DAC960_V2_PhysicalDeviceInfo_T
- *PhysicalDeviceInformation[DAC960_V2_MaxPhysicalDevices];
- DAC960_V2_PhysicalDeviceInfo_T *NewPhysicalDeviceInformation;
- dma_addr_t NewPhysicalDeviceInformationDMA;
-
- DAC960_SCSI_Inquiry_UnitSerialNumber_T *NewInquiryUnitSerialNumber;
- dma_addr_t NewInquiryUnitSerialNumberDMA;
- DAC960_SCSI_Inquiry_UnitSerialNumber_T
- *InquiryUnitSerialNumber[DAC960_V2_MaxPhysicalDevices];
-
- DAC960_V2_Event_T *Event;
- dma_addr_t EventDMA;
-
- DAC960_V2_PhysicalToLogicalDevice_T *PhysicalToLogicalDevice;
- dma_addr_t PhysicalToLogicalDeviceDMA;
-
- DAC960_V2_PhysicalDevice_T
- LogicalDriveToVirtualDevice[DAC960_MaxLogicalDrives];
- bool LogicalDriveFoundDuringScan[DAC960_MaxLogicalDrives];
- } V2;
- } FW;
- unsigned char ProgressBuffer[DAC960_ProgressBufferSize];
- unsigned char UserStatusBuffer[DAC960_UserMessageSize];
-}
-DAC960_Controller_T;
-
-
-/*
- Simplify access to Firmware Version Dependent Data Structure Components
- and Functions.
-*/
-
-#define V1 FW.V1
-#define V2 FW.V2
-#define DAC960_QueueCommand(Command) \
- (Controller->QueueCommand)(Command)
-#define DAC960_ReadControllerConfiguration(Controller) \
- (Controller->ReadControllerConfiguration)(Controller)
-#define DAC960_ReadDeviceConfiguration(Controller) \
- (Controller->ReadDeviceConfiguration)(Controller)
-#define DAC960_ReportDeviceConfiguration(Controller) \
- (Controller->ReportDeviceConfiguration)(Controller)
-#define DAC960_QueueReadWriteCommand(Command) \
- (Controller->QueueReadWriteCommand)(Command)
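To make the shorthand concrete, here is a minimal sketch (a hypothetical helper, not part of the removed driver) of what the V1/V2 defines expand to, assuming the DAC960_Controller_T declaration above is in scope:

	/* Hypothetical illustration only -- not driver code. */
	static void example_show_v1_shorthand(DAC960_Controller_T *Controller)
	{
		/* "Controller->V1.NewEnquiry" is rewritten by the #define
		   above into "Controller->FW.V1.NewEnquiry". */
		DAC960_V1_Enquiry_T *Enquiry = Controller->V1.NewEnquiry;
		(void)Enquiry;
		/* Likewise, DAC960_QueueCommand(Command) dispatches through
		   the per-controller function pointer:
		   (Controller->QueueCommand)(Command). */
	}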
-
-/*
- * dma_addr_writeql is provided to write dma_addr_t types
- * to a 64-bit PCI address space register. The controller
- * will accept having the register written as two 32-bit
- * values.
- *
- * In HIGHMEM kernels, dma_addr_t is a 64-bit value.
- * Without HIGHMEM, dma_addr_t is a 32-bit value.
- *
- * The compiler should always fix up the assignment
- * to u.wq appropriately, depending upon the size of
- * dma_addr_t.
- */
-static inline
-void dma_addr_writeql(dma_addr_t addr, void __iomem *write_address)
-{
- union {
- u64 wq;
- uint wl[2];
- } u;
-
- u.wq = addr;
-
- writel(u.wl[0], write_address);
- writel(u.wl[1], write_address + 4);
-}
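As a stand-alone demonstration of the union split performed above (a hypothetical example, not driver code): on a little-endian host the low 32 bits land in wl[0] and the high 32 bits in wl[1], which is the order the two writel() calls assume.

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		union {
			uint64_t wq;
			uint32_t wl[2];
		} u;

		u.wq = 0x0000000123456789ULL;
		printf("low  word: 0x%08" PRIx32 "\n", u.wl[0]); /* 0x23456789 */
		printf("high word: 0x%08" PRIx32 "\n", u.wl[1]); /* 0x00000001 */
		return 0;
	}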
-
-/*
- Define the DAC960 GEM Series Controller Interface Register Offsets.
- */
-
-#define DAC960_GEM_RegisterWindowSize 0x600
-
-typedef enum
-{
- DAC960_GEM_InboundDoorBellRegisterReadSetOffset = 0x214,
- DAC960_GEM_InboundDoorBellRegisterClearOffset = 0x218,
- DAC960_GEM_OutboundDoorBellRegisterReadSetOffset = 0x224,
- DAC960_GEM_OutboundDoorBellRegisterClearOffset = 0x228,
- DAC960_GEM_InterruptStatusRegisterOffset = 0x208,
- DAC960_GEM_InterruptMaskRegisterReadSetOffset = 0x22C,
- DAC960_GEM_InterruptMaskRegisterClearOffset = 0x230,
- DAC960_GEM_CommandMailboxBusAddressOffset = 0x510,
- DAC960_GEM_CommandStatusOffset = 0x518,
- DAC960_GEM_ErrorStatusRegisterReadSetOffset = 0x224,
- DAC960_GEM_ErrorStatusRegisterClearOffset = 0x228,
-}
-DAC960_GEM_RegisterOffsets_T;
-
-/*
- Define the structure of the DAC960 GEM Series Inbound Door Bell Register.
- */
-
-typedef union DAC960_GEM_InboundDoorBellRegister
-{
- unsigned int All;
- struct {
- unsigned int :24;
- bool HardwareMailboxNewCommand:1;
- bool AcknowledgeHardwareMailboxStatus:1;
- bool GenerateInterrupt:1;
- bool ControllerReset:1;
- bool MemoryMailboxNewCommand:1;
- unsigned int :3;
- } Write;
- struct {
- unsigned int :24;
- bool HardwareMailboxFull:1;
- bool InitializationInProgress:1;
- unsigned int :6;
- } Read;
-}
-DAC960_GEM_InboundDoorBellRegister_T;
-
-/*
- Define the structure of the DAC960 GEM Series Outbound Door Bell Register.
- */
-typedef union DAC960_GEM_OutboundDoorBellRegister
-{
- unsigned int All;
- struct {
- unsigned int :24;
- bool AcknowledgeHardwareMailboxInterrupt:1;
- bool AcknowledgeMemoryMailboxInterrupt:1;
- unsigned int :6;
- } Write;
- struct {
- unsigned int :24;
- bool HardwareMailboxStatusAvailable:1;
- bool MemoryMailboxStatusAvailable:1;
- unsigned int :6;
- } Read;
-}
-DAC960_GEM_OutboundDoorBellRegister_T;
-
-/*
- Define the structure of the DAC960 GEM Series Interrupt Mask Register.
- */
-typedef union DAC960_GEM_InterruptMaskRegister
-{
- unsigned int All;
- struct {
- unsigned int :16;
- unsigned int :8;
- unsigned int HardwareMailboxInterrupt:1;
- unsigned int MemoryMailboxInterrupt:1;
- unsigned int :6;
- } Bits;
-}
-DAC960_GEM_InterruptMaskRegister_T;
-
-/*
- Define the structure of the DAC960 GEM Series Error Status Register.
- */
-
-typedef union DAC960_GEM_ErrorStatusRegister
-{
- unsigned int All;
- struct {
- unsigned int :24;
- unsigned int :5;
- bool ErrorStatusPending:1;
- unsigned int :2;
- } Bits;
-}
-DAC960_GEM_ErrorStatusRegister_T;
-
-/*
- Define inline functions to provide an abstraction for reading and writing the
- DAC960 GEM Series Controller Interface Registers.
-*/
-
-static inline
-void DAC960_GEM_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
- writel(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_GEM_InboundDoorBellRegisterReadSetOffset);
-}
-
-static inline
-void DAC960_GEM_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
- writel(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_GEM_InboundDoorBellRegisterClearOffset);
-}
-
-static inline
-void DAC960_GEM_GenerateInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.GenerateInterrupt = true;
- writel(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_GEM_InboundDoorBellRegisterReadSetOffset);
-}
-
-static inline
-void DAC960_GEM_ControllerReset(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.ControllerReset = true;
- writel(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_GEM_InboundDoorBellRegisterReadSetOffset);
-}
-
-static inline
-void DAC960_GEM_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
- writel(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_GEM_InboundDoorBellRegisterReadSetOffset);
-}
-
-static inline
-bool DAC960_GEM_HardwareMailboxFullP(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readl(ControllerBaseAddress +
- DAC960_GEM_InboundDoorBellRegisterReadSetOffset);
- return InboundDoorBellRegister.Read.HardwareMailboxFull;
-}
-
-static inline
-bool DAC960_GEM_InitializationInProgressP(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readl(ControllerBaseAddress +
- DAC960_GEM_InboundDoorBellRegisterReadSetOffset);
- return InboundDoorBellRegister.Read.InitializationInProgress;
-}
-
-static inline
-void DAC960_GEM_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
- writel(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_GEM_OutboundDoorBellRegisterClearOffset);
-}
-
-static inline
-void DAC960_GEM_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
- writel(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_GEM_OutboundDoorBellRegisterClearOffset);
-}
-
-static inline
-void DAC960_GEM_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
- OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
- writel(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_GEM_OutboundDoorBellRegisterClearOffset);
-}
-
-static inline
-bool DAC960_GEM_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All =
- readl(ControllerBaseAddress +
- DAC960_GEM_OutboundDoorBellRegisterReadSetOffset);
- return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
-}
-
-static inline
-bool DAC960_GEM_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All =
- readl(ControllerBaseAddress +
- DAC960_GEM_OutboundDoorBellRegisterReadSetOffset);
- return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
-}
-
-static inline
-void DAC960_GEM_EnableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All = 0;
- InterruptMaskRegister.Bits.HardwareMailboxInterrupt = true;
- InterruptMaskRegister.Bits.MemoryMailboxInterrupt = true;
- writel(InterruptMaskRegister.All,
- ControllerBaseAddress + DAC960_GEM_InterruptMaskRegisterClearOffset);
-}
-
-static inline
-void DAC960_GEM_DisableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All = 0;
- InterruptMaskRegister.Bits.HardwareMailboxInterrupt = true;
- InterruptMaskRegister.Bits.MemoryMailboxInterrupt = true;
- writel(InterruptMaskRegister.All,
- ControllerBaseAddress + DAC960_GEM_InterruptMaskRegisterReadSetOffset);
-}
-
-static inline
-bool DAC960_GEM_InterruptsEnabledP(void __iomem *ControllerBaseAddress)
-{
- DAC960_GEM_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All =
- readl(ControllerBaseAddress +
- DAC960_GEM_InterruptMaskRegisterReadSetOffset);
- return !(InterruptMaskRegister.Bits.HardwareMailboxInterrupt ||
- InterruptMaskRegister.Bits.MemoryMailboxInterrupt);
-}
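The GEM mask register pair follows the common write-one-to-set / write-one-to-clear idiom: EnableInterrupts clears the two mailbox mask bits by writing them to the Clear offset, DisableInterrupts sets them via the ReadSet offset, and reading the ReadSet offset returns the current mask. A minimal stand-alone model of that idiom (an inference from the offset names and the helpers above, not driver code):

	#include <stdbool.h>
	#include <stdint.h>

	static uint32_t mask;	/* 1 = interrupt source masked (disabled) */

	static void readset_write(uint32_t v) { mask |= v;  }	/* disable bits */
	static void clear_write(uint32_t v)   { mask &= ~v; }	/* enable bits  */
	static bool irqs_enabled(uint32_t bits) { return (mask & bits) == 0; }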
-
-static inline
-void DAC960_GEM_WriteCommandMailbox(DAC960_V2_CommandMailbox_T
- *MemoryCommandMailbox,
- DAC960_V2_CommandMailbox_T
- *CommandMailbox)
-{
- memcpy(&MemoryCommandMailbox->Words[1], &CommandMailbox->Words[1],
- sizeof(DAC960_V2_CommandMailbox_T) - sizeof(unsigned int));
- wmb();
- MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0];
- mb();
-}
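The store ordering here is the interesting part: Words[1..] carry the body of the command, the wmb() makes those stores visible before the store to Words[0], and Words[0] is the word the controller polls for a new command, so writing it last publishes the mailbox atomically from the controller's point of view. A self-contained sketch of the same publish pattern using C11 fences in place of the kernel barriers (an illustrative analogy, not the driver's code):

	#include <stdatomic.h>
	#include <string.h>

	struct mailbox { unsigned int Words[16]; };

	static void publish(struct mailbox *slot, const struct mailbox *cmd)
	{
		/* Fill in everything except the "valid" word first. */
		memcpy(&slot->Words[1], &cmd->Words[1],
		       sizeof(*slot) - sizeof(unsigned int));
		atomic_thread_fence(memory_order_release); /* role of wmb() */
		slot->Words[0] = cmd->Words[0];            /* hand off */
		atomic_thread_fence(memory_order_seq_cst); /* role of mb() */
	}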
-
-static inline
-void DAC960_GEM_WriteHardwareMailbox(void __iomem *ControllerBaseAddress,
- dma_addr_t CommandMailboxDMA)
-{
- dma_addr_writeql(CommandMailboxDMA,
- ControllerBaseAddress +
- DAC960_GEM_CommandMailboxBusAddressOffset);
-}
-
-static inline DAC960_V2_CommandIdentifier_T
-DAC960_GEM_ReadCommandIdentifier(void __iomem *ControllerBaseAddress)
-{
- return readw(ControllerBaseAddress + DAC960_GEM_CommandStatusOffset);
-}
-
-static inline DAC960_V2_CommandStatus_T
-DAC960_GEM_ReadCommandStatus(void __iomem *ControllerBaseAddress)
-{
- return readw(ControllerBaseAddress + DAC960_GEM_CommandStatusOffset + 2);
-}
-
-static inline bool
-DAC960_GEM_ReadErrorStatus(void __iomem *ControllerBaseAddress,
- unsigned char *ErrorStatus,
- unsigned char *Parameter0,
- unsigned char *Parameter1)
-{
- DAC960_GEM_ErrorStatusRegister_T ErrorStatusRegister;
- ErrorStatusRegister.All =
- readl(ControllerBaseAddress + DAC960_GEM_ErrorStatusRegisterReadSetOffset);
- if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
- ErrorStatusRegister.Bits.ErrorStatusPending = false;
- *ErrorStatus = ErrorStatusRegister.All;
- *Parameter0 =
- readb(ControllerBaseAddress + DAC960_GEM_CommandMailboxBusAddressOffset + 0);
- *Parameter1 =
- readb(ControllerBaseAddress + DAC960_GEM_CommandMailboxBusAddressOffset + 1);
- writel(0x03000000, ControllerBaseAddress +
- DAC960_GEM_ErrorStatusRegisterClearOffset);
- return true;
-}
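Taken together, the predicates and the error-status reader support a simple probe-time wait loop. A hypothetical helper showing the intended usage (assumes kernel context and the declarations above; the driver's actual init loop also enforces a timeout):

	static bool example_gem_wait_ready(void __iomem *ControllerBaseAddress)
	{
		unsigned char ErrorStatus, Parameter0, Parameter1;

		while (DAC960_GEM_InitializationInProgressP(ControllerBaseAddress)) {
			if (DAC960_GEM_ReadErrorStatus(ControllerBaseAddress,
						       &ErrorStatus, &Parameter0,
						       &Parameter1))
				return false;	/* controller latched an error */
			udelay(10);
		}
		return true;
	}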
-
-/*
- Define the DAC960 BA Series Controller Interface Register Offsets.
-*/
-
-#define DAC960_BA_RegisterWindowSize 0x80
-
-typedef enum
-{
- DAC960_BA_InboundDoorBellRegisterOffset = 0x60,
- DAC960_BA_OutboundDoorBellRegisterOffset = 0x61,
- DAC960_BA_InterruptStatusRegisterOffset = 0x30,
- DAC960_BA_InterruptMaskRegisterOffset = 0x34,
- DAC960_BA_CommandMailboxBusAddressOffset = 0x50,
- DAC960_BA_CommandStatusOffset = 0x58,
- DAC960_BA_ErrorStatusRegisterOffset = 0x63
-}
-DAC960_BA_RegisterOffsets_T;
-
-
-/*
- Define the structure of the DAC960 BA Series Inbound Door Bell Register.
-*/
-
-typedef union DAC960_BA_InboundDoorBellRegister
-{
- unsigned char All;
- struct {
- bool HardwareMailboxNewCommand:1; /* Bit 0 */
- bool AcknowledgeHardwareMailboxStatus:1; /* Bit 1 */
- bool GenerateInterrupt:1; /* Bit 2 */
- bool ControllerReset:1; /* Bit 3 */
- bool MemoryMailboxNewCommand:1; /* Bit 4 */
- unsigned char :3; /* Bits 5-7 */
- } Write;
- struct {
- bool HardwareMailboxEmpty:1; /* Bit 0 */
- bool InitializationNotInProgress:1; /* Bit 1 */
- unsigned char :6; /* Bits 2-7 */
- } Read;
-}
-DAC960_BA_InboundDoorBellRegister_T;
-
-
-/*
- Define the structure of the DAC960 BA Series Outbound Door Bell Register.
-*/
-
-typedef union DAC960_BA_OutboundDoorBellRegister
-{
- unsigned char All;
- struct {
- bool AcknowledgeHardwareMailboxInterrupt:1; /* Bit 0 */
- bool AcknowledgeMemoryMailboxInterrupt:1; /* Bit 1 */
- unsigned char :6; /* Bits 2-7 */
- } Write;
- struct {
- bool HardwareMailboxStatusAvailable:1; /* Bit 0 */
- bool MemoryMailboxStatusAvailable:1; /* Bit 1 */
- unsigned char :6; /* Bits 2-7 */
- } Read;
-}
-DAC960_BA_OutboundDoorBellRegister_T;
-
-
-/*
- Define the structure of the DAC960 BA Series Interrupt Mask Register.
-*/
-
-typedef union DAC960_BA_InterruptMaskRegister
-{
- unsigned char All;
- struct {
- unsigned int :2; /* Bits 0-1 */
- bool DisableInterrupts:1; /* Bit 2 */
- bool DisableInterruptsI2O:1; /* Bit 3 */
- unsigned int :4; /* Bits 4-7 */
- } Bits;
-}
-DAC960_BA_InterruptMaskRegister_T;
-
-
-/*
- Define the structure of the DAC960 BA Series Error Status Register.
-*/
-
-typedef union DAC960_BA_ErrorStatusRegister
-{
- unsigned char All;
- struct {
- unsigned int :2; /* Bits 0-1 */
- bool ErrorStatusPending:1; /* Bit 2 */
- unsigned int :5; /* Bits 3-7 */
- } Bits;
-}
-DAC960_BA_ErrorStatusRegister_T;
-
-
-/*
- Define inline functions to provide an abstraction for reading and writing the
- DAC960 BA Series Controller Interface Registers.
-*/
-
-static inline
-void DAC960_BA_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_BA_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_BA_GenerateInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.GenerateInterrupt = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_BA_ControllerReset(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.ControllerReset = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_BA_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
-}
-
-static inline
-bool DAC960_BA_HardwareMailboxFullP(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
- return !InboundDoorBellRegister.Read.HardwareMailboxEmpty;
-}
-
-static inline
-bool DAC960_BA_InitializationInProgressP(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
- return !InboundDoorBellRegister.Read.InitializationNotInProgress;
-}
-
-static inline
-void DAC960_BA_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
- writeb(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_BA_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
- writeb(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_BA_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
- OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
- writeb(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-bool DAC960_BA_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset);
- return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
-}
-
-static inline
-bool DAC960_BA_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset);
- return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
-}
-
-static inline
-void DAC960_BA_EnableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All = 0xFF;
- InterruptMaskRegister.Bits.DisableInterrupts = false;
- InterruptMaskRegister.Bits.DisableInterruptsI2O = true;
- writeb(InterruptMaskRegister.All,
- ControllerBaseAddress + DAC960_BA_InterruptMaskRegisterOffset);
-}
-
-static inline
-void DAC960_BA_DisableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All = 0xFF;
- InterruptMaskRegister.Bits.DisableInterrupts = true;
- InterruptMaskRegister.Bits.DisableInterruptsI2O = true;
- writeb(InterruptMaskRegister.All,
- ControllerBaseAddress + DAC960_BA_InterruptMaskRegisterOffset);
-}
-
-static inline
-bool DAC960_BA_InterruptsEnabledP(void __iomem *ControllerBaseAddress)
-{
- DAC960_BA_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All =
- readb(ControllerBaseAddress + DAC960_BA_InterruptMaskRegisterOffset);
- return !InterruptMaskRegister.Bits.DisableInterrupts;
-}
-
-static inline
-void DAC960_BA_WriteCommandMailbox(DAC960_V2_CommandMailbox_T
- *MemoryCommandMailbox,
- DAC960_V2_CommandMailbox_T
- *CommandMailbox)
-{
- memcpy(&MemoryCommandMailbox->Words[1], &CommandMailbox->Words[1],
- sizeof(DAC960_V2_CommandMailbox_T) - sizeof(unsigned int));
- wmb();
- MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0];
- mb();
-}
-
-
-static inline
-void DAC960_BA_WriteHardwareMailbox(void __iomem *ControllerBaseAddress,
- dma_addr_t CommandMailboxDMA)
-{
- dma_addr_writeql(CommandMailboxDMA,
- ControllerBaseAddress +
- DAC960_BA_CommandMailboxBusAddressOffset);
-}
-
-static inline DAC960_V2_CommandIdentifier_T
-DAC960_BA_ReadCommandIdentifier(void __iomem *ControllerBaseAddress)
-{
- return readw(ControllerBaseAddress + DAC960_BA_CommandStatusOffset);
-}
-
-static inline DAC960_V2_CommandStatus_T
-DAC960_BA_ReadCommandStatus(void __iomem *ControllerBaseAddress)
-{
- return readw(ControllerBaseAddress + DAC960_BA_CommandStatusOffset + 2);
-}
-
-static inline bool
-DAC960_BA_ReadErrorStatus(void __iomem *ControllerBaseAddress,
- unsigned char *ErrorStatus,
- unsigned char *Parameter0,
- unsigned char *Parameter1)
-{
- DAC960_BA_ErrorStatusRegister_T ErrorStatusRegister;
- ErrorStatusRegister.All =
- readb(ControllerBaseAddress + DAC960_BA_ErrorStatusRegisterOffset);
- if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
- ErrorStatusRegister.Bits.ErrorStatusPending = false;
- *ErrorStatus = ErrorStatusRegister.All;
- *Parameter0 =
- readb(ControllerBaseAddress + DAC960_BA_CommandMailboxBusAddressOffset + 0);
- *Parameter1 =
- readb(ControllerBaseAddress + DAC960_BA_CommandMailboxBusAddressOffset + 1);
- writeb(0xFF, ControllerBaseAddress + DAC960_BA_ErrorStatusRegisterOffset);
- return true;
-}
-
-
-/*
- Define the DAC960 LP Series Controller Interface Register Offsets.
-*/
-
-#define DAC960_LP_RegisterWindowSize 0x80
-
-typedef enum
-{
- DAC960_LP_InboundDoorBellRegisterOffset = 0x20,
- DAC960_LP_OutboundDoorBellRegisterOffset = 0x2C,
- DAC960_LP_InterruptStatusRegisterOffset = 0x30,
- DAC960_LP_InterruptMaskRegisterOffset = 0x34,
- DAC960_LP_CommandMailboxBusAddressOffset = 0x10,
- DAC960_LP_CommandStatusOffset = 0x18,
- DAC960_LP_ErrorStatusRegisterOffset = 0x2E
-}
-DAC960_LP_RegisterOffsets_T;
-
-
-/*
- Define the structure of the DAC960 LP Series Inbound Door Bell Register.
-*/
-
-typedef union DAC960_LP_InboundDoorBellRegister
-{
- unsigned char All;
- struct {
- bool HardwareMailboxNewCommand:1; /* Bit 0 */
- bool AcknowledgeHardwareMailboxStatus:1; /* Bit 1 */
- bool GenerateInterrupt:1; /* Bit 2 */
- bool ControllerReset:1; /* Bit 3 */
- bool MemoryMailboxNewCommand:1; /* Bit 4 */
- unsigned char :3; /* Bits 5-7 */
- } Write;
- struct {
- bool HardwareMailboxFull:1; /* Bit 0 */
- bool InitializationInProgress:1; /* Bit 1 */
- unsigned char :6; /* Bits 2-7 */
- } Read;
-}
-DAC960_LP_InboundDoorBellRegister_T;
-
-
-/*
- Define the structure of the DAC960 LP Series Outbound Door Bell Register.
-*/
-
-typedef union DAC960_LP_OutboundDoorBellRegister
-{
- unsigned char All;
- struct {
- bool AcknowledgeHardwareMailboxInterrupt:1; /* Bit 0 */
- bool AcknowledgeMemoryMailboxInterrupt:1; /* Bit 1 */
- unsigned char :6; /* Bits 2-7 */
- } Write;
- struct {
- bool HardwareMailboxStatusAvailable:1; /* Bit 0 */
- bool MemoryMailboxStatusAvailable:1; /* Bit 1 */
- unsigned char :6; /* Bits 2-7 */
- } Read;
-}
-DAC960_LP_OutboundDoorBellRegister_T;
-
-
-/*
- Define the structure of the DAC960 LP Series Interrupt Mask Register.
-*/
-
-typedef union DAC960_LP_InterruptMaskRegister
-{
- unsigned char All;
- struct {
- unsigned int :2; /* Bits 0-1 */
- bool DisableInterrupts:1; /* Bit 2 */
- unsigned int :5; /* Bits 3-7 */
- } Bits;
-}
-DAC960_LP_InterruptMaskRegister_T;
-
-
-/*
- Define the structure of the DAC960 LP Series Error Status Register.
-*/
-
-typedef union DAC960_LP_ErrorStatusRegister
-{
- unsigned char All;
- struct {
- unsigned int :2; /* Bits 0-1 */
- bool ErrorStatusPending:1; /* Bit 2 */
- unsigned int :5; /* Bits 3-7 */
- } Bits;
-}
-DAC960_LP_ErrorStatusRegister_T;
-
-
-/*
- Define inline functions to provide an abstraction for reading and writing the
- DAC960 LP Series Controller Interface Registers.
-*/
-
-static inline
-void DAC960_LP_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LP_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LP_GenerateInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.GenerateInterrupt = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LP_ControllerReset(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.ControllerReset = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LP_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
-}
-
-static inline
-bool DAC960_LP_HardwareMailboxFullP(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
- return InboundDoorBellRegister.Read.HardwareMailboxFull;
-}
-
-static inline
-bool DAC960_LP_InitializationInProgressP(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
- return InboundDoorBellRegister.Read.InitializationInProgress;
-}
-
-static inline
-void DAC960_LP_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
- writeb(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LP_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
- writeb(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LP_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
- OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
- writeb(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-bool DAC960_LP_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset);
- return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
-}
-
-static inline
-bool DAC960_LP_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset);
- return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
-}
-
-static inline
-void DAC960_LP_EnableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All = 0xFF;
- InterruptMaskRegister.Bits.DisableInterrupts = false;
- writeb(InterruptMaskRegister.All,
- ControllerBaseAddress + DAC960_LP_InterruptMaskRegisterOffset);
-}
-
-static inline
-void DAC960_LP_DisableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All = 0xFF;
- InterruptMaskRegister.Bits.DisableInterrupts = true;
- writeb(InterruptMaskRegister.All,
- ControllerBaseAddress + DAC960_LP_InterruptMaskRegisterOffset);
-}
-
-static inline
-bool DAC960_LP_InterruptsEnabledP(void __iomem *ControllerBaseAddress)
-{
- DAC960_LP_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All =
- readb(ControllerBaseAddress + DAC960_LP_InterruptMaskRegisterOffset);
- return !InterruptMaskRegister.Bits.DisableInterrupts;
-}
-
-static inline
-void DAC960_LP_WriteCommandMailbox(DAC960_V2_CommandMailbox_T
- *MemoryCommandMailbox,
- DAC960_V2_CommandMailbox_T
- *CommandMailbox)
-{
- memcpy(&MemoryCommandMailbox->Words[1], &CommandMailbox->Words[1],
- sizeof(DAC960_V2_CommandMailbox_T) - sizeof(unsigned int));
- wmb();
- MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0];
- mb();
-}
-
-static inline
-void DAC960_LP_WriteHardwareMailbox(void __iomem *ControllerBaseAddress,
- dma_addr_t CommandMailboxDMA)
-{
- dma_addr_writeql(CommandMailboxDMA,
- ControllerBaseAddress +
- DAC960_LP_CommandMailboxBusAddressOffset);
-}
-
-static inline DAC960_V2_CommandIdentifier_T
-DAC960_LP_ReadCommandIdentifier(void __iomem *ControllerBaseAddress)
-{
- return readw(ControllerBaseAddress + DAC960_LP_CommandStatusOffset);
-}
-
-static inline DAC960_V2_CommandStatus_T
-DAC960_LP_ReadCommandStatus(void __iomem *ControllerBaseAddress)
-{
- return readw(ControllerBaseAddress + DAC960_LP_CommandStatusOffset + 2);
-}
-
-static inline bool
-DAC960_LP_ReadErrorStatus(void __iomem *ControllerBaseAddress,
- unsigned char *ErrorStatus,
- unsigned char *Parameter0,
- unsigned char *Parameter1)
-{
- DAC960_LP_ErrorStatusRegister_T ErrorStatusRegister;
- ErrorStatusRegister.All =
- readb(ControllerBaseAddress + DAC960_LP_ErrorStatusRegisterOffset);
- if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
- ErrorStatusRegister.Bits.ErrorStatusPending = false;
- *ErrorStatus = ErrorStatusRegister.All;
- *Parameter0 =
- readb(ControllerBaseAddress + DAC960_LP_CommandMailboxBusAddressOffset + 0);
- *Parameter1 =
- readb(ControllerBaseAddress + DAC960_LP_CommandMailboxBusAddressOffset + 1);
- writeb(0xFF, ControllerBaseAddress + DAC960_LP_ErrorStatusRegisterOffset);
- return true;
-}
-
-
-/*
- Define the DAC960 LA Series Controller Interface Register Offsets.
-*/
-
-#define DAC960_LA_RegisterWindowSize 0x80
-
-typedef enum
-{
- DAC960_LA_InboundDoorBellRegisterOffset = 0x60,
- DAC960_LA_OutboundDoorBellRegisterOffset = 0x61,
- DAC960_LA_InterruptMaskRegisterOffset = 0x34,
- DAC960_LA_CommandOpcodeRegisterOffset = 0x50,
- DAC960_LA_CommandIdentifierRegisterOffset = 0x51,
- DAC960_LA_MailboxRegister2Offset = 0x52,
- DAC960_LA_MailboxRegister3Offset = 0x53,
- DAC960_LA_MailboxRegister4Offset = 0x54,
- DAC960_LA_MailboxRegister5Offset = 0x55,
- DAC960_LA_MailboxRegister6Offset = 0x56,
- DAC960_LA_MailboxRegister7Offset = 0x57,
- DAC960_LA_MailboxRegister8Offset = 0x58,
- DAC960_LA_MailboxRegister9Offset = 0x59,
- DAC960_LA_MailboxRegister10Offset = 0x5A,
- DAC960_LA_MailboxRegister11Offset = 0x5B,
- DAC960_LA_MailboxRegister12Offset = 0x5C,
- DAC960_LA_StatusCommandIdentifierRegOffset = 0x5D,
- DAC960_LA_StatusRegisterOffset = 0x5E,
- DAC960_LA_ErrorStatusRegisterOffset = 0x63
-}
-DAC960_LA_RegisterOffsets_T;
-
-
-/*
- Define the structure of the DAC960 LA Series Inbound Door Bell Register.
-*/
-
-typedef union DAC960_LA_InboundDoorBellRegister
-{
- unsigned char All;
- struct {
- bool HardwareMailboxNewCommand:1; /* Bit 0 */
- bool AcknowledgeHardwareMailboxStatus:1; /* Bit 1 */
- bool GenerateInterrupt:1; /* Bit 2 */
- bool ControllerReset:1; /* Bit 3 */
- bool MemoryMailboxNewCommand:1; /* Bit 4 */
- unsigned char :3; /* Bits 5-7 */
- } Write;
- struct {
- bool HardwareMailboxEmpty:1; /* Bit 0 */
- bool InitializationNotInProgress:1; /* Bit 1 */
- unsigned char :6; /* Bits 2-7 */
- } Read;
-}
-DAC960_LA_InboundDoorBellRegister_T;
-
-
-/*
- Define the structure of the DAC960 LA Series Outbound Door Bell Register.
-*/
-
-typedef union DAC960_LA_OutboundDoorBellRegister
-{
- unsigned char All;
- struct {
- bool AcknowledgeHardwareMailboxInterrupt:1; /* Bit 0 */
- bool AcknowledgeMemoryMailboxInterrupt:1; /* Bit 1 */
- unsigned char :6; /* Bits 2-7 */
- } Write;
- struct {
- bool HardwareMailboxStatusAvailable:1; /* Bit 0 */
- bool MemoryMailboxStatusAvailable:1; /* Bit 1 */
- unsigned char :6; /* Bits 2-7 */
- } Read;
-}
-DAC960_LA_OutboundDoorBellRegister_T;
-
-
-/*
- Define the structure of the DAC960 LA Series Interrupt Mask Register.
-*/
-
-typedef union DAC960_LA_InterruptMaskRegister
-{
- unsigned char All;
- struct {
- unsigned char :2; /* Bits 0-1 */
- bool DisableInterrupts:1; /* Bit 2 */
- unsigned char :5; /* Bits 3-7 */
- } Bits;
-}
-DAC960_LA_InterruptMaskRegister_T;
-
-
-/*
- Define the structure of the DAC960 LA Series Error Status Register.
-*/
-
-typedef union DAC960_LA_ErrorStatusRegister
-{
- unsigned char All;
- struct {
- unsigned int :2; /* Bits 0-1 */
- bool ErrorStatusPending:1; /* Bit 2 */
- unsigned int :5; /* Bits 3-7 */
- } Bits;
-}
-DAC960_LA_ErrorStatusRegister_T;
-
-
-/*
- Define inline functions to provide an abstraction for reading and writing the
- DAC960 LA Series Controller Interface Registers.
-*/
-
-static inline
-void DAC960_LA_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LA_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LA_GenerateInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.GenerateInterrupt = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LA_ControllerReset(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.ControllerReset = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LA_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
-}
-
-static inline
-bool DAC960_LA_HardwareMailboxFullP(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
- return !InboundDoorBellRegister.Read.HardwareMailboxEmpty;
-}
-
-static inline
-bool DAC960_LA_InitializationInProgressP(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
- return !InboundDoorBellRegister.Read.InitializationNotInProgress;
-}
-
-static inline
-void DAC960_LA_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
- writeb(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LA_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
- writeb(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_LA_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
- OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
- writeb(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-bool DAC960_LA_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset);
- return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
-}
-
-static inline
-bool DAC960_LA_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset);
- return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
-}
-
-static inline
-void DAC960_LA_EnableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All = 0xFF;
- InterruptMaskRegister.Bits.DisableInterrupts = false;
- writeb(InterruptMaskRegister.All,
- ControllerBaseAddress + DAC960_LA_InterruptMaskRegisterOffset);
-}
-
-static inline
-void DAC960_LA_DisableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All = 0xFF;
- InterruptMaskRegister.Bits.DisableInterrupts = true;
- writeb(InterruptMaskRegister.All,
- ControllerBaseAddress + DAC960_LA_InterruptMaskRegisterOffset);
-}
-
-static inline
-bool DAC960_LA_InterruptsEnabledP(void __iomem *ControllerBaseAddress)
-{
- DAC960_LA_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All =
- readb(ControllerBaseAddress + DAC960_LA_InterruptMaskRegisterOffset);
- return !InterruptMaskRegister.Bits.DisableInterrupts;
-}
-
-static inline
-void DAC960_LA_WriteCommandMailbox(DAC960_V1_CommandMailbox_T
- *MemoryCommandMailbox,
- DAC960_V1_CommandMailbox_T
- *CommandMailbox)
-{
- MemoryCommandMailbox->Words[1] = CommandMailbox->Words[1];
- MemoryCommandMailbox->Words[2] = CommandMailbox->Words[2];
- MemoryCommandMailbox->Words[3] = CommandMailbox->Words[3];
- wmb();
- MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0];
- mb();
-}
-
-static inline
-void DAC960_LA_WriteHardwareMailbox(void __iomem *ControllerBaseAddress,
- DAC960_V1_CommandMailbox_T *CommandMailbox)
-{
- writel(CommandMailbox->Words[0],
- ControllerBaseAddress + DAC960_LA_CommandOpcodeRegisterOffset);
- writel(CommandMailbox->Words[1],
- ControllerBaseAddress + DAC960_LA_MailboxRegister4Offset);
- writel(CommandMailbox->Words[2],
- ControllerBaseAddress + DAC960_LA_MailboxRegister8Offset);
- writeb(CommandMailbox->Bytes[12],
- ControllerBaseAddress + DAC960_LA_MailboxRegister12Offset);
-}
-
-static inline DAC960_V1_CommandIdentifier_T
-DAC960_LA_ReadStatusCommandIdentifier(void __iomem *ControllerBaseAddress)
-{
- return readb(ControllerBaseAddress
- + DAC960_LA_StatusCommandIdentifierRegOffset);
-}
-
-static inline DAC960_V1_CommandStatus_T
-DAC960_LA_ReadStatusRegister(void __iomem *ControllerBaseAddress)
-{
- return readw(ControllerBaseAddress + DAC960_LA_StatusRegisterOffset);
-}
-
-static inline bool
-DAC960_LA_ReadErrorStatus(void __iomem *ControllerBaseAddress,
- unsigned char *ErrorStatus,
- unsigned char *Parameter0,
- unsigned char *Parameter1)
-{
- DAC960_LA_ErrorStatusRegister_T ErrorStatusRegister;
- ErrorStatusRegister.All =
- readb(ControllerBaseAddress + DAC960_LA_ErrorStatusRegisterOffset);
- if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
- ErrorStatusRegister.Bits.ErrorStatusPending = false;
- *ErrorStatus = ErrorStatusRegister.All;
- *Parameter0 =
- readb(ControllerBaseAddress + DAC960_LA_CommandOpcodeRegisterOffset);
- *Parameter1 =
- readb(ControllerBaseAddress + DAC960_LA_CommandIdentifierRegisterOffset);
- writeb(0xFF, ControllerBaseAddress + DAC960_LA_ErrorStatusRegisterOffset);
- return true;
-}
-
-/*
- Define the DAC960 PG Series Controller Interface Register Offsets.
-*/
-
-#define DAC960_PG_RegisterWindowSize 0x2000
-
-typedef enum
-{
- DAC960_PG_InboundDoorBellRegisterOffset = 0x0020,
- DAC960_PG_OutboundDoorBellRegisterOffset = 0x002C,
- DAC960_PG_InterruptMaskRegisterOffset = 0x0034,
- DAC960_PG_CommandOpcodeRegisterOffset = 0x1000,
- DAC960_PG_CommandIdentifierRegisterOffset = 0x1001,
- DAC960_PG_MailboxRegister2Offset = 0x1002,
- DAC960_PG_MailboxRegister3Offset = 0x1003,
- DAC960_PG_MailboxRegister4Offset = 0x1004,
- DAC960_PG_MailboxRegister5Offset = 0x1005,
- DAC960_PG_MailboxRegister6Offset = 0x1006,
- DAC960_PG_MailboxRegister7Offset = 0x1007,
- DAC960_PG_MailboxRegister8Offset = 0x1008,
- DAC960_PG_MailboxRegister9Offset = 0x1009,
- DAC960_PG_MailboxRegister10Offset = 0x100A,
- DAC960_PG_MailboxRegister11Offset = 0x100B,
- DAC960_PG_MailboxRegister12Offset = 0x100C,
- DAC960_PG_StatusCommandIdentifierRegOffset = 0x1018,
- DAC960_PG_StatusRegisterOffset = 0x101A,
- DAC960_PG_ErrorStatusRegisterOffset = 0x103F
-}
-DAC960_PG_RegisterOffsets_T;
-
-
-/*
- Define the structure of the DAC960 PG Series Inbound Door Bell Register.
-*/
-
-typedef union DAC960_PG_InboundDoorBellRegister
-{
- unsigned int All;
- struct {
- bool HardwareMailboxNewCommand:1; /* Bit 0 */
- bool AcknowledgeHardwareMailboxStatus:1; /* Bit 1 */
- bool GenerateInterrupt:1; /* Bit 2 */
- bool ControllerReset:1; /* Bit 3 */
- bool MemoryMailboxNewCommand:1; /* Bit 4 */
- unsigned int :27; /* Bits 5-31 */
- } Write;
- struct {
- bool HardwareMailboxFull:1; /* Bit 0 */
- bool InitializationInProgress:1; /* Bit 1 */
- unsigned int :30; /* Bits 2-31 */
- } Read;
-}
-DAC960_PG_InboundDoorBellRegister_T;
-
-
-/*
- Define the structure of the DAC960 PG Series Outbound Door Bell Register.
-*/
-
-typedef union DAC960_PG_OutboundDoorBellRegister
-{
- unsigned int All;
- struct {
- bool AcknowledgeHardwareMailboxInterrupt:1; /* Bit 0 */
- bool AcknowledgeMemoryMailboxInterrupt:1; /* Bit 1 */
- unsigned int :30; /* Bits 2-31 */
- } Write;
- struct {
- bool HardwareMailboxStatusAvailable:1; /* Bit 0 */
- bool MemoryMailboxStatusAvailable:1; /* Bit 1 */
- unsigned int :30; /* Bits 2-31 */
- } Read;
-}
-DAC960_PG_OutboundDoorBellRegister_T;
-
-
-/*
- Define the structure of the DAC960 PG Series Interrupt Mask Register.
-*/
-
-typedef union DAC960_PG_InterruptMaskRegister
-{
- unsigned int All;
- struct {
- unsigned int MessageUnitInterruptMask1:2; /* Bits 0-1 */
- bool DisableInterrupts:1; /* Bit 2 */
- unsigned int MessageUnitInterruptMask2:5; /* Bits 3-7 */
- unsigned int Reserved0:24; /* Bits 8-31 */
- } Bits;
-}
-DAC960_PG_InterruptMaskRegister_T;
-
-
-/*
- Define the structure of the DAC960 PG Series Error Status Register.
-*/
-
-typedef union DAC960_PG_ErrorStatusRegister
-{
- unsigned char All;
- struct {
- unsigned int :2; /* Bits 0-1 */
- bool ErrorStatusPending:1; /* Bit 2 */
- unsigned int :5; /* Bits 3-7 */
- } Bits;
-}
-DAC960_PG_ErrorStatusRegister_T;
-
-
-/*
- Define inline functions to provide an abstraction for reading and writing the
- DAC960 PG Series Controller Interface Registers.
-*/
-
-static inline
-void DAC960_PG_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
- writel(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_PG_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
- writel(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_PG_GenerateInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.GenerateInterrupt = true;
- writel(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_PG_ControllerReset(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.ControllerReset = true;
- writel(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_PG_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
- writel(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
-}
-
-static inline
-bool DAC960_PG_HardwareMailboxFullP(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readl(ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
- return InboundDoorBellRegister.Read.HardwareMailboxFull;
-}
-
-static inline
-bool DAC960_PG_InitializationInProgressP(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readl(ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
- return InboundDoorBellRegister.Read.InitializationInProgress;
-}
-
-static inline
-void DAC960_PG_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
- writel(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_PG_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
- writel(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_PG_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
- OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
- writel(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-bool DAC960_PG_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All =
- readl(ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset);
- return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
-}
-
-static inline
-bool DAC960_PG_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All =
- readl(ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset);
- return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
-}
-
-static inline
-void DAC960_PG_EnableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All = 0;
- InterruptMaskRegister.Bits.MessageUnitInterruptMask1 = 0x3;
- InterruptMaskRegister.Bits.DisableInterrupts = false;
- InterruptMaskRegister.Bits.MessageUnitInterruptMask2 = 0x1F;
- writel(InterruptMaskRegister.All,
- ControllerBaseAddress + DAC960_PG_InterruptMaskRegisterOffset);
-}
-
-static inline
-void DAC960_PG_DisableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All = 0;
- InterruptMaskRegister.Bits.MessageUnitInterruptMask1 = 0x3;
- InterruptMaskRegister.Bits.DisableInterrupts = true;
- InterruptMaskRegister.Bits.MessageUnitInterruptMask2 = 0x1F;
- writel(InterruptMaskRegister.All,
- ControllerBaseAddress + DAC960_PG_InterruptMaskRegisterOffset);
-}
-
-static inline
-bool DAC960_PG_InterruptsEnabledP(void __iomem *ControllerBaseAddress)
-{
- DAC960_PG_InterruptMaskRegister_T InterruptMaskRegister;
- InterruptMaskRegister.All =
- readl(ControllerBaseAddress + DAC960_PG_InterruptMaskRegisterOffset);
- return !InterruptMaskRegister.Bits.DisableInterrupts;
-}
-
-static inline
-void DAC960_PG_WriteCommandMailbox(DAC960_V1_CommandMailbox_T
- *MemoryCommandMailbox,
- DAC960_V1_CommandMailbox_T
- *CommandMailbox)
-{
- MemoryCommandMailbox->Words[1] = CommandMailbox->Words[1];
- MemoryCommandMailbox->Words[2] = CommandMailbox->Words[2];
- MemoryCommandMailbox->Words[3] = CommandMailbox->Words[3];
- wmb();
- MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0];
- mb();
-}
-
-static inline
-void DAC960_PG_WriteHardwareMailbox(void __iomem *ControllerBaseAddress,
- DAC960_V1_CommandMailbox_T *CommandMailbox)
-{
- writel(CommandMailbox->Words[0],
- ControllerBaseAddress + DAC960_PG_CommandOpcodeRegisterOffset);
- writel(CommandMailbox->Words[1],
- ControllerBaseAddress + DAC960_PG_MailboxRegister4Offset);
- writel(CommandMailbox->Words[2],
- ControllerBaseAddress + DAC960_PG_MailboxRegister8Offset);
- writeb(CommandMailbox->Bytes[12],
- ControllerBaseAddress + DAC960_PG_MailboxRegister12Offset);
-}
-
-static inline DAC960_V1_CommandIdentifier_T
-DAC960_PG_ReadStatusCommandIdentifier(void __iomem *ControllerBaseAddress)
-{
- return readb(ControllerBaseAddress
- + DAC960_PG_StatusCommandIdentifierRegOffset);
-}
-
-static inline DAC960_V1_CommandStatus_T
-DAC960_PG_ReadStatusRegister(void __iomem *ControllerBaseAddress)
-{
- return readw(ControllerBaseAddress + DAC960_PG_StatusRegisterOffset);
-}
-
-static inline bool
-DAC960_PG_ReadErrorStatus(void __iomem *ControllerBaseAddress,
- unsigned char *ErrorStatus,
- unsigned char *Parameter0,
- unsigned char *Parameter1)
-{
- DAC960_PG_ErrorStatusRegister_T ErrorStatusRegister;
- ErrorStatusRegister.All =
- readb(ControllerBaseAddress + DAC960_PG_ErrorStatusRegisterOffset);
- if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
- ErrorStatusRegister.Bits.ErrorStatusPending = false;
- *ErrorStatus = ErrorStatusRegister.All;
- *Parameter0 =
- readb(ControllerBaseAddress + DAC960_PG_CommandOpcodeRegisterOffset);
- *Parameter1 =
- readb(ControllerBaseAddress + DAC960_PG_CommandIdentifierRegisterOffset);
- writeb(0, ControllerBaseAddress + DAC960_PG_ErrorStatusRegisterOffset);
- return true;
-}
-
-/*
- Define the DAC960 PD Series Controller Interface Register Offsets.
-*/
-
-#define DAC960_PD_RegisterWindowSize 0x80
-
-typedef enum
-{
- DAC960_PD_CommandOpcodeRegisterOffset = 0x00,
- DAC960_PD_CommandIdentifierRegisterOffset = 0x01,
- DAC960_PD_MailboxRegister2Offset = 0x02,
- DAC960_PD_MailboxRegister3Offset = 0x03,
- DAC960_PD_MailboxRegister4Offset = 0x04,
- DAC960_PD_MailboxRegister5Offset = 0x05,
- DAC960_PD_MailboxRegister6Offset = 0x06,
- DAC960_PD_MailboxRegister7Offset = 0x07,
- DAC960_PD_MailboxRegister8Offset = 0x08,
- DAC960_PD_MailboxRegister9Offset = 0x09,
- DAC960_PD_MailboxRegister10Offset = 0x0A,
- DAC960_PD_MailboxRegister11Offset = 0x0B,
- DAC960_PD_MailboxRegister12Offset = 0x0C,
- DAC960_PD_StatusCommandIdentifierRegOffset = 0x0D,
- DAC960_PD_StatusRegisterOffset = 0x0E,
- DAC960_PD_ErrorStatusRegisterOffset = 0x3F,
- DAC960_PD_InboundDoorBellRegisterOffset = 0x40,
- DAC960_PD_OutboundDoorBellRegisterOffset = 0x41,
- DAC960_PD_InterruptEnableRegisterOffset = 0x43
-}
-DAC960_PD_RegisterOffsets_T;
-
-
-/*
- Define the structure of the DAC960 PD Series Inbound Door Bell Register.
-*/
-
-typedef union DAC960_PD_InboundDoorBellRegister
-{
- unsigned char All;
- struct {
- bool NewCommand:1; /* Bit 0 */
- bool AcknowledgeStatus:1; /* Bit 1 */
- bool GenerateInterrupt:1; /* Bit 2 */
- bool ControllerReset:1; /* Bit 3 */
- unsigned char :4; /* Bits 4-7 */
- } Write;
- struct {
- bool MailboxFull:1; /* Bit 0 */
- bool InitializationInProgress:1; /* Bit 1 */
- unsigned char :6; /* Bits 2-7 */
- } Read;
-}
-DAC960_PD_InboundDoorBellRegister_T;
-
-
-/*
- Define the structure of the DAC960 PD Series Outbound Door Bell Register.
-*/
-
-typedef union DAC960_PD_OutboundDoorBellRegister
-{
- unsigned char All;
- struct {
- bool AcknowledgeInterrupt:1; /* Bit 0 */
- unsigned char :7; /* Bits 1-7 */
- } Write;
- struct {
- bool StatusAvailable:1; /* Bit 0 */
- unsigned char :7; /* Bits 1-7 */
- } Read;
-}
-DAC960_PD_OutboundDoorBellRegister_T;
-
-
-/*
- Define the structure of the DAC960 PD Series Interrupt Enable Register.
-*/
-
-typedef union DAC960_PD_InterruptEnableRegister
-{
- unsigned char All;
- struct {
- bool EnableInterrupts:1; /* Bit 0 */
- unsigned char :7; /* Bits 1-7 */
- } Bits;
-}
-DAC960_PD_InterruptEnableRegister_T;
-
-
-/*
- Define the structure of the DAC960 PD Series Error Status Register.
-*/
-
-typedef union DAC960_PD_ErrorStatusRegister
-{
- unsigned char All;
- struct {
- unsigned int :2; /* Bits 0-1 */
- bool ErrorStatusPending:1; /* Bit 2 */
- unsigned int :5; /* Bits 3-7 */
- } Bits;
-}
-DAC960_PD_ErrorStatusRegister_T;
-
-
-/*
- Define inline functions to provide an abstraction for reading and writing the
- DAC960 PD Series Controller Interface Registers.
-*/
-
-static inline
-void DAC960_PD_NewCommand(void __iomem *ControllerBaseAddress)
-{
- DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.NewCommand = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_PD_AcknowledgeStatus(void __iomem *ControllerBaseAddress)
-{
- DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.AcknowledgeStatus = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_PD_GenerateInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.GenerateInterrupt = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
-}
-
-static inline
-void DAC960_PD_ControllerReset(void __iomem *ControllerBaseAddress)
-{
- DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All = 0;
- InboundDoorBellRegister.Write.ControllerReset = true;
- writeb(InboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
-}
-
-static inline
-bool DAC960_PD_MailboxFullP(void __iomem *ControllerBaseAddress)
-{
- DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
- return InboundDoorBellRegister.Read.MailboxFull;
-}
-
-static inline
-bool DAC960_PD_InitializationInProgressP(void __iomem *ControllerBaseAddress)
-{
- DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
- InboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
- return InboundDoorBellRegister.Read.InitializationInProgress;
-}
-
-static inline
-void DAC960_PD_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress)
-{
- DAC960_PD_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All = 0;
- OutboundDoorBellRegister.Write.AcknowledgeInterrupt = true;
- writeb(OutboundDoorBellRegister.All,
- ControllerBaseAddress + DAC960_PD_OutboundDoorBellRegisterOffset);
-}
-
-static inline
-bool DAC960_PD_StatusAvailableP(void __iomem *ControllerBaseAddress)
-{
- DAC960_PD_OutboundDoorBellRegister_T OutboundDoorBellRegister;
- OutboundDoorBellRegister.All =
- readb(ControllerBaseAddress + DAC960_PD_OutboundDoorBellRegisterOffset);
- return OutboundDoorBellRegister.Read.StatusAvailable;
-}
-
-static inline
-void DAC960_PD_EnableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_PD_InterruptEnableRegister_T InterruptEnableRegister;
- InterruptEnableRegister.All = 0;
- InterruptEnableRegister.Bits.EnableInterrupts = true;
- writeb(InterruptEnableRegister.All,
- ControllerBaseAddress + DAC960_PD_InterruptEnableRegisterOffset);
-}
-
-static inline
-void DAC960_PD_DisableInterrupts(void __iomem *ControllerBaseAddress)
-{
- DAC960_PD_InterruptEnableRegister_T InterruptEnableRegister;
- InterruptEnableRegister.All = 0;
- InterruptEnableRegister.Bits.EnableInterrupts = false;
- writeb(InterruptEnableRegister.All,
- ControllerBaseAddress + DAC960_PD_InterruptEnableRegisterOffset);
-}
-
-static inline
-bool DAC960_PD_InterruptsEnabledP(void __iomem *ControllerBaseAddress)
-{
- DAC960_PD_InterruptEnableRegister_T InterruptEnableRegister;
- InterruptEnableRegister.All =
- readb(ControllerBaseAddress + DAC960_PD_InterruptEnableRegisterOffset);
- return InterruptEnableRegister.Bits.EnableInterrupts;
-}
-
-static inline
-void DAC960_PD_WriteCommandMailbox(void __iomem *ControllerBaseAddress,
- DAC960_V1_CommandMailbox_T *CommandMailbox)
-{
- writel(CommandMailbox->Words[0],
- ControllerBaseAddress + DAC960_PD_CommandOpcodeRegisterOffset);
- writel(CommandMailbox->Words[1],
- ControllerBaseAddress + DAC960_PD_MailboxRegister4Offset);
- writel(CommandMailbox->Words[2],
- ControllerBaseAddress + DAC960_PD_MailboxRegister8Offset);
- writeb(CommandMailbox->Bytes[12],
- ControllerBaseAddress + DAC960_PD_MailboxRegister12Offset);
-}
-
-static inline DAC960_V1_CommandIdentifier_T
-DAC960_PD_ReadStatusCommandIdentifier(void __iomem *ControllerBaseAddress)
-{
- return readb(ControllerBaseAddress
- + DAC960_PD_StatusCommandIdentifierRegOffset);
-}
-
-static inline DAC960_V1_CommandStatus_T
-DAC960_PD_ReadStatusRegister(void __iomem *ControllerBaseAddress)
-{
- return readw(ControllerBaseAddress + DAC960_PD_StatusRegisterOffset);
-}
-
-static inline bool
-DAC960_PD_ReadErrorStatus(void __iomem *ControllerBaseAddress,
- unsigned char *ErrorStatus,
- unsigned char *Parameter0,
- unsigned char *Parameter1)
-{
- DAC960_PD_ErrorStatusRegister_T ErrorStatusRegister;
- ErrorStatusRegister.All =
- readb(ControllerBaseAddress + DAC960_PD_ErrorStatusRegisterOffset);
- if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
- ErrorStatusRegister.Bits.ErrorStatusPending = false;
- *ErrorStatus = ErrorStatusRegister.All;
- *Parameter0 =
- readb(ControllerBaseAddress + DAC960_PD_CommandOpcodeRegisterOffset);
- *Parameter1 =
- readb(ControllerBaseAddress + DAC960_PD_CommandIdentifierRegisterOffset);
- writeb(0, ControllerBaseAddress + DAC960_PD_ErrorStatusRegisterOffset);
- return true;
-}
-
-static inline void DAC960_P_To_PD_TranslateEnquiry(void *Enquiry)
-{
- memcpy(Enquiry + 132, Enquiry + 36, 64);
- memset(Enquiry + 36, 0, 96);
-}
-
-static inline void DAC960_P_To_PD_TranslateDeviceState(void *DeviceState)
-{
- memcpy(DeviceState + 2, DeviceState + 3, 1);
- memmove(DeviceState + 4, DeviceState + 5, 2);
- memmove(DeviceState + 6, DeviceState + 8, 4);
-}
-
-static inline
-void DAC960_PD_To_P_TranslateReadWriteCommand(DAC960_V1_CommandMailbox_T
- *CommandMailbox)
-{
- int LogicalDriveNumber = CommandMailbox->Type5.LD.LogicalDriveNumber;
- CommandMailbox->Bytes[3] &= 0x7;
- CommandMailbox->Bytes[3] |= CommandMailbox->Bytes[7] << 6;
- CommandMailbox->Bytes[7] = LogicalDriveNumber;
-}
-
-static inline
-void DAC960_P_To_PD_TranslateReadWriteCommand(DAC960_V1_CommandMailbox_T
- *CommandMailbox)
-{
- int LogicalDriveNumber = CommandMailbox->Bytes[7];
- CommandMailbox->Bytes[7] = CommandMailbox->Bytes[3] >> 6;
- CommandMailbox->Bytes[3] &= 0x7;
- CommandMailbox->Bytes[3] |= LogicalDriveNumber << 3;
-}
-
-
-/*
- Define prototypes for the forward referenced DAC960 Driver Internal Functions.
-*/
-
-static void DAC960_FinalizeController(DAC960_Controller_T *);
-static void DAC960_V1_QueueReadWriteCommand(DAC960_Command_T *);
-static void DAC960_V2_QueueReadWriteCommand(DAC960_Command_T *);
-static void DAC960_RequestFunction(struct request_queue *);
-static irqreturn_t DAC960_BA_InterruptHandler(int, void *);
-static irqreturn_t DAC960_LP_InterruptHandler(int, void *);
-static irqreturn_t DAC960_LA_InterruptHandler(int, void *);
-static irqreturn_t DAC960_PG_InterruptHandler(int, void *);
-static irqreturn_t DAC960_PD_InterruptHandler(int, void *);
-static irqreturn_t DAC960_P_InterruptHandler(int, void *);
-static void DAC960_V1_QueueMonitoringCommand(DAC960_Command_T *);
-static void DAC960_V2_QueueMonitoringCommand(DAC960_Command_T *);
-static void DAC960_MonitoringTimerFunction(struct timer_list *);
-static void DAC960_Message(DAC960_MessageLevel_T, unsigned char *,
- DAC960_Controller_T *, ...);
-static void DAC960_CreateProcEntries(DAC960_Controller_T *);
-static void DAC960_DestroyProcEntries(DAC960_Controller_T *);
-
-#endif /* DAC960_DriverVersion */
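
The accessors deleted above encode the PD-series handshake: submission polls the inbound doorbell until the controller has consumed the previous mailbox, writes the 13-byte command (three words plus one byte), and rings NewCommand; completion reads the identifier/status pair off the outbound side and acknowledges both the interrupt and the status slot. A sketch of that cycle built only from the removed accessors — the example_*() functions and the bare polling loop are illustrative, not the driver's actual submission path:

static void example_pd_submit(void __iomem *base,
			      DAC960_V1_CommandMailbox_T *mbox)
{
	while (DAC960_PD_MailboxFullP(base))	/* controller still busy */
		cpu_relax();
	DAC960_PD_WriteCommandMailbox(base, mbox);
	DAC960_PD_NewCommand(base);		/* ring the inbound doorbell */
}

static void example_pd_complete(void __iomem *base)
{
	DAC960_V1_CommandIdentifier_T id;
	DAC960_V1_CommandStatus_T status;

	if (!DAC960_PD_StatusAvailableP(base))
		return;
	id = DAC960_PD_ReadStatusCommandIdentifier(base);
	status = DAC960_PD_ReadStatusRegister(base);
	DAC960_PD_AcknowledgeInterrupt(base);	/* clear outbound doorbell */
	DAC960_PD_AcknowledgeStatus(base);	/* free the status slot */
	/* id/status would now be matched to the outstanding command */
	(void)id; (void)status;
}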
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index d4913516823f..20bb4bfa4be6 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -121,18 +121,6 @@ source "drivers/block/mtip32xx/Kconfig"
source "drivers/block/zram/Kconfig"
-config BLK_DEV_DAC960
- tristate "Mylex DAC960/DAC1100 PCI RAID Controller support"
- depends on PCI
- help
- This driver adds support for the Mylex DAC960, AcceleRAID, and
- eXtremeRAID PCI RAID controllers. See the file
- <file:Documentation/blockdev/README.DAC960> for further information
- about this driver.
-
- To compile this driver as a module, choose M here: the
- module will be called DAC960.
-
config BLK_DEV_UMEM
tristate "Micro Memory MM5415 Battery Backed RAM support"
depends on PCI
@@ -461,7 +449,6 @@ config BLK_DEV_RBD
select LIBCRC32C
select CRYPTO_AES
select CRYPTO
- default n
help
Say Y here if you want include the Rados block device, which stripes
a block device over objects stored in the Ceph distributed object
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 8566b188368b..a53cc1e3a2d3 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o
obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
obj-$(CONFIG_BLK_DEV_RAM) += brd.o
obj-$(CONFIG_BLK_DEV_LOOP) += loop.o
-obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o
obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 3aaf6af3ec23..bf996bd44cfc 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -61,10 +61,8 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/mutex.h>
-#include <linux/amifdreg.h>
-#include <linux/amifd.h>
#include <linux/fs.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
@@ -87,6 +85,126 @@
*/
/*
+ * CIAAPRA bits (read only)
+ */
+
+#define DSKRDY (0x1<<5) /* disk ready when low */
+#define DSKTRACK0 (0x1<<4) /* head at track zero when low */
+#define DSKPROT (0x1<<3) /* disk protected when low */
+#define DSKCHANGE (0x1<<2) /* low when disk removed */
+
+/*
+ * CIAAPRB bits (read/write)
+ */
+
+#define DSKMOTOR (0x1<<7) /* motor on when low */
+#define DSKSEL3 (0x1<<6) /* select drive 3 when low */
+#define DSKSEL2 (0x1<<5) /* select drive 2 when low */
+#define DSKSEL1 (0x1<<4) /* select drive 1 when low */
+#define DSKSEL0 (0x1<<3) /* select drive 0 when low */
+#define DSKSIDE (0x1<<2) /* side selection: 0 = upper, 1 = lower */
+#define DSKDIREC (0x1<<1) /* step direction: 0=in, 1=out (to trk 0) */
+#define DSKSTEP (0x1) /* pulse low to step head 1 track */
+
+/*
+ * DSKBYTR bits (read only)
+ */
+
+#define DSKBYT (1<<15) /* register contains valid byte when set */
+#define DMAON (1<<14) /* disk DMA enabled */
+#define DISKWRITE (1<<13) /* disk write bit in DSKLEN enabled */
+#define WORDEQUAL (1<<12) /* DSKSYNC register match when true */
+/* bits 7-0 are data */
+
+/*
+ * ADKCON/ADKCONR bits
+ */
+
+#ifndef SETCLR
+#define ADK_SETCLR (1<<15) /* control bit */
+#endif
+#define ADK_PRECOMP1 (1<<14) /* precompensation selection */
+#define ADK_PRECOMP0 (1<<13) /* 00=none, 01=140ns, 10=280ns, 11=500ns */
+#define ADK_MFMPREC (1<<12) /* 0=GCR precomp., 1=MFM precomp. */
+#define ADK_WORDSYNC (1<<10) /* enable DSKSYNC auto DMA */
+#define ADK_MSBSYNC (1<<9) /* when 1, enable sync on MSbit (for GCR) */
+#define ADK_FAST (1<<8) /* bit cell: 0=2us (GCR), 1=1us (MFM) */
+
+/*
+ * DSKLEN bits
+ */
+
+#define DSKLEN_DMAEN (1<<15)
+#define DSKLEN_WRITE (1<<14)
+
+/*
+ * INTENA/INTREQ bits
+ */
+
+#define DSKINDEX    (0x1<<4)       /* disk index pulse */
+
+/*
+ * Misc
+ */
+
+#define MFM_SYNC 0x4489 /* standard MFM sync value */
+
+/* Values for FD_COMMAND */
+#define FD_RECALIBRATE 0x07 /* move to track 0 */
+#define FD_SEEK 0x0F /* seek track */
+#define FD_READ 0xE6 /* read with MT, MFM, SKip deleted */
+#define FD_WRITE 0xC5 /* write with MT, MFM */
+#define FD_SENSEI 0x08 /* Sense Interrupt Status */
+#define FD_SPECIFY 0x03 /* specify HUT etc */
+#define FD_FORMAT 0x4D /* format one track */
+#define FD_VERSION 0x10 /* get version code */
+#define FD_CONFIGURE 0x13 /* configure FIFO operation */
+#define FD_PERPENDICULAR 0x12 /* perpendicular r/w mode */
+
+#define FD_MAX_UNITS 4 /* Max. Number of drives */
+#define FLOPPY_MAX_SECTORS 22 /* Max. Number of sectors per track */
+
+struct fd_data_type {
+ char *name; /* description of data type */
+ int sects; /* sectors per track */
+ int (*read_fkt)(int); /* read whole track */
+ void (*write_fkt)(int); /* write whole track */
+};
+
+struct fd_drive_type {
+ unsigned long code; /* code returned from drive */
+ char *name; /* description of drive */
+ unsigned int tracks; /* number of tracks */
+ unsigned int heads; /* number of heads */
+ unsigned int read_size; /* raw read size for one track */
+ unsigned int write_size; /* raw write size for one track */
+ unsigned int sect_mult; /* sectors and gap multiplier (HD = 2) */
+ unsigned int precomp1; /* start track for precomp 1 */
+ unsigned int precomp2; /* start track for precomp 2 */
+ unsigned int step_delay; /* time (in ms) for delay after step */
+ unsigned int settle_time; /* time to settle after dir change */
+ unsigned int side_time; /* time needed to change sides */
+};
+
+struct amiga_floppy_struct {
+ struct fd_drive_type *type; /* type of floppy for this unit */
+	struct fd_data_type *dtype;	/* data layout (sector format) for this unit */
+ int track; /* current track (-1 == unknown) */
+	unsigned char *trackbuf;        /* current track (kmalloc()'d) */
+
+ int blocks; /* total # blocks on disk */
+
+	int changed;			/* true when disk changed or state unknown */
+ int disk; /* disk in drive (-1 == unknown) */
+ int motor; /* true when motor is at speed */
+ int busy; /* true when drive is active */
+ int dirty; /* true when trackbuf is not on disk */
+ int status; /* current error code for unit */
+ struct gendisk *gendisk;
+ struct blk_mq_tag_set tag_set;
+};
+
+/*
* Error codes
*/
#define FD_OK 0 /* operation succeeded */
@@ -164,7 +282,6 @@ static volatile int selected = -1; /* currently selected drive */
static int writepending;
static int writefromint;
static char *raw_buf;
-static int fdc_queue;
static DEFINE_SPINLOCK(amiflop_lock);
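
One thing worth keeping in mind about the CIA bit definitions folded in above: they are all active-low, so a drive is selected by pulling its DSKSELn line low, not by setting it. A hypothetical helper (fd_select_mask() does not exist in the driver) just to make the polarity explicit:

/* hypothetical: mask with only drive n's active-low select asserted */
static inline unsigned char fd_select_mask(int drive)
{
	/* 0xff deasserts every line; clearing one bit asserts it */
	return 0xff & ~(DSKSEL0 << drive);	/* DSKSEL0 is bit 3 */
}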
@@ -1337,76 +1454,20 @@ static int get_track(int drive, int track)
return -1;
}
-/*
- * Round-robin between our available drives, doing one request from each
- */
-static struct request *set_next_request(void)
-{
- struct request_queue *q;
- int cnt = FD_MAX_UNITS;
- struct request *rq = NULL;
-
- /* Find next queue we can dispatch from */
- fdc_queue = fdc_queue + 1;
- if (fdc_queue == FD_MAX_UNITS)
- fdc_queue = 0;
-
- for(cnt = FD_MAX_UNITS; cnt > 0; cnt--) {
-
- if (unit[fdc_queue].type->code == FD_NODRIVE) {
- if (++fdc_queue == FD_MAX_UNITS)
- fdc_queue = 0;
- continue;
- }
-
- q = unit[fdc_queue].gendisk->queue;
- if (q) {
- rq = blk_fetch_request(q);
- if (rq)
- break;
- }
-
- if (++fdc_queue == FD_MAX_UNITS)
- fdc_queue = 0;
- }
-
- return rq;
-}
-
-static void redo_fd_request(void)
+static blk_status_t amiflop_rw_cur_segment(struct amiga_floppy_struct *floppy,
+ struct request *rq)
{
- struct request *rq;
+ int drive = floppy - unit;
unsigned int cnt, block, track, sector;
- int drive;
- struct amiga_floppy_struct *floppy;
char *data;
- unsigned long flags;
- blk_status_t err;
-
-next_req:
- rq = set_next_request();
- if (!rq) {
- /* Nothing left to do */
- return;
- }
-
- floppy = rq->rq_disk->private_data;
- drive = floppy - unit;
-next_segment:
- /* Here someone could investigate to be more efficient */
- for (cnt = 0, err = BLK_STS_OK; cnt < blk_rq_cur_sectors(rq); cnt++) {
+ for (cnt = 0; cnt < blk_rq_cur_sectors(rq); cnt++) {
#ifdef DEBUG
printk("fd: sector %ld + %d requested for %s\n",
blk_rq_pos(rq), cnt,
(rq_data_dir(rq) == READ) ? "read" : "write");
#endif
block = blk_rq_pos(rq) + cnt;
- if ((int)block > floppy->blocks) {
- err = BLK_STS_IOERR;
- break;
- }
-
track = block / (floppy->dtype->sects * floppy->type->sect_mult);
sector = block % (floppy->dtype->sects * floppy->type->sect_mult);
data = bio_data(rq->bio) + 512 * cnt;
@@ -1415,10 +1476,8 @@ next_segment:
"0x%08lx\n", track, sector, data);
#endif
- if (get_track(drive, track) == -1) {
- err = BLK_STS_IOERR;
- break;
- }
+ if (get_track(drive, track) == -1)
+ return BLK_STS_IOERR;
if (rq_data_dir(rq) == READ) {
memcpy(data, floppy->trackbuf + sector * 512, 512);
@@ -1426,31 +1485,40 @@ next_segment:
memcpy(floppy->trackbuf + sector * 512, data, 512);
/* keep the drive spinning while writes are scheduled */
- if (!fd_motor_on(drive)) {
- err = BLK_STS_IOERR;
- break;
- }
+ if (!fd_motor_on(drive))
+ return BLK_STS_IOERR;
/*
* setup a callback to write the track buffer
* after a short (1 tick) delay.
*/
- local_irq_save(flags);
-
floppy->dirty = 1;
/* reset the timer */
mod_timer (flush_track_timer + drive, jiffies + 1);
- local_irq_restore(flags);
}
}
- if (__blk_end_request_cur(rq, err))
- goto next_segment;
- goto next_req;
+ return BLK_STS_OK;
}
-static void do_fd_request(struct request_queue * q)
+static blk_status_t amiflop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- redo_fd_request();
+ struct request *rq = bd->rq;
+ struct amiga_floppy_struct *floppy = rq->rq_disk->private_data;
+ blk_status_t err;
+
+ if (!spin_trylock_irq(&amiflop_lock))
+ return BLK_STS_DEV_RESOURCE;
+
+ blk_mq_start_request(rq);
+
+ do {
+ err = amiflop_rw_cur_segment(floppy, rq);
+ } while (blk_update_request(rq, err, blk_rq_cur_bytes(rq)));
+ blk_mq_end_request(rq, err);
+
+ spin_unlock_irq(&amiflop_lock);
+ return BLK_STS_OK;
}
static int fd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
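
The converted handler above follows the standard blk-mq dispatch contract: claim the hardware (or bounce the request back to the core), call blk_mq_start_request() before doing any I/O, advance the request with blk_update_request() per completed chunk, and finish it with blk_mq_end_request(). A stripped-down skeleton of that contract — every my_*() helper is a hypothetical stand-in for driver-specific work:

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	blk_status_t err;

	if (!my_try_claim_hardware())		/* hypothetical */
		return BLK_STS_DEV_RESOURCE;	/* core will redispatch */

	blk_mq_start_request(rq);		/* before any completion */
	do {
		err = my_do_one_segment(rq);	/* hypothetical */
	} while (blk_update_request(rq, err, blk_rq_cur_bytes(rq)));
	blk_mq_end_request(rq, err);

	my_release_hardware();			/* hypothetical */
	return BLK_STS_OK;
}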
@@ -1701,11 +1769,47 @@ static const struct block_device_operations floppy_fops = {
.check_events = amiga_check_events,
};
+static const struct blk_mq_ops amiflop_mq_ops = {
+ .queue_rq = amiflop_queue_rq,
+};
+
+static struct gendisk *fd_alloc_disk(int drive)
+{
+ struct gendisk *disk;
+
+ disk = alloc_disk(1);
+ if (!disk)
+ goto out;
+
+ disk->queue = blk_mq_init_sq_queue(&unit[drive].tag_set, &amiflop_mq_ops,
+ 2, BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(disk->queue)) {
+ disk->queue = NULL;
+ goto out_put_disk;
+ }
+
+ unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL);
+ if (!unit[drive].trackbuf)
+ goto out_cleanup_queue;
+
+ return disk;
+
+out_cleanup_queue:
+ blk_cleanup_queue(disk->queue);
+ disk->queue = NULL;
+ blk_mq_free_tag_set(&unit[drive].tag_set);
+out_put_disk:
+ put_disk(disk);
+out:
+ unit[drive].type->code = FD_NODRIVE;
+ return NULL;
+}
+
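
fd_alloc_disk() leans on blk_mq_init_sq_queue(), the 4.20 convenience helper for single-hardware-queue drivers: it fills in the tag set (one hw queue, the given depth and flags), allocates it, and creates the queue, cleaning up the tag set itself if queue creation fails, so the caller has nothing to unwind on error. A sketch of its use outside the driver, assuming a my_mq_ops and a caller-provided set:

static int example_init_queue(struct gendisk *disk,
			      struct blk_mq_tag_set *set)
{
	struct request_queue *q;

	q = blk_mq_init_sq_queue(set, &my_mq_ops, 2 /* depth */,
				 BLK_MQ_F_SHOULD_MERGE);
	if (IS_ERR(q))
		return PTR_ERR(q);	/* nothing for the caller to free */
	disk->queue = q;
	return 0;
}

Teardown pairs blk_cleanup_queue() on the queue with blk_mq_free_tag_set() on the set, as the out_cleanup_queue path above shows.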
static int __init fd_probe_drives(void)
{
int drive,drives,nomem;
- printk(KERN_INFO "FD: probing units\nfound ");
+ pr_info("FD: probing units\nfound");
drives=0;
nomem=0;
for(drive=0;drive<FD_MAX_UNITS;drive++) {
@@ -1713,27 +1817,17 @@ static int __init fd_probe_drives(void)
fd_probe(drive);
if (unit[drive].type->code == FD_NODRIVE)
continue;
- disk = alloc_disk(1);
+
+ disk = fd_alloc_disk(drive);
if (!disk) {
- unit[drive].type->code = FD_NODRIVE;
+ pr_cont(" no mem for fd%d", drive);
+ nomem = 1;
continue;
}
unit[drive].gendisk = disk;
-
- disk->queue = blk_init_queue(do_fd_request, &amiflop_lock);
- if (!disk->queue) {
- unit[drive].type->code = FD_NODRIVE;
- continue;
- }
-
drives++;
- if ((unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL)) == NULL) {
- printk("no mem for ");
- unit[drive].type = &drive_types[num_dr_types - 1]; /* FD_NODRIVE */
- drives--;
- nomem = 1;
- }
- printk("fd%d ",drive);
+
+ pr_cont(" fd%d",drive);
disk->major = FLOPPY_MAJOR;
disk->first_minor = drive;
disk->fops = &floppy_fops;
@@ -1744,11 +1838,11 @@ static int __init fd_probe_drives(void)
}
if ((drives > 0) || (nomem == 0)) {
if (drives == 0)
- printk("no drives");
- printk("\n");
+ pr_cont(" no drives");
+ pr_cont("\n");
return drives;
}
- printk("\n");
+ pr_cont("\n");
return -ENOMEM;
}
@@ -1831,30 +1925,6 @@ out_blkdev:
return ret;
}
-#if 0 /* not safe to unload */
-static int __exit amiga_floppy_remove(struct platform_device *pdev)
-{
- int i;
-
- for( i = 0; i < FD_MAX_UNITS; i++) {
- if (unit[i].type->code != FD_NODRIVE) {
- struct request_queue *q = unit[i].gendisk->queue;
- del_gendisk(unit[i].gendisk);
- put_disk(unit[i].gendisk);
- kfree(unit[i].trackbuf);
- if (q)
- blk_cleanup_queue(q);
- }
- }
- blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
- free_irq(IRQ_AMIGA_CIAA_TB, NULL);
- free_irq(IRQ_AMIGA_DSKBLK, NULL);
- custom.dmacon = DMAF_DISK; /* disable DMA */
- amiga_chip_free(raw_buf);
- unregister_blkdev(FLOPPY_MAJOR, "fd");
-}
-#endif
-
static struct platform_driver amiga_floppy_driver = {
.driver = {
.name = "amiga-floppy",
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index c0ebda1283cc..7ca76ed2e71a 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -1,4 +1,6 @@
/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
+#include <linux/blk-mq.h>
+
#define VERSION "85"
#define AOE_MAJOR 152
#define DEVICE_NAME "aoe"
@@ -164,6 +166,8 @@ struct aoedev {
struct gendisk *gd;
struct dentry *debugfs;
struct request_queue *blkq;
+ struct list_head rq_list;
+ struct blk_mq_tag_set tag_set;
struct hd_geometry geo;
sector_t ssize;
struct timer_list timer;
@@ -201,7 +205,6 @@ int aoeblk_init(void);
void aoeblk_exit(void);
void aoeblk_gdalloc(void *);
void aoedisk_rm_debugfs(struct aoedev *d);
-void aoedisk_rm_sysfs(struct aoedev *d);
int aoechr_init(void);
void aoechr_exit(void);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 429ebb84b592..ed26b7287256 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -6,7 +6,7 @@
#include <linux/kernel.h>
#include <linux/hdreg.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
@@ -177,10 +177,15 @@ static struct attribute *aoe_attrs[] = {
NULL,
};
-static const struct attribute_group attr_group = {
+static const struct attribute_group aoe_attr_group = {
.attrs = aoe_attrs,
};
+static const struct attribute_group *aoe_attr_groups[] = {
+ &aoe_attr_group,
+ NULL,
+};
+
static const struct file_operations aoe_debugfs_fops = {
.open = aoe_debugfs_open,
.read = seq_read,
@@ -220,17 +225,6 @@ aoedisk_rm_debugfs(struct aoedev *d)
}
static int
-aoedisk_add_sysfs(struct aoedev *d)
-{
- return sysfs_create_group(&disk_to_dev(d->gd)->kobj, &attr_group);
-}
-void
-aoedisk_rm_sysfs(struct aoedev *d)
-{
- sysfs_remove_group(&disk_to_dev(d->gd)->kobj, &attr_group);
-}
-
-static int
aoeblk_open(struct block_device *bdev, fmode_t mode)
{
struct aoedev *d = bdev->bd_disk->private_data;
@@ -274,23 +268,25 @@ aoeblk_release(struct gendisk *disk, fmode_t mode)
spin_unlock_irqrestore(&d->lock, flags);
}
-static void
-aoeblk_request(struct request_queue *q)
+static blk_status_t aoeblk_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- struct aoedev *d;
- struct request *rq;
+ struct aoedev *d = hctx->queue->queuedata;
+
+ spin_lock_irq(&d->lock);
- d = q->queuedata;
if ((d->flags & DEVFL_UP) == 0) {
pr_info_ratelimited("aoe: device %ld.%d is not up\n",
d->aoemajor, d->aoeminor);
- while ((rq = blk_peek_request(q))) {
- blk_start_request(rq);
- aoe_end_request(d, rq, 1);
- }
- return;
+ spin_unlock_irq(&d->lock);
+ blk_mq_start_request(bd->rq);
+ return BLK_STS_IOERR;
}
+
+ list_add_tail(&bd->rq->queuelist, &d->rq_list);
aoecmd_work(d);
+ spin_unlock_irq(&d->lock);
+ return BLK_STS_OK;
}
static int
@@ -345,6 +341,10 @@ static const struct block_device_operations aoe_bdops = {
.owner = THIS_MODULE,
};
+static const struct blk_mq_ops aoeblk_mq_ops = {
+ .queue_rq = aoeblk_queue_rq,
+};
+
/* alloc_disk and add_disk can sleep */
void
aoeblk_gdalloc(void *vp)
@@ -353,9 +353,11 @@ aoeblk_gdalloc(void *vp)
struct gendisk *gd;
mempool_t *mp;
struct request_queue *q;
+ struct blk_mq_tag_set *set;
enum { KB = 1024, MB = KB * KB, READ_AHEAD = 2 * MB, };
ulong flags;
int late = 0;
+ int err;
spin_lock_irqsave(&d->lock, flags);
if (d->flags & DEVFL_GDALLOC
@@ -382,10 +384,25 @@ aoeblk_gdalloc(void *vp)
d->aoemajor, d->aoeminor);
goto err_disk;
}
- q = blk_init_queue(aoeblk_request, &d->lock);
- if (q == NULL) {
+
+ set = &d->tag_set;
+ set->ops = &aoeblk_mq_ops;
+ set->nr_hw_queues = 1;
+ set->queue_depth = 128;
+ set->numa_node = NUMA_NO_NODE;
+ set->flags = BLK_MQ_F_SHOULD_MERGE;
+ err = blk_mq_alloc_tag_set(set);
+ if (err) {
+ pr_err("aoe: cannot allocate tag set for %ld.%d\n",
+ d->aoemajor, d->aoeminor);
+ goto err_mempool;
+ }
+
+ q = blk_mq_init_queue(set);
+ if (IS_ERR(q)) {
pr_err("aoe: cannot allocate block queue for %ld.%d\n",
d->aoemajor, d->aoeminor);
+ blk_mq_free_tag_set(set);
goto err_mempool;
}
@@ -417,8 +434,7 @@ aoeblk_gdalloc(void *vp)
spin_unlock_irqrestore(&d->lock, flags);
- add_disk(gd);
- aoedisk_add_sysfs(d);
+ device_add_disk(NULL, gd, aoe_attr_groups);
aoedisk_add_debugfs(d);
spin_lock_irqsave(&d->lock, flags);
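
Unlike the floppy drivers, aoe does not service the request inside ->queue_rq(): under the device lock it only links the request onto d->rq_list and pokes the existing aoecmd state machine, which dequeues entries later and only then starts them (see the nextbuf() hunk below, where blk_mq_start_request() is called). The general shape of that defer-to-list dispatch, with hypothetical names:

static blk_status_t my_deferring_queue_rq(struct blk_mq_hw_ctx *hctx,
					  const struct blk_mq_queue_data *bd)
{
	struct my_dev *d = hctx->queue->queuedata;	/* hypothetical */

	spin_lock_irq(&d->lock);
	/* request is *not* started here; the consumer does that */
	list_add_tail(&bd->rq->queuelist, &d->rq_list);
	my_kick_state_machine(d);			/* hypothetical */
	spin_unlock_irq(&d->lock);
	return BLK_STS_OK;
}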
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 136dc507d020..bb2fba651bd2 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -7,7 +7,7 @@
#include <linux/ata.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
@@ -813,7 +813,7 @@ rexmit_timer(struct timer_list *timer)
out:
if ((d->flags & DEVFL_KICKME) && d->blkq) {
d->flags &= ~DEVFL_KICKME;
- d->blkq->request_fn(d->blkq);
+ blk_mq_run_hw_queues(d->blkq, true);
}
d->timer.expires = jiffies + TIMERTICK;
@@ -857,10 +857,12 @@ nextbuf(struct aoedev *d)
return d->ip.buf;
rq = d->ip.rq;
if (rq == NULL) {
- rq = blk_peek_request(q);
+ rq = list_first_entry_or_null(&d->rq_list, struct request,
+ queuelist);
if (rq == NULL)
return NULL;
- blk_start_request(rq);
+ list_del_init(&rq->queuelist);
+ blk_mq_start_request(rq);
d->ip.rq = rq;
d->ip.nxbio = rq->bio;
rq->special = (void *) rqbiocnt(rq);
@@ -1045,6 +1047,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
struct bio *bio;
int bok;
struct request_queue *q;
+ blk_status_t err = BLK_STS_OK;
q = d->blkq;
if (rq == d->ip.rq)
@@ -1052,11 +1055,15 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
do {
bio = rq->bio;
bok = !fastfail && !bio->bi_status;
- } while (__blk_end_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
+ if (!bok)
+ err = BLK_STS_IOERR;
+ } while (blk_update_request(rq, bok ? BLK_STS_OK : BLK_STS_IOERR, bio->bi_iter.bi_size));
+
+ __blk_mq_end_request(rq, err);
/* cf. http://lkml.org/lkml/2006/10/31/28 */
if (!fastfail)
- __blk_run_queue(q);
+ blk_mq_run_hw_queues(q, true);
}
static void
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
index 41060e9cedf2..9063f8efbd3b 100644
--- a/drivers/block/aoe/aoedev.c
+++ b/drivers/block/aoe/aoedev.c
@@ -5,7 +5,7 @@
*/
#include <linux/hdreg.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -197,7 +197,6 @@ aoedev_downdev(struct aoedev *d)
{
struct aoetgt *t, **tt, **te;
struct list_head *head, *pos, *nx;
- struct request *rq;
int i;
d->flags &= ~DEVFL_UP;
@@ -225,10 +224,11 @@ aoedev_downdev(struct aoedev *d)
/* fast fail all pending I/O */
if (d->blkq) {
- while ((rq = blk_peek_request(d->blkq))) {
- blk_start_request(rq);
- aoe_end_request(d, rq, 1);
- }
+		/* UP is cleared: freeze+quiesce to ensure all pending I/O is errored */
+ blk_mq_freeze_queue(d->blkq);
+ blk_mq_quiesce_queue(d->blkq);
+ blk_mq_unquiesce_queue(d->blkq);
+ blk_mq_unfreeze_queue(d->blkq);
}
if (d->gd)
@@ -275,9 +275,9 @@ freedev(struct aoedev *d)
del_timer_sync(&d->timer);
if (d->gd) {
aoedisk_rm_debugfs(d);
- aoedisk_rm_sysfs(d);
del_gendisk(d->gd);
put_disk(d->gd);
+ blk_mq_free_tag_set(&d->tag_set);
blk_cleanup_queue(d->blkq);
}
t = d->targets;
@@ -464,6 +464,7 @@ aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
d->ntargets = NTARGETS;
INIT_WORK(&d->work, aoecmd_sleepwork);
spin_lock_init(&d->lock);
+ INIT_LIST_HEAD(&d->rq_list);
skb_queue_head_init(&d->skbpool);
timer_setup(&d->timer, dummy_timer, 0);
d->timer.expires = jiffies + HZ;
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index dfb2c2622e5a..f88b4c26d422 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -66,13 +66,11 @@
#include <linux/fd.h>
#include <linux/delay.h>
#include <linux/init.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/wait.h>
-#include <asm/atafd.h>
-#include <asm/atafdreg.h>
#include <asm/atariints.h>
#include <asm/atari_stdma.h>
#include <asm/atari_stram.h>
@@ -83,7 +81,87 @@
static DEFINE_MUTEX(ataflop_mutex);
static struct request *fd_request;
-static int fdc_queue;
+
+/*
+ * WD1772 stuff
+ */
+
+/* register codes */
+
+#define FDCSELREG_STP (0x80) /* command/status register */
+#define FDCSELREG_TRA (0x82) /* track register */
+#define FDCSELREG_SEC (0x84) /* sector register */
+#define FDCSELREG_DTA (0x86) /* data register */
+
+/* register names for FDC_READ/WRITE macros */
+
+#define FDCREG_CMD 0
+#define FDCREG_STATUS 0
+#define FDCREG_TRACK 2
+#define FDCREG_SECTOR 4
+#define FDCREG_DATA 6
+
+/* command opcodes */
+
+#define FDCCMD_RESTORE (0x00) /* - */
+#define FDCCMD_SEEK (0x10) /* | */
+#define FDCCMD_STEP (0x20) /* | TYP 1 Commands */
+#define FDCCMD_STIN (0x40) /* | */
+#define FDCCMD_STOT (0x60) /* - */
+#define FDCCMD_RDSEC (0x80) /* - TYP 2 Commands */
+#define FDCCMD_WRSEC (0xa0) /* - " */
+#define FDCCMD_RDADR (0xc0) /* - */
+#define FDCCMD_RDTRA (0xe0) /* | TYP 3 Commands */
+#define FDCCMD_WRTRA (0xf0) /* - */
+#define FDCCMD_FORCI (0xd0) /* - TYP 4 Command */
+
+/* command modifier bits */
+
+#define FDCCMDADD_SR6 (0x00) /* step rate settings */
+#define FDCCMDADD_SR12 (0x01)
+#define FDCCMDADD_SR2 (0x02)
+#define FDCCMDADD_SR3 (0x03)
+#define FDCCMDADD_V (0x04) /* verify */
+#define FDCCMDADD_H (0x08) /* wait for spin-up */
+#define FDCCMDADD_U (0x10) /* update track register */
+#define FDCCMDADD_M (0x10) /* multiple sector access */
+#define FDCCMDADD_E (0x04) /* head settling flag */
+#define FDCCMDADD_P (0x02) /* precompensation off */
+#define FDCCMDADD_A0 (0x01) /* DAM flag */
+
+/* status register bits */
+
+#define FDCSTAT_MOTORON (0x80) /* motor on */
+#define FDCSTAT_WPROT (0x40) /* write protected (FDCCMD_WR*) */
+#define FDCSTAT_SPINUP (0x20) /* motor speed stable (Type I) */
+#define FDCSTAT_DELDAM (0x20) /* sector has deleted DAM (Type II+III) */
+#define FDCSTAT_RECNF (0x10) /* record not found */
+#define FDCSTAT_CRC (0x08) /* CRC error */
+#define FDCSTAT_TR00 (0x04) /* Track 00 flag (Type I) */
+#define FDCSTAT_LOST (0x04) /* Lost Data (Type II+III) */
+#define FDCSTAT_IDX (0x02) /* Index status (Type I) */
+#define FDCSTAT_DRQ (0x02) /* DRQ status (Type II+III) */
+#define FDCSTAT_BUSY (0x01) /* FDC is busy */
+
+
+/* PSG port A bit 0: side select (0 -> side 1, 1 -> side 2) */
+#define DSKSIDE (0x01)
+
+#define DSKDRVNONE (0x06)
+#define DSKDRV0 (0x02)
+#define DSKDRV1 (0x04)
+
+/* step rates */
+#define FDCSTEP_6 0x00
+#define FDCSTEP_12 0x01
+#define FDCSTEP_2 0x02
+#define FDCSTEP_3 0x03
+
+struct atari_format_descr {
+ int track; /* to be formatted */
+ int head; /* "" "" */
+ int sect_offset; /* offset of first sector */
+};
/* Disk types: DD, HD, ED */
static struct atari_disk_type {
@@ -221,6 +299,7 @@ static struct atari_floppy_struct {
struct gendisk *disk;
int ref;
int type;
+ struct blk_mq_tag_set tag_set;
} unit[FD_MAX_UNITS];
#define UD unit[drive]
@@ -300,9 +379,6 @@ static int IsFormatting = 0, FormatError;
static int UserSteprate[FD_MAX_UNITS] = { -1, -1 };
module_param_array(UserSteprate, int, NULL, 0);
-/* Synchronization of FDC access. */
-static volatile int fdc_busy = 0;
-static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
static DECLARE_COMPLETION(format_wait);
static unsigned long changed_floppies = 0xff, fake_change = 0;
@@ -362,7 +438,6 @@ static void fd_times_out(struct timer_list *unused);
static void finish_fdc( void );
static void finish_fdc_done( int dummy );
static void setup_req_params( int drive );
-static void redo_fd_request( void);
static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
cmd, unsigned long param);
static void fd_probe( int drive );
@@ -380,8 +455,11 @@ static DEFINE_TIMER(fd_timer, check_change);
static void fd_end_request_cur(blk_status_t err)
{
- if (!__blk_end_request_cur(fd_request, err))
+ if (!blk_update_request(fd_request, err,
+ blk_rq_cur_bytes(fd_request))) {
+ __blk_mq_end_request(fd_request, err);
fd_request = NULL;
+ }
}
static inline void start_motor_off_timer(void)
@@ -627,7 +705,6 @@ static void fd_error( void )
if (SelectedDrive != -1)
SUD.track = -1;
}
- redo_fd_request();
}
@@ -645,14 +722,15 @@ static void fd_error( void )
static int do_format(int drive, int type, struct atari_format_descr *desc)
{
+ struct request_queue *q = unit[drive].disk->queue;
unsigned char *p;
int sect, nsect;
unsigned long flags;
+ int ret;
- DPRINT(("do_format( dr=%d tr=%d he=%d offs=%d )\n",
- drive, desc->track, desc->head, desc->sect_offset ));
+ blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
- wait_event(fdc_wait, cmpxchg(&fdc_busy, 0, 1) == 0);
local_irq_save(flags);
stdma_lock(floppy_irq, NULL);
atari_turnon_irq( IRQ_MFP_FDC ); /* should be already, just to be sure */
@@ -661,16 +739,16 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
if (type) {
if (--type >= NUM_DISK_MINORS ||
minor2disktype[type].drive_types > DriveType) {
- redo_fd_request();
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
type = minor2disktype[type].index;
UDT = &atari_disk_type[type];
}
if (!UDT || desc->track >= UDT->blocks/UDT->spt/2 || desc->head >= 2) {
- redo_fd_request();
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
nsect = UDT->spt;
@@ -709,8 +787,11 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
wait_for_completion(&format_wait);
- redo_fd_request();
- return( FormatError ? -EIO : 0 );
+ ret = FormatError ? -EIO : 0;
+out:
+ blk_mq_unquiesce_queue(q);
+ blk_mq_unfreeze_queue(q);
+ return ret;
}
@@ -740,7 +821,6 @@ static void do_fd_action( int drive )
else {
/* all sectors finished */
fd_end_request_cur(BLK_STS_OK);
- redo_fd_request();
return;
}
}
@@ -1145,7 +1225,6 @@ static void fd_rwsec_done1(int status)
else {
/* all sectors finished */
fd_end_request_cur(BLK_STS_OK);
- redo_fd_request();
}
return;
@@ -1303,8 +1382,6 @@ static void finish_fdc_done( int dummy )
local_irq_save(flags);
stdma_release();
- fdc_busy = 0;
- wake_up( &fdc_wait );
local_irq_restore(flags);
DPRINT(("finish_fdc() finished\n"));
@@ -1394,59 +1471,34 @@ static void setup_req_params( int drive )
ReqTrack, ReqSector, (unsigned long)ReqData ));
}
-/*
- * Round-robin between our available drives, doing one request from each
- */
-static struct request *set_next_request(void)
+static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- struct request_queue *q;
- int old_pos = fdc_queue;
- struct request *rq = NULL;
-
- do {
- q = unit[fdc_queue].disk->queue;
- if (++fdc_queue == FD_MAX_UNITS)
- fdc_queue = 0;
- if (q) {
- rq = blk_fetch_request(q);
- if (rq) {
- rq->error_count = 0;
- break;
- }
- }
- } while (fdc_queue != old_pos);
-
- return rq;
-}
-
+ struct atari_floppy_struct *floppy = bd->rq->rq_disk->private_data;
+ int drive = floppy - unit;
+ int type = floppy->type;
-static void redo_fd_request(void)
-{
- int drive, type;
- struct atari_floppy_struct *floppy;
+ spin_lock_irq(&ataflop_lock);
+ if (fd_request) {
+ spin_unlock_irq(&ataflop_lock);
+ return BLK_STS_DEV_RESOURCE;
+ }
+ if (!stdma_try_lock(floppy_irq, NULL)) {
+ spin_unlock_irq(&ataflop_lock);
+ return BLK_STS_RESOURCE;
+ }
+ fd_request = bd->rq;
+ blk_mq_start_request(fd_request);
- DPRINT(("redo_fd_request: fd_request=%p dev=%s fd_request->sector=%ld\n",
- fd_request, fd_request ? fd_request->rq_disk->disk_name : "",
- fd_request ? blk_rq_pos(fd_request) : 0 ));
+ atari_disable_irq( IRQ_MFP_FDC );
IsFormatting = 0;
-repeat:
- if (!fd_request) {
- fd_request = set_next_request();
- if (!fd_request)
- goto the_end;
- }
-
- floppy = fd_request->rq_disk->private_data;
- drive = floppy - unit;
- type = floppy->type;
-
if (!UD.connected) {
/* drive not connected */
printk(KERN_ERR "Unknown Device: fd%d\n", drive );
fd_end_request_cur(BLK_STS_IOERR);
- goto repeat;
+ goto out;
}
if (type == 0) {
@@ -1462,23 +1514,18 @@ repeat:
if (--type >= NUM_DISK_MINORS) {
printk(KERN_WARNING "fd%d: invalid disk format", drive );
fd_end_request_cur(BLK_STS_IOERR);
- goto repeat;
+ goto out;
}
if (minor2disktype[type].drive_types > DriveType) {
printk(KERN_WARNING "fd%d: unsupported disk format", drive );
fd_end_request_cur(BLK_STS_IOERR);
- goto repeat;
+ goto out;
}
type = minor2disktype[type].index;
UDT = &atari_disk_type[type];
set_capacity(floppy->disk, UDT->blocks);
UD.autoprobe = 0;
}
-
- if (blk_rq_pos(fd_request) + 1 > UDT->blocks) {
- fd_end_request_cur(BLK_STS_IOERR);
- goto repeat;
- }
/* stop deselect timer */
del_timer( &motor_off_timer );
@@ -1490,22 +1537,13 @@ repeat:
setup_req_params( drive );
do_fd_action( drive );
- return;
-
- the_end:
- finish_fdc();
-}
-
-
-void do_fd_request(struct request_queue * q)
-{
- DPRINT(("do_fd_request for pid %d\n",current->pid));
- wait_event(fdc_wait, cmpxchg(&fdc_busy, 0, 1) == 0);
- stdma_lock(floppy_irq, NULL);
-
- atari_disable_irq( IRQ_MFP_FDC );
- redo_fd_request();
+ if (bd->last)
+ finish_fdc();
atari_enable_irq( IRQ_MFP_FDC );
+
+out:
+ spin_unlock_irq(&ataflop_lock);
+ return BLK_STS_OK;
}
static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
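
The two busy returns in ataflop_queue_rq() above are deliberately different. BLK_STS_DEV_RESOURCE (fd_request already set) tells the core the driver owns an in-flight request whose completion will trigger a redispatch, so no retry timer is needed. BLK_STS_RESOURCE (stdma_try_lock() failed) covers a blocker invisible to blk-mq — the ST-DMA interlock shared with other Atari drivers — so the core re-runs the queue on its own after a delay. A fragment sketch with hypothetical helpers standing in for the driver's checks:

	if (my_own_request_in_flight(dev))	/* its completion reruns us */
		return BLK_STS_DEV_RESOURCE;
	if (!my_try_lock_shared_resource())	/* invisible to blk-mq... */
		return BLK_STS_RESOURCE;	/* ...core retries after a delay */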
@@ -1583,7 +1621,6 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
/* what if type > 0 here? Overwrite specified entry ? */
if (type) {
/* refuse to re-set a predefined type for now */
- redo_fd_request();
return -EINVAL;
}
@@ -1651,10 +1688,8 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
/* sanity check */
if (setprm.track != dtp->blocks/dtp->spt/2 ||
- setprm.head != 2) {
- redo_fd_request();
+ setprm.head != 2)
return -EINVAL;
- }
UDT = dtp;
set_capacity(floppy->disk, UDT->blocks);
@@ -1910,6 +1945,10 @@ static const struct block_device_operations floppy_fops = {
.revalidate_disk= floppy_revalidate,
};
+static const struct blk_mq_ops ataflop_mq_ops = {
+ .queue_rq = ataflop_queue_rq,
+};
+
static struct kobject *floppy_find(dev_t dev, int *part, void *data)
{
int drive = *part & 3;
@@ -1923,6 +1962,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
static int __init atari_floppy_init (void)
{
int i;
+ int ret;
if (!MACH_IS_ATARI)
/* Amiga, Mac, ... don't have Atari-compatible floppy :-) */
@@ -1933,8 +1973,19 @@ static int __init atari_floppy_init (void)
for (i = 0; i < FD_MAX_UNITS; i++) {
unit[i].disk = alloc_disk(1);
- if (!unit[i].disk)
- goto Enomem;
+ if (!unit[i].disk) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ unit[i].disk->queue = blk_mq_init_sq_queue(&unit[i].tag_set,
+ &ataflop_mq_ops, 2,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(unit[i].disk->queue)) {
+ ret = PTR_ERR(unit[i].disk->queue);
+ unit[i].disk->queue = NULL;
+ goto err;
+ }
}
if (UseTrackbuffer < 0)
@@ -1951,7 +2002,8 @@ static int __init atari_floppy_init (void)
DMABuffer = atari_stram_alloc(BUFFER_SIZE+512, "ataflop");
if (!DMABuffer) {
printk(KERN_ERR "atari_floppy_init: cannot get dma buffer\n");
- goto Enomem;
+ ret = -ENOMEM;
+ goto err;
}
TrackBuffer = DMABuffer + 512;
PhysDMABuffer = atari_stram_to_phys(DMABuffer);
@@ -1966,10 +2018,6 @@ static int __init atari_floppy_init (void)
sprintf(unit[i].disk->disk_name, "fd%d", i);
unit[i].disk->fops = &floppy_fops;
unit[i].disk->private_data = &unit[i];
- unit[i].disk->queue = blk_init_queue(do_fd_request,
- &ataflop_lock);
- if (!unit[i].disk->queue)
- goto Enomem;
set_capacity(unit[i].disk, MAX_DISK_SIZE * 2);
add_disk(unit[i].disk);
}
@@ -1983,17 +2031,23 @@ static int __init atari_floppy_init (void)
config_types();
return 0;
-Enomem:
- while (i--) {
- struct request_queue *q = unit[i].disk->queue;
- put_disk(unit[i].disk);
- if (q)
- blk_cleanup_queue(q);
- }
+err:
+ do {
+ struct gendisk *disk = unit[i].disk;
+
+ if (disk) {
+ if (disk->queue) {
+ blk_cleanup_queue(disk->queue);
+ disk->queue = NULL;
+ }
+ blk_mq_free_tag_set(&unit[i].tag_set);
+ put_disk(unit[i].disk);
+ }
+ } while (i--);
unregister_blkdev(FLOPPY_MAJOR, "fd");
- return -ENOMEM;
+ return ret;
}
#ifndef MODULE
@@ -2040,11 +2094,10 @@ static void __exit atari_floppy_exit(void)
int i;
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
for (i = 0; i < FD_MAX_UNITS; i++) {
- struct request_queue *q = unit[i].disk->queue;
-
del_gendisk(unit[i].disk);
+ blk_cleanup_queue(unit[i].disk->queue);
+ blk_mq_free_tag_set(&unit[i].tag_set);
put_disk(unit[i].disk);
- blk_cleanup_queue(q);
}
unregister_blkdev(FLOPPY_MAJOR, "fd");
diff --git a/drivers/block/drbd/Kconfig b/drivers/block/drbd/Kconfig
index 87aab6910d2d..52d885cdccb5 100644
--- a/drivers/block/drbd/Kconfig
+++ b/drivers/block/drbd/Kconfig
@@ -11,7 +11,6 @@ config BLK_DEV_DRBD
depends on PROC_FS && INET
select LRU_CACHE
select LIBCRC32C
- default n
help
NOTE: In order to authenticate connections you have to select
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index e35a234b0a8f..1e47db57b9d2 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -429,7 +429,7 @@ enum {
__EE_CALL_AL_COMPLETE_IO,
__EE_MAY_SET_IN_SYNC,
- /* is this a TRIM aka REQ_DISCARD? */
+ /* is this a TRIM aka REQ_OP_DISCARD? */
__EE_IS_TRIM,
/* In case a barrier failed,
@@ -724,10 +724,10 @@ struct drbd_connection {
struct list_head transfer_log; /* all requests not yet fully processed */
struct crypto_shash *cram_hmac_tfm;
- struct crypto_ahash *integrity_tfm; /* checksums we compute, updates protected by connection->data->mutex */
- struct crypto_ahash *peer_integrity_tfm; /* checksums we verify, only accessed from receiver thread */
- struct crypto_ahash *csums_tfm;
- struct crypto_ahash *verify_tfm;
+ struct crypto_shash *integrity_tfm; /* checksums we compute, updates protected by connection->data->mutex */
+ struct crypto_shash *peer_integrity_tfm; /* checksums we verify, only accessed from receiver thread */
+ struct crypto_shash *csums_tfm;
+ struct crypto_shash *verify_tfm;
void *int_dig_in;
void *int_dig_vv;
@@ -1531,8 +1531,9 @@ static inline void ov_out_of_sync_print(struct drbd_device *device)
}
-extern void drbd_csum_bio(struct crypto_ahash *, struct bio *, void *);
-extern void drbd_csum_ee(struct crypto_ahash *, struct drbd_peer_request *, void *);
+extern void drbd_csum_bio(struct crypto_shash *, struct bio *, void *);
+extern void drbd_csum_ee(struct crypto_shash *, struct drbd_peer_request *,
+ void *);
/* worker callbacks */
extern int w_e_end_data_req(struct drbd_work *, int);
extern int w_e_end_rsdata_req(struct drbd_work *, int);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index ef8212a4b73e..55fd104f1ed4 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1377,7 +1377,7 @@ void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd
struct p_data *dp, int data_size)
{
if (peer_device->connection->peer_integrity_tfm)
- data_size -= crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
+ data_size -= crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
_drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
dp->block_id);
}
@@ -1673,7 +1673,7 @@ static u32 bio_flags_to_wire(struct drbd_connection *connection,
return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
}
-/* Used to send write or TRIM aka REQ_DISCARD requests
+/* Used to send write or TRIM aka REQ_OP_DISCARD requests
* R_PRIMARY -> Peer (P_DATA, P_TRIM)
*/
int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
@@ -1690,7 +1690,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
sock = &peer_device->connection->data;
p = drbd_prepare_command(peer_device, sock);
digest_size = peer_device->connection->integrity_tfm ?
- crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;
+ crypto_shash_digestsize(peer_device->connection->integrity_tfm) : 0;
if (!p)
return -EIO;
@@ -1796,7 +1796,7 @@ int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
p = drbd_prepare_command(peer_device, sock);
digest_size = peer_device->connection->integrity_tfm ?
- crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;
+ crypto_shash_digestsize(peer_device->connection->integrity_tfm) : 0;
if (!p)
return -EIO;
@@ -2557,11 +2557,11 @@ void conn_free_crypto(struct drbd_connection *connection)
{
drbd_free_sock(connection);
- crypto_free_ahash(connection->csums_tfm);
- crypto_free_ahash(connection->verify_tfm);
+ crypto_free_shash(connection->csums_tfm);
+ crypto_free_shash(connection->verify_tfm);
crypto_free_shash(connection->cram_hmac_tfm);
- crypto_free_ahash(connection->integrity_tfm);
- crypto_free_ahash(connection->peer_integrity_tfm);
+ crypto_free_shash(connection->integrity_tfm);
+ crypto_free_shash(connection->peer_integrity_tfm);
kfree(connection->int_dig_in);
kfree(connection->int_dig_vv);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index b4f02768ba47..d15703b1ffe8 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -2303,10 +2303,10 @@ check_net_options(struct drbd_connection *connection, struct net_conf *new_net_c
}
struct crypto {
- struct crypto_ahash *verify_tfm;
- struct crypto_ahash *csums_tfm;
+ struct crypto_shash *verify_tfm;
+ struct crypto_shash *csums_tfm;
struct crypto_shash *cram_hmac_tfm;
- struct crypto_ahash *integrity_tfm;
+ struct crypto_shash *integrity_tfm;
};
static int
@@ -2324,36 +2324,21 @@ alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg)
return NO_ERROR;
}
-static int
-alloc_ahash(struct crypto_ahash **tfm, char *tfm_name, int err_alg)
-{
- if (!tfm_name[0])
- return NO_ERROR;
-
- *tfm = crypto_alloc_ahash(tfm_name, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(*tfm)) {
- *tfm = NULL;
- return err_alg;
- }
-
- return NO_ERROR;
-}
-
static enum drbd_ret_code
alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
{
char hmac_name[CRYPTO_MAX_ALG_NAME];
enum drbd_ret_code rv;
- rv = alloc_ahash(&crypto->csums_tfm, new_net_conf->csums_alg,
+ rv = alloc_shash(&crypto->csums_tfm, new_net_conf->csums_alg,
ERR_CSUMS_ALG);
if (rv != NO_ERROR)
return rv;
- rv = alloc_ahash(&crypto->verify_tfm, new_net_conf->verify_alg,
+ rv = alloc_shash(&crypto->verify_tfm, new_net_conf->verify_alg,
ERR_VERIFY_ALG);
if (rv != NO_ERROR)
return rv;
- rv = alloc_ahash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
+ rv = alloc_shash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
ERR_INTEGRITY_ALG);
if (rv != NO_ERROR)
return rv;
@@ -2371,9 +2356,9 @@ alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
static void free_crypto(struct crypto *crypto)
{
crypto_free_shash(crypto->cram_hmac_tfm);
- crypto_free_ahash(crypto->integrity_tfm);
- crypto_free_ahash(crypto->csums_tfm);
- crypto_free_ahash(crypto->verify_tfm);
+ crypto_free_shash(crypto->integrity_tfm);
+ crypto_free_shash(crypto->csums_tfm);
+ crypto_free_shash(crypto->verify_tfm);
}
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
@@ -2450,17 +2435,17 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
rcu_assign_pointer(connection->net_conf, new_net_conf);
if (!rsr) {
- crypto_free_ahash(connection->csums_tfm);
+ crypto_free_shash(connection->csums_tfm);
connection->csums_tfm = crypto.csums_tfm;
crypto.csums_tfm = NULL;
}
if (!ovr) {
- crypto_free_ahash(connection->verify_tfm);
+ crypto_free_shash(connection->verify_tfm);
connection->verify_tfm = crypto.verify_tfm;
crypto.verify_tfm = NULL;
}
- crypto_free_ahash(connection->integrity_tfm);
+ crypto_free_shash(connection->integrity_tfm);
connection->integrity_tfm = crypto.integrity_tfm;
if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
/* Do this without trying to take connection->data.mutex again. */
diff --git a/drivers/block/drbd/drbd_protocol.h b/drivers/block/drbd/drbd_protocol.h
index c3081f93051c..48dabbb21e11 100644
--- a/drivers/block/drbd/drbd_protocol.h
+++ b/drivers/block/drbd/drbd_protocol.h
@@ -57,7 +57,7 @@ enum drbd_packet {
P_PROTOCOL_UPDATE = 0x2d, /* data sock: is used in established connections */
/* 0x2e to 0x30 reserved, used in drbd 9 */
- /* REQ_DISCARD. We used "discard" in different contexts before,
+ /* REQ_OP_DISCARD. We used "discard" in different contexts before,
* which is why I chose TRIM here, to disambiguate. */
P_TRIM = 0x31,
@@ -126,7 +126,7 @@ struct p_header100 {
#define DP_UNPLUG 8 /* not used anymore */
#define DP_FUA 16 /* equals REQ_FUA */
#define DP_FLUSH 32 /* equals REQ_PREFLUSH */
-#define DP_DISCARD 64 /* equals REQ_DISCARD */
+#define DP_DISCARD 64 /* equals REQ_OP_DISCARD */
#define DP_SEND_RECEIVE_ACK 128 /* This is a proto B write request */
#define DP_SEND_WRITE_ACK 256 /* This is a proto C write request */
#define DP_WSAME 512 /* equiv. REQ_WRITE_SAME */
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 75f6b47169e6..fc67fd853375 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1732,7 +1732,7 @@ static int receive_Barrier(struct drbd_connection *connection, struct packet_inf
}
/* quick wrapper in case payload size != request_size (write same) */
-static void drbd_csum_ee_size(struct crypto_ahash *h,
+static void drbd_csum_ee_size(struct crypto_shash *h,
struct drbd_peer_request *r, void *d,
unsigned int payload_size)
{
@@ -1769,7 +1769,7 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
digest_size = 0;
if (!trim && peer_device->connection->peer_integrity_tfm) {
- digest_size = crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
+ digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
/*
* FIXME: Receive the incoming digest into the receive buffer
* here, together with its struct p_data?
@@ -1905,7 +1905,7 @@ static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_req
digest_size = 0;
if (peer_device->connection->peer_integrity_tfm) {
- digest_size = crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
+ digest_size = crypto_shash_digestsize(peer_device->connection->peer_integrity_tfm);
err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
if (err)
return err;
@@ -3542,7 +3542,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
int p_proto, p_discard_my_data, p_two_primaries, cf;
struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
char integrity_alg[SHARED_SECRET_MAX] = "";
- struct crypto_ahash *peer_integrity_tfm = NULL;
+ struct crypto_shash *peer_integrity_tfm = NULL;
void *int_dig_in = NULL, *int_dig_vv = NULL;
p_proto = be32_to_cpu(p->protocol);
@@ -3623,7 +3623,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
* change.
*/
- peer_integrity_tfm = crypto_alloc_ahash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
+	peer_integrity_tfm = crypto_alloc_shash(integrity_alg, 0, 0);
if (IS_ERR(peer_integrity_tfm)) {
peer_integrity_tfm = NULL;
drbd_err(connection, "peer data-integrity-alg %s not supported\n",
@@ -3631,7 +3631,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
goto disconnect;
}
- hash_size = crypto_ahash_digestsize(peer_integrity_tfm);
+ hash_size = crypto_shash_digestsize(peer_integrity_tfm);
int_dig_in = kmalloc(hash_size, GFP_KERNEL);
int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
if (!(int_dig_in && int_dig_vv)) {
@@ -3661,7 +3661,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
mutex_unlock(&connection->resource->conf_update);
mutex_unlock(&connection->data.mutex);
- crypto_free_ahash(connection->peer_integrity_tfm);
+ crypto_free_shash(connection->peer_integrity_tfm);
kfree(connection->int_dig_in);
kfree(connection->int_dig_vv);
connection->peer_integrity_tfm = peer_integrity_tfm;
@@ -3679,7 +3679,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
disconnect_rcu_unlock:
rcu_read_unlock();
disconnect:
- crypto_free_ahash(peer_integrity_tfm);
+ crypto_free_shash(peer_integrity_tfm);
kfree(int_dig_in);
kfree(int_dig_vv);
conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
@@ -3691,15 +3691,16 @@ disconnect:
* return: NULL (alg name was "")
* ERR_PTR(error) if something goes wrong
* or the crypto hash ptr, if it worked out ok. */
-static struct crypto_ahash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device,
+static struct crypto_shash *drbd_crypto_alloc_digest_safe(
+ const struct drbd_device *device,
const char *alg, const char *name)
{
- struct crypto_ahash *tfm;
+ struct crypto_shash *tfm;
if (!alg[0])
return NULL;
- tfm = crypto_alloc_ahash(alg, 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_shash(alg, 0, 0);
if (IS_ERR(tfm)) {
drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n",
alg, name, PTR_ERR(tfm));
@@ -3752,8 +3753,8 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
struct drbd_device *device;
struct p_rs_param_95 *p;
unsigned int header_size, data_size, exp_max_sz;
- struct crypto_ahash *verify_tfm = NULL;
- struct crypto_ahash *csums_tfm = NULL;
+ struct crypto_shash *verify_tfm = NULL;
+ struct crypto_shash *csums_tfm = NULL;
struct net_conf *old_net_conf, *new_net_conf = NULL;
struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
const int apv = connection->agreed_pro_version;
@@ -3900,14 +3901,14 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
if (verify_tfm) {
strcpy(new_net_conf->verify_alg, p->verify_alg);
new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
- crypto_free_ahash(peer_device->connection->verify_tfm);
+ crypto_free_shash(peer_device->connection->verify_tfm);
peer_device->connection->verify_tfm = verify_tfm;
drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg);
}
if (csums_tfm) {
strcpy(new_net_conf->csums_alg, p->csums_alg);
new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
- crypto_free_ahash(peer_device->connection->csums_tfm);
+ crypto_free_shash(peer_device->connection->csums_tfm);
peer_device->connection->csums_tfm = csums_tfm;
drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg);
}
@@ -3951,9 +3952,9 @@ disconnect:
mutex_unlock(&connection->resource->conf_update);
/* just for completeness: actually not needed,
* as this is not reached if csums_tfm was ok. */
- crypto_free_ahash(csums_tfm);
+ crypto_free_shash(csums_tfm);
/* but free the verify_tfm again, if csums_tfm did not work out */
- crypto_free_ahash(verify_tfm);
+ crypto_free_shash(verify_tfm);
conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
return -EIO;
}
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 19cac36e9737..1c4da17e902e 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -650,7 +650,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
case DISCARD_COMPLETED_NOTSUPP:
case DISCARD_COMPLETED_WITH_ERROR:
/* I'd rather not detach from local disk just because it
- * failed a REQ_DISCARD. */
+ * failed a REQ_OP_DISCARD. */
mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
break;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index b8f77e83d456..99255d0c9e2f 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -152,7 +152,7 @@ void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(l
do_wake = list_empty(block_id == ID_SYNCER ? &device->sync_ee : &device->active_ee);
- /* FIXME do we want to detach for failed REQ_DISCARD?
+ /* FIXME do we want to detach for failed REQ_OP_DISCARD?
* ((peer_req->flags & (EE_WAS_ERROR|EE_IS_TRIM)) == EE_WAS_ERROR) */
if (peer_req->flags & EE_WAS_ERROR)
__drbd_chk_io_error(device, DRBD_WRITE_ERROR);
@@ -295,60 +295,61 @@ void drbd_request_endio(struct bio *bio)
complete_master_bio(device, &m);
}
-void drbd_csum_ee(struct crypto_ahash *tfm, struct drbd_peer_request *peer_req, void *digest)
+void drbd_csum_ee(struct crypto_shash *tfm, struct drbd_peer_request *peer_req, void *digest)
{
- AHASH_REQUEST_ON_STACK(req, tfm);
- struct scatterlist sg;
+ SHASH_DESC_ON_STACK(desc, tfm);
struct page *page = peer_req->pages;
struct page *tmp;
unsigned len;
+ void *src;
- ahash_request_set_tfm(req, tfm);
- ahash_request_set_callback(req, 0, NULL, NULL);
+ desc->tfm = tfm;
+ desc->flags = 0;
- sg_init_table(&sg, 1);
- crypto_ahash_init(req);
+ crypto_shash_init(desc);
+ src = kmap_atomic(page);
while ((tmp = page_chain_next(page))) {
/* all but the last page will be fully used */
- sg_set_page(&sg, page, PAGE_SIZE, 0);
- ahash_request_set_crypt(req, &sg, NULL, sg.length);
- crypto_ahash_update(req);
+ crypto_shash_update(desc, src, PAGE_SIZE);
+ kunmap_atomic(src);
page = tmp;
+ src = kmap_atomic(page);
}
/* and now the last, possibly only partially used page */
len = peer_req->i.size & (PAGE_SIZE - 1);
- sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
- ahash_request_set_crypt(req, &sg, digest, sg.length);
- crypto_ahash_finup(req);
- ahash_request_zero(req);
+ crypto_shash_update(desc, src, len ?: PAGE_SIZE);
+ kunmap_atomic(src);
+
+ crypto_shash_final(desc, digest);
+ shash_desc_zero(desc);
}
-void drbd_csum_bio(struct crypto_ahash *tfm, struct bio *bio, void *digest)
+void drbd_csum_bio(struct crypto_shash *tfm, struct bio *bio, void *digest)
{
- AHASH_REQUEST_ON_STACK(req, tfm);
- struct scatterlist sg;
+ SHASH_DESC_ON_STACK(desc, tfm);
struct bio_vec bvec;
struct bvec_iter iter;
- ahash_request_set_tfm(req, tfm);
- ahash_request_set_callback(req, 0, NULL, NULL);
+ desc->tfm = tfm;
+ desc->flags = 0;
- sg_init_table(&sg, 1);
- crypto_ahash_init(req);
+ crypto_shash_init(desc);
bio_for_each_segment(bvec, bio, iter) {
- sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
- ahash_request_set_crypt(req, &sg, NULL, sg.length);
- crypto_ahash_update(req);
+ u8 *src;
+
+ src = kmap_atomic(bvec.bv_page);
+ crypto_shash_update(desc, src + bvec.bv_offset, bvec.bv_len);
+ kunmap_atomic(src);
+
/* REQ_OP_WRITE_SAME has only one segment,
* checksum the payload only once. */
if (bio_op(bio) == REQ_OP_WRITE_SAME)
break;
}
- ahash_request_set_crypt(req, NULL, digest, 0);
- crypto_ahash_final(req);
- ahash_request_zero(req);
+ crypto_shash_final(desc, digest);
+ shash_desc_zero(desc);
}
/* MAYBE merge common code with w_e_end_ov_req */
@@ -367,7 +368,7 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
goto out;
- digest_size = crypto_ahash_digestsize(peer_device->connection->csums_tfm);
+ digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
sector_t sector = peer_req->i.sector;
@@ -1205,7 +1206,7 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
* a real fix would be much more involved,
* introducing more locking mechanisms */
if (peer_device->connection->csums_tfm) {
- digest_size = crypto_ahash_digestsize(peer_device->connection->csums_tfm);
+ digest_size = crypto_shash_digestsize(peer_device->connection->csums_tfm);
D_ASSERT(device, digest_size == di->digest_size);
digest = kmalloc(digest_size, GFP_NOIO);
}
@@ -1255,7 +1256,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
if (unlikely(cancel))
goto out;
- digest_size = crypto_ahash_digestsize(peer_device->connection->verify_tfm);
+ digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (!digest) {
err = 1; /* terminate the connection in case the allocation failed */
@@ -1327,7 +1328,7 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
di = peer_req->digest;
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
- digest_size = crypto_ahash_digestsize(peer_device->connection->verify_tfm);
+ digest_size = crypto_shash_digestsize(peer_device->connection->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO);
if (digest) {
drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
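The drbd hunks above move from the asynchronous ahash interface (scatterlists plus a request object) to the synchronous shash interface, which operates on directly mapped buffers; highmem pages therefore need an explicit kmap_atomic() around each update. A minimal, self-contained sketch of the same pattern follows; the "crc32c" algorithm name and the ex_hash_page() helper are illustrative assumptions, not drbd's actual configuration:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/highmem.h>

/* Hash one page with the synchronous shash API, mirroring the shape of
 * drbd_csum_ee()/drbd_csum_bio() above. "crc32c" is an illustrative
 * choice; drbd takes the algorithm name from its net configuration. */
static int ex_hash_page(struct page *page, unsigned int len, u8 *digest)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("crc32c", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);
		void *src;

		desc->tfm = tfm;
		desc->flags = 0;

		src = kmap_atomic(page);	/* shash wants a mapped buffer */
		err = crypto_shash_init(desc) ?:
		      crypto_shash_update(desc, src, len) ?:
		      crypto_shash_final(desc, digest);
		kunmap_atomic(src);
		shash_desc_zero(desc);	/* scrub the on-stack hash state */
	}

	crypto_free_shash(tfm);
	return err;
}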
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 48f622728ce6..a8cfa011c284 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -252,13 +252,13 @@ static int allowed_drive_mask = 0x33;
static int irqdma_allocated;
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/cdrom.h> /* for the compatibility eject ioctl */
#include <linux/completion.h>
+static LIST_HEAD(floppy_reqs);
static struct request *current_req;
-static void do_fd_request(struct request_queue *q);
static int set_next_request(void);
#ifndef fd_get_dma_residue
@@ -414,10 +414,10 @@ static struct floppy_drive_struct drive_state[N_DRIVE];
static struct floppy_write_errors write_errors[N_DRIVE];
static struct timer_list motor_off_timer[N_DRIVE];
static struct gendisk *disks[N_DRIVE];
+static struct blk_mq_tag_set tag_sets[N_DRIVE];
static struct block_device *opened_bdev[N_DRIVE];
static DEFINE_MUTEX(open_lock);
static struct floppy_raw_cmd *raw_cmd, default_raw_cmd;
-static int fdc_queue;
/*
* This struct defines the different floppy types.
@@ -2216,8 +2216,9 @@ static void floppy_end_request(struct request *req, blk_status_t error)
/* current_count_sectors can be zero if transfer failed */
if (error)
nr_sectors = blk_rq_cur_sectors(req);
- if (__blk_end_request(req, error, nr_sectors << 9))
+ if (blk_update_request(req, error, nr_sectors << 9))
return;
+ __blk_mq_end_request(req, error);
/* We're done with the request */
floppy_off(drive);
@@ -2797,27 +2798,14 @@ static int make_raw_rw_request(void)
return 2;
}
-/*
- * Round-robin between our available drives, doing one request from each
- */
static int set_next_request(void)
{
- struct request_queue *q;
- int old_pos = fdc_queue;
-
- do {
- q = disks[fdc_queue]->queue;
- if (++fdc_queue == N_DRIVE)
- fdc_queue = 0;
- if (q) {
- current_req = blk_fetch_request(q);
- if (current_req) {
- current_req->error_count = 0;
- break;
- }
- }
- } while (fdc_queue != old_pos);
-
+ current_req = list_first_entry_or_null(&floppy_reqs, struct request,
+ queuelist);
+ if (current_req) {
+ current_req->error_count = 0;
+ list_del_init(&current_req->queuelist);
+ }
return current_req != NULL;
}
@@ -2901,29 +2889,38 @@ static void process_fd_request(void)
schedule_bh(redo_fd_request);
}
-static void do_fd_request(struct request_queue *q)
+static blk_status_t floppy_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
+ blk_mq_start_request(bd->rq);
+
if (WARN(max_buffer_sectors == 0,
"VFS: %s called on non-open device\n", __func__))
- return;
+ return BLK_STS_IOERR;
if (WARN(atomic_read(&usage_count) == 0,
"warning: usage count=0, current_req=%p sect=%ld flags=%llx\n",
current_req, (long)blk_rq_pos(current_req),
(unsigned long long) current_req->cmd_flags))
- return;
+ return BLK_STS_IOERR;
+
+ spin_lock_irq(&floppy_lock);
+ list_add_tail(&bd->rq->queuelist, &floppy_reqs);
+ spin_unlock_irq(&floppy_lock);
if (test_and_set_bit(0, &fdc_busy)) {
/* fdc busy, this new request will be treated when the
current one is done */
is_alive(__func__, "old request running");
- return;
+ return BLK_STS_OK;
}
+
command_status = FD_COMMAND_NONE;
__reschedule_timeout(MAXTIMEOUT, "fd_request");
set_fdc(0);
process_fd_request();
is_alive(__func__, "");
+ return BLK_STS_OK;
}
static const struct cont_t poll_cont = {
@@ -3467,6 +3464,9 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
(struct floppy_struct **)&outparam);
if (ret)
return ret;
+ memcpy(&inparam.g, outparam,
+ offsetof(struct floppy_struct, name));
+ outparam = &inparam.g;
break;
case FDMSGON:
UDP->flags |= FTD_MSG;
@@ -4483,6 +4483,10 @@ static struct platform_driver floppy_driver = {
},
};
+static const struct blk_mq_ops floppy_mq_ops = {
+ .queue_rq = floppy_queue_rq,
+};
+
static struct platform_device floppy_device[N_DRIVE];
static bool floppy_available(int drive)
@@ -4530,9 +4534,12 @@ static int __init do_floppy_init(void)
goto out_put_disk;
}
- disks[drive]->queue = blk_init_queue(do_fd_request, &floppy_lock);
- if (!disks[drive]->queue) {
- err = -ENOMEM;
+ disks[drive]->queue = blk_mq_init_sq_queue(&tag_sets[drive],
+ &floppy_mq_ops, 2,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(disks[drive]->queue)) {
+ err = PTR_ERR(disks[drive]->queue);
+ disks[drive]->queue = NULL;
goto out_put_disk;
}
@@ -4676,7 +4683,7 @@ static int __init do_floppy_init(void)
/* to be cleaned up... */
disks[drive]->private_data = (void *)(long)drive;
disks[drive]->flags |= GENHD_FL_REMOVABLE;
- device_add_disk(&floppy_device[drive].dev, disks[drive]);
+ device_add_disk(&floppy_device[drive].dev, disks[drive], NULL);
}
return 0;
@@ -4705,6 +4712,7 @@ out_put_disk:
del_timer_sync(&motor_off_timer[drive]);
blk_cleanup_queue(disks[drive]->queue);
disks[drive]->queue = NULL;
+ blk_mq_free_tag_set(&tag_sets[drive]);
}
put_disk(disks[drive]);
}
@@ -4932,6 +4940,7 @@ static void __exit floppy_module_exit(void)
platform_device_unregister(&floppy_device[drive]);
}
blk_cleanup_queue(disks[drive]->queue);
+ blk_mq_free_tag_set(&tag_sets[drive]);
/*
* These disks have not called add_disk(). Don't put down
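The floppy conversion above is an instance of a pattern repeated in the paride drivers further down: ->queue_rq() parks incoming requests on a driver-private list and the existing state machine drains them later. A stripped-down sketch, assuming a single shared list and lock (all "ex_"-prefixed names are hypothetical):

#include <linux/blk-mq.h>
#include <linux/err.h>

static LIST_HEAD(ex_rq_list);
static DEFINE_SPINLOCK(ex_lock);

static blk_status_t ex_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	blk_mq_start_request(bd->rq);

	spin_lock_irq(&ex_lock);
	list_add_tail(&bd->rq->queuelist, &ex_rq_list);
	spin_unlock_irq(&ex_lock);

	/* kick the driver's existing state machine here */
	return BLK_STS_OK;
}

static const struct blk_mq_ops ex_mq_ops = {
	.queue_rq = ex_queue_rq,
};

static struct blk_mq_tag_set ex_tag_set;

/* single-queue setup, replacing blk_init_queue() in the probe path */
static int ex_init_queue(struct gendisk *disk)
{
	disk->queue = blk_mq_init_sq_queue(&ex_tag_set, &ex_mq_ops, 2,
					   BLK_MQ_F_SHOULD_MERGE);
	if (IS_ERR(disk->queue)) {
		int err = PTR_ERR(disk->queue);

		disk->queue = NULL;
		return err;
	}
	return 0;
}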
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ea9debf59b22..abad6d15f956 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -77,6 +77,7 @@
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/ioprio.h>
+#include <linux/blk-cgroup.h>
#include "loop.h"
@@ -1760,8 +1761,8 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
/* always use the first bio's css */
#ifdef CONFIG_BLK_CGROUP
- if (cmd->use_aio && rq->bio && rq->bio->bi_css) {
- cmd->css = rq->bio->bi_css;
+ if (cmd->use_aio && rq->bio && rq->bio->bi_blkg) {
+ cmd->css = &bio_blkcg(rq->bio)->css;
css_get(cmd->css);
} else
#endif
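With bi_css gone, loop resolves the cgroup css through the bio's blkg association instead. A hedged sketch of that lookup (ex_bio_css() is a hypothetical helper; the caller still owns the css_get()/css_put() pairing, as in the hunk above):

#include <linux/blk-cgroup.h>

static struct cgroup_subsys_state *ex_bio_css(struct bio *bio)
{
	/* bi_blkg is only set when the bio was associated with a blkcg */
	if (bio && bio->bi_blkg)
		return &bio_blkcg(bio)->css;
	return NULL;
}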
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index d0666f5ce003..dfc8de6ce525 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -1862,11 +1862,9 @@ static int exec_drive_taskfile(struct driver_data *dd,
if (IS_ERR(outbuf))
return PTR_ERR(outbuf);
- outbuf_dma = pci_map_single(dd->pdev,
- outbuf,
- taskout,
- DMA_TO_DEVICE);
- if (pci_dma_mapping_error(dd->pdev, outbuf_dma)) {
+ outbuf_dma = dma_map_single(&dd->pdev->dev, outbuf,
+ taskout, DMA_TO_DEVICE);
+ if (dma_mapping_error(&dd->pdev->dev, outbuf_dma)) {
err = -ENOMEM;
goto abort;
}
@@ -1880,10 +1878,9 @@ static int exec_drive_taskfile(struct driver_data *dd,
inbuf = NULL;
goto abort;
}
- inbuf_dma = pci_map_single(dd->pdev,
- inbuf,
- taskin, DMA_FROM_DEVICE);
- if (pci_dma_mapping_error(dd->pdev, inbuf_dma)) {
+ inbuf_dma = dma_map_single(&dd->pdev->dev, inbuf,
+ taskin, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&dd->pdev->dev, inbuf_dma)) {
err = -ENOMEM;
goto abort;
}
@@ -2002,11 +1999,11 @@ static int exec_drive_taskfile(struct driver_data *dd,
/* reclaim the DMA buffers.*/
if (inbuf_dma)
- pci_unmap_single(dd->pdev, inbuf_dma,
- taskin, DMA_FROM_DEVICE);
+ dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin,
+ DMA_FROM_DEVICE);
if (outbuf_dma)
- pci_unmap_single(dd->pdev, outbuf_dma,
- taskout, DMA_TO_DEVICE);
+ dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout,
+ DMA_TO_DEVICE);
inbuf_dma = 0;
outbuf_dma = 0;
@@ -2053,11 +2050,11 @@ static int exec_drive_taskfile(struct driver_data *dd,
}
abort:
if (inbuf_dma)
- pci_unmap_single(dd->pdev, inbuf_dma,
- taskin, DMA_FROM_DEVICE);
+ dma_unmap_single(&dd->pdev->dev, inbuf_dma, taskin,
+ DMA_FROM_DEVICE);
if (outbuf_dma)
- pci_unmap_single(dd->pdev, outbuf_dma,
- taskout, DMA_TO_DEVICE);
+ dma_unmap_single(&dd->pdev->dev, outbuf_dma, taskout,
+ DMA_TO_DEVICE);
kfree(outbuf);
kfree(inbuf);
@@ -3861,7 +3858,7 @@ skip_create_disk:
set_capacity(dd->disk, capacity);
/* Enable the block device and add it to /dev */
- device_add_disk(&dd->pdev->dev, dd->disk);
+ device_add_disk(&dd->pdev->dev, dd->disk, NULL);
dd->bdev = bdget_disk(dd->disk, 0);
/*
@@ -4216,18 +4213,10 @@ static int mtip_pci_probe(struct pci_dev *pdev,
goto iomap_err;
}
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- rv = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-
- if (rv) {
- rv = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
- if (rv) {
- dev_warn(&pdev->dev,
- "64-bit DMA enable failed\n");
- goto setmask_err;
- }
- }
+ rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rv) {
+ dev_warn(&pdev->dev, "64-bit DMA enable failed\n");
+ goto setmask_err;
}
/* Copy the info we may need later into the private data structure. */
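The mtip32xx hunks swap the legacy pci_map_*/pci_dma_mapping_error() wrappers for the generic DMA API on &pdev->dev. The streaming-mapping idiom, reduced to its essentials (ex_dma_to_device() is a hypothetical helper):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int ex_dma_to_device(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t addr;

	addr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, addr))
		return -ENOMEM;	/* mapping failed; nothing to unmap */

	/* ... program "addr" into the hardware and wait for completion ... */

	dma_unmap_single(&pdev->dev, addr, len, DMA_TO_DEVICE);
	return 0;
}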
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index d81781f22dba..34e0030f0592 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -87,10 +87,10 @@ struct nullb {
#ifdef CONFIG_BLK_DEV_ZONED
int null_zone_init(struct nullb_device *dev);
void null_zone_exit(struct nullb_device *dev);
-blk_status_t null_zone_report(struct nullb *nullb,
- struct nullb_cmd *cmd);
-void null_zone_write(struct nullb_cmd *cmd);
-void null_zone_reset(struct nullb_cmd *cmd);
+blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio);
+void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+ unsigned int nr_sectors);
+void null_zone_reset(struct nullb_cmd *cmd, sector_t sector);
#else
static inline int null_zone_init(struct nullb_device *dev)
{
@@ -98,11 +98,14 @@ static inline int null_zone_init(struct nullb_device *dev)
}
static inline void null_zone_exit(struct nullb_device *dev) {}
static inline blk_status_t null_zone_report(struct nullb *nullb,
- struct nullb_cmd *cmd)
+ struct bio *bio)
{
return BLK_STS_NOTSUPP;
}
-static inline void null_zone_write(struct nullb_cmd *cmd) {}
-static inline void null_zone_reset(struct nullb_cmd *cmd) {}
+static inline void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+ unsigned int nr_sectors)
+{
+}
+static inline void null_zone_reset(struct nullb_cmd *cmd, sector_t sector) {}
#endif /* CONFIG_BLK_DEV_ZONED */
#endif /* __NULL_BLK_H */
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 6127e3ff7b4b..e94591021682 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -606,20 +606,12 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
static void end_cmd(struct nullb_cmd *cmd)
{
- struct request_queue *q = NULL;
int queue_mode = cmd->nq->dev->queue_mode;
- if (cmd->rq)
- q = cmd->rq->q;
-
switch (queue_mode) {
case NULL_Q_MQ:
blk_mq_end_request(cmd->rq, cmd->error);
return;
- case NULL_Q_RQ:
- INIT_LIST_HEAD(&cmd->rq->queuelist);
- blk_end_request_all(cmd->rq, cmd->error);
- break;
case NULL_Q_BIO:
cmd->bio->bi_status = cmd->error;
bio_endio(cmd->bio);
@@ -627,15 +619,6 @@ static void end_cmd(struct nullb_cmd *cmd)
}
free_cmd(cmd);
-
- /* Restart queue if needed, as we are freeing a tag */
- if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- blk_start_queue_async(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
- }
}
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
@@ -1136,25 +1119,33 @@ static void null_stop_queue(struct nullb *nullb)
if (nullb->dev->queue_mode == NULL_Q_MQ)
blk_mq_stop_hw_queues(q);
- else {
- spin_lock_irq(q->queue_lock);
- blk_stop_queue(q);
- spin_unlock_irq(q->queue_lock);
- }
}
static void null_restart_queue_async(struct nullb *nullb)
{
struct request_queue *q = nullb->q;
- unsigned long flags;
if (nullb->dev->queue_mode == NULL_Q_MQ)
blk_mq_start_stopped_hw_queues(q, true);
- else {
- spin_lock_irqsave(q->queue_lock, flags);
- blk_start_queue_async(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static bool cmd_report_zone(struct nullb *nullb, struct nullb_cmd *cmd)
+{
+ struct nullb_device *dev = cmd->nq->dev;
+
+ if (dev->queue_mode == NULL_Q_BIO) {
+ if (bio_op(cmd->bio) == REQ_OP_ZONE_REPORT) {
+ cmd->error = null_zone_report(nullb, cmd->bio);
+ return true;
+ }
+ } else {
+ if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
+ cmd->error = null_zone_report(nullb, cmd->rq->bio);
+ return true;
+ }
}
+
+ return false;
}
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
@@ -1163,10 +1154,8 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
struct nullb *nullb = dev->nullb;
int err = 0;
- if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
- cmd->error = null_zone_report(nullb, cmd);
+ if (cmd_report_zone(nullb, cmd))
goto out;
- }
if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
struct request *rq = cmd->rq;
@@ -1180,17 +1169,8 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
/* race with timer */
if (atomic_long_read(&nullb->cur_bytes) > 0)
null_restart_queue_async(nullb);
- if (dev->queue_mode == NULL_Q_RQ) {
- struct request_queue *q = nullb->q;
-
- spin_lock_irq(q->queue_lock);
- rq->rq_flags |= RQF_DONTPREP;
- blk_requeue_request(q, rq);
- spin_unlock_irq(q->queue_lock);
- return BLK_STS_OK;
- } else
- /* requeue request */
- return BLK_STS_DEV_RESOURCE;
+ /* requeue request */
+ return BLK_STS_DEV_RESOURCE;
}
}
@@ -1234,10 +1214,24 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
cmd->error = errno_to_blk_status(err);
if (!cmd->error && dev->zoned) {
- if (req_op(cmd->rq) == REQ_OP_WRITE)
- null_zone_write(cmd);
- else if (req_op(cmd->rq) == REQ_OP_ZONE_RESET)
- null_zone_reset(cmd);
+ sector_t sector;
+ unsigned int nr_sectors;
+ int op;
+
+ if (dev->queue_mode == NULL_Q_BIO) {
+ op = bio_op(cmd->bio);
+ sector = cmd->bio->bi_iter.bi_sector;
+ nr_sectors = cmd->bio->bi_iter.bi_size >> 9;
+ } else {
+ op = req_op(cmd->rq);
+ sector = blk_rq_pos(cmd->rq);
+ nr_sectors = blk_rq_sectors(cmd->rq);
+ }
+
+ if (op == REQ_OP_WRITE)
+ null_zone_write(cmd, sector, nr_sectors);
+ else if (op == REQ_OP_ZONE_RESET)
+ null_zone_reset(cmd, sector);
}
out:
/* Complete IO by inline, softirq or timer */
@@ -1247,9 +1241,6 @@ out:
case NULL_Q_MQ:
blk_mq_complete_request(cmd->rq);
break;
- case NULL_Q_RQ:
- blk_complete_request(cmd->rq);
- break;
case NULL_Q_BIO:
/*
* XXX: no proper submitting cpu information available.
@@ -1318,30 +1309,6 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
-static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq)
-{
- pr_info("null: rq %p timed out\n", rq);
- __blk_complete_request(rq);
- return BLK_EH_DONE;
-}
-
-static int null_rq_prep_fn(struct request_queue *q, struct request *req)
-{
- struct nullb *nullb = q->queuedata;
- struct nullb_queue *nq = nullb_to_queue(nullb);
- struct nullb_cmd *cmd;
-
- cmd = alloc_cmd(nq, 0);
- if (cmd) {
- cmd->rq = req;
- req->special = cmd;
- return BLKPREP_OK;
- }
- blk_stop_queue(q);
-
- return BLKPREP_DEFER;
-}
-
static bool should_timeout_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
@@ -1360,27 +1327,6 @@ static bool should_requeue_request(struct request *rq)
return false;
}
-static void null_request_fn(struct request_queue *q)
-{
- struct request *rq;
-
- while ((rq = blk_fetch_request(q)) != NULL) {
- struct nullb_cmd *cmd = rq->special;
-
- /* just ignore the request */
- if (should_timeout_request(rq))
- continue;
- if (should_requeue_request(rq)) {
- blk_requeue_request(q, rq);
- continue;
- }
-
- spin_unlock_irq(q->queue_lock);
- null_handle_cmd(cmd);
- spin_lock_irq(q->queue_lock);
- }
-}
-
static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
pr_info("null: rq %p timed out\n", rq);
@@ -1735,24 +1681,6 @@ static int null_add_dev(struct nullb_device *dev)
rv = init_driver_queues(nullb);
if (rv)
goto out_cleanup_blk_queue;
- } else {
- nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock,
- dev->home_node);
- if (!nullb->q) {
- rv = -ENOMEM;
- goto out_cleanup_queues;
- }
-
- if (!null_setup_fault())
- goto out_cleanup_blk_queue;
-
- blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
- blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
- blk_queue_rq_timed_out(nullb->q, null_rq_timed_out_fn);
- nullb->q->rq_timeout = 5 * HZ;
- rv = init_driver_queues(nullb);
- if (rv)
- goto out_cleanup_blk_queue;
}
if (dev->mbps) {
@@ -1834,6 +1762,10 @@ static int __init null_init(void)
return -EINVAL;
}
+ if (g_queue_mode == NULL_Q_RQ) {
+ pr_err("null_blk: legacy IO path no longer available\n");
+ return -EINVAL;
+ }
if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
if (g_submit_queues != nr_online_nodes) {
pr_warn("null_blk: submit_queues param is set to %u.\n",
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index a979ca00d7be..7c6b86d98700 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -48,8 +48,8 @@ void null_zone_exit(struct nullb_device *dev)
kvfree(dev->zones);
}
-static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
- unsigned int zno, unsigned int nr_zones)
+static void null_zone_fill_bio(struct nullb_device *dev, struct bio *bio,
+ unsigned int zno, unsigned int nr_zones)
{
struct blk_zone_report_hdr *hdr = NULL;
struct bio_vec bvec;
@@ -57,7 +57,7 @@ static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
void *addr;
unsigned int zones_to_cpy;
- bio_for_each_segment(bvec, rq->bio, iter) {
+ bio_for_each_segment(bvec, bio, iter) {
addr = kmap_atomic(bvec.bv_page);
zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone);
@@ -84,29 +84,24 @@ static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq,
}
}
-blk_status_t null_zone_report(struct nullb *nullb,
- struct nullb_cmd *cmd)
+blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio)
{
struct nullb_device *dev = nullb->dev;
- struct request *rq = cmd->rq;
- unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
+ unsigned int zno = null_zone_no(dev, bio->bi_iter.bi_sector);
unsigned int nr_zones = dev->nr_zones - zno;
- unsigned int max_zones = (blk_rq_bytes(rq) /
- sizeof(struct blk_zone)) - 1;
+ unsigned int max_zones;
+ max_zones = (bio->bi_iter.bi_size / sizeof(struct blk_zone)) - 1;
nr_zones = min_t(unsigned int, nr_zones, max_zones);
-
- null_zone_fill_rq(nullb->dev, rq, zno, nr_zones);
+ null_zone_fill_bio(nullb->dev, bio, zno, nr_zones);
return BLK_STS_OK;
}
-void null_zone_write(struct nullb_cmd *cmd)
+void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
+ unsigned int nr_sectors)
{
struct nullb_device *dev = cmd->nq->dev;
- struct request *rq = cmd->rq;
- sector_t sector = blk_rq_pos(rq);
- unsigned int rq_sectors = blk_rq_sectors(rq);
unsigned int zno = null_zone_no(dev, sector);
struct blk_zone *zone = &dev->zones[zno];
@@ -118,7 +113,7 @@ void null_zone_write(struct nullb_cmd *cmd)
case BLK_ZONE_COND_EMPTY:
case BLK_ZONE_COND_IMP_OPEN:
/* Writes must be at the write pointer position */
- if (blk_rq_pos(rq) != zone->wp) {
+ if (sector != zone->wp) {
cmd->error = BLK_STS_IOERR;
break;
}
@@ -126,7 +121,7 @@ void null_zone_write(struct nullb_cmd *cmd)
if (zone->cond == BLK_ZONE_COND_EMPTY)
zone->cond = BLK_ZONE_COND_IMP_OPEN;
- zone->wp += rq_sectors;
+ zone->wp += nr_sectors;
if (zone->wp == zone->start + zone->len)
zone->cond = BLK_ZONE_COND_FULL;
break;
@@ -137,11 +132,10 @@ void null_zone_write(struct nullb_cmd *cmd)
}
}
-void null_zone_reset(struct nullb_cmd *cmd)
+void null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
{
struct nullb_device *dev = cmd->nq->dev;
- struct request *rq = cmd->rq;
- unsigned int zno = null_zone_no(dev, blk_rq_pos(rq));
+ unsigned int zno = null_zone_no(dev, sector);
struct blk_zone *zone = &dev->zones[zno];
zone->cond = BLK_ZONE_COND_EMPTY;
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index a026211afb51..96670eefaeb2 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -137,7 +137,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
#include <linux/delay.h>
#include <linux/cdrom.h>
#include <linux/spinlock.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
@@ -186,7 +186,8 @@ static int pcd_packet(struct cdrom_device_info *cdi,
static int pcd_detect(void);
static void pcd_probe_capabilities(void);
static void do_pcd_read_drq(void);
-static void do_pcd_request(struct request_queue * q);
+static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd);
static void do_pcd_read(void);
struct pcd_unit {
@@ -199,6 +200,8 @@ struct pcd_unit {
char *name; /* pcd0, pcd1, etc */
struct cdrom_device_info info; /* uniform cdrom interface */
struct gendisk *disk;
+ struct blk_mq_tag_set tag_set;
+ struct list_head rq_list;
};
static struct pcd_unit pcd[PCD_UNITS];
@@ -292,6 +295,10 @@ static const struct cdrom_device_ops pcd_dops = {
CDC_CD_RW,
};
+static const struct blk_mq_ops pcd_mq_ops = {
+ .queue_rq = pcd_queue_rq,
+};
+
static void pcd_init_units(void)
{
struct pcd_unit *cd;
@@ -300,13 +307,19 @@ static void pcd_init_units(void)
pcd_drive_count = 0;
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
struct gendisk *disk = alloc_disk(1);
+
if (!disk)
continue;
- disk->queue = blk_init_queue(do_pcd_request, &pcd_lock);
- if (!disk->queue) {
- put_disk(disk);
+
+ disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops,
+ 1, BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(disk->queue)) {
+ disk->queue = NULL;
continue;
}
+
+ INIT_LIST_HEAD(&cd->rq_list);
+ disk->queue->queuedata = cd;
blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
cd->disk = disk;
cd->pi = &cd->pia;
@@ -748,18 +761,18 @@ static int pcd_queue;
static int set_next_request(void)
{
struct pcd_unit *cd;
- struct request_queue *q;
int old_pos = pcd_queue;
do {
cd = &pcd[pcd_queue];
- q = cd->present ? cd->disk->queue : NULL;
if (++pcd_queue == PCD_UNITS)
pcd_queue = 0;
- if (q) {
- pcd_req = blk_fetch_request(q);
- if (pcd_req)
- break;
+ if (cd->present && !list_empty(&cd->rq_list)) {
+ pcd_req = list_first_entry(&cd->rq_list, struct request,
+ queuelist);
+ list_del_init(&pcd_req->queuelist);
+ blk_mq_start_request(pcd_req);
+ break;
}
} while (pcd_queue != old_pos);
@@ -768,33 +781,41 @@ static int set_next_request(void)
static void pcd_request(void)
{
+ struct pcd_unit *cd;
+
if (pcd_busy)
return;
- while (1) {
- if (!pcd_req && !set_next_request())
- return;
- if (rq_data_dir(pcd_req) == READ) {
- struct pcd_unit *cd = pcd_req->rq_disk->private_data;
- if (cd != pcd_current)
- pcd_bufblk = -1;
- pcd_current = cd;
- pcd_sector = blk_rq_pos(pcd_req);
- pcd_count = blk_rq_cur_sectors(pcd_req);
- pcd_buf = bio_data(pcd_req->bio);
- pcd_busy = 1;
- ps_set_intr(do_pcd_read, NULL, 0, nice);
- return;
- } else {
- __blk_end_request_all(pcd_req, BLK_STS_IOERR);
- pcd_req = NULL;
- }
- }
+ if (!pcd_req && !set_next_request())
+ return;
+
+ cd = pcd_req->rq_disk->private_data;
+ if (cd != pcd_current)
+ pcd_bufblk = -1;
+ pcd_current = cd;
+ pcd_sector = blk_rq_pos(pcd_req);
+ pcd_count = blk_rq_cur_sectors(pcd_req);
+ pcd_buf = bio_data(pcd_req->bio);
+ pcd_busy = 1;
+ ps_set_intr(do_pcd_read, NULL, 0, nice);
}
-static void do_pcd_request(struct request_queue *q)
+static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
+ struct pcd_unit *cd = hctx->queue->queuedata;
+
+ if (rq_data_dir(bd->rq) != READ) {
+ blk_mq_start_request(bd->rq);
+ return BLK_STS_IOERR;
+ }
+
+ spin_lock_irq(&pcd_lock);
+ list_add_tail(&bd->rq->queuelist, &cd->rq_list);
pcd_request();
+ spin_unlock_irq(&pcd_lock);
+
+ return BLK_STS_OK;
}
static inline void next_request(blk_status_t err)
@@ -802,8 +823,10 @@ static inline void next_request(blk_status_t err)
unsigned long saved_flags;
spin_lock_irqsave(&pcd_lock, saved_flags);
- if (!__blk_end_request_cur(pcd_req, err))
+ if (!blk_update_request(pcd_req, err, blk_rq_cur_bytes(pcd_req))) {
+ __blk_mq_end_request(pcd_req, err);
pcd_req = NULL;
+ }
pcd_busy = 0;
pcd_request();
spin_unlock_irqrestore(&pcd_lock, saved_flags);
@@ -1011,6 +1034,7 @@ static void __exit pcd_exit(void)
unregister_cdrom(&cd->info);
}
blk_cleanup_queue(cd->disk->queue);
+ blk_mq_free_tag_set(&cd->tag_set);
put_disk(cd->disk);
}
unregister_blkdev(major, name);
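next_request() above shows the blk-mq replacement for __blk_end_request_cur(): blk_update_request() completes the current chunk and returns false once nothing is left, at which point the request is finished with __blk_mq_end_request(). The same idiom recurs in pd and pf below; as a hypothetical helper:

#include <linux/blk-mq.h>

static void ex_end_cur_chunk(struct request **reqp, blk_status_t err)
{
	/* false return: all bytes accounted for, finish the request */
	if (!blk_update_request(*reqp, err, blk_rq_cur_bytes(*reqp))) {
		__blk_mq_end_request(*reqp, err);
		*reqp = NULL;
	}
}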
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 7cf947586fe4..ae4971e5d9a8 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -151,7 +151,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h> /* for the eject ioctl */
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
@@ -236,6 +236,8 @@ struct pd_unit {
int alt_geom;
char name[PD_NAMELEN]; /* pda, pdb, etc ... */
struct gendisk *gd;
+ struct blk_mq_tag_set tag_set;
+ struct list_head rq_list;
};
static struct pd_unit pd[PD_UNITS];
@@ -399,9 +401,17 @@ static int set_next_request(void)
if (++pd_queue == PD_UNITS)
pd_queue = 0;
if (q) {
- pd_req = blk_fetch_request(q);
- if (pd_req)
- break;
+ struct pd_unit *disk = q->queuedata;
+
+ if (list_empty(&disk->rq_list))
+ continue;
+
+ pd_req = list_first_entry(&disk->rq_list,
+ struct request,
+ queuelist);
+ list_del_init(&pd_req->queuelist);
+ blk_mq_start_request(pd_req);
+ break;
}
} while (pd_queue != old_pos);
@@ -412,7 +422,6 @@ static void run_fsm(void)
{
while (1) {
enum action res;
- unsigned long saved_flags;
int stop = 0;
if (!phase) {
@@ -433,19 +442,24 @@ static void run_fsm(void)
}
switch(res = phase()) {
- case Ok: case Fail:
+ case Ok: case Fail: {
+ blk_status_t err;
+
+ err = res == Ok ? 0 : BLK_STS_IOERR;
pi_disconnect(pi_current);
pd_claimed = 0;
phase = NULL;
- spin_lock_irqsave(&pd_lock, saved_flags);
- if (!__blk_end_request_cur(pd_req,
- res == Ok ? 0 : BLK_STS_IOERR)) {
- if (!set_next_request())
- stop = 1;
+ spin_lock_irq(&pd_lock);
+ if (!blk_update_request(pd_req, err,
+ blk_rq_cur_bytes(pd_req))) {
+ __blk_mq_end_request(pd_req, err);
+ pd_req = NULL;
+ stop = !set_next_request();
}
- spin_unlock_irqrestore(&pd_lock, saved_flags);
+ spin_unlock_irq(&pd_lock);
if (stop)
return;
+ }
/* fall through */
case Hold:
schedule_fsm();
@@ -505,11 +519,17 @@ static int pd_next_buf(void)
if (pd_count)
return 0;
spin_lock_irqsave(&pd_lock, saved_flags);
- __blk_end_request_cur(pd_req, 0);
- pd_count = blk_rq_cur_sectors(pd_req);
- pd_buf = bio_data(pd_req->bio);
+ if (!blk_update_request(pd_req, 0, blk_rq_cur_bytes(pd_req))) {
+ __blk_mq_end_request(pd_req, 0);
+ pd_req = NULL;
+ pd_count = 0;
+ pd_buf = NULL;
+ } else {
+ pd_count = blk_rq_cur_sectors(pd_req);
+ pd_buf = bio_data(pd_req->bio);
+ }
spin_unlock_irqrestore(&pd_lock, saved_flags);
- return 0;
+ return !pd_count;
}
static unsigned long pd_timeout;
@@ -726,15 +746,21 @@ static enum action pd_identify(struct pd_unit *disk)
/* end of io request engine */
-static void do_pd_request(struct request_queue * q)
+static blk_status_t pd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- if (pd_req)
- return;
- pd_req = blk_fetch_request(q);
- if (!pd_req)
- return;
+ struct pd_unit *disk = hctx->queue->queuedata;
+
+ spin_lock_irq(&pd_lock);
+ if (!pd_req) {
+ pd_req = bd->rq;
+ blk_mq_start_request(pd_req);
+ } else
+ list_add_tail(&bd->rq->queuelist, &disk->rq_list);
+ spin_unlock_irq(&pd_lock);
- schedule_fsm();
+ run_fsm();
+ return BLK_STS_OK;
}
static int pd_special_command(struct pd_unit *disk,
@@ -847,23 +873,33 @@ static const struct block_device_operations pd_fops = {
/* probing */
+static const struct blk_mq_ops pd_mq_ops = {
+ .queue_rq = pd_queue_rq,
+};
+
static void pd_probe_drive(struct pd_unit *disk)
{
- struct gendisk *p = alloc_disk(1 << PD_BITS);
+ struct gendisk *p;
+
+ p = alloc_disk(1 << PD_BITS);
if (!p)
return;
+
strcpy(p->disk_name, disk->name);
p->fops = &pd_fops;
p->major = major;
p->first_minor = (disk - pd) << PD_BITS;
disk->gd = p;
p->private_data = disk;
- p->queue = blk_init_queue(do_pd_request, &pd_lock);
- if (!p->queue) {
- disk->gd = NULL;
- put_disk(p);
+
+ p->queue = blk_mq_init_sq_queue(&disk->tag_set, &pd_mq_ops, 2,
+ BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+ if (IS_ERR(p->queue)) {
+ p->queue = NULL;
return;
}
+
+ p->queue->queuedata = disk;
blk_queue_max_hw_sectors(p->queue, cluster);
blk_queue_bounce_limit(p->queue, BLK_BOUNCE_HIGH);
@@ -895,6 +931,7 @@ static int pd_detect(void)
disk->standby = parm[D_SBY];
if (parm[D_PRT])
pd_drive_count++;
+ INIT_LIST_HEAD(&disk->rq_list);
}
par_drv = pi_register_driver(name);
@@ -972,6 +1009,7 @@ static void __exit pd_exit(void)
disk->gd = NULL;
del_gendisk(p);
blk_cleanup_queue(p->queue);
+ blk_mq_free_tag_set(&disk->tag_set);
put_disk(p);
pi_release(disk->pi);
}
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index eef7a91f667d..e92e7a8eeeb2 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -152,7 +152,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_LUN, D_DLY};
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/spinlock.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
@@ -206,7 +206,8 @@ module_param_array(drive3, int, NULL, 0);
#define ATAPI_WRITE_10 0x2a
static int pf_open(struct block_device *bdev, fmode_t mode);
-static void do_pf_request(struct request_queue * q);
+static blk_status_t pf_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd);
static int pf_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
@@ -238,6 +239,8 @@ struct pf_unit {
int present; /* device present ? */
char name[PF_NAMELEN]; /* pf0, pf1, ... */
struct gendisk *disk;
+ struct blk_mq_tag_set tag_set;
+ struct list_head rq_list;
};
static struct pf_unit units[PF_UNITS];
@@ -277,6 +280,10 @@ static const struct block_device_operations pf_fops = {
.check_events = pf_check_events,
};
+static const struct blk_mq_ops pf_mq_ops = {
+ .queue_rq = pf_queue_rq,
+};
+
static void __init pf_init_units(void)
{
struct pf_unit *pf;
@@ -284,14 +291,22 @@ static void __init pf_init_units(void)
pf_drive_count = 0;
for (unit = 0, pf = units; unit < PF_UNITS; unit++, pf++) {
- struct gendisk *disk = alloc_disk(1);
+ struct gendisk *disk;
+
+ disk = alloc_disk(1);
if (!disk)
continue;
- disk->queue = blk_init_queue(do_pf_request, &pf_spin_lock);
- if (!disk->queue) {
+
+ disk->queue = blk_mq_init_sq_queue(&pf->tag_set, &pf_mq_ops,
+ 1, BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(disk->queue)) {
put_disk(disk);
- return;
+ disk->queue = NULL;
+ continue;
}
+
+ INIT_LIST_HEAD(&pf->rq_list);
+ disk->queue->queuedata = pf;
blk_queue_max_segments(disk->queue, cluster);
blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
pf->disk = disk;
@@ -784,18 +799,18 @@ static int pf_queue;
static int set_next_request(void)
{
struct pf_unit *pf;
- struct request_queue *q;
int old_pos = pf_queue;
do {
pf = &units[pf_queue];
- q = pf->present ? pf->disk->queue : NULL;
if (++pf_queue == PF_UNITS)
pf_queue = 0;
- if (q) {
- pf_req = blk_fetch_request(q);
- if (pf_req)
- break;
+ if (pf->present && !list_empty(&pf->rq_list)) {
+ pf_req = list_first_entry(&pf->rq_list, struct request,
+ queuelist);
+ list_del_init(&pf_req->queuelist);
+ blk_mq_start_request(pf_req);
+ break;
}
} while (pf_queue != old_pos);
@@ -804,8 +819,12 @@ static int set_next_request(void)
static void pf_end_request(blk_status_t err)
{
- if (pf_req && !__blk_end_request_cur(pf_req, err))
+ if (!pf_req)
+ return;
+ if (!blk_update_request(pf_req, err, blk_rq_cur_bytes(pf_req))) {
+ __blk_mq_end_request(pf_req, err);
pf_req = NULL;
+ }
}
static void pf_request(void)
@@ -842,9 +861,17 @@ repeat:
}
}
-static void do_pf_request(struct request_queue *q)
+static blk_status_t pf_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
+ struct pf_unit *pf = hctx->queue->queuedata;
+
+ spin_lock_irq(&pf_spin_lock);
+ list_add_tail(&bd->rq->queuelist, &pf->rq_list);
pf_request();
+ spin_unlock_irq(&pf_spin_lock);
+
+ return BLK_STS_OK;
}
static int pf_next_buf(void)
@@ -1024,6 +1051,7 @@ static void __exit pf_exit(void)
continue;
del_gendisk(pf->disk);
blk_cleanup_queue(pf->disk->queue);
+ blk_mq_free_tag_set(&pf->tag_set);
put_disk(pf->disk);
pi_release(pf->pi);
}
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 6f1d25c1eb64..9381f4e3b221 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2645,7 +2645,7 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
*/
if (pd->refcnt == 1)
pkt_lock_door(pd, 0);
- /* fallthru */
+ /* fall through */
/*
* forward selected CDROM ioctls to CD-ROM, for UDF
*/
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index afe1508d82c6..4e1d9b31f60c 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -19,7 +19,7 @@
*/
#include <linux/ata.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -42,6 +42,7 @@
struct ps3disk_private {
spinlock_t lock; /* Request queue spinlock */
struct request_queue *queue;
+ struct blk_mq_tag_set tag_set;
struct gendisk *gendisk;
unsigned int blocking_factor;
struct request *req;
@@ -118,8 +119,8 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
}
}
-static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
- struct request *req)
+static blk_status_t ps3disk_submit_request_sg(struct ps3_storage_device *dev,
+ struct request *req)
{
struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
int write = rq_data_dir(req), res;
@@ -158,16 +159,15 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
if (res) {
dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__,
__LINE__, op, res);
- __blk_end_request_all(req, BLK_STS_IOERR);
- return 0;
+ return BLK_STS_IOERR;
}
priv->req = req;
- return 1;
+ return BLK_STS_OK;
}
-static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
- struct request *req)
+static blk_status_t ps3disk_submit_flush_request(struct ps3_storage_device *dev,
+ struct request *req)
{
struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
u64 res;
@@ -180,50 +180,45 @@ static int ps3disk_submit_flush_request(struct ps3_storage_device *dev,
if (res) {
dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n",
__func__, __LINE__, res);
- __blk_end_request_all(req, BLK_STS_IOERR);
- return 0;
+ return BLK_STS_IOERR;
}
priv->req = req;
- return 1;
+ return BLK_STS_OK;
}
-static void ps3disk_do_request(struct ps3_storage_device *dev,
- struct request_queue *q)
+static blk_status_t ps3disk_do_request(struct ps3_storage_device *dev,
+ struct request *req)
{
- struct request *req;
-
dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
- while ((req = blk_fetch_request(q))) {
- switch (req_op(req)) {
- case REQ_OP_FLUSH:
- if (ps3disk_submit_flush_request(dev, req))
- return;
- break;
- case REQ_OP_READ:
- case REQ_OP_WRITE:
- if (ps3disk_submit_request_sg(dev, req))
- return;
- break;
- default:
- blk_dump_rq_flags(req, DEVICE_NAME " bad request");
- __blk_end_request_all(req, BLK_STS_IOERR);
- }
+ switch (req_op(req)) {
+ case REQ_OP_FLUSH:
+ return ps3disk_submit_flush_request(dev, req);
+ case REQ_OP_READ:
+ case REQ_OP_WRITE:
+ return ps3disk_submit_request_sg(dev, req);
+ default:
+ blk_dump_rq_flags(req, DEVICE_NAME " bad request");
+ return BLK_STS_IOERR;
}
}
-static void ps3disk_request(struct request_queue *q)
+static blk_status_t ps3disk_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
+ struct request_queue *q = hctx->queue;
struct ps3_storage_device *dev = q->queuedata;
struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
+ blk_status_t ret;
- if (priv->req) {
- dev_dbg(&dev->sbd.core, "%s:%u busy\n", __func__, __LINE__);
- return;
- }
+ blk_mq_start_request(bd->rq);
+
+ spin_lock_irq(&priv->lock);
+ ret = ps3disk_do_request(dev, bd->rq);
+ spin_unlock_irq(&priv->lock);
- ps3disk_do_request(dev, q);
+ return ret;
}
static irqreturn_t ps3disk_interrupt(int irq, void *data)
@@ -280,11 +275,11 @@ static irqreturn_t ps3disk_interrupt(int irq, void *data)
}
spin_lock(&priv->lock);
- __blk_end_request_all(req, error);
priv->req = NULL;
- ps3disk_do_request(dev, priv->queue);
+ blk_mq_end_request(req, error);
spin_unlock(&priv->lock);
+ blk_mq_run_hw_queues(priv->queue, true);
return IRQ_HANDLED;
}
@@ -404,6 +399,10 @@ static unsigned long ps3disk_mask;
static DEFINE_MUTEX(ps3disk_mask_mutex);
+static const struct blk_mq_ops ps3disk_mq_ops = {
+ .queue_rq = ps3disk_queue_rq,
+};
+
static int ps3disk_probe(struct ps3_system_bus_device *_dev)
{
struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
@@ -454,11 +453,12 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
ps3disk_identify(dev);
- queue = blk_init_queue(ps3disk_request, &priv->lock);
- if (!queue) {
- dev_err(&dev->sbd.core, "%s:%u: blk_init_queue failed\n",
+ queue = blk_mq_init_sq_queue(&priv->tag_set, &ps3disk_mq_ops, 1,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(queue)) {
+ dev_err(&dev->sbd.core, "%s:%u: blk_mq_init_queue failed\n",
__func__, __LINE__);
- error = -ENOMEM;
+ error = PTR_ERR(queue);
goto fail_teardown;
}
@@ -500,11 +500,12 @@ static int ps3disk_probe(struct ps3_system_bus_device *_dev)
gendisk->disk_name, priv->model, priv->raw_capacity >> 11,
get_capacity(gendisk) >> 11);
- device_add_disk(&dev->sbd.core, gendisk);
+ device_add_disk(&dev->sbd.core, gendisk, NULL);
return 0;
fail_cleanup_queue:
blk_cleanup_queue(queue);
+ blk_mq_free_tag_set(&priv->tag_set);
fail_teardown:
ps3stor_teardown(dev);
fail_free_bounce:
@@ -530,6 +531,7 @@ static int ps3disk_remove(struct ps3_system_bus_device *_dev)
mutex_unlock(&ps3disk_mask_mutex);
del_gendisk(priv->gendisk);
blk_cleanup_queue(priv->queue);
+ blk_mq_free_tag_set(&priv->tag_set);
put_disk(priv->gendisk);
dev_notice(&dev->sbd.core, "Synchronizing disk cache\n");
ps3disk_sync_cache(dev);
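ps3disk keeps a single request in flight; its interrupt handler now ends that request and then pokes blk-mq to dispatch the next one, since there is no request_fn left to re-enter. Sketched with hypothetical names, assuming a request is in flight:

#include <linux/blk-mq.h>

static void ex_irq_complete(spinlock_t *lock, struct request **reqp,
			    struct request_queue *q, blk_status_t err)
{
	struct request *req;

	spin_lock(lock);
	req = *reqp;
	*reqp = NULL;
	blk_mq_end_request(req, err);
	spin_unlock(lock);

	blk_mq_run_hw_queues(q, true);	/* async: we are in IRQ context */
}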
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 1e3d5de9d838..c0c50816a10b 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -769,7 +769,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
dev_info(&dev->core, "%s: Using %lu MiB of GPU memory\n",
gendisk->disk_name, get_capacity(gendisk) >> 11);
- device_add_disk(&dev->core, gendisk);
+ device_add_disk(&dev->core, gendisk, NULL);
return 0;
fail_cleanup_queue:
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index f2c631ce793c..639051502181 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -782,7 +782,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
pci_set_master(dev);
pci_set_dma_max_seg_size(dev, RSXX_HW_BLK_SIZE);
- st = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
+ st = dma_set_mask(&dev->dev, DMA_BIT_MASK(64));
if (st) {
dev_err(CARD_TO_DEV(card),
"No usable DMA configuration,aborting\n");
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c
index c148e83e4ed7..d9a8758682c9 100644
--- a/drivers/block/rsxx/cregs.c
+++ b/drivers/block/rsxx/cregs.c
@@ -276,7 +276,7 @@ static void creg_cmd_done(struct work_struct *work)
st = -EIO;
}
- if ((cmd->op == CREG_OP_READ)) {
+ if (cmd->op == CREG_OP_READ) {
unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);
/* Paranoid Sanity Checks */
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 1a92f9e65937..3894aa0f350b 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -226,7 +226,7 @@ int rsxx_attach_dev(struct rsxx_cardinfo *card)
set_capacity(card->gendisk, card->size8 >> 9);
else
set_capacity(card->gendisk, 0);
- device_add_disk(CARD_TO_DEV(card), card->gendisk);
+ device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
card->bdev_attached = 1;
}
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 8fbc1bf6db3d..af9cf0215164 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -224,12 +224,12 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
{
if (dma->cmd != HW_CMD_BLK_DISCARD) {
- if (!pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
- pci_unmap_page(ctrl->card->dev, dma->dma_addr,
+ if (!dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
+ dma_unmap_page(&ctrl->card->dev->dev, dma->dma_addr,
get_dma_size(dma),
dma->cmd == HW_CMD_BLK_WRITE ?
- PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE);
+ DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
}
}
@@ -438,23 +438,23 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
if (dma->cmd != HW_CMD_BLK_DISCARD) {
if (dma->cmd == HW_CMD_BLK_WRITE)
- dir = PCI_DMA_TODEVICE;
+ dir = DMA_TO_DEVICE;
else
- dir = PCI_DMA_FROMDEVICE;
+ dir = DMA_FROM_DEVICE;
/*
- * The function pci_map_page is placed here because we
+ * The function dma_map_page is placed here because we
* can only, by design, issue up to 255 commands to the
* hardware at one time per DMA channel. So the maximum
* amount of mapped memory would be 255 * 4 channels *
* 4096 Bytes which is less than 2GB, the limit of a x8
- * Non-HWWD PCIe slot. This way the pci_map_page
+ * Non-HWWD PCIe slot. This way the dma_map_page
* function should never fail because of a lack of
* mappable memory.
*/
- dma->dma_addr = pci_map_page(ctrl->card->dev, dma->page,
+ dma->dma_addr = dma_map_page(&ctrl->card->dev->dev, dma->page,
dma->pg_off, dma->sub_page.cnt << 9, dir);
- if (pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
+ if (dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
push_tracker(ctrl->trackers, tag);
rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
continue;
@@ -776,10 +776,10 @@ bvec_err:
/*----------------- DMA Engine Initialization & Setup -------------------*/
int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
{
- ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
- &ctrl->status.dma_addr);
- ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
- &ctrl->cmd.dma_addr);
+ ctrl->status.buf = dma_alloc_coherent(&dev->dev, STATUS_BUFFER_SIZE8,
+ &ctrl->status.dma_addr, GFP_KERNEL);
+ ctrl->cmd.buf = dma_alloc_coherent(&dev->dev, COMMAND_BUFFER_SIZE8,
+ &ctrl->cmd.dma_addr, GFP_KERNEL);
if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
return -ENOMEM;
@@ -962,12 +962,12 @@ failed_dma_setup:
vfree(ctrl->trackers);
if (ctrl->status.buf)
- pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
- ctrl->status.buf,
- ctrl->status.dma_addr);
+ dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8,
+ ctrl->status.buf,
+ ctrl->status.dma_addr);
if (ctrl->cmd.buf)
- pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
- ctrl->cmd.buf, ctrl->cmd.dma_addr);
+ dma_free_coherent(&card->dev->dev, COMMAND_BUFFER_SIZE8,
+ ctrl->cmd.buf, ctrl->cmd.dma_addr);
}
return st;
@@ -1023,10 +1023,10 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
vfree(ctrl->trackers);
- pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
- ctrl->status.buf, ctrl->status.dma_addr);
- pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
- ctrl->cmd.buf, ctrl->cmd.dma_addr);
+ dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8,
+ ctrl->status.buf, ctrl->status.dma_addr);
+ dma_free_coherent(&card->dev->dev, COMMAND_BUFFER_SIZE8,
+ ctrl->cmd.buf, ctrl->cmd.dma_addr);
}
}
@@ -1059,11 +1059,11 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
card->ctrl[i].stats.reads_issued--;
if (dma->cmd != HW_CMD_BLK_DISCARD) {
- pci_unmap_page(card->dev, dma->dma_addr,
+ dma_unmap_page(&card->dev->dev, dma->dma_addr,
get_dma_size(dma),
dma->cmd == HW_CMD_BLK_WRITE ?
- PCI_DMA_TODEVICE :
- PCI_DMA_FROMDEVICE);
+ DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
}
list_add_tail(&dma->list, &issued_dmas[i]);
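rsxx's coherent buffers move from pci_alloc_consistent()/pci_free_consistent() to dma_alloc_coherent()/dma_free_coherent(), which take the struct device and an explicit gfp_t. Minimal sketch (helper names hypothetical):

#include <linux/dma-mapping.h>

static void *ex_alloc_coherent(struct device *dev, size_t size,
			       dma_addr_t *handle)
{
	/* GFP_KERNEL is now explicit; pci_alloc_consistent implied GFP_ATOMIC */
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}

static void ex_free_coherent(struct device *dev, size_t size, void *buf,
			     dma_addr_t handle)
{
	dma_free_coherent(dev, size, buf, handle);
}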
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 87b9e7fbf062..7c5fc6942f32 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -632,7 +632,7 @@ static bool skd_preop_sg_list(struct skd_device *skdev,
* Map scatterlist to PCI bus addresses.
* Note PCI might change the number of entries.
*/
- n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir);
+ n_sg = dma_map_sg(&skdev->pdev->dev, sgl, n_sg, skreq->data_dir);
if (n_sg <= 0)
return false;
@@ -682,7 +682,8 @@ static void skd_postop_sg_list(struct skd_device *skdev,
skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
skreq->sksg_dma_address +
((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
- pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir);
+ dma_unmap_sg(&skdev->pdev->dev, &skreq->sg[0], skreq->n_sg,
+ skreq->data_dir);
}
/*
@@ -1416,7 +1417,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
case SKD_CHECK_STATUS_BUSY_IMMINENT:
skd_log_skreq(skdev, skreq, "retry(busy)");
- blk_requeue_request(skdev->queue, req);
+ blk_mq_requeue_request(req, true);
dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
skdev->timer_countdown = SKD_TIMER_MINUTES(20);
@@ -1426,7 +1427,7 @@ static void skd_resolve_req_exception(struct skd_device *skdev,
case SKD_CHECK_STATUS_REQUEUE_REQUEST:
if ((unsigned long) ++req->special < SKD_MAX_RETRIES) {
skd_log_skreq(skdev, skreq, "retry");
- blk_requeue_request(skdev->queue, req);
+ blk_mq_requeue_request(req, true);
break;
}
/* fall through */
@@ -2632,8 +2633,8 @@ static int skd_cons_skcomp(struct skd_device *skdev)
"comp pci_alloc, total bytes %zd entries %d\n",
SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
- skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
- &skdev->cq_dma_address);
+ skcomp = dma_zalloc_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
+ &skdev->cq_dma_address, GFP_KERNEL);
if (skcomp == NULL) {
rc = -ENOMEM;
@@ -2674,10 +2675,10 @@ static int skd_cons_skmsg(struct skd_device *skdev)
skmsg->id = i + SKD_ID_FIT_MSG;
- skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
- SKD_N_FITMSG_BYTES,
- &skmsg->mb_dma_address);
-
+ skmsg->msg_buf = dma_alloc_coherent(&skdev->pdev->dev,
+ SKD_N_FITMSG_BYTES,
+ &skmsg->mb_dma_address,
+ GFP_KERNEL);
if (skmsg->msg_buf == NULL) {
rc = -ENOMEM;
goto err_out;
@@ -2971,8 +2972,8 @@ err_out:
static void skd_free_skcomp(struct skd_device *skdev)
{
if (skdev->skcomp_table)
- pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
- skdev->skcomp_table, skdev->cq_dma_address);
+ dma_free_coherent(&skdev->pdev->dev, SKD_SKCOMP_SIZE,
+ skdev->skcomp_table, skdev->cq_dma_address);
skdev->skcomp_table = NULL;
skdev->cq_dma_address = 0;
@@ -2991,8 +2992,8 @@ static void skd_free_skmsg(struct skd_device *skdev)
skmsg = &skdev->skmsg_table[i];
if (skmsg->msg_buf != NULL) {
- pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
- skmsg->msg_buf,
+ dma_free_coherent(&skdev->pdev->dev, SKD_N_FITMSG_BYTES,
+ skmsg->msg_buf,
skmsg->mb_dma_address);
}
skmsg->msg_buf = NULL;
@@ -3104,7 +3105,7 @@ static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
{
dev_dbg(&skdev->pdev->dev, "add_disk\n");
- device_add_disk(parent, skdev->disk);
+ device_add_disk(parent, skdev->disk, NULL);
return 0;
}
@@ -3172,18 +3173,12 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
goto err_out;
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (!rc) {
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
- dev_err(&pdev->dev, "consistent DMA mask error %d\n",
- rc);
- }
- } else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(&pdev->dev, "DMA mask error %d\n", rc);
- goto err_out_regions;
- }
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(&pdev->dev, "DMA mask error %d\n", rc);
+ goto err_out_regions;
}
if (!skd_major) {
@@ -3367,20 +3362,12 @@ static int skd_pci_resume(struct pci_dev *pdev)
rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
goto err_out;
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (!rc) {
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
-
- dev_err(&pdev->dev, "consistent DMA mask error %d\n",
- rc);
- }
- } else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
-
- dev_err(&pdev->dev, "DMA mask error %d\n", rc);
- goto err_out_regions;
- }
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc)
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(&pdev->dev, "DMA mask error %d\n", rc);
+ goto err_out_regions;
}
pci_set_master(pdev);
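The DMA-mask idiom now used in both the skd probe and resume paths, extracted into a hypothetical helper; note that the 32-bit fallback assigns its result back to rc, since otherwise a successful fallback would still be reported as a failure:

#include <linux/dma-mapping.h>

static int ex_set_dma_masks(struct device *dev)
{
	int rc;

	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (rc)
		rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	return rc;	/* 0 on success with either mask width */
}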
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 5ca56bfae63c..b54fa6726303 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -36,6 +36,10 @@ MODULE_VERSION(DRV_MODULE_VERSION);
#define VDC_TX_RING_SIZE 512
#define VDC_DEFAULT_BLK_SIZE 512
+#define MAX_XFER_BLKS (128 * 1024)
+#define MAX_XFER_SIZE (MAX_XFER_BLKS / VDC_DEFAULT_BLK_SIZE)
+#define MAX_RING_COOKIES ((MAX_XFER_BLKS / PAGE_SIZE) + 2)
+
#define WAITING_FOR_LINK_UP 0x01
#define WAITING_FOR_TX_SPACE 0x02
#define WAITING_FOR_GEN_CMD 0x04
@@ -450,7 +454,7 @@ static int __send_request(struct request *req)
{
struct vdc_port *port = req->rq_disk->private_data;
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
- struct scatterlist sg[port->ring_cookies];
+ struct scatterlist sg[MAX_RING_COOKIES];
struct vdc_req_entry *rqe;
struct vio_disk_desc *desc;
unsigned int map_perm;
@@ -458,6 +462,9 @@ static int __send_request(struct request *req)
u64 len;
u8 op;
+ if (WARN_ON(port->ring_cookies > MAX_RING_COOKIES))
+ return -EINVAL;
+
map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;
if (rq_data_dir(req) == READ) {
@@ -850,7 +857,7 @@ static int probe_disk(struct vdc_port *port)
port->vdisk_size, (port->vdisk_size >> (20 - 9)),
port->vio.ver.major, port->vio.ver.minor);
- device_add_disk(&port->vio.vdev->dev, g);
+ device_add_disk(&port->vio.vdev->dev, g, NULL);
return 0;
}
@@ -984,9 +991,8 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
goto err_out_free_port;
port->vdisk_block_size = VDC_DEFAULT_BLK_SIZE;
- port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size);
- port->ring_cookies = ((port->max_xfer_size *
- port->vdisk_block_size) / PAGE_SIZE) + 2;
+ port->max_xfer_size = MAX_XFER_SIZE;
+ port->ring_cookies = MAX_RING_COOKIES;
err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
if (err)
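sunvdc replaces a variable-length array (sg[port->ring_cookies]) with a worst-case fixed-size array plus a runtime guard, the standard VLA-removal pattern. Sketch with illustrative constants mirroring the ones added above:

#include <linux/bug.h>
#include <linux/mm.h>

#define EX_MAX_XFER_BLKS	(128 * 1024)
#define EX_MAX_RING_COOKIES	((EX_MAX_XFER_BLKS / PAGE_SIZE) + 2)

static int ex_check_ring_cookies(u64 ring_cookies)
{
	/* the on-stack array is sized for the worst case; refuse more */
	if (WARN_ON(ring_cookies > EX_MAX_RING_COOKIES))
		return -EINVAL;
	return 0;
}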
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 0e31884a9519..3fa6fcc34790 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -19,7 +19,7 @@
#include <linux/module.h>
#include <linux/fd.h>
#include <linux/slab.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
@@ -190,6 +190,7 @@ struct floppy_state {
int ref_count;
struct gendisk *disk;
+ struct blk_mq_tag_set tag_set;
/* parent controller */
@@ -211,7 +212,6 @@ enum head {
struct swim_priv {
struct swim __iomem *base;
spinlock_t lock;
- int fdc_queue;
int floppy_count;
struct floppy_state unit[FD_MAX_UNIT];
};
@@ -525,58 +525,36 @@ static blk_status_t floppy_read_sectors(struct floppy_state *fs,
return 0;
}
-static struct request *swim_next_request(struct swim_priv *swd)
+static blk_status_t swim_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- struct request_queue *q;
- struct request *rq;
- int old_pos = swd->fdc_queue;
+ struct floppy_state *fs = hctx->queue->queuedata;
+ struct swim_priv *swd = fs->swd;
+ struct request *req = bd->rq;
+ blk_status_t err;
- do {
- q = swd->unit[swd->fdc_queue].disk->queue;
- if (++swd->fdc_queue == swd->floppy_count)
- swd->fdc_queue = 0;
- if (q) {
- rq = blk_fetch_request(q);
- if (rq)
- return rq;
- }
- } while (swd->fdc_queue != old_pos);
+ if (!spin_trylock_irq(&swd->lock))
+ return BLK_STS_DEV_RESOURCE;
- return NULL;
-}
+ blk_mq_start_request(req);
-static void do_fd_request(struct request_queue *q)
-{
- struct swim_priv *swd = q->queuedata;
- struct request *req;
- struct floppy_state *fs;
+ if (!fs->disk_in || rq_data_dir(req) == WRITE) {
+ err = BLK_STS_IOERR;
+ goto out;
+ }
- req = swim_next_request(swd);
- while (req) {
- blk_status_t err = BLK_STS_IOERR;
+ do {
+ err = floppy_read_sectors(fs, blk_rq_pos(req),
+ blk_rq_cur_sectors(req),
+ bio_data(req->bio));
+ } while (blk_update_request(req, err, blk_rq_cur_bytes(req)));
+ __blk_mq_end_request(req, err);
- fs = req->rq_disk->private_data;
- if (blk_rq_pos(req) >= fs->total_secs)
- goto done;
- if (!fs->disk_in)
- goto done;
- if (rq_data_dir(req) == WRITE && fs->write_protected)
- goto done;
+ err = BLK_STS_OK;
+out:
+ spin_unlock_irq(&swd->lock);
+ return err;
- switch (rq_data_dir(req)) {
- case WRITE:
- /* NOT IMPLEMENTED */
- break;
- case READ:
- err = floppy_read_sectors(fs, blk_rq_pos(req),
- blk_rq_cur_sectors(req),
- bio_data(req->bio));
- break;
- }
- done:
- if (!__blk_end_request_cur(req, err))
- req = swim_next_request(swd);
- }
}
static struct floppy_struct floppy_type[4] = {
@@ -823,6 +801,10 @@ static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
return 0;
}
+static const struct blk_mq_ops swim_mq_ops = {
+ .queue_rq = swim_queue_rq,
+};
+
static int swim_floppy_init(struct swim_priv *swd)
{
int err;
@@ -852,20 +834,25 @@ static int swim_floppy_init(struct swim_priv *swd)
spin_lock_init(&swd->lock);
for (drive = 0; drive < swd->floppy_count; drive++) {
+ struct request_queue *q;
+
swd->unit[drive].disk = alloc_disk(1);
if (swd->unit[drive].disk == NULL) {
err = -ENOMEM;
goto exit_put_disks;
}
- swd->unit[drive].disk->queue = blk_init_queue(do_fd_request,
- &swd->lock);
- if (!swd->unit[drive].disk->queue) {
- err = -ENOMEM;
+
+ q = blk_mq_init_sq_queue(&swd->unit[drive].tag_set, &swim_mq_ops,
+ 2, BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(q)) {
+ err = PTR_ERR(q);
goto exit_put_disks;
}
+
+ swd->unit[drive].disk->queue = q;
blk_queue_bounce_limit(swd->unit[drive].disk->queue,
BLK_BOUNCE_HIGH);
- swd->unit[drive].disk->queue->queuedata = swd;
+ swd->unit[drive].disk->queue->queuedata = &swd->unit[drive];
swd->unit[drive].swd = swd;
}
@@ -887,8 +874,18 @@ static int swim_floppy_init(struct swim_priv *swd)
exit_put_disks:
unregister_blkdev(FLOPPY_MAJOR, "fd");
- while (drive--)
- put_disk(swd->unit[drive].disk);
+ do {
+ struct gendisk *disk = swd->unit[drive].disk;
+
+ if (disk) {
+ if (disk->queue) {
+ blk_cleanup_queue(disk->queue);
+ disk->queue = NULL;
+ }
+ blk_mq_free_tag_set(&swd->unit[drive].tag_set);
+ put_disk(disk);
+ }
+ } while (drive--);
return err;
}
@@ -961,6 +958,7 @@ static int swim_remove(struct platform_device *dev)
for (drive = 0; drive < swd->floppy_count; drive++) {
del_gendisk(swd->unit[drive].disk);
blk_cleanup_queue(swd->unit[drive].disk->queue);
+ blk_mq_free_tag_set(&swd->unit[drive].tag_set);
put_disk(swd->unit[drive].disk);
}
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 469541c1e51e..c1c676a33e4a 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -25,7 +25,7 @@
#include <linux/delay.h>
#include <linux/fd.h>
#include <linux/ioctl.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/module.h>
@@ -206,6 +206,7 @@ struct floppy_state {
char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
int index;
struct request *cur_req;
+ struct blk_mq_tag_set tag_set;
};
#define swim3_err(fmt, arg...) dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
@@ -260,16 +261,15 @@ static int floppy_revalidate(struct gendisk *disk);
static bool swim3_end_request(struct floppy_state *fs, blk_status_t err, unsigned int nr_bytes)
{
struct request *req = fs->cur_req;
- int rc;
swim3_dbg(" end request, err=%d nr_bytes=%d, cur_req=%p\n",
err, nr_bytes, req);
if (err)
nr_bytes = blk_rq_cur_bytes(req);
- rc = __blk_end_request(req, err, nr_bytes);
- if (rc)
+ if (blk_update_request(req, err, nr_bytes))
return true;
+ __blk_mq_end_request(req, err);
fs->cur_req = NULL;
return false;
}
@@ -309,86 +309,58 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
return (stat & DATA) == 0;
}
-static void start_request(struct floppy_state *fs)
+static blk_status_t swim3_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- struct request *req;
+ struct floppy_state *fs = hctx->queue->queuedata;
+ struct request *req = bd->rq;
unsigned long x;
- swim3_dbg("start request, initial state=%d\n", fs->state);
-
- if (fs->state == idle && fs->wanted) {
- fs->state = available;
- wake_up(&fs->wait);
- return;
+ spin_lock_irq(&swim3_lock);
+ if (fs->cur_req || fs->state != idle) {
+ spin_unlock_irq(&swim3_lock);
+ return BLK_STS_DEV_RESOURCE;
}
- while (fs->state == idle) {
- swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
- if (!fs->cur_req) {
- fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
- swim3_dbg(" fetched request %p\n", fs->cur_req);
- if (!fs->cur_req)
- break;
- }
- req = fs->cur_req;
-
- if (fs->mdev->media_bay &&
- check_media_bay(fs->mdev->media_bay) != MB_FD) {
- swim3_dbg("%s", " media bay absent, dropping req\n");
- swim3_end_request(fs, BLK_STS_IOERR, 0);
- continue;
- }
-
-#if 0 /* This is really too verbose */
- swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
- req->rq_disk->disk_name, req->cmd,
- (long)blk_rq_pos(req), blk_rq_sectors(req),
- bio_data(req->bio));
- swim3_dbg(" current_nr_sectors=%u\n",
- blk_rq_cur_sectors(req));
-#endif
-
- if (blk_rq_pos(req) >= fs->total_secs) {
- swim3_dbg(" pos out of bounds (%ld, max is %ld)\n",
- (long)blk_rq_pos(req), (long)fs->total_secs);
- swim3_end_request(fs, BLK_STS_IOERR, 0);
- continue;
- }
- if (fs->ejected) {
- swim3_dbg("%s", " disk ejected\n");
+ blk_mq_start_request(req);
+ fs->cur_req = req;
+ if (fs->mdev->media_bay &&
+ check_media_bay(fs->mdev->media_bay) != MB_FD) {
+ swim3_dbg("%s", " media bay absent, dropping req\n");
+ swim3_end_request(fs, BLK_STS_IOERR, 0);
+ goto out;
+ }
+ if (fs->ejected) {
+ swim3_dbg("%s", " disk ejected\n");
+ swim3_end_request(fs, BLK_STS_IOERR, 0);
+ goto out;
+ }
+ if (rq_data_dir(req) == WRITE) {
+ if (fs->write_prot < 0)
+ fs->write_prot = swim3_readbit(fs, WRITE_PROT);
+ if (fs->write_prot) {
+ swim3_dbg("%s", " try to write, disk write protected\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
- continue;
+ goto out;
}
-
- if (rq_data_dir(req) == WRITE) {
- if (fs->write_prot < 0)
- fs->write_prot = swim3_readbit(fs, WRITE_PROT);
- if (fs->write_prot) {
- swim3_dbg("%s", " try to write, disk write protected\n");
- swim3_end_request(fs, BLK_STS_IOERR, 0);
- continue;
- }
- }
-
- /* Do not remove the cast. blk_rq_pos(req) is now a
- * sector_t and can be 64 bits, but it will never go
- * past 32 bits for this driver anyway, so we can
- * safely cast it down and not have to do a 64/32
- * division
- */
- fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
- x = ((long)blk_rq_pos(req)) % fs->secpercyl;
- fs->head = x / fs->secpertrack;
- fs->req_sector = x % fs->secpertrack + 1;
- fs->state = do_transfer;
- fs->retries = 0;
-
- act(fs);
}
-}
-static void do_fd_request(struct request_queue * q)
-{
- start_request(q->queuedata);
+ /*
+ * Do not remove the cast. blk_rq_pos(req) is now a sector_t and can be
+ * 64 bits, but it will never go past 32 bits for this driver anyway, so
+ * we can safely cast it down and not have to do a 64/32 division
+ */
+ fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
+ x = ((long)blk_rq_pos(req)) % fs->secpercyl;
+ fs->head = x / fs->secpertrack;
+ fs->req_sector = x % fs->secpertrack + 1;
+ fs->state = do_transfer;
+ fs->retries = 0;
+
+ act(fs);
+
+out:
+ spin_unlock_irq(&swim3_lock);
+ return BLK_STS_OK;
}
static void set_timeout(struct floppy_state *fs, int nticks,
@@ -585,7 +557,6 @@ static void scan_timeout(struct timer_list *t)
if (fs->retries > 5) {
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
- start_request(fs);
} else {
fs->state = jogging;
act(fs);
@@ -609,7 +580,6 @@ static void seek_timeout(struct timer_list *t)
swim3_err("%s", "Seek timeout\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
- start_request(fs);
spin_unlock_irqrestore(&swim3_lock, flags);
}
@@ -638,7 +608,6 @@ static void settle_timeout(struct timer_list *t)
swim3_err("%s", "Seek settle timeout\n");
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
- start_request(fs);
unlock:
spin_unlock_irqrestore(&swim3_lock, flags);
}
@@ -667,7 +636,6 @@ static void xfer_timeout(struct timer_list *t)
(long)blk_rq_pos(fs->cur_req));
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
- start_request(fs);
spin_unlock_irqrestore(&swim3_lock, flags);
}
@@ -704,7 +672,6 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
if (fs->retries > 5) {
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
- start_request(fs);
} else {
fs->state = jogging;
act(fs);
@@ -796,7 +763,6 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
fs->state, rq_data_dir(req), intr, err);
swim3_end_request(fs, BLK_STS_IOERR, 0);
fs->state = idle;
- start_request(fs);
break;
}
fs->retries = 0;
@@ -813,8 +779,6 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
} else
fs->state = idle;
}
- if (fs->state == idle)
- start_request(fs);
break;
default:
swim3_err("Don't know what to do in state %d\n", fs->state);
@@ -862,14 +826,19 @@ static int grab_drive(struct floppy_state *fs, enum swim_state state,
static void release_drive(struct floppy_state *fs)
{
+ struct request_queue *q = disks[fs->index]->queue;
unsigned long flags;
swim3_dbg("%s", "-> release drive\n");
spin_lock_irqsave(&swim3_lock, flags);
fs->state = idle;
- start_request(fs);
spin_unlock_irqrestore(&swim3_lock, flags);
+
+ blk_mq_freeze_queue(q);
+ blk_mq_quiesce_queue(q);
+ blk_mq_unquiesce_queue(q);
+ blk_mq_unfreeze_queue(q);
}
static int fd_eject(struct floppy_state *fs)
@@ -1089,6 +1058,10 @@ static const struct block_device_operations floppy_fops = {
.revalidate_disk= floppy_revalidate,
};
+static const struct blk_mq_ops swim3_mq_ops = {
+ .queue_rq = swim3_queue_rq,
+};
+
static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
{
struct floppy_state *fs = macio_get_drvdata(mdev);
@@ -1202,47 +1175,63 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
static int swim3_attach(struct macio_dev *mdev,
const struct of_device_id *match)
{
+ struct floppy_state *fs;
struct gendisk *disk;
- int index, rc;
+ int rc;
- index = floppy_count++;
- if (index >= MAX_FLOPPIES)
+ if (floppy_count >= MAX_FLOPPIES)
return -ENXIO;
- /* Add the drive */
- rc = swim3_add_device(mdev, index);
- if (rc)
- return rc;
- /* Now register that disk. Same comment about failure handling */
- disk = disks[index] = alloc_disk(1);
- if (disk == NULL)
- return -ENOMEM;
- disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
- if (disk->queue == NULL) {
- put_disk(disk);
- return -ENOMEM;
+ if (floppy_count == 0) {
+ rc = register_blkdev(FLOPPY_MAJOR, "fd");
+ if (rc)
+ return rc;
}
- blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
- disk->queue->queuedata = &floppy_states[index];
- if (index == 0) {
- /* If we failed, there isn't much we can do as the driver is still
- * too dumb to remove the device, just bail out
- */
- if (register_blkdev(FLOPPY_MAJOR, "fd"))
- return 0;
+ fs = &floppy_states[floppy_count];
+
+ disk = alloc_disk(1);
+ if (disk == NULL) {
+ rc = -ENOMEM;
+ goto out_unregister;
+ }
+
+ disk->queue = blk_mq_init_sq_queue(&fs->tag_set, &swim3_mq_ops, 2,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(disk->queue)) {
+ rc = PTR_ERR(disk->queue);
+ disk->queue = NULL;
+ goto out_put_disk;
}
+ blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
+ disk->queue->queuedata = fs;
+
+ rc = swim3_add_device(mdev, floppy_count);
+ if (rc)
+ goto out_cleanup_queue;
disk->major = FLOPPY_MAJOR;
- disk->first_minor = index;
+ disk->first_minor = floppy_count;
disk->fops = &floppy_fops;
- disk->private_data = &floppy_states[index];
+ disk->private_data = fs;
disk->flags |= GENHD_FL_REMOVABLE;
- sprintf(disk->disk_name, "fd%d", index);
+ sprintf(disk->disk_name, "fd%d", floppy_count);
set_capacity(disk, 2880);
add_disk(disk);
+ disks[floppy_count++] = disk;
return 0;
+
+out_cleanup_queue:
+ blk_cleanup_queue(disk->queue);
+ disk->queue = NULL;
+ blk_mq_free_tag_set(&fs->tag_set);
+out_put_disk:
+ put_disk(disk);
+out_unregister:
+ if (floppy_count == 0)
+ unregister_blkdev(FLOPPY_MAJOR, "fd");
+ return rc;
}
static const struct of_device_id swim3_match[] =
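
swim3_end_request() above shows how per-segment completion changes under blk-mq: there is no __blk_end_request() analogue, so the driver pairs blk_update_request(), which retires nr_bytes and returns true while the request is unfinished, with __blk_mq_end_request() for the final completion. A minimal sketch, assuming the request was already started with blk_mq_start_request():

	static bool mydev_end_chunk(struct request *req, blk_status_t err,
				    unsigned int nr_bytes)
	{
		if (blk_update_request(req, err, nr_bytes))
			return true;		/* more segments remain */
		__blk_mq_end_request(req, err);	/* request fully completed */
		return false;
	}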
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 4d90e5eba2f5..064b8c5c7a32 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -16,7 +16,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
@@ -197,7 +197,6 @@ enum {
FL_NON_RAID = FW_VER_NON_RAID,
FL_4PORT = FW_VER_4PORT,
FL_FW_VER_MASK = (FW_VER_NON_RAID | FW_VER_4PORT),
- FL_DAC = (1 << 16),
FL_DYN_MAJOR = (1 << 17),
};
@@ -244,6 +243,7 @@ struct carm_port {
unsigned int port_no;
struct gendisk *disk;
struct carm_host *host;
+ struct blk_mq_tag_set tag_set;
/* attached device characteristics */
u64 capacity;
@@ -279,6 +279,7 @@ struct carm_host {
unsigned int state;
u32 fw_ver;
+ struct blk_mq_tag_set tag_set;
struct request_queue *oob_q;
unsigned int n_oob;
@@ -750,7 +751,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
struct request *req = crq->rq;
int rc;
- __blk_end_request_all(req, error);
+ blk_mq_end_request(req, error);
rc = carm_put_request(host, crq);
assert(rc == 0);
@@ -760,7 +761,7 @@ static inline void carm_push_q (struct carm_host *host, struct request_queue *q)
{
unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q;
- blk_stop_queue(q);
+ blk_mq_stop_hw_queues(q);
VPRINTK("STOPPED QUEUE %p\n", q);
host->wait_q[idx] = q;
@@ -785,7 +786,7 @@ static inline void carm_round_robin(struct carm_host *host)
{
struct request_queue *q = carm_pop_q(host);
if (q) {
- blk_start_queue(q);
+ blk_mq_start_hw_queues(q);
VPRINTK("STARTED QUEUE %p\n", q);
}
}
@@ -802,82 +803,86 @@ static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq,
}
}
-static void carm_oob_rq_fn(struct request_queue *q)
+static blk_status_t carm_oob_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
+ struct request_queue *q = hctx->queue;
struct carm_host *host = q->queuedata;
struct carm_request *crq;
- struct request *rq;
int rc;
- while (1) {
- DPRINTK("get req\n");
- rq = blk_fetch_request(q);
- if (!rq)
- break;
+ blk_mq_start_request(bd->rq);
- crq = rq->special;
- assert(crq != NULL);
- assert(crq->rq == rq);
+ spin_lock_irq(&host->lock);
- crq->n_elem = 0;
+ crq = bd->rq->special;
+ assert(crq != NULL);
+ assert(crq->rq == bd->rq);
- DPRINTK("send req\n");
- rc = carm_send_msg(host, crq);
- if (rc) {
- blk_requeue_request(q, rq);
- carm_push_q(host, q);
- return; /* call us again later, eventually */
- }
+ crq->n_elem = 0;
+
+ DPRINTK("send req\n");
+ rc = carm_send_msg(host, crq);
+ if (rc) {
+ carm_push_q(host, q);
+ spin_unlock_irq(&host->lock);
+ return BLK_STS_DEV_RESOURCE;
}
+
+ spin_unlock_irq(&host->lock);
+ return BLK_STS_OK;
}
-static void carm_rq_fn(struct request_queue *q)
+static blk_status_t carm_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
+ struct request_queue *q = hctx->queue;
struct carm_port *port = q->queuedata;
struct carm_host *host = port->host;
struct carm_msg_rw *msg;
struct carm_request *crq;
- struct request *rq;
+ struct request *rq = bd->rq;
struct scatterlist *sg;
int writing = 0, pci_dir, i, n_elem, rc;
u32 tmp;
unsigned int msg_size;
-queue_one_request:
- VPRINTK("get req\n");
- rq = blk_peek_request(q);
- if (!rq)
- return;
+ blk_mq_start_request(rq);
+
+ spin_lock_irq(&host->lock);
crq = carm_get_request(host);
if (!crq) {
carm_push_q(host, q);
- return; /* call us again later, eventually */
+ spin_unlock_irq(&host->lock);
+ return BLK_STS_DEV_RESOURCE;
}
crq->rq = rq;
- blk_start_request(rq);
-
if (rq_data_dir(rq) == WRITE) {
writing = 1;
- pci_dir = PCI_DMA_TODEVICE;
+ pci_dir = DMA_TO_DEVICE;
} else {
- pci_dir = PCI_DMA_FROMDEVICE;
+ pci_dir = DMA_FROM_DEVICE;
}
/* get scatterlist from block layer */
sg = &crq->sg[0];
n_elem = blk_rq_map_sg(q, rq, sg);
if (n_elem <= 0) {
+ /* request with no s/g entries? */
carm_end_rq(host, crq, BLK_STS_IOERR);
- return; /* request with no s/g entries? */
+ spin_unlock_irq(&host->lock);
+ return BLK_STS_IOERR;
}
/* map scatterlist to PCI bus addresses */
- n_elem = pci_map_sg(host->pdev, sg, n_elem, pci_dir);
+ n_elem = dma_map_sg(&host->pdev->dev, sg, n_elem, pci_dir);
if (n_elem <= 0) {
+ /* request with no s/g entries? */
carm_end_rq(host, crq, BLK_STS_IOERR);
- return; /* request with no s/g entries? */
+ spin_unlock_irq(&host->lock);
+ return BLK_STS_IOERR;
}
crq->n_elem = n_elem;
crq->port = port;
@@ -927,12 +932,13 @@ queue_one_request:
rc = carm_send_msg(host, crq);
if (rc) {
carm_put_request(host, crq);
- blk_requeue_request(q, rq);
carm_push_q(host, q);
- return; /* call us again later, eventually */
+ spin_unlock_irq(&host->lock);
+ return BLK_STS_DEV_RESOURCE;
}
- goto queue_one_request;
+ spin_unlock_irq(&host->lock);
+ return BLK_STS_OK;
}
static void carm_handle_array_info(struct carm_host *host,
@@ -1052,11 +1058,11 @@ static inline void carm_handle_rw(struct carm_host *host,
VPRINTK("ENTER\n");
if (rq_data_dir(crq->rq) == WRITE)
- pci_dir = PCI_DMA_TODEVICE;
+ pci_dir = DMA_TO_DEVICE;
else
- pci_dir = PCI_DMA_FROMDEVICE;
+ pci_dir = DMA_FROM_DEVICE;
- pci_unmap_sg(host->pdev, &crq->sg[0], crq->n_elem, pci_dir);
+ dma_unmap_sg(&host->pdev->dev, &crq->sg[0], crq->n_elem, pci_dir);
carm_end_rq(host, crq, error);
}
@@ -1485,6 +1491,14 @@ static int carm_init_host(struct carm_host *host)
return 0;
}
+static const struct blk_mq_ops carm_oob_mq_ops = {
+ .queue_rq = carm_oob_queue_rq,
+};
+
+static const struct blk_mq_ops carm_mq_ops = {
+ .queue_rq = carm_queue_rq,
+};
+
static int carm_init_disks(struct carm_host *host)
{
unsigned int i;
@@ -1513,9 +1527,10 @@ static int carm_init_disks(struct carm_host *host)
disk->fops = &carm_bd_ops;
disk->private_data = port;
- q = blk_init_queue(carm_rq_fn, &host->lock);
- if (!q) {
- rc = -ENOMEM;
+ q = blk_mq_init_sq_queue(&port->tag_set, &carm_mq_ops,
+ max_queue, BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(q)) {
+ rc = PTR_ERR(q);
break;
}
disk->queue = q;
@@ -1533,14 +1548,18 @@ static void carm_free_disks(struct carm_host *host)
unsigned int i;
for (i = 0; i < CARM_MAX_PORTS; i++) {
- struct gendisk *disk = host->port[i].disk;
+ struct carm_port *port = &host->port[i];
+ struct gendisk *disk = port->disk;
+
if (disk) {
struct request_queue *q = disk->queue;
if (disk->flags & GENHD_FL_UP)
del_gendisk(disk);
- if (q)
+ if (q) {
+ blk_mq_free_tag_set(&port->tag_set);
blk_cleanup_queue(q);
+ }
put_disk(disk);
}
}
@@ -1548,8 +1567,8 @@ static void carm_free_disks(struct carm_host *host)
static int carm_init_shm(struct carm_host *host)
{
- host->shm = pci_alloc_consistent(host->pdev, CARM_SHM_SIZE,
- &host->shm_dma);
+ host->shm = dma_alloc_coherent(&host->pdev->dev, CARM_SHM_SIZE,
+ &host->shm_dma, GFP_KERNEL);
if (!host->shm)
return -ENOMEM;
@@ -1565,7 +1584,6 @@ static int carm_init_shm(struct carm_host *host)
static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct carm_host *host;
- unsigned int pci_dac;
int rc;
struct request_queue *q;
unsigned int i;
@@ -1580,28 +1598,12 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto err_out;
-#ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (!rc) {
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (rc) {
- printk(KERN_ERR DRV_NAME "(%s): consistent DMA mask failure\n",
- pci_name(pdev));
- goto err_out_regions;
- }
- pci_dac = 1;
- } else {
-#endif
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
- printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n",
- pci_name(pdev));
- goto err_out_regions;
- }
- pci_dac = 0;
-#ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
+ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc) {
+ printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n",
+ pci_name(pdev));
+ goto err_out_regions;
}
-#endif
host = kzalloc(sizeof(*host), GFP_KERNEL);
if (!host) {
@@ -1612,7 +1614,6 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
}
host->pdev = pdev;
- host->flags = pci_dac ? FL_DAC : 0;
spin_lock_init(&host->lock);
INIT_WORK(&host->fsm_task, carm_fsm_task);
init_completion(&host->probe_comp);
@@ -1636,12 +1637,13 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_iounmap;
}
- q = blk_init_queue(carm_oob_rq_fn, &host->lock);
- if (!q) {
+ q = blk_mq_init_sq_queue(&host->tag_set, &carm_oob_mq_ops, 1,
+ BLK_MQ_F_NO_SCHED);
+ if (IS_ERR(q)) {
printk(KERN_ERR DRV_NAME "(%s): OOB queue alloc failure\n",
pci_name(pdev));
- rc = -ENOMEM;
- goto err_out_pci_free;
+ rc = PTR_ERR(q);
+ goto err_out_dma_free;
}
host->oob_q = q;
q->queuedata = host;
@@ -1705,8 +1707,9 @@ err_out_free_majors:
else if (host->major == 161)
clear_bit(1, &carm_major_alloc);
blk_cleanup_queue(host->oob_q);
-err_out_pci_free:
- pci_free_consistent(pdev, CARM_SHM_SIZE, host->shm, host->shm_dma);
+ blk_mq_free_tag_set(&host->tag_set);
+err_out_dma_free:
+ dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
err_out_iounmap:
iounmap(host->mmio);
err_out_kfree:
@@ -1736,7 +1739,8 @@ static void carm_remove_one (struct pci_dev *pdev)
else if (host->major == 161)
clear_bit(1, &carm_major_alloc);
blk_cleanup_queue(host->oob_q);
- pci_free_consistent(pdev, CARM_SHM_SIZE, host->shm, host->shm_dma);
+ blk_mq_free_tag_set(&host->tag_set);
+ dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
iounmap(host->mmio);
kfree(host);
pci_release_regions(pdev);
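
One idiom in the sx8 hunks recurs elsewhere: the legacy blk_stop_queue()/blk_requeue_request() pair for a busy controller becomes blk_mq_stop_hw_queues() plus a BLK_STS_DEV_RESOURCE return, which hands the request back to blk-mq until the driver restarts the queues from its completion path (carm_round_robin() above calls blk_mq_start_hw_queues()). A sketch, where mydev_claim_slot() and mydev_issue() are hypothetical helpers:

	#include <linux/blk-mq.h>

	struct mydev {
		spinlock_t lock;
	};

	static bool mydev_claim_slot(struct mydev *dev);		/* hypothetical */
	static void mydev_issue(struct mydev *dev, struct request *rq);	/* hypothetical */

	static blk_status_t mydev_queue_rq(struct blk_mq_hw_ctx *hctx,
					   const struct blk_mq_queue_data *bd)
	{
		struct mydev *dev = hctx->queue->queuedata;

		blk_mq_start_request(bd->rq);

		spin_lock_irq(&dev->lock);
		if (!mydev_claim_slot(dev)) {
			blk_mq_stop_hw_queues(hctx->queue);
			spin_unlock_irq(&dev->lock);
			/* requeued, not failed; driver must restart the queues */
			return BLK_STS_DEV_RESOURCE;
		}
		mydev_issue(dev, bd->rq);
		spin_unlock_irq(&dev->lock);
		return BLK_STS_OK;
	}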
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 5c7fb8cc4149..be3e3ab79950 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -363,12 +363,12 @@ static int add_bio(struct cardinfo *card)
vec = bio_iter_iovec(bio, card->current_iter);
- dma_handle = pci_map_page(card->dev,
+ dma_handle = dma_map_page(&card->dev->dev,
vec.bv_page,
vec.bv_offset,
vec.bv_len,
bio_op(bio) == REQ_OP_READ ?
- PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
p = &card->mm_pages[card->Ready];
desc = &p->desc[p->cnt];
@@ -421,7 +421,7 @@ static void process_page(unsigned long data)
struct cardinfo *card = (struct cardinfo *)data;
unsigned int dma_status = card->dma_status;
- spin_lock_bh(&card->lock);
+ spin_lock(&card->lock);
if (card->Active < 0)
goto out_unlock;
page = &card->mm_pages[card->Active];
@@ -448,10 +448,10 @@ static void process_page(unsigned long data)
page->iter = page->bio->bi_iter;
}
- pci_unmap_page(card->dev, desc->data_dma_handle,
+ dma_unmap_page(&card->dev->dev, desc->data_dma_handle,
vec.bv_len,
(control & DMASCR_TRANSFER_READ) ?
- PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
if (control & DMASCR_HARD_ERROR) {
/* error */
bio->bi_status = BLK_STS_IOERR;
@@ -496,7 +496,7 @@ static void process_page(unsigned long data)
mm_start_io(card);
}
out_unlock:
- spin_unlock_bh(&card->lock);
+ spin_unlock(&card->lock);
while (return_bio) {
struct bio *bio = return_bio;
@@ -817,8 +817,8 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
dev_printk(KERN_INFO, &dev->dev,
"Micro Memory(tm) controller found (PCI Mem Module (Battery Backup))\n");
- if (pci_set_dma_mask(dev, DMA_BIT_MASK(64)) &&
- pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&dev->dev, DMA_BIT_MASK(64)) &&
+ dma_set_mask(&dev->dev, DMA_BIT_MASK(32))) {
dev_printk(KERN_WARNING, &dev->dev, "NO suitable DMA found\n");
return -ENOMEM;
}
@@ -871,12 +871,10 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto failed_magic;
}
- card->mm_pages[0].desc = pci_alloc_consistent(card->dev,
- PAGE_SIZE * 2,
- &card->mm_pages[0].page_dma);
- card->mm_pages[1].desc = pci_alloc_consistent(card->dev,
- PAGE_SIZE * 2,
- &card->mm_pages[1].page_dma);
+ card->mm_pages[0].desc = dma_alloc_coherent(&card->dev->dev,
+ PAGE_SIZE * 2, &card->mm_pages[0].page_dma, GFP_KERNEL);
+ card->mm_pages[1].desc = dma_alloc_coherent(&card->dev->dev,
+ PAGE_SIZE * 2, &card->mm_pages[1].page_dma, GFP_KERNEL);
if (card->mm_pages[0].desc == NULL ||
card->mm_pages[1].desc == NULL) {
dev_printk(KERN_ERR, &card->dev->dev, "alloc failed\n");
@@ -1002,13 +1000,13 @@ static int mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
failed_req_irq:
failed_alloc:
if (card->mm_pages[0].desc)
- pci_free_consistent(card->dev, PAGE_SIZE*2,
- card->mm_pages[0].desc,
- card->mm_pages[0].page_dma);
+ dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2,
+ card->mm_pages[0].desc,
+ card->mm_pages[0].page_dma);
if (card->mm_pages[1].desc)
- pci_free_consistent(card->dev, PAGE_SIZE*2,
- card->mm_pages[1].desc,
- card->mm_pages[1].page_dma);
+ dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2,
+ card->mm_pages[1].desc,
+ card->mm_pages[1].page_dma);
failed_magic:
iounmap(card->csr_remap);
failed_remap_csr:
@@ -1027,11 +1025,11 @@ static void mm_pci_remove(struct pci_dev *dev)
iounmap(card->csr_remap);
if (card->mm_pages[0].desc)
- pci_free_consistent(card->dev, PAGE_SIZE*2,
+ dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2,
card->mm_pages[0].desc,
card->mm_pages[0].page_dma);
if (card->mm_pages[1].desc)
- pci_free_consistent(card->dev, PAGE_SIZE*2,
+ dma_free_coherent(&card->dev->dev, PAGE_SIZE * 2,
card->mm_pages[1].desc,
card->mm_pages[1].page_dma);
blk_cleanup_queue(card->queue);
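
The umem hunks are a pure DMA-API conversion: each pci_* wrapper maps one-for-one onto the generic dma_* call against &pdev->dev, with the GFP flags made explicit for coherent allocations. A self-contained sketch of the pattern, with hypothetical mydev fields:

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	struct mydev {
		void *desc;
		dma_addr_t desc_dma;
	};

	static int mydev_dma_setup(struct pci_dev *pdev, struct mydev *dev)
	{
		/* was: pci_set_dma_mask(pdev, ...) */
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
		    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
			return -EIO;

		/* was: pci_alloc_consistent(); the GFP flag is now explicit */
		dev->desc = dma_alloc_coherent(&pdev->dev, PAGE_SIZE * 2,
					       &dev->desc_dma, GFP_KERNEL);
		if (!dev->desc)
			return -ENOMEM;
		return 0;
	}

	static void mydev_dma_teardown(struct pci_dev *pdev, struct mydev *dev)
	{
		/* was: pci_free_consistent() */
		dma_free_coherent(&pdev->dev, PAGE_SIZE * 2, dev->desc,
				  dev->desc_dma);
	}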
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 23752dc99b00..086c6bb12baa 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -351,8 +351,8 @@ static int minor_to_index(int minor)
return minor >> PART_BITS;
}
-static ssize_t virtblk_serial_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t serial_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
int err;
@@ -371,7 +371,7 @@ static ssize_t virtblk_serial_show(struct device *dev,
return err;
}
-static DEVICE_ATTR(serial, 0444, virtblk_serial_show, NULL);
+static DEVICE_ATTR_RO(serial);
/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
@@ -545,8 +545,8 @@ static const char *const virtblk_cache_types[] = {
};
static ssize_t
-virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+cache_type_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct gendisk *disk = dev_to_disk(dev);
struct virtio_blk *vblk = disk->private_data;
@@ -564,8 +564,7 @@ virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct gendisk *disk = dev_to_disk(dev);
struct virtio_blk *vblk = disk->private_data;
@@ -575,12 +574,38 @@ virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}
-static const struct device_attribute dev_attr_cache_type_ro =
- __ATTR(cache_type, 0444,
- virtblk_cache_type_show, NULL);
-static const struct device_attribute dev_attr_cache_type_rw =
- __ATTR(cache_type, 0644,
- virtblk_cache_type_show, virtblk_cache_type_store);
+static DEVICE_ATTR_RW(cache_type);
+
+static struct attribute *virtblk_attrs[] = {
+ &dev_attr_serial.attr,
+ &dev_attr_cache_type.attr,
+ NULL,
+};
+
+static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct gendisk *disk = dev_to_disk(dev);
+ struct virtio_blk *vblk = disk->private_data;
+ struct virtio_device *vdev = vblk->vdev;
+
+ if (a == &dev_attr_cache_type.attr &&
+ !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
+ return S_IRUGO;
+
+ return a->mode;
+}
+
+static const struct attribute_group virtblk_attr_group = {
+ .attrs = virtblk_attrs,
+ .is_visible = virtblk_attrs_are_visible,
+};
+
+static const struct attribute_group *virtblk_attr_groups[] = {
+ &virtblk_attr_group,
+ NULL,
+};
static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
@@ -780,24 +805,9 @@ static int virtblk_probe(struct virtio_device *vdev)
virtblk_update_capacity(vblk, false);
virtio_device_ready(vdev);
- device_add_disk(&vdev->dev, vblk->disk);
- err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
- if (err)
- goto out_del_disk;
-
- if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
- err = device_create_file(disk_to_dev(vblk->disk),
- &dev_attr_cache_type_rw);
- else
- err = device_create_file(disk_to_dev(vblk->disk),
- &dev_attr_cache_type_ro);
- if (err)
- goto out_del_disk;
+ device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
return 0;
-out_del_disk:
- del_gendisk(vblk->disk);
- blk_cleanup_queue(vblk->disk->queue);
out_free_tags:
blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
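
virtio_blk also moves from hand-rolled device_create_file() calls to an attribute group passed to the three-argument device_add_disk(): DEVICE_ATTR_RO()/DEVICE_ATTR_RW() derive the sysfs names from the show/store functions, and .is_visible decides each attribute's mode at registration time. A sketch of the shape; the serial value is a placeholder and the mydev_* names are hypothetical:

	static ssize_t serial_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%s\n", "0000");	/* placeholder */
	}
	static DEVICE_ATTR_RO(serial);	/* name derives from serial_show() */

	static struct attribute *mydev_attrs[] = {
		&dev_attr_serial.attr,
		NULL,
	};

	static umode_t mydev_attrs_visible(struct kobject *kobj,
					   struct attribute *a, int n)
	{
		return a->mode;		/* or a demoted mode such as S_IRUGO */
	}

	static const struct attribute_group mydev_attr_group = {
		.attrs		= mydev_attrs,
		.is_visible	= mydev_attrs_visible,
	};

	static const struct attribute_group *mydev_attr_groups[] = {
		&mydev_attr_group,
		NULL,
	};

	/* registration: device_add_disk(parent, disk, mydev_attr_groups); */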
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a71d817e900d..9eea83ae01c6 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2420,7 +2420,7 @@ static void blkfront_connect(struct blkfront_info *info)
for (i = 0; i < info->nr_rings; i++)
kick_pending_request_queues(&info->rinfo[i]);
- device_add_disk(&info->xbdev->dev, info->gd);
+ device_add_disk(&info->xbdev->dev, info->gd, NULL);
info->is_ready = 1;
return;
@@ -2670,8 +2670,8 @@ static void purge_persistent_grants(struct blkfront_info *info)
list_del(&gnt_list_entry->node);
gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
rinfo->persistent_gnts_c--;
- __free_page(gnt_list_entry->page);
- kfree(gnt_list_entry);
+ gnt_list_entry->gref = GRANT_INVALID_REF;
+ list_add_tail(&gnt_list_entry->node, &rinfo->grants);
}
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
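
The purge_persistent_grants() change swaps freeing for recycling: the entry's grant reference is invalidated and the node is returned to the free list, so later allocation paths that expect list entries to exist keep working without reallocating. The general shape, with hypothetical types:

	#include <linux/list.h>

	struct cache_entry {
		struct list_head node;
		int ref;			/* -1 means invalid/unbacked */
	};

	static void purge_entry(struct cache_entry *ent,
				struct list_head *free_list)
	{
		/* was: free the backing page and kfree(ent) */
		ent->ref = -1;
		list_add_tail(&ent->node, free_list);
	}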
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index c24589414c75..87ccef4bd69e 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -88,7 +88,7 @@
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/mutex.h>
#include <linux/ata.h>
#include <linux/hdreg.h>
@@ -209,6 +209,8 @@ struct ace_device {
struct device *dev;
struct request_queue *queue;
struct gendisk *gd;
+ struct blk_mq_tag_set tag_set;
+ struct list_head rq_list;
/* Inserted CF card parameters */
u16 cf_id[ATA_ID_WORDS];
@@ -462,18 +464,26 @@ static inline void ace_fsm_yieldirq(struct ace_device *ace)
ace->fsm_continue_flag = 0;
}
+static bool ace_has_next_request(struct request_queue *q)
+{
+ struct ace_device *ace = q->queuedata;
+
+ return !list_empty(&ace->rq_list);
+}
+
/* Get the next read/write request; ending requests that we don't handle */
static struct request *ace_get_next_request(struct request_queue *q)
{
- struct request *req;
+ struct ace_device *ace = q->queuedata;
+ struct request *rq;
- while ((req = blk_peek_request(q)) != NULL) {
- if (!blk_rq_is_passthrough(req))
- break;
- blk_start_request(req);
- __blk_end_request_all(req, BLK_STS_IOERR);
+ rq = list_first_entry_or_null(&ace->rq_list, struct request, queuelist);
+ if (rq) {
+ list_del_init(&rq->queuelist);
+ blk_mq_start_request(rq);
}
- return req;
+
+ return rq;
}
static void ace_fsm_dostate(struct ace_device *ace)
@@ -499,11 +509,11 @@ static void ace_fsm_dostate(struct ace_device *ace)
/* Drop all in-flight and pending requests */
if (ace->req) {
- __blk_end_request_all(ace->req, BLK_STS_IOERR);
+ blk_mq_end_request(ace->req, BLK_STS_IOERR);
ace->req = NULL;
}
- while ((req = blk_fetch_request(ace->queue)) != NULL)
- __blk_end_request_all(req, BLK_STS_IOERR);
+ while ((req = ace_get_next_request(ace->queue)) != NULL)
+ blk_mq_end_request(req, BLK_STS_IOERR);
/* Drop back to IDLE state and notify waiters */
ace->fsm_state = ACE_FSM_STATE_IDLE;
@@ -517,7 +527,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
switch (ace->fsm_state) {
case ACE_FSM_STATE_IDLE:
/* See if there is anything to do */
- if (ace->id_req_count || ace_get_next_request(ace->queue)) {
+ if (ace->id_req_count || ace_has_next_request(ace->queue)) {
ace->fsm_iter_num++;
ace->fsm_state = ACE_FSM_STATE_REQ_LOCK;
mod_timer(&ace->stall_timer, jiffies + HZ);
@@ -651,7 +661,6 @@ static void ace_fsm_dostate(struct ace_device *ace)
ace->fsm_state = ACE_FSM_STATE_IDLE;
break;
}
- blk_start_request(req);
/* Okay, it's a data request, set it up for transfer */
dev_dbg(ace->dev,
@@ -728,7 +737,8 @@ static void ace_fsm_dostate(struct ace_device *ace)
}
/* bio finished; is there another one? */
- if (__blk_end_request_cur(ace->req, BLK_STS_OK)) {
+ if (blk_update_request(ace->req, BLK_STS_OK,
+ blk_rq_cur_bytes(ace->req))) {
/* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
* blk_rq_sectors(ace->req),
* blk_rq_cur_sectors(ace->req));
@@ -854,17 +864,23 @@ static irqreturn_t ace_interrupt(int irq, void *dev_id)
/* ---------------------------------------------------------------------
* Block ops
*/
-static void ace_request(struct request_queue * q)
+static blk_status_t ace_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- struct request *req;
- struct ace_device *ace;
-
- req = ace_get_next_request(q);
+ struct ace_device *ace = hctx->queue->queuedata;
+ struct request *req = bd->rq;
- if (req) {
- ace = req->rq_disk->private_data;
- tasklet_schedule(&ace->fsm_tasklet);
+ if (blk_rq_is_passthrough(req)) {
+ blk_mq_start_request(req);
+ return BLK_STS_IOERR;
}
+
+ spin_lock_irq(&ace->lock);
+ list_add_tail(&req->queuelist, &ace->rq_list);
+ spin_unlock_irq(&ace->lock);
+
+ tasklet_schedule(&ace->fsm_tasklet);
+ return BLK_STS_OK;
}
static unsigned int ace_check_events(struct gendisk *gd, unsigned int clearing)
@@ -957,6 +973,10 @@ static const struct block_device_operations ace_fops = {
.getgeo = ace_getgeo,
};
+static const struct blk_mq_ops ace_mq_ops = {
+ .queue_rq = ace_queue_rq,
+};
+
/* --------------------------------------------------------------------
* SystemACE device setup/teardown code
*/
@@ -972,6 +992,7 @@ static int ace_setup(struct ace_device *ace)
spin_lock_init(&ace->lock);
init_completion(&ace->id_completion);
+ INIT_LIST_HEAD(&ace->rq_list);
/*
* Map the device
@@ -989,9 +1010,15 @@ static int ace_setup(struct ace_device *ace)
/*
* Initialize the request queue
*/
- ace->queue = blk_init_queue(ace_request, &ace->lock);
- if (ace->queue == NULL)
+ ace->queue = blk_mq_init_sq_queue(&ace->tag_set, &ace_mq_ops, 2,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(ace->queue)) {
+ rc = PTR_ERR(ace->queue);
+ ace->queue = NULL;
goto err_blk_initq;
+ }
+ ace->queue->queuedata = ace;
+
blk_queue_logical_block_size(ace->queue, 512);
blk_queue_bounce_limit(ace->queue, BLK_BOUNCE_HIGH);
@@ -1066,6 +1093,7 @@ err_read:
put_disk(ace->gd);
err_alloc_disk:
blk_cleanup_queue(ace->queue);
+ blk_mq_free_tag_set(&ace->tag_set);
err_blk_initq:
iounmap(ace->baseaddr);
err_ioremap:
@@ -1081,8 +1109,10 @@ static void ace_teardown(struct ace_device *ace)
put_disk(ace->gd);
}
- if (ace->queue)
+ if (ace->queue) {
blk_cleanup_queue(ace->queue);
+ blk_mq_free_tag_set(&ace->tag_set);
+ }
tasklet_kill(&ace->fsm_tasklet);
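
xsysace keeps its tasklet-driven state machine and makes ->queue_rq a thin producer: requests are parked on a driver-private list under the lock and the FSM pops and starts them itself, as ace_get_next_request() above does. A sketch with hypothetical mydev_* names:

	#include <linux/blk-mq.h>
	#include <linux/interrupt.h>

	struct mydev {
		spinlock_t lock;
		struct list_head rq_list;
		struct tasklet_struct fsm_tasklet;
	};

	static blk_status_t mydev_queue_rq(struct blk_mq_hw_ctx *hctx,
					   const struct blk_mq_queue_data *bd)
	{
		struct mydev *dev = hctx->queue->queuedata;

		spin_lock_irq(&dev->lock);
		list_add_tail(&bd->rq->queuelist, &dev->rq_list);
		spin_unlock_irq(&dev->lock);

		tasklet_schedule(&dev->fsm_tasklet);
		return BLK_STS_OK;
	}

	/* called from the state machine when it is ready for more work */
	static struct request *mydev_next_request(struct mydev *dev)
	{
		struct request *rq;

		rq = list_first_entry_or_null(&dev->rq_list, struct request,
					      queuelist);
		if (rq) {
			list_del_init(&rq->queuelist);
			blk_mq_start_request(rq);	/* started when picked up */
		}
		return rq;
	}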
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index d0c5bc4e0703..1106c076fa4b 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -31,7 +31,7 @@
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -66,43 +66,44 @@ static DEFINE_SPINLOCK(z2ram_lock);
static struct gendisk *z2ram_gendisk;
-static void do_z2_request(struct request_queue *q)
+static blk_status_t z2_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
- struct request *req;
-
- req = blk_fetch_request(q);
- while (req) {
- unsigned long start = blk_rq_pos(req) << 9;
- unsigned long len = blk_rq_cur_bytes(req);
- blk_status_t err = BLK_STS_OK;
-
- if (start + len > z2ram_size) {
- pr_err(DEVICE_NAME ": bad access: block=%llu, "
- "count=%u\n",
- (unsigned long long)blk_rq_pos(req),
- blk_rq_cur_sectors(req));
- err = BLK_STS_IOERR;
- goto done;
- }
- while (len) {
- unsigned long addr = start & Z2RAM_CHUNKMASK;
- unsigned long size = Z2RAM_CHUNKSIZE - addr;
- void *buffer = bio_data(req->bio);
-
- if (len < size)
- size = len;
- addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];
- if (rq_data_dir(req) == READ)
- memcpy(buffer, (char *)addr, size);
- else
- memcpy((char *)addr, buffer, size);
- start += size;
- len -= size;
- }
- done:
- if (!__blk_end_request_cur(req, err))
- req = blk_fetch_request(q);
+ struct request *req = bd->rq;
+ unsigned long start = blk_rq_pos(req) << 9;
+ unsigned long len = blk_rq_cur_bytes(req);
+
+ blk_mq_start_request(req);
+
+ if (start + len > z2ram_size) {
+ pr_err(DEVICE_NAME ": bad access: block=%llu, "
+ "count=%u\n",
+ (unsigned long long)blk_rq_pos(req),
+ blk_rq_cur_sectors(req));
+ return BLK_STS_IOERR;
+ }
+
+ spin_lock_irq(&z2ram_lock);
+
+ while (len) {
+ unsigned long addr = start & Z2RAM_CHUNKMASK;
+ unsigned long size = Z2RAM_CHUNKSIZE - addr;
+ void *buffer = bio_data(req->bio);
+
+ if (len < size)
+ size = len;
+ addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];
+ if (rq_data_dir(req) == READ)
+ memcpy(buffer, (char *)addr, size);
+ else
+ memcpy((char *)addr, buffer, size);
+ start += size;
+ len -= size;
}
+
+ spin_unlock_irq(&z2ram_lock);
+ blk_mq_end_request(req, BLK_STS_OK);
+ return BLK_STS_OK;
}
static void
@@ -337,6 +338,11 @@ static struct kobject *z2_find(dev_t dev, int *part, void *data)
}
static struct request_queue *z2_queue;
+static struct blk_mq_tag_set tag_set;
+
+static const struct blk_mq_ops z2_mq_ops = {
+ .queue_rq = z2_queue_rq,
+};
static int __init
z2_init(void)
@@ -355,9 +361,13 @@ z2_init(void)
if (!z2ram_gendisk)
goto out_disk;
- z2_queue = blk_init_queue(do_z2_request, &z2ram_lock);
- if (!z2_queue)
+ z2_queue = blk_mq_init_sq_queue(&tag_set, &z2_mq_ops, 16,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (IS_ERR(z2_queue)) {
+ ret = PTR_ERR(z2_queue);
+ z2_queue = NULL;
goto out_queue;
+ }
z2ram_gendisk->major = Z2RAM_MAJOR;
z2ram_gendisk->first_minor = 0;
@@ -387,6 +397,7 @@ static void __exit z2_exit(void)
del_gendisk(z2ram_gendisk);
put_disk(z2ram_gendisk);
blk_cleanup_queue(z2_queue);
+ blk_mq_free_tag_set(&tag_set);
if ( current_device != -1 )
{
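
z2_queue_rq() above is the fully synchronous variant: the sector position is converted to a byte offset (blk_rq_pos() << 9), the copy is done inline, and the request is completed before returning. A compressed sketch that skips the chunk table, with hypothetical ram_base/ram_size:

	static void *ram_base;			/* hypothetical backing store */
	static unsigned long ram_size;

	static blk_status_t ram_queue_rq(struct blk_mq_hw_ctx *hctx,
					 const struct blk_mq_queue_data *bd)
	{
		struct request *req = bd->rq;
		unsigned long start = blk_rq_pos(req) << 9;	/* sectors -> bytes */
		unsigned long len = blk_rq_cur_bytes(req);
		void *buf = bio_data(req->bio);

		blk_mq_start_request(req);
		if (start + len > ram_size)
			return BLK_STS_IOERR;	/* blk-mq ends the request for us */

		if (rq_data_dir(req) == READ)
			memcpy(buf, ram_base + start, len);
		else
			memcpy(ram_base + start, buf, len);

		blk_mq_end_request(req, BLK_STS_OK);
		return BLK_STS_OK;
	}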
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index 635235759a0a..fcd055457364 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -3,7 +3,6 @@ config ZRAM
tristate "Compressed RAM block device support"
depends on BLOCK && SYSFS && ZSMALLOC && CRYPTO
select CRYPTO_LZO
- default n
help
Creates virtual block devices called /dev/zramX (X = 0, 1, ...).
Pages written to these disks are compressed and stored in memory
@@ -18,7 +17,6 @@ config ZRAM
config ZRAM_WRITEBACK
bool "Write back incompressible page to backing device"
depends on ZRAM
- default n
help
With incompressible page, there is no memory saving to keep it
in memory. Instead, write it out to backing device.
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index a1d6b5597c17..4879595200e1 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1636,6 +1636,11 @@ static const struct attribute_group zram_disk_attr_group = {
.attrs = zram_disk_attrs,
};
+static const struct attribute_group *zram_disk_attr_groups[] = {
+ &zram_disk_attr_group,
+ NULL,
+};
+
/*
* Allocate and initialize new zram device. the function returns
* '>= 0' device_id upon success, and negative value otherwise.
@@ -1716,24 +1721,14 @@ static int zram_add(void)
zram->disk->queue->backing_dev_info->capabilities |=
(BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO);
- add_disk(zram->disk);
-
- ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
- &zram_disk_attr_group);
- if (ret < 0) {
- pr_err("Error creating sysfs group for device %d\n",
- device_id);
- goto out_free_disk;
- }
+ device_add_disk(NULL, zram->disk, zram_disk_attr_groups);
+
strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
zram_debugfs_register(zram);
pr_info("Added device: %s\n", zram->disk->disk_name);
return device_id;
-out_free_disk:
- del_gendisk(zram->disk);
- put_disk(zram->disk);
out_free_queue:
blk_cleanup_queue(queue);
out_free_idr:
@@ -1762,15 +1757,6 @@ static int zram_remove(struct zram *zram)
mutex_unlock(&bdev->bd_mutex);
zram_debugfs_unregister(zram);
- /*
- * Remove sysfs first, so no one will perform a disksize
- * store while we destroy the devices. This also helps during
- * hot_remove -- zram_reset_device() is the last holder of
- * ->init_lock, no later/concurrent disksize_store() or any
- * other sysfs handlers are possible.
- */
- sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
- &zram_disk_attr_group);
/* Make sure all the pending I/O are finished */
fsync_bdev(bdev);
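
For zram the same three-argument device_add_disk() also removes a teardown-ordering subtlety: groups registered with the disk are torn down by del_gendisk() itself, so the manual sysfs_remove_group() that guarded against a racing disksize_store() is no longer needed. A sketch of the remove side under that assumption:

	static void mydev_remove_disk(struct gendisk *disk)
	{
		/*
		 * was: sysfs_remove_group() first, to stop attribute stores
		 * racing with teardown; groups passed to device_add_disk()
		 * are removed by del_gendisk() itself
		 */
		del_gendisk(disk);
		put_disk(disk);
	}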
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 963bb0309e25..ea6238ed5c0e 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -543,6 +543,8 @@ static void hci_uart_tty_close(struct tty_struct *tty)
}
clear_bit(HCI_UART_PROTO_SET, &hu->flags);
+ percpu_free_rwsem(&hu->proto_lock);
+
kfree(hu);
}
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index e182f6019f68..2fee65886d50 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1322,7 +1322,7 @@ static int qca_init_regulators(struct qca_power *qca,
{
int i;
- qca->vreg_bulk = devm_kzalloc(qca->dev, num_vregs *
+ qca->vreg_bulk = devm_kcalloc(qca->dev, num_vregs,
sizeof(struct regulator_bulk_data),
GFP_KERNEL);
if (!qca->vreg_bulk)
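
The hci_qca fix is the standard kcalloc-family hardening: devm_kcalloc() checks the n * size multiplication for overflow before allocating, where the open-coded devm_kzalloc(n * size, ...) would silently wrap. Minimal sketch:

	#include <linux/device.h>
	#include <linux/regulator/consumer.h>

	static struct regulator_bulk_data *
	alloc_vreg_bulk(struct device *dev, size_t num_vregs)
	{
		/* overflow-checked n * size, zeroed, lifetime managed by devm */
		return devm_kcalloc(dev, num_vregs,
				    sizeof(struct regulator_bulk_data),
				    GFP_KERNEL);
	}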
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index a5d5a96479bf..614ecdbb4ab7 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -410,10 +410,10 @@ static int cdrom_get_disc_info(struct cdrom_device_info *cdi,
* hack to have the capability flags defined const, while we can still
* change it here without gcc complaining at every line.
*/
-#define ENSURE(call, bits) \
-do { \
- if (cdo->call == NULL) \
- *change_capability &= ~(bits); \
+#define ENSURE(cdo, call, bits) \
+do { \
+ if (cdo->call == NULL) \
+ WARN_ON_ONCE((cdo)->capability & (bits)); \
} while (0)
/*
@@ -589,7 +589,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
{
static char banner_printed;
const struct cdrom_device_ops *cdo = cdi->ops;
- int *change_capability = (int *)&cdo->capability; /* hack */
cd_dbg(CD_OPEN, "entering register_cdrom\n");
@@ -601,16 +600,16 @@ int register_cdrom(struct cdrom_device_info *cdi)
cdrom_sysctl_register();
}
- ENSURE(drive_status, CDC_DRIVE_STATUS);
+ ENSURE(cdo, drive_status, CDC_DRIVE_STATUS);
if (cdo->check_events == NULL && cdo->media_changed == NULL)
- *change_capability = ~(CDC_MEDIA_CHANGED | CDC_SELECT_DISC);
- ENSURE(tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY);
- ENSURE(lock_door, CDC_LOCK);
- ENSURE(select_speed, CDC_SELECT_SPEED);
- ENSURE(get_last_session, CDC_MULTI_SESSION);
- ENSURE(get_mcn, CDC_MCN);
- ENSURE(reset, CDC_RESET);
- ENSURE(generic_packet, CDC_GENERIC_PACKET);
+ WARN_ON_ONCE(cdo->capability & (CDC_MEDIA_CHANGED | CDC_SELECT_DISC));
+ ENSURE(cdo, tray_move, CDC_CLOSE_TRAY | CDC_OPEN_TRAY);
+ ENSURE(cdo, lock_door, CDC_LOCK);
+ ENSURE(cdo, select_speed, CDC_SELECT_SPEED);
+ ENSURE(cdo, get_last_session, CDC_MULTI_SESSION);
+ ENSURE(cdo, get_mcn, CDC_MCN);
+ ENSURE(cdo, reset, CDC_RESET);
+ ENSURE(cdo, generic_packet, CDC_GENERIC_PACKET);
cdi->mc_flags = 0;
cdi->options = CDO_USE_FFLAGS;
@@ -2445,7 +2444,7 @@ static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi,
return -ENOSYS;
if (arg != CDSL_CURRENT && arg != CDSL_NONE) {
- if ((int)arg >= cdi->capacity)
+ if (arg >= cdi->capacity)
return -EINVAL;
}
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index ae3a7537cf0f..757e85b81879 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -31,12 +31,11 @@
#include <linux/cdrom.h>
#include <linux/genhd.h>
#include <linux/bio.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/wait.h>
-#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <scsi/scsi.h>
#include <asm/io.h>
@@ -102,11 +101,6 @@ static int gdrom_major;
static DECLARE_WAIT_QUEUE_HEAD(command_queue);
static DECLARE_WAIT_QUEUE_HEAD(request_queue);
-static DEFINE_SPINLOCK(gdrom_lock);
-static void gdrom_readdisk_dma(struct work_struct *work);
-static DECLARE_WORK(work, gdrom_readdisk_dma);
-static LIST_HEAD(gdrom_deferred);
-
struct gdromtoc {
unsigned int entry[99];
unsigned int first, last;
@@ -122,6 +116,7 @@ static struct gdrom_unit {
char disk_type;
struct gdromtoc *toc;
struct request_queue *gdrom_rq;
+ struct blk_mq_tag_set tag_set;
} gd;
struct gdrom_id {
@@ -584,103 +579,83 @@ static int gdrom_set_interrupt_handlers(void)
* 9 -> sectors >> 8
* 10 -> sectors
*/
-static void gdrom_readdisk_dma(struct work_struct *work)
+static blk_status_t gdrom_readdisk_dma(struct request *req)
{
int block, block_cnt;
blk_status_t err;
struct packet_command *read_command;
- struct list_head *elem, *next;
- struct request *req;
unsigned long timeout;
- if (list_empty(&gdrom_deferred))
- return;
read_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL);
if (!read_command)
- return; /* get more memory later? */
+ return BLK_STS_RESOURCE;
+
read_command->cmd[0] = 0x30;
read_command->cmd[1] = 0x20;
- spin_lock(&gdrom_lock);
- list_for_each_safe(elem, next, &gdrom_deferred) {
- req = list_entry(elem, struct request, queuelist);
- spin_unlock(&gdrom_lock);
- block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
- block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
- __raw_writel(virt_to_phys(bio_data(req->bio)), GDROM_DMA_STARTADDR_REG);
- __raw_writel(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
- __raw_writel(1, GDROM_DMA_DIRECTION_REG);
- __raw_writel(1, GDROM_DMA_ENABLE_REG);
- read_command->cmd[2] = (block >> 16) & 0xFF;
- read_command->cmd[3] = (block >> 8) & 0xFF;
- read_command->cmd[4] = block & 0xFF;
- read_command->cmd[8] = (block_cnt >> 16) & 0xFF;
- read_command->cmd[9] = (block_cnt >> 8) & 0xFF;
- read_command->cmd[10] = block_cnt & 0xFF;
- /* set for DMA */
- __raw_writeb(1, GDROM_ERROR_REG);
- /* other registers */
- __raw_writeb(0, GDROM_SECNUM_REG);
- __raw_writeb(0, GDROM_BCL_REG);
- __raw_writeb(0, GDROM_BCH_REG);
- __raw_writeb(0, GDROM_DSEL_REG);
- __raw_writeb(0, GDROM_INTSEC_REG);
- /* Wait for registers to reset after any previous activity */
- timeout = jiffies + HZ / 2;
- while (gdrom_is_busy() && time_before(jiffies, timeout))
- cpu_relax();
- __raw_writeb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
- timeout = jiffies + HZ / 2;
- /* Wait for packet command to finish */
- while (gdrom_is_busy() && time_before(jiffies, timeout))
- cpu_relax();
- gd.pending = 1;
- gd.transfer = 1;
- outsw(GDROM_DATA_REG, &read_command->cmd, 6);
- timeout = jiffies + HZ / 2;
- /* Wait for any pending DMA to finish */
- while (__raw_readb(GDROM_DMA_STATUS_REG) &&
- time_before(jiffies, timeout))
- cpu_relax();
- /* start transfer */
- __raw_writeb(1, GDROM_DMA_STATUS_REG);
- wait_event_interruptible_timeout(request_queue,
- gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
- err = gd.transfer ? BLK_STS_IOERR : BLK_STS_OK;
- gd.transfer = 0;
- gd.pending = 0;
- /* now seek to take the request spinlock
- * before handling ending the request */
- spin_lock(&gdrom_lock);
- list_del_init(&req->queuelist);
- __blk_end_request_all(req, err);
- }
- spin_unlock(&gdrom_lock);
+ block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
+ block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
+ __raw_writel(virt_to_phys(bio_data(req->bio)), GDROM_DMA_STARTADDR_REG);
+ __raw_writel(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
+ __raw_writel(1, GDROM_DMA_DIRECTION_REG);
+ __raw_writel(1, GDROM_DMA_ENABLE_REG);
+ read_command->cmd[2] = (block >> 16) & 0xFF;
+ read_command->cmd[3] = (block >> 8) & 0xFF;
+ read_command->cmd[4] = block & 0xFF;
+ read_command->cmd[8] = (block_cnt >> 16) & 0xFF;
+ read_command->cmd[9] = (block_cnt >> 8) & 0xFF;
+ read_command->cmd[10] = block_cnt & 0xFF;
+ /* set for DMA */
+ __raw_writeb(1, GDROM_ERROR_REG);
+ /* other registers */
+ __raw_writeb(0, GDROM_SECNUM_REG);
+ __raw_writeb(0, GDROM_BCL_REG);
+ __raw_writeb(0, GDROM_BCH_REG);
+ __raw_writeb(0, GDROM_DSEL_REG);
+ __raw_writeb(0, GDROM_INTSEC_REG);
+ /* Wait for registers to reset after any previous activity */
+ timeout = jiffies + HZ / 2;
+ while (gdrom_is_busy() && time_before(jiffies, timeout))
+ cpu_relax();
+ __raw_writeb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
+ timeout = jiffies + HZ / 2;
+ /* Wait for packet command to finish */
+ while (gdrom_is_busy() && time_before(jiffies, timeout))
+ cpu_relax();
+ gd.pending = 1;
+ gd.transfer = 1;
+ outsw(GDROM_DATA_REG, &read_command->cmd, 6);
+ timeout = jiffies + HZ / 2;
+ /* Wait for any pending DMA to finish */
+ while (__raw_readb(GDROM_DMA_STATUS_REG) &&
+ time_before(jiffies, timeout))
+ cpu_relax();
+ /* start transfer */
+ __raw_writeb(1, GDROM_DMA_STATUS_REG);
+ wait_event_interruptible_timeout(request_queue,
+ gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
+ err = gd.transfer ? BLK_STS_IOERR : BLK_STS_OK;
+ gd.transfer = 0;
+ gd.pending = 0;
+
+ blk_mq_end_request(req, err);
kfree(read_command);
+ return BLK_STS_OK;
}
-static void gdrom_request(struct request_queue *rq)
-{
- struct request *req;
-
- while ((req = blk_fetch_request(rq)) != NULL) {
- switch (req_op(req)) {
- case REQ_OP_READ:
- /*
- * Add to list of deferred work and then schedule
- * workqueue.
- */
- list_add_tail(&req->queuelist, &gdrom_deferred);
- schedule_work(&work);
- break;
- case REQ_OP_WRITE:
- pr_notice("Read only device - write request ignored\n");
- __blk_end_request_all(req, BLK_STS_IOERR);
- break;
- default:
- printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
- __blk_end_request_all(req, BLK_STS_IOERR);
- break;
- }
+static blk_status_t gdrom_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ blk_mq_start_request(bd->rq);
+
+ switch (req_op(bd->rq)) {
+ case REQ_OP_READ:
+ return gdrom_readdisk_dma(bd->rq);
+ case REQ_OP_WRITE:
+ pr_notice("Read only device - write request ignored\n");
+ return BLK_STS_IOERR;
+ default:
+ printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
+ return BLK_STS_IOERR;
}
}
@@ -768,6 +743,10 @@ static int probe_gdrom_setupqueue(void)
return gdrom_init_dma_mode();
}
+static const struct blk_mq_ops gdrom_mq_ops = {
+ .queue_rq = gdrom_queue_rq,
+};
+
/*
* register this as a block device and as compliant with the
* universal CD Rom driver interface
@@ -811,11 +790,15 @@ static int probe_gdrom(struct platform_device *devptr)
err = gdrom_set_interrupt_handlers();
if (err)
goto probe_fail_cmdirq_register;
- gd.gdrom_rq = blk_init_queue(gdrom_request, &gdrom_lock);
- if (!gd.gdrom_rq) {
- err = -ENOMEM;
+
+ gd.gdrom_rq = blk_mq_init_sq_queue(&gd.tag_set, &gdrom_mq_ops, 1,
+ BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+ if (IS_ERR(gd.gdrom_rq)) {
+ err = PTR_ERR(gd.gdrom_rq);
+ gd.gdrom_rq = NULL;
goto probe_fail_requestq;
}
+
blk_queue_bounce_limit(gd.gdrom_rq, BLK_BOUNCE_HIGH);
err = probe_gdrom_setupqueue();
@@ -832,6 +815,7 @@ static int probe_gdrom(struct platform_device *devptr)
probe_fail_toc:
blk_cleanup_queue(gd.gdrom_rq);
+ blk_mq_free_tag_set(&gd.tag_set);
probe_fail_requestq:
free_irq(HW_EVENT_GDROM_DMA, &gd);
free_irq(HW_EVENT_GDROM_CMD, &gd);
@@ -849,8 +833,8 @@ probe_fail_no_mem:
static int remove_gdrom(struct platform_device *devptr)
{
- flush_work(&work);
blk_cleanup_queue(gd.gdrom_rq);
+ blk_mq_free_tag_set(&gd.tag_set);
free_irq(HW_EVENT_GDROM_CMD, &gd);
free_irq(HW_EVENT_GDROM_DMA, &gd);
del_gendisk(gd.disk);
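
gdrom is the one conversion here whose ->queue_rq sleeps (gdrom_readdisk_dma() waits on the hardware with wait_event_interruptible_timeout()), so its tag set carries BLK_MQ_F_BLOCKING, which makes blk-mq dispatch from a context where sleeping is legal. A minimal sketch, with a hypothetical completion standing in for the hardware wait:

	#include <linux/blk-mq.h>
	#include <linux/completion.h>

	static DECLARE_COMPLETION(mydev_hw_done);	/* hypothetical hw signal */

	static blk_status_t mydev_queue_rq(struct blk_mq_hw_ctx *hctx,
					   const struct blk_mq_queue_data *bd)
	{
		blk_mq_start_request(bd->rq);
		/* may sleep: legal only because the tag set sets BLK_MQ_F_BLOCKING */
		if (!wait_for_completion_timeout(&mydev_hw_done, HZ / 2))
			return BLK_STS_IOERR;
		blk_mq_end_request(bd->rq, BLK_STS_OK);
		return BLK_STS_OK;
	}

	/* q = blk_mq_init_sq_queue(&tag_set, &mydev_mq_ops, 1,
	 *			    BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING); */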
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index ce277ee0a28a..40728491f37b 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -566,5 +566,5 @@ config RANDOM_TRUST_CPU
that CPU manufacturer (perhaps with the insistence or mandate
of a Nation State's intelligence or law enforcement agencies)
has not installed a hidden back door to compromise the CPU's
- random number generation facilities.
-
+ random number generation facilities. This can also be configured
+ at boot with "random.trust_cpu=on/off".
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index a3397664f800..97d6856c9c0f 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -59,8 +59,6 @@ enum bt_states {
BT_STATE_RESET3,
BT_STATE_RESTART,
BT_STATE_PRINTME,
- BT_STATE_CAPABILITIES_BEGIN,
- BT_STATE_CAPABILITIES_END,
BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */
};
@@ -86,7 +84,6 @@ struct si_sm_data {
int error_retries; /* end of "common" fields */
int nonzero_status; /* hung BMCs stay all 0 */
enum bt_states complete; /* to divert the state machine */
- int BT_CAP_outreqs;
long BT_CAP_req2rsp;
int BT_CAP_retries; /* Recommended retries */
};
@@ -137,8 +134,6 @@ static char *state2txt(unsigned char state)
case BT_STATE_RESET3: return("RESET3");
case BT_STATE_RESTART: return("RESTART");
case BT_STATE_LONG_BUSY: return("LONG_BUSY");
- case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN");
- case BT_STATE_CAPABILITIES_END: return("CAP_END");
}
return("BAD STATE");
}
@@ -185,7 +180,6 @@ static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
bt->complete = BT_STATE_IDLE; /* end here */
bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC;
bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT;
- /* BT_CAP_outreqs == zero is a flag to read BT Capabilities */
return 3; /* We claim 3 bytes of space; ought to check SPMI table */
}
@@ -451,7 +445,7 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
{
- unsigned char status, BT_CAP[8];
+ unsigned char status;
static enum bt_states last_printed = BT_STATE_PRINTME;
int i;
@@ -504,12 +498,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
if (status & BT_H_BUSY) /* clear a leftover H_BUSY */
BT_CONTROL(BT_H_BUSY);
- bt->timeout = bt->BT_CAP_req2rsp;
-
- /* Read BT capabilities if it hasn't been done yet */
- if (!bt->BT_CAP_outreqs)
- BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
- SI_SM_CALL_WITHOUT_DELAY);
BT_SI_SM_RETURN(SI_SM_IDLE);
case BT_STATE_XACTION_START:
@@ -614,37 +602,6 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
BT_STATE_CHANGE(BT_STATE_XACTION_START,
SI_SM_CALL_WITH_DELAY);
- /*
- * Get BT Capabilities, using timing of upper level state machine.
- * Set outreqs to prevent infinite loop on timeout.
- */
- case BT_STATE_CAPABILITIES_BEGIN:
- bt->BT_CAP_outreqs = 1;
- {
- unsigned char GetBT_CAP[] = { 0x18, 0x36 };
- bt->state = BT_STATE_IDLE;
- bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
- }
- bt->complete = BT_STATE_CAPABILITIES_END;
- BT_STATE_CHANGE(BT_STATE_XACTION_START,
- SI_SM_CALL_WITH_DELAY);
-
- case BT_STATE_CAPABILITIES_END:
- i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
- bt_init_data(bt, bt->io);
- if ((i == 8) && !BT_CAP[2]) {
- bt->BT_CAP_outreqs = BT_CAP[3];
- bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
- bt->BT_CAP_retries = BT_CAP[7];
- } else
- printk(KERN_WARNING "IPMI BT: using default values\n");
- if (!bt->BT_CAP_outreqs)
- bt->BT_CAP_outreqs = 1;
- printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n",
- bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
- bt->timeout = bt->BT_CAP_req2rsp;
- return SI_SM_CALL_WITHOUT_DELAY;
-
default: /* should never occur */
return error_recovery(bt,
status,
@@ -655,6 +612,11 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
static int bt_detect(struct si_sm_data *bt)
{
+ unsigned char GetBT_CAP[] = { 0x18, 0x36 };
+ unsigned char BT_CAP[8];
+ enum si_sm_result smi_result;
+ int rv;
+
/*
* It's impossible for the BT status and interrupt registers to be
* all 1's, (assuming a properly functioning, self-initialized BMC)
@@ -665,6 +627,48 @@ static int bt_detect(struct si_sm_data *bt)
if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
return 1;
reset_flags(bt);
+
+ /*
+ * Try getting the BT capabilities here.
+ */
+ rv = bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
+ if (rv) {
+ dev_warn(bt->io->dev,
+ "Can't start capabilities transaction: %d\n", rv);
+ goto out_no_bt_cap;
+ }
+
+ smi_result = SI_SM_CALL_WITHOUT_DELAY;
+ for (;;) {
+ if (smi_result == SI_SM_CALL_WITH_DELAY ||
+ smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
+ schedule_timeout_uninterruptible(1);
+ smi_result = bt_event(bt, jiffies_to_usecs(1));
+ } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
+ smi_result = bt_event(bt, 0);
+ } else
+ break;
+ }
+
+ rv = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
+ bt_init_data(bt, bt->io);
+ if (rv < 8) {
+ dev_warn(bt->io->dev, "bt cap response too short: %d\n", rv);
+ goto out_no_bt_cap;
+ }
+
+ if (BT_CAP[2]) {
+ dev_warn(bt->io->dev, "Error fetching bt cap: %x\n", BT_CAP[2]);
+out_no_bt_cap:
+ dev_warn(bt->io->dev, "using default values\n");
+ } else {
+ bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
+ bt->BT_CAP_retries = BT_CAP[7];
+ }
+
+ dev_info(bt->io->dev, "req2rsp=%ld secs retries=%d\n",
+ bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
+
return 0;
}
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 51832b8a2c62..7fc9612070a1 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -3381,39 +3381,45 @@ int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
rv = handlers->start_processing(send_info, intf);
if (rv)
- goto out;
+ goto out_err;
rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
if (rv) {
dev_err(si_dev, "Unable to get the device id: %d\n", rv);
- goto out;
+ goto out_err_started;
}
mutex_lock(&intf->bmc_reg_mutex);
rv = __scan_channels(intf, &id);
mutex_unlock(&intf->bmc_reg_mutex);
+ if (rv)
+ goto out_err_bmc_reg;
- out:
- if (rv) {
- ipmi_bmc_unregister(intf);
- list_del_rcu(&intf->link);
- mutex_unlock(&ipmi_interfaces_mutex);
- synchronize_srcu(&ipmi_interfaces_srcu);
- cleanup_srcu_struct(&intf->users_srcu);
- kref_put(&intf->refcount, intf_free);
- } else {
- /*
- * Keep memory order straight for RCU readers. Make
- * sure everything else is committed to memory before
- * setting intf_num to mark the interface valid.
- */
- smp_wmb();
- intf->intf_num = i;
- mutex_unlock(&ipmi_interfaces_mutex);
+ /*
+ * Keep memory order straight for RCU readers. Make
+ * sure everything else is committed to memory before
+ * setting intf_num to mark the interface valid.
+ */
+ smp_wmb();
+ intf->intf_num = i;
+ mutex_unlock(&ipmi_interfaces_mutex);
- /* After this point the interface is legal to use. */
- call_smi_watchers(i, intf->si_dev);
- }
+ /* After this point the interface is legal to use. */
+ call_smi_watchers(i, intf->si_dev);
+
+ return 0;
+
+ out_err_bmc_reg:
+ ipmi_bmc_unregister(intf);
+ out_err_started:
+ if (intf->handlers->shutdown)
+ intf->handlers->shutdown(intf->send_info);
+ out_err:
+ list_del_rcu(&intf->link);
+ mutex_unlock(&ipmi_interfaces_mutex);
+ synchronize_srcu(&ipmi_interfaces_srcu);
+ cleanup_srcu_struct(&intf->users_srcu);
+ kref_put(&intf->refcount, intf_free);
return rv;
}
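
The barrier comment above describes one half of a publish/subscribe pairing: every interface field must be committed to memory before intf_num marks the interface valid. A reader needs the matching half; a minimal sketch, with the reader side hypothetical since it is not shown in this hunk:

	/* writer (as above) */
	intf->some_field = value;
	smp_wmb();			/* order the stores ... */
	intf->intf_num = i;		/* ... before the publish */

	/* hypothetical reader */
	num = READ_ONCE(intf->intf_num);
	smp_rmb();			/* pairs with the writer's smp_wmb() */
	use(intf->some_field);		/* guaranteed to see the store */
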
@@ -3504,7 +3510,8 @@ void ipmi_unregister_smi(struct ipmi_smi *intf)
}
srcu_read_unlock(&intf->users_srcu, index);
- intf->handlers->shutdown(intf->send_info);
+ if (intf->handlers->shutdown)
+ intf->handlers->shutdown(intf->send_info);
cleanup_smi_msgs(intf);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 90ec010bffbd..5faa917df1b6 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2083,18 +2083,9 @@ static int try_smi_init(struct smi_info *new_smi)
si_to_str[new_smi->io.si_type]);
WARN_ON(new_smi->io.dev->init_name != NULL);
- kfree(init_name);
-
- return 0;
-
-out_err:
- if (new_smi->intf) {
- ipmi_unregister_smi(new_smi->intf);
- new_smi->intf = NULL;
- }
+ out_err:
kfree(init_name);
-
return rv;
}
@@ -2227,6 +2218,8 @@ static void shutdown_smi(void *send_info)
kfree(smi_info->si_sm);
smi_info->si_sm = NULL;
+
+ smi_info->intf = NULL;
}
/*
@@ -2240,10 +2233,8 @@ static void cleanup_one_si(struct smi_info *smi_info)
list_del(&smi_info->link);
- if (smi_info->intf) {
+ if (smi_info->intf)
ipmi_unregister_smi(smi_info->intf);
- smi_info->intf = NULL;
- }
if (smi_info->pdev) {
if (smi_info->pdev_registered)
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 18e4650c233b..29e67a80fb20 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -181,6 +181,8 @@ struct ssif_addr_info {
struct device *dev;
struct i2c_client *client;
+ struct i2c_client *added_client;
+
struct mutex clients_mutex;
struct list_head clients;
@@ -1214,18 +1216,11 @@ static void shutdown_ssif(void *send_info)
complete(&ssif_info->wake_thread);
kthread_stop(ssif_info->thread);
}
-
- /*
- * No message can be outstanding now, we have removed the
- * upper layer and it permitted us to do so.
- */
- kfree(ssif_info);
}
static int ssif_remove(struct i2c_client *client)
{
struct ssif_info *ssif_info = i2c_get_clientdata(client);
- struct ipmi_smi *intf;
struct ssif_addr_info *addr_info;
if (!ssif_info)
@@ -1235,9 +1230,7 @@ static int ssif_remove(struct i2c_client *client)
 * After this point, we won't deliver anything asynchronously
 * to the message handler. We can unregister ourselves.
*/
- intf = ssif_info->intf;
- ssif_info->intf = NULL;
- ipmi_unregister_smi(intf);
+ ipmi_unregister_smi(ssif_info->intf);
list_for_each_entry(addr_info, &ssif_infos, link) {
if (addr_info->client == client) {
@@ -1246,6 +1239,8 @@ static int ssif_remove(struct i2c_client *client)
}
}
+ kfree(ssif_info);
+
return 0;
}
@@ -1648,15 +1643,9 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
out:
if (rv) {
- /*
- * Note that if addr_info->client is assigned, we
- * leave it. The i2c client hangs around even if we
- * return a failure here, and the failure here is not
- * propagated back to the i2c code. This seems to be
- * design intent, strange as it may be. But if we
- * don't leave it, ssif_platform_remove will not remove
- * the client like it should.
- */
+ if (addr_info)
+ addr_info->client = NULL;
+
dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv);
kfree(ssif_info);
}
@@ -1676,7 +1665,8 @@ static int ssif_adapter_handler(struct device *adev, void *opaque)
if (adev->type != &i2c_adapter_type)
return 0;
- i2c_new_device(to_i2c_adapter(adev), &addr_info->binfo);
+ addr_info->added_client = i2c_new_device(to_i2c_adapter(adev),
+ &addr_info->binfo);
if (!addr_info->adapter_name)
return 1; /* Only try the first I2C adapter by default. */
@@ -1849,7 +1839,7 @@ static int ssif_platform_remove(struct platform_device *dev)
return 0;
mutex_lock(&ssif_infos_mutex);
- i2c_unregister_device(addr_info->client);
+ i2c_unregister_device(addr_info->added_client);
list_del(&addr_info->link);
kfree(addr_info);
diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c
index bb882ab161fe..e6124bd548df 100644
--- a/drivers/char/ipmi/kcs_bmc.c
+++ b/drivers/char/ipmi/kcs_bmc.c
@@ -16,6 +16,8 @@
#include "kcs_bmc.h"
+#define DEVICE_NAME "ipmi-kcs"
+
#define KCS_MSG_BUFSIZ 1000
#define KCS_ZERO_DATA 0
@@ -429,8 +431,6 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
if (!kcs_bmc)
return NULL;
- dev_set_name(dev, "ipmi-kcs%u", channel);
-
spin_lock_init(&kcs_bmc->lock);
kcs_bmc->channel = channel;
@@ -444,7 +444,8 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
return NULL;
kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
- kcs_bmc->miscdev.name = dev_name(dev);
+ kcs_bmc->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%u",
+ DEVICE_NAME, channel);
kcs_bmc->miscdev.fops = &kcs_bmc_fops;
return kcs_bmc;
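
The previous code renamed the parent device with dev_set_name(), clobbering its bus-assigned name; the replacement formats a device-managed string for the misc device only. For illustration, channel 3 yields a miscdev named "ipmi-kcs3", and because devm_kasprintf() ties the allocation to dev, the string needs no explicit kfree():

	name = devm_kasprintf(dev, GFP_KERNEL, "%s%u", DEVICE_NAME, 3);
	/* name == "ipmi-kcs3"; freed automatically when dev is released */
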
diff --git a/drivers/char/random.c b/drivers/char/random.c
index bf5f99fc36f1..c75b6cdf0053 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -779,6 +779,13 @@ static struct crng_state **crng_node_pool __read_mostly;
static void invalidate_batched_entropy(void);
+static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
+static int __init parse_trust_cpu(char *arg)
+{
+ return kstrtobool(arg, &trust_cpu);
+}
+early_param("random.trust_cpu", parse_trust_cpu);
+
static void crng_initialize(struct crng_state *crng)
{
int i;
@@ -799,12 +806,10 @@ static void crng_initialize(struct crng_state *crng)
}
crng->state[i] ^= rv;
}
-#ifdef CONFIG_RANDOM_TRUST_CPU
- if (arch_init) {
+ if (trust_cpu && arch_init) {
crng_init = 2;
pr_notice("random: crng done (trusting CPU's manufacturer)\n");
}
-#endif
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}
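
With the early_param() hook in place, the Kconfig default can be overridden from the kernel command line, e.g.:

	random.trust_cpu=off

kstrtobool() accepts the usual y/n, 1/0 and on/off spellings, and early parameters are parsed early enough that the flag is settled by the time crng_initialize() consults it.
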
diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
index ffa5dac221e4..129ebd2588fd 100644
--- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
+++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
@@ -1434,8 +1434,16 @@ static void __init sun4i_ccu_init(struct device_node *node,
return;
}
- /* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN4I_PLL_AUDIO_REG);
+
+ /*
+ * Force VCO and PLL bias current to lowest setting. Higher
+ * settings interfere with sigma-delta modulation and result
+ * in audible noise and distortions when using SPDIF or I2S.
+ */
+ val &= ~GENMASK(25, 16);
+
+ /* Force the PLL-Audio-1x divider to 1 */
val &= ~GENMASK(29, 26);
writel(val | (1 << 26), reg + SUN4I_PLL_AUDIO_REG);
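
For reference, GENMASK(h, l) builds a mask with bits h..l set, so the two clears above expand to:

	val &= ~GENMASK(25, 16);	/* ~0x03ff0000: VCO/PLL bias-current fields */
	val &= ~GENMASK(29, 26);	/* ~0x3c000000: PLL-Audio post-divider field */
	writel(val | (1 << 26), reg + SUN4I_PLL_AUDIO_REG);	/* divider = 1, i.e. 1x */
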
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
index 08ef69945ffb..d977193842df 100644
--- a/drivers/clk/x86/clk-pmc-atom.c
+++ b/drivers/clk/x86/clk-pmc-atom.c
@@ -55,6 +55,7 @@ struct clk_plt_data {
u8 nparents;
struct clk_plt *clks[PMC_CLK_NUM];
struct clk_lookup *mclk_lookup;
+ struct clk_lookup *ether_clk_lookup;
};
/* Return an index in parent table */
@@ -186,13 +187,6 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
spin_lock_init(&pclk->lock);
- /*
- * If the clock was already enabled by the firmware mark it as critical
- * to avoid it being gated by the clock framework if no driver owns it.
- */
- if (plt_clk_is_enabled(&pclk->hw))
- init.flags |= CLK_IS_CRITICAL;
-
ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
if (ret) {
pclk = ERR_PTR(ret);
@@ -351,11 +345,20 @@ static int plt_clk_probe(struct platform_device *pdev)
goto err_unreg_clk_plt;
}
+ data->ether_clk_lookup = clkdev_hw_create(&data->clks[4]->hw,
+ "ether_clk", NULL);
+ if (!data->ether_clk_lookup) {
+ err = -ENOMEM;
+ goto err_drop_mclk;
+ }
+
plt_clk_free_parent_names_loop(parent_names, data->nparents);
platform_set_drvdata(pdev, data);
return 0;
+err_drop_mclk:
+ clkdev_drop(data->mclk_lookup);
err_unreg_clk_plt:
plt_clk_unregister_loop(data, i);
plt_clk_unregister_parents(data);
@@ -369,6 +372,7 @@ static int plt_clk_remove(struct platform_device *pdev)
data = platform_get_drvdata(pdev);
+ clkdev_drop(data->ether_clk_lookup);
clkdev_drop(data->mclk_lookup);
plt_clk_unregister_loop(data, PMC_CLK_NUM);
plt_clk_unregister_parents(data);
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index d8c7f5750cdb..9a7d4dc00b6e 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -319,6 +319,13 @@ static u64 notrace arm64_858921_read_cntvct_el0(void)
}
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1188873
+static u64 notrace arm64_1188873_read_cntvct_el0(void)
+{
+ return read_sysreg(cntvct_el0);
+}
+#endif
+
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
@@ -408,6 +415,14 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.read_cntvct_el0 = arm64_858921_read_cntvct_el0,
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1188873
+ {
+ .match_type = ate_match_local_cap_id,
+ .id = (void *)ARM64_WORKAROUND_1188873,
+ .desc = "ARM erratum 1188873",
+ .read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
+ },
+#endif
};
typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
index ec8a4376f74f..2fab18fae4fc 100644
--- a/drivers/clocksource/timer-atmel-pit.c
+++ b/drivers/clocksource/timer-atmel-pit.c
@@ -180,26 +180,29 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
data->base = of_iomap(node, 0);
if (!data->base) {
pr_err("Could not map PIT address\n");
- return -ENXIO;
+ ret = -ENXIO;
+ goto exit;
}
data->mck = of_clk_get(node, 0);
if (IS_ERR(data->mck)) {
pr_err("Unable to get mck clk\n");
- return PTR_ERR(data->mck);
+ ret = PTR_ERR(data->mck);
+ goto exit;
}
ret = clk_prepare_enable(data->mck);
if (ret) {
pr_err("Unable to enable mck\n");
- return ret;
+ goto exit;
}
/* Get the interrupts property */
data->irq = irq_of_parse_and_map(node, 0);
if (!data->irq) {
pr_err("Unable to get IRQ from DT\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit;
}
/*
@@ -227,7 +230,7 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
ret = clocksource_register_hz(&data->clksrc, pit_rate);
if (ret) {
pr_err("Failed to register clocksource\n");
- return ret;
+ goto exit;
}
/* Set up irq handler */
@@ -236,7 +239,8 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
"at91_tick", data);
if (ret) {
pr_err("Unable to setup IRQ\n");
- return ret;
+ clocksource_unregister(&data->clksrc);
+ goto exit;
}
/* Set up and register clockevents */
@@ -254,6 +258,10 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
clockevents_register_device(&data->clkevt);
return 0;
+
+exit:
+ kfree(data);
+ return ret;
}
TIMER_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
at91sam926x_pit_dt_init);
diff --git a/drivers/clocksource/timer-fttmr010.c b/drivers/clocksource/timer-fttmr010.c
index c020038ebfab..cf93f6419b51 100644
--- a/drivers/clocksource/timer-fttmr010.c
+++ b/drivers/clocksource/timer-fttmr010.c
@@ -130,13 +130,17 @@ static int fttmr010_timer_set_next_event(unsigned long cycles,
cr &= ~fttmr010->t1_enable_val;
writel(cr, fttmr010->base + TIMER_CR);
- /* Setup the match register forward/backward in time */
- cr = readl(fttmr010->base + TIMER1_COUNT);
- if (fttmr010->count_down)
- cr -= cycles;
- else
- cr += cycles;
- writel(cr, fttmr010->base + TIMER1_MATCH1);
+ if (fttmr010->count_down) {
+ /*
+ * ASPEED Timer Controller will load TIMER1_LOAD register
+ * into TIMER1_COUNT register when the timer is re-enabled.
+ */
+ writel(cycles, fttmr010->base + TIMER1_LOAD);
+ } else {
+ /* Setup the match register forward in time */
+ cr = readl(fttmr010->base + TIMER1_COUNT);
+ writel(cr + cycles, fttmr010->base + TIMER1_MATCH1);
+ }
/* Start */
cr = readl(fttmr010->base + TIMER_CR);
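
The split above reflects the two hardware models: in count-down mode the ASPEED controller reloads TIMER1_COUNT from TIMER1_LOAD when the timer is re-enabled, so writing the cycle count to the load register is sufficient, while in count-up mode the match value is simply the current count plus cycles. Unsigned wrap-around keeps the count-up case correct even near overflow; a worked example with hypothetical values:

	u32 now = 0xfffffff0, cycles = 0x20;
	u32 match = now + cycles;	/* wraps to 0x10, reached after 0x20 ticks */
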
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index 29e2e1a78a43..6949a9113dbb 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -97,6 +97,9 @@ static int __init ti_32k_timer_init(struct device_node *np)
return -ENXIO;
}
+ if (!of_machine_is_compatible("ti,am43"))
+ ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
+
ti_32k_timer.counter = ti_32k_timer.base;
/*
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
index a1830fa25fc5..2a3675c24032 100644
--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
+++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
@@ -44,7 +44,7 @@ enum _msm8996_version {
struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
-static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
+static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
{
size_t len;
u32 *msm_id;
@@ -222,7 +222,7 @@ static int __init qcom_cpufreq_kryo_init(void)
}
module_init(qcom_cpufreq_kryo_init);
-static void __init qcom_cpufreq_kryo_exit(void)
+static void __exit qcom_cpufreq_kryo_exit(void)
{
platform_device_unregister(kryo_cpufreq_pdev);
platform_driver_unregister(&qcom_cpufreq_kryo_driver);
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index d67667970f7e..ec40f991e6c6 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1553,8 +1553,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
- desc_bytes;
+ edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+ desc_bytes);
edesc->iv_dir = DMA_TO_DEVICE;
/* Make sure IV is located in a DMAable area */
@@ -1757,8 +1757,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
- desc_bytes;
+ edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
+ desc_bytes);
edesc->iv_dir = DMA_FROM_DEVICE;
/* Make sure IV is located in a DMAable area */
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 218739b961fe..72790d88236d 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -38,6 +38,17 @@ static DEFINE_MUTEX(sev_cmd_mutex);
static struct sev_misc_dev *misc_dev;
static struct psp_device *psp_master;
+static int psp_cmd_timeout = 100;
+module_param(psp_cmd_timeout, int, 0644);
+MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");
+
+static int psp_probe_timeout = 5;
+module_param(psp_probe_timeout, int, 0644);
+MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
+
+static bool psp_dead;
+static int psp_timeout;
+
static struct psp_device *psp_alloc_struct(struct sp_device *sp)
{
struct device *dev = sp->dev;
@@ -82,10 +93,19 @@ done:
return IRQ_HANDLED;
}
-static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg)
+static int sev_wait_cmd_ioc(struct psp_device *psp,
+ unsigned int *reg, unsigned int timeout)
{
- wait_event(psp->sev_int_queue, psp->sev_int_rcvd);
+ int ret;
+
+ ret = wait_event_timeout(psp->sev_int_queue,
+ psp->sev_int_rcvd, timeout * HZ);
+ if (!ret)
+ return -ETIMEDOUT;
+
*reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
+
+ return 0;
}
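
wait_event_timeout() returns the remaining jiffies (at least 1) when the condition became true, and 0 only when the timeout elapsed with the condition still false, so mapping a zero return to -ETIMEDOUT as above is the idiomatic translation. Callers pass seconds and the helper scales by HZ; a condensed sketch:

	long left = wait_event_timeout(wq, cond, secs * HZ);
	if (!left)
		return -ETIMEDOUT;	/* cond still false after secs seconds */
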
static int sev_cmd_buffer_len(int cmd)
@@ -133,12 +153,15 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
if (!psp)
return -ENODEV;
+ if (psp_dead)
+ return -EBUSY;
+
/* Get the physical address of the command buffer */
phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
- dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x\n",
- cmd, phys_msb, phys_lsb);
+ dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
+ cmd, phys_msb, phys_lsb, psp_timeout);
print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
sev_cmd_buffer_len(cmd), false);
@@ -154,7 +177,18 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg);
/* wait for command completion */
- sev_wait_cmd_ioc(psp, &reg);
+ ret = sev_wait_cmd_ioc(psp, &reg, psp_timeout);
+ if (ret) {
+ if (psp_ret)
+ *psp_ret = 0;
+
+ dev_err(psp->dev, "sev command %#x timed out, disabling PSP\n", cmd);
+ psp_dead = true;
+
+ return ret;
+ }
+
+ psp_timeout = psp_cmd_timeout;
if (psp_ret)
*psp_ret = reg & PSP_CMDRESP_ERR_MASK;
@@ -888,6 +922,8 @@ void psp_pci_init(void)
psp_master = sp->psp_data;
+ psp_timeout = psp_probe_timeout;
+
if (sev_get_api_version())
goto err;
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 5c539af8ed60..010bbf607797 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -367,7 +367,8 @@ static inline void dsgl_walk_init(struct dsgl_walk *walk,
walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}
-static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
+static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
+ int pci_chan_id)
{
struct cpl_rx_phys_dsgl *phys_cpl;
@@ -385,6 +386,7 @@ static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
phys_cpl->rss_hdr_int.qid = htons(qid);
phys_cpl->rss_hdr_int.hash_val = 0;
+ phys_cpl->rss_hdr_int.channel = pci_chan_id;
}
static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
@@ -718,7 +720,7 @@ static inline void create_wreq(struct chcr_context *ctx,
FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
!!lcb, ctx->tx_qidx);
- chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
+ chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
qid);
chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
((sizeof(chcr_req->wreq)) >> 4)));
@@ -1339,16 +1341,23 @@ static int chcr_device_init(struct chcr_context *ctx)
adap->vres.ncrypto_fc);
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
txq_perchan = ntxq / u_ctx->lldi.nchan;
- rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
- rxq_idx += id % rxq_perchan;
- txq_idx = ctx->dev->tx_channel_id * txq_perchan;
- txq_idx += id % txq_perchan;
spin_lock(&ctx->dev->lock_chcr_dev);
- ctx->rx_qidx = rxq_idx;
- ctx->tx_qidx = txq_idx;
+ ctx->tx_chan_id = ctx->dev->tx_channel_id;
ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
ctx->dev->rx_channel_id = 0;
spin_unlock(&ctx->dev->lock_chcr_dev);
+ rxq_idx = ctx->tx_chan_id * rxq_perchan;
+ rxq_idx += id % rxq_perchan;
+ txq_idx = ctx->tx_chan_id * txq_perchan;
+ txq_idx += id % txq_perchan;
+ ctx->rx_qidx = rxq_idx;
+ ctx->tx_qidx = txq_idx;
+ /* Channel Id used by SGE to forward packet to Host.
+ * Same value should be used in cpl_fw6_pld RSS_CH field
+ * by FW. Driver programs PCI channel ID to be used in fw
+ * at the time of queue allocation with value "pi->tx_chan"
+ */
+ ctx->pci_chan_id = txq_idx / txq_perchan;
}
out:
return err;
@@ -2503,6 +2512,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct dsgl_walk dsgl_walk;
unsigned int authsize = crypto_aead_authsize(tfm);
+ struct chcr_context *ctx = a_ctx(tfm);
u32 temp;
dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2512,7 +2522,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
- dsgl_walk_end(&dsgl_walk, qid);
+ dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}
void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
@@ -2544,6 +2554,8 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
unsigned short qid)
{
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
+ struct chcr_context *ctx = c_ctx(tfm);
struct dsgl_walk dsgl_walk;
dsgl_walk_init(&dsgl_walk, phys_cpl);
@@ -2552,7 +2564,7 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
reqctx->dstsg = dsgl_walk.last_sg;
reqctx->dst_ofst = dsgl_walk.last_sg_len;
- dsgl_walk_end(&dsgl_walk, qid);
+ dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}
void chcr_add_hash_src_ent(struct ahash_request *req,
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 54835cb109e5..0d2c70c344f3 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -255,6 +255,8 @@ struct chcr_context {
struct chcr_dev *dev;
unsigned char tx_qidx;
unsigned char rx_qidx;
+ unsigned char tx_chan_id;
+ unsigned char pci_chan_id;
struct __crypto_ctx crypto_ctx[0];
};
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index 7e71043457a6..86c699c14f84 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -1044,7 +1044,8 @@ static int safexcel_probe(struct platform_device *pdev)
safexcel_configure(priv);
- priv->ring = devm_kzalloc(dev, priv->config.rings * sizeof(*priv->ring),
+ priv->ring = devm_kcalloc(dev, priv->config.rings,
+ sizeof(*priv->ring),
GFP_KERNEL);
if (!priv->ring) {
ret = -ENOMEM;
@@ -1063,8 +1064,9 @@ static int safexcel_probe(struct platform_device *pdev)
if (ret)
goto err_reg_clk;
- priv->ring[i].rdr_req = devm_kzalloc(dev,
- sizeof(priv->ring[i].rdr_req) * EIP197_DEFAULT_RING_SIZE,
+ priv->ring[i].rdr_req = devm_kcalloc(dev,
+ EIP197_DEFAULT_RING_SIZE,
+ sizeof(priv->ring[i].rdr_req),
GFP_KERNEL);
if (!priv->ring[i].rdr_req) {
ret = -ENOMEM;
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index a10c418d4e5c..56bd28174f52 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -63,7 +63,7 @@ struct dcp {
struct dcp_coherent_block *coh;
struct completion completion[DCP_MAX_CHANS];
- struct mutex mutex[DCP_MAX_CHANS];
+ spinlock_t lock[DCP_MAX_CHANS];
struct task_struct *thread[DCP_MAX_CHANS];
struct crypto_queue queue[DCP_MAX_CHANS];
};
@@ -349,13 +349,20 @@ static int dcp_chan_thread_aes(void *data)
int ret;
- do {
- __set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
- mutex_lock(&sdcp->mutex[chan]);
+ spin_lock(&sdcp->lock[chan]);
backlog = crypto_get_backlog(&sdcp->queue[chan]);
arq = crypto_dequeue_request(&sdcp->queue[chan]);
- mutex_unlock(&sdcp->mutex[chan]);
+ spin_unlock(&sdcp->lock[chan]);
+
+ if (!backlog && !arq) {
+ schedule();
+ continue;
+ }
+
+ set_current_state(TASK_RUNNING);
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -363,11 +370,8 @@ static int dcp_chan_thread_aes(void *data)
if (arq) {
ret = mxs_dcp_aes_block_crypt(arq);
arq->complete(arq, ret);
- continue;
}
-
- schedule();
- } while (!kthread_should_stop());
+ }
return 0;
}
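
The restructured loop follows the canonical kthread sleep pattern: set TASK_INTERRUPTIBLE first, then test for work, and only then call schedule(). If wake_up_process() (issued by the enqueue paths) lands between the state change and schedule(), it flips the task back to TASK_RUNNING and schedule() returns immediately, so no wakeup can be lost. A distilled sketch with a hypothetical work predicate:

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!work_available()) {	/* hypothetical */
			schedule();		/* sleeps unless already re-woken */
			continue;
		}
		set_current_state(TASK_RUNNING);
		process_one_item();		/* hypothetical */
	}
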
@@ -409,9 +413,9 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
rctx->ecb = ecb;
actx->chan = DCP_CHAN_CRYPTO;
- mutex_lock(&sdcp->mutex[actx->chan]);
+ spin_lock(&sdcp->lock[actx->chan]);
ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
- mutex_unlock(&sdcp->mutex[actx->chan]);
+ spin_unlock(&sdcp->lock[actx->chan]);
wake_up_process(sdcp->thread[actx->chan]);
@@ -640,13 +644,20 @@ static int dcp_chan_thread_sha(void *data)
struct ahash_request *req;
int ret, fini;
- do {
- __set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
- mutex_lock(&sdcp->mutex[chan]);
+ spin_lock(&sdcp->lock[chan]);
backlog = crypto_get_backlog(&sdcp->queue[chan]);
arq = crypto_dequeue_request(&sdcp->queue[chan]);
- mutex_unlock(&sdcp->mutex[chan]);
+ spin_unlock(&sdcp->lock[chan]);
+
+ if (!backlog && !arq) {
+ schedule();
+ continue;
+ }
+
+ set_current_state(TASK_RUNNING);
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -658,12 +669,8 @@ static int dcp_chan_thread_sha(void *data)
ret = dcp_sha_req_to_buf(arq);
fini = rctx->fini;
arq->complete(arq, ret);
- if (!fini)
- continue;
}
-
- schedule();
- } while (!kthread_should_stop());
+ }
return 0;
}
@@ -721,9 +728,9 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
rctx->init = 1;
}
- mutex_lock(&sdcp->mutex[actx->chan]);
+ spin_lock(&sdcp->lock[actx->chan]);
ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
- mutex_unlock(&sdcp->mutex[actx->chan]);
+ spin_unlock(&sdcp->lock[actx->chan]);
wake_up_process(sdcp->thread[actx->chan]);
mutex_unlock(&actx->mutex);
@@ -997,7 +1004,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sdcp);
for (i = 0; i < DCP_MAX_CHANS; i++) {
- mutex_init(&sdcp->mutex[i]);
+ spin_lock_init(&sdcp->lock[i]);
init_completion(&sdcp->completion[i]);
crypto_init_queue(&sdcp->queue[i], 50);
}
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index ba197f34c252..763c2166ee0e 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- int ret, bar_mask;
+ unsigned long bar_mask;
+ int ret;
switch (ent->device) {
case ADF_C3XXX_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Find and map all the device's BARS */
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
- ADF_PCI_MAX_BARS * 2) {
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
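
The type change matters because pci_select_bars() returns an int bitmask, while for_each_set_bit() dereferences its argument as an array of unsigned long. Casting &int to unsigned long * reads past the 4-byte object on 64-bit targets (and misreads it entirely on big-endian); copying into a real unsigned long is the well-defined form:

	unsigned int n;
	int bad = pci_select_bars(pdev, IORESOURCE_MEM);
	for_each_set_bit(n, (const unsigned long *)&bad,
			 ADF_PCI_MAX_BARS * 2)		/* undefined on 64-bit */
		...;

	unsigned long good = pci_select_bars(pdev, IORESOURCE_MEM);
	for_each_set_bit(n, &good, ADF_PCI_MAX_BARS * 2)	/* well-defined */
		...;
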
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 24ec908eb26c..613c7d5644ce 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- int ret, bar_mask;
+ unsigned long bar_mask;
+ int ret;
switch (ent->device) {
case ADF_C3XXXIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Find and map all the device's BARS */
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
- ADF_PCI_MAX_BARS * 2) {
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index 59a5a0df50b6..9cb832963357 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- int ret, bar_mask;
+ unsigned long bar_mask;
+ int ret;
switch (ent->device) {
case ADF_C62X_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Find and map all the device's BARS */
i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
- ADF_PCI_MAX_BARS * 2) {
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index b9f3e0e4fde9..278452b8ef81 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- int ret, bar_mask;
+ unsigned long bar_mask;
+ int ret;
switch (ent->device) {
case ADF_C62XIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Find and map all the device's BARS */
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
- ADF_PCI_MAX_BARS * 2) {
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index be5c5a988ca5..3a9708ef4ce2 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- int ret, bar_mask;
+ unsigned long bar_mask;
+ int ret;
switch (ent->device) {
case ADF_DH895XCC_PCI_DEVICE_ID:
@@ -237,8 +238,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Find and map all the device's BARS */
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
- ADF_PCI_MAX_BARS * 2) {
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index 26ab17bfc6da..3da0f951cb59 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- int ret, bar_mask;
+ unsigned long bar_mask;
+ int ret;
switch (ent->device) {
case ADF_DH895XCCIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Find and map all the device's BARS */
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
- ADF_PCI_MAX_BARS * 2) {
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index bbe4d72ca105..948806e57cee 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -535,6 +535,11 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
}
+static const struct address_space_operations dev_dax_aops = {
+ .set_page_dirty = noop_set_page_dirty,
+ .invalidatepage = noop_invalidatepage,
+};
+
static int dax_open(struct inode *inode, struct file *filp)
{
struct dax_device *dax_dev = inode_dax(inode);
@@ -544,6 +549,7 @@ static int dax_open(struct inode *inode, struct file *filp)
dev_dbg(&dev_dax->dev, "trace\n");
inode->i_mapping = __dax_inode->i_mapping;
inode->i_mapping->host = __dax_inode;
+ inode->i_mapping->a_ops = &dev_dax_aops;
filp->f_mapping = inode->i_mapping;
filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
filp->private_data = dev_dax;
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index b76cb17d879c..adfd316db1a8 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -639,7 +639,7 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
int ret;
struct device *dev = &mbdev->dev;
- mic_dma_dev = kzalloc(sizeof(*mic_dma_dev), GFP_KERNEL);
+ mic_dma_dev = devm_kzalloc(dev, sizeof(*mic_dma_dev), GFP_KERNEL);
if (!mic_dma_dev) {
ret = -ENOMEM;
goto alloc_error;
@@ -664,7 +664,6 @@ static struct mic_dma_device *mic_dma_dev_reg(struct mbus_device *mbdev,
reg_error:
mic_dma_uninit(mic_dma_dev);
init_error:
- kfree(mic_dma_dev);
mic_dma_dev = NULL;
alloc_error:
dev_err(dev, "Error at %s %d ret=%d\n", __func__, __LINE__, ret);
@@ -674,7 +673,6 @@ alloc_error:
static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
{
mic_dma_uninit(mic_dma_dev);
- kfree(mic_dma_dev);
}
/* DEBUGFS CODE */
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 721e6c57beae..64342944d917 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -166,7 +166,13 @@ scmi_perf_domain_attributes_get(const struct scmi_handle *handle, u32 domain,
le32_to_cpu(attr->sustained_freq_khz);
dom_info->sustained_perf_level =
le32_to_cpu(attr->sustained_perf_level);
- dom_info->mult_factor = (dom_info->sustained_freq_khz * 1000) /
+ if (!dom_info->sustained_freq_khz ||
+ !dom_info->sustained_perf_level)
+ /* CPUFreq converts to kHz, hence default 1000 */
+ dom_info->mult_factor = 1000;
+ else
+ dom_info->mult_factor =
+ (dom_info->sustained_freq_khz * 1000) /
dom_info->sustained_perf_level;
memcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
}
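
A worked example with hypothetical firmware values: sustained_freq_khz = 2000000 (2 GHz) at sustained_perf_level = 1000 gives mult_factor = 2000000 * 1000 / 1000 = 2000000, so a requested level of 500 scales to 500 * 2000000 = 1 GHz. When firmware reports either field as zero, the fallback of 1000 preserves the level-to-kHz conversion that CPUFreq expects instead of dividing by zero.
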
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index d8e159feb573..89110dfc7127 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -90,14 +90,17 @@ config EFI_ARMSTUB
config EFI_ARMSTUB_DTB_LOADER
bool "Enable the DTB loader"
depends on EFI_ARMSTUB
+ default y
help
Select this config option to add support for the dtb= command
line parameter, allowing a device tree blob to be loaded into
memory from the EFI System Partition by the stub.
- The device tree is typically provided by the platform or by
- the bootloader, so this option is mostly for development
- purposes only.
+ If the device tree is provided by the platform or by
+ the bootloader, this option may not be needed. However,
+ it is required for development and to preserve existing
+ functionality with bootloaders that lack such support.
config EFI_BOOTLOADER_CONTROL
tristate "EFI Bootloader Control"
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
index fc9fd2d0482f..0b840531ef33 100644
--- a/drivers/fpga/dfl-fme-pr.c
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -420,7 +420,7 @@ static int pr_mgmt_init(struct platform_device *pdev,
/* Create region for each port */
fme_region = dfl_fme_create_region(pdata, mgr,
fme_br->br, i);
- if (!fme_region) {
+ if (IS_ERR(fme_region)) {
ret = PTR_ERR(fme_region);
goto destroy_region;
}
diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c
index 0b7e19c27c6d..51a5ac2293a7 100644
--- a/drivers/fpga/dfl-fme-region.c
+++ b/drivers/fpga/dfl-fme-region.c
@@ -14,6 +14,7 @@
*/
#include <linux/module.h>
+#include <linux/fpga/fpga-mgr.h>
#include <linux/fpga/fpga-region.h>
#include "dfl-fme-pr.h"
@@ -66,9 +67,10 @@ eprobe_mgr_put:
static int fme_region_remove(struct platform_device *pdev)
{
struct fpga_region *region = dev_get_drvdata(&pdev->dev);
+ struct fpga_manager *mgr = region->mgr;
fpga_region_unregister(region);
- fpga_mgr_put(region->mgr);
+ fpga_mgr_put(mgr);
return 0;
}
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 24b8f98b73ec..c983dac97501 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -125,7 +125,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
*
* Given a device, get an exclusive reference to a fpga bridge.
*
- * Return: fpga manager struct or IS_ERR() condition containing error code.
+ * Return: fpga bridge struct or IS_ERR() condition containing error code.
*/
struct fpga_bridge *fpga_bridge_get(struct device *dev,
struct fpga_image_info *info)
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c
index 35fabb8083fb..052a1342ab7e 100644
--- a/drivers/fpga/of-fpga-region.c
+++ b/drivers/fpga/of-fpga-region.c
@@ -437,9 +437,10 @@ eprobe_mgr_put:
static int of_fpga_region_remove(struct platform_device *pdev)
{
struct fpga_region *region = platform_get_drvdata(pdev);
+ struct fpga_manager *mgr = region->mgr;
fpga_region_unregister(region);
- fpga_mgr_put(region->mgr);
+ fpga_mgr_put(mgr);
return 0;
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index e8f8a1999393..25187403e3ac 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -571,7 +571,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
if (ret)
goto out_free_descs;
lh->descs[i] = desc;
- count = i;
+ count = i + 1;
if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
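
The off-by-one fix above matters for the error path: once descriptor i has been successfully acquired, count must be i + 1 so the cleanup loop releases descriptor i as well. The general shape, sketched with hypothetical helpers:

	for (i = 0; i < n; i++) {
		ret = acquire(&res[i]);		/* hypothetical */
		if (ret)
			goto out_free;		/* res[i] was NOT acquired */
		count = i + 1;			/* res[0..i] are now held */
	}
	return 0;
out_free:
	while (count--)
		release(&res[count]);		/* hypothetical */
	return ret;
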
@@ -1682,7 +1682,8 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gpiochip,
irq_set_chained_handler_and_data(parent_irq, parent_handler,
gpiochip);
- gpiochip->irq.parents = &parent_irq;
+ gpiochip->irq.parent_irq = parent_irq;
+ gpiochip->irq.parents = &gpiochip->irq.parent_irq;
gpiochip->irq.num_parents = 1;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index f8bbbb3a9504..0c791e35acf0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -272,7 +272,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr)
+ void **cpu_ptr, bool mqd_gfx9)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_bo *bo = NULL;
@@ -287,6 +287,10 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;
+
+ if (mqd_gfx9)
+ bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;
+
r = amdgpu_bo_create(adev, &bp, &bo);
if (r) {
dev_err(adev->dev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 2f379c183ed2..cc9aeab5468c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -136,7 +136,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
/* Shared API */
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr);
+ void **cpu_ptr, bool mqd_gfx9);
void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
void get_local_mem_info(struct kgd_dev *kgd,
struct kfd_local_mem_info *mem_info);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index ea3f698aef5e..9803b91f3e77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -685,7 +685,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
while (true) {
temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
- if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
+ if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
if (time_after(jiffies, end_jiffies))
return -ETIME;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 693ec5ea4950..8816c697b205 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -367,12 +367,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
break;
case CHIP_POLARIS10:
if (type == CGS_UCODE_ID_SMU) {
- if ((adev->pdev->device == 0x67df) &&
- ((adev->pdev->revision == 0xe0) ||
- (adev->pdev->revision == 0xe3) ||
- (adev->pdev->revision == 0xe4) ||
- (adev->pdev->revision == 0xe5) ||
- (adev->pdev->revision == 0xe7) ||
+ if (((adev->pdev->device == 0x67df) &&
+ ((adev->pdev->revision == 0xe0) ||
+ (adev->pdev->revision == 0xe3) ||
+ (adev->pdev->revision == 0xe4) ||
+ (adev->pdev->revision == 0xe5) ||
+ (adev->pdev->revision == 0xe7) ||
+ (adev->pdev->revision == 0xef))) ||
+ ((adev->pdev->device == 0x6fdf) &&
(adev->pdev->revision == 0xef))) {
info->is_kicker = true;
strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index b6e9df11115d..b31d121a876b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -39,6 +39,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
{
struct drm_gem_object *gobj;
unsigned long size;
+ int r;
gobj = drm_gem_object_lookup(p->filp, data->handle);
if (gobj == NULL)
@@ -50,20 +51,26 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
p->uf_entry.tv.shared = true;
p->uf_entry.user_pages = NULL;
- size = amdgpu_bo_size(p->uf_entry.robj);
- if (size != PAGE_SIZE || (data->offset + 8) > size)
- return -EINVAL;
-
- *offset = data->offset;
-
drm_gem_object_put_unlocked(gobj);
+ size = amdgpu_bo_size(p->uf_entry.robj);
+ if (size != PAGE_SIZE || (data->offset + 8) > size) {
+ r = -EINVAL;
+ goto error_unref;
+ }
+
if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
- amdgpu_bo_unref(&p->uf_entry.robj);
- return -EINVAL;
+ r = -EINVAL;
+ goto error_unref;
}
+ *offset = data->offset;
+
return 0;
+
+error_unref:
+ amdgpu_bo_unref(&p->uf_entry.robj);
+ return r;
}
static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
@@ -1262,10 +1269,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
error_abort:
dma_fence_put(&job->base.s_fence->finished);
job->base.s_fence = NULL;
+ amdgpu_mn_unlock(p->mn);
error_unlock:
amdgpu_job_free(job);
- amdgpu_mn_unlock(p->mn);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 8ab5ccbc14ac..39bf2ce548c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2063,6 +2063,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
static enum amd_ip_block_type ip_order[] = {
AMD_IP_BLOCK_TYPE_GMC,
AMD_IP_BLOCK_TYPE_COMMON,
+ AMD_IP_BLOCK_TYPE_PSP,
AMD_IP_BLOCK_TYPE_IH,
};
@@ -2093,7 +2094,6 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
static enum amd_ip_block_type ip_order[] = {
AMD_IP_BLOCK_TYPE_SMC,
- AMD_IP_BLOCK_TYPE_PSP,
AMD_IP_BLOCK_TYPE_DCE,
AMD_IP_BLOCK_TYPE_GFX,
AMD_IP_BLOCK_TYPE_SDMA,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8843a06360fa..0f41d8647376 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -740,6 +740,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x6FDF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
/* Polaris12 */
{0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 0cc5190f4f36..5f3f54073818 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -258,6 +258,8 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
int i;
+ cancel_delayed_work_sync(&adev->vce.idle_work);
+
if (adev->vce.vcpu_bo == NULL)
return 0;
@@ -268,7 +270,6 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
if (i == AMDGPU_MAX_VCE_HANDLES)
return 0;
- cancel_delayed_work_sync(&adev->vce.idle_work);
/* TODO: suspending running encoding sessions isn't supported */
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index fd654a4406db..400fc74bbae2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -153,11 +153,11 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
unsigned size;
void *ptr;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
if (adev->vcn.vcpu_bo == NULL)
return 0;
- cancel_delayed_work_sync(&adev->vcn.idle_work);
-
size = amdgpu_bo_size(adev->vcn.vcpu_bo);
ptr = adev->vcn.cpu_addr;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index e7ca4623cfb9..7c3b634d8d5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -70,6 +70,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
@@ -81,7 +82,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_IB_CNTL, 0x800f0100, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0)
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xfc000000, 0x00000000)
};
static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
@@ -109,7 +111,8 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] =
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
- SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0)
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
};
static const struct soc15_reg_golden golden_settings_sdma_4_2[] =
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 1b048715ab8a..29ac74f40dce 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -457,7 +457,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
if (kfd->kfd2kgd->init_gtt_mem_allocation(
kfd->kgd, size, &kfd->gtt_mem,
- &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){
+ &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
+ false)) {
dev_err(kfd_device, "Could not allocate %d bytes\n", size);
goto out;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index ec0d62a16e53..4f22e745df51 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -358,8 +358,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd)
{
- int retval;
struct mqd_manager *mqd_mgr;
+ int retval;
mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
if (!mqd_mgr)
@@ -387,8 +387,12 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
if (!q->properties.is_active)
return 0;
- retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
- &q->properties, q->process->mm);
+ if (WARN(q->process->mm != current->mm,
+ "should only run in user thread"))
+ retval = -EFAULT;
+ else
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+ &q->properties, current->mm);
if (retval)
goto out_uninit_mqd;
@@ -545,9 +549,15 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
retval = map_queues_cpsch(dqm);
else if (q->properties.is_active &&
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
- q->properties.type == KFD_QUEUE_TYPE_SDMA))
- retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
- &q->properties, q->process->mm);
+ q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
+ if (WARN(q->process->mm != current->mm,
+ "should only run in user thread"))
+ retval = -EFAULT;
+ else
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
+ q->pipe, q->queue,
+ &q->properties, current->mm);
+ }
out_unlock:
dqm_unlock(dqm);
@@ -653,6 +663,7 @@ out:
static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
+ struct mm_struct *mm = NULL;
struct queue *q;
struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
@@ -686,6 +697,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
kfd_flush_tlb(pdd);
}
+ /* Take a safe reference to the mm_struct, which may otherwise
+ * disappear even while the kfd_process is still referenced.
+ */
+ mm = get_task_mm(pdd->process->lead_thread);
+ if (!mm) {
+ retval = -EFAULT;
+ goto out;
+ }
+
/* activate all active queues on the qpd */
list_for_each_entry(q, &qpd->queues_list, list) {
if (!q->properties.is_evicted)
@@ -700,14 +720,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
q->properties.is_evicted = false;
q->properties.is_active = true;
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
- q->queue, &q->properties,
- q->process->mm);
+ q->queue, &q->properties, mm);
if (retval)
goto out;
dqm->queue_count++;
}
qpd->evicted = 0;
out:
+ if (mm)
+ mmput(mm);
dqm_unlock(dqm);
return retval;
}
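
The comment above names the hazard: q->process->mm can be torn down while the kfd_process is still referenced, so the mm must be pinned before any MQD load that touches it. get_task_mm() takes a reference on mm_users, failing if the task has already exited, and mmput() drops it; the minimal shape:

	struct mm_struct *mm = get_task_mm(task);	/* pins mm_users */
	if (!mm)
		return -EFAULT;				/* task already exited */
	/* ... safe to pass mm to helpers that touch the address space ... */
	mmput(mm);
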
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 7a61f38c09e6..01494752c36a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -62,9 +62,20 @@ int kfd_iommu_device_init(struct kfd_dev *kfd)
struct amd_iommu_device_info iommu_info;
unsigned int pasid_limit;
int err;
+ struct kfd_topology_device *top_dev;
- if (!kfd->device_info->needs_iommu_device)
+ top_dev = kfd_topology_device_by_id(kfd->id);
+
+ /*
+ * Overwrite ATS capability according to needs_iommu_device to fix
+ * potential missing corresponding bit in CRAT of BIOS.
+ */
+ if (!kfd->device_info->needs_iommu_device) {
+ top_dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT;
return 0;
+ }
+
+ top_dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
iommu_info.flags = 0;
err = amd_iommu_device_info(kfd->pdev, &iommu_info);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index f5fc3675f21e..0cedb37cf513 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -88,7 +88,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
&((*mqd_mem_obj)->gtt_mem),
&((*mqd_mem_obj)->gpu_addr),
- (void *)&((*mqd_mem_obj)->cpu_ptr));
+ (void *)&((*mqd_mem_obj)->cpu_ptr), true);
} else
retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd),
mqd_mem_obj);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index f971710f1c91..92b285ca73aa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -806,6 +806,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu);
int kfd_topology_remove_device(struct kfd_dev *gpu);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
uint32_t proximity_domain);
+struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index bc95d4dfee2e..80f5db4ef75f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -63,22 +63,33 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
return device;
}
-struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
+struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id)
{
- struct kfd_topology_device *top_dev;
- struct kfd_dev *device = NULL;
+ struct kfd_topology_device *top_dev = NULL;
+ struct kfd_topology_device *ret = NULL;
down_read(&topology_lock);
list_for_each_entry(top_dev, &topology_device_list, list)
if (top_dev->gpu_id == gpu_id) {
- device = top_dev->gpu;
+ ret = top_dev;
break;
}
up_read(&topology_lock);
- return device;
+ return ret;
+}
+
+struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
+{
+ struct kfd_topology_device *top_dev;
+
+ top_dev = kfd_topology_device_by_id(gpu_id);
+ if (!top_dev)
+ return NULL;
+
+ return top_dev->gpu;
}
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 800f481a6995..6903fe6c894b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -641,6 +641,87 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
return NULL;
}
+static void emulated_link_detect(struct dc_link *link)
+{
+ struct dc_sink_init_data sink_init_data = { 0 };
+ struct display_sink_capability sink_caps = { 0 };
+ enum dc_edid_status edid_status;
+ struct dc_context *dc_ctx = link->ctx;
+ struct dc_sink *sink = NULL;
+ struct dc_sink *prev_sink = NULL;
+
+ link->type = dc_connection_none;
+ prev_sink = link->local_sink;
+
+ if (prev_sink != NULL)
+ dc_sink_retain(prev_sink);
+
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_DUAL_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_LVDS: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_LVDS;
+ break;
+ }
+
+ case SIGNAL_TYPE_EDP: {
+ sink_caps.transaction_type =
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.signal = SIGNAL_TYPE_EDP;
+ break;
+ }
+
+ case SIGNAL_TYPE_DISPLAY_PORT: {
+ sink_caps.transaction_type =
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
+ break;
+ }
+
+ default:
+ DC_ERROR("Invalid connector type! signal:%d\n",
+ link->connector_signal);
+ return;
+ }
+
+ sink_init_data.link = link;
+ sink_init_data.sink_signal = sink_caps.signal;
+
+ sink = dc_sink_create(&sink_init_data);
+ if (!sink) {
+ DC_ERROR("Failed to create sink!\n");
+ return;
+ }
+
+ link->local_sink = sink;
+
+ edid_status = dm_helpers_read_local_edid(
+ link->ctx,
+ link,
+ sink);
+
+ if (edid_status != EDID_OK)
+ DC_ERROR("Failed to read EDID\n");
+
+}
+
static int dm_resume(void *handle)
{
struct amdgpu_device *adev = handle;
@@ -654,6 +735,7 @@ static int dm_resume(void *handle)
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
struct dm_plane_state *dm_new_plane_state;
+ enum dc_connection_type new_connection_type = dc_connection_none;
int ret;
int i;
@@ -684,7 +766,13 @@ static int dm_resume(void *handle)
continue;
mutex_lock(&aconnector->hpd_lock);
- dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none)
+ emulated_link_detect(aconnector->dc_link);
+ else
+ dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
if (aconnector->fake_enable && aconnector->dc_link->local_sink)
aconnector->fake_enable = false;
@@ -922,6 +1010,7 @@ static void handle_hpd_irq(void *param)
struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
+ enum dc_connection_type new_connection_type = dc_connection_none;
/* In case of failure or MST no need to update connector status or notify the OS
 * since (for MST case) MST does this in its own context.
@@ -931,7 +1020,21 @@ static void handle_hpd_irq(void *param)
if (aconnector->fake_enable)
aconnector->fake_enable = false;
- if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
+ if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(aconnector->dc_link);
+
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ drm_kms_helper_hotplug_event(dev);
+
+ } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
amdgpu_dm_update_connector_after_detect(aconnector);
@@ -1031,6 +1134,7 @@ static void handle_hpd_rx_irq(void *param)
struct drm_device *dev = connector->dev;
struct dc_link *dc_link = aconnector->dc_link;
bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+ enum dc_connection_type new_connection_type = dc_connection_none;
/* TODO: Temporarily add a mutex to protect the hpd interrupt from gpio
 * conflicts; after the i2c helper is implemented, this mutex should be
@@ -1042,7 +1146,24 @@ static void handle_hpd_rx_irq(void *param)
if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
!is_mst_root_connector) {
/* Downstream Port status changed. */
- if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+ if (!dc_link_detect_sink(dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(dc_link);
+
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ drm_kms_helper_hotplug_event(dev);
+ } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
if (aconnector->fake_enable)
aconnector->fake_enable = false;
@@ -1433,6 +1554,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
struct amdgpu_mode_info *mode_info = &adev->mode_info;
uint32_t link_cnt;
int32_t total_overlay_planes, total_primary_planes;
+ enum dc_connection_type new_connection_type = dc_connection_none;
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
@@ -1499,7 +1621,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
link = dc_get_link_at_index(dm->dc, i);
- if (dc_link_detect(link, DETECT_REASON_BOOT)) {
+ if (!dc_link_detect_sink(link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(link);
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
amdgpu_dm_update_connector_after_detect(aconnector);
register_backlight_device(dm, link);
}
@@ -2494,7 +2623,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (dm_state && dm_state->freesync_capable)
stream->ignore_msa_timing_param = true;
finish:
- if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
+ if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
dc_sink_release(sink);
return stream;
@@ -4504,12 +4633,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
}
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- /* Signal HW programming completion */
- drm_atomic_helper_commit_hw_done(state);
if (wait_for_vblank)
drm_atomic_helper_wait_for_flip_done(dev, state);
+ /*
+ * FIXME:
+ * Delay hw_done() until flip_done() is signaled. This is to block
+ * another commit from freeing the CRTC state while we're still
+ * waiting on flip_done.
+ */
+ drm_atomic_helper_commit_hw_done(state);
+
drm_atomic_helper_cleanup_planes(dev, state);
/* Finally, drop a runtime PM reference for each newly disabled CRTC,
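/*
 * Sketch of the detection pattern that the dm_resume(), handle_hpd_irq(),
 * handle_hpd_rx_irq() and init-time hunks above all repeat: probe the
 * physical sink first, and fall back to an emulated sink only when the
 * connector is forced on but nothing is attached. The wrapper name is
 * hypothetical; dc_link_detect_sink(), emulated_link_detect() and
 * dc_link_detect() are the calls this patch adds or uses.
 */
static void example_detect(struct amdgpu_dm_connector *aconnector,
			   enum dc_detect_reason reason)
{
	enum dc_connection_type type = dc_connection_none;

	if (!dc_link_detect_sink(aconnector->dc_link, &type))
		DRM_ERROR("KMS: Failed to detect connector\n");

	/* Forced connector with no physical sink: fake the link state. */
	if (aconnector->base.force && type == dc_connection_none)
		emulated_link_detect(aconnector->dc_link);
	else
		dc_link_detect(aconnector->dc_link, reason);
}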
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 37eaf72ace54..fced3c1c2ef5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -195,7 +195,7 @@ static bool program_hpd_filter(
return result;
}
-static bool detect_sink(struct dc_link *link, enum dc_connection_type *type)
+bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
{
uint32_t is_hpd_high = 0;
struct gpio *hpd_pin;
@@ -604,7 +604,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
return false;
- if (false == detect_sink(link, &new_connection_type)) {
+ if (false == dc_link_detect_sink(link, &new_connection_type)) {
BREAK_TO_DEBUGGER();
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index d43cefbc43d3..1b48ab9aea89 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -215,6 +215,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
bool dc_link_is_dp_sink_present(struct dc_link *link);
+bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
/*
* DPCD access interfaces
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 14384d9675a8..b2f308766a9e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2560,7 +2560,7 @@ static void pplib_apply_display_requirements(
dc->prev_display_config = *pp_display_cfg;
}
-void dce110_set_bandwidth(
+static void dce110_set_bandwidth(
struct dc *dc,
struct dc_state *context,
bool decrease_allowed)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index e4c5db75c4c6..d6db3dbd9015 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -68,11 +68,6 @@ void dce110_fill_display_configs(
const struct dc_state *context,
struct dm_pp_display_configuration *pp_display_cfg);
-void dce110_set_bandwidth(
- struct dc *dc,
- struct dc_state *context,
- bool decrease_allowed);
-
uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
void dp_receiver_power_ctrl(struct dc_link *link, bool on);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index 5853522a6182..eb0f5f9a973b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -244,17 +244,6 @@ static void dce120_update_dchub(
dh_data->dchub_info_valid = false;
}
-static void dce120_set_bandwidth(
- struct dc *dc,
- struct dc_state *context,
- bool decrease_allowed)
-{
- if (context->stream_count <= 0)
- return;
-
- dce110_set_bandwidth(dc, context, decrease_allowed);
-}
-
void dce120_hw_sequencer_construct(struct dc *dc)
{
/* All registers used by dce11.2 match those in dce11 in offset and
@@ -263,6 +252,5 @@ void dce120_hw_sequencer_construct(struct dc *dc)
dce110_hw_sequencer_construct(dc);
dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
dc->hwss.update_dchub = dce120_update_dchub;
- dc->hwss.set_bandwidth = dce120_set_bandwidth;
}
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 14391b06080c..43b82e14007e 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -292,7 +292,7 @@ struct tile_config {
struct kfd2kgd_calls {
int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr);
+ void **cpu_ptr, bool mqd_gfx9);
void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 08b5bb219816..94d6dabec2dc 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -754,6 +754,7 @@ static int malidp_bind(struct device *dev)
drm->irq_enabled = true;
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ drm_crtc_vblank_reset(&malidp->crtc);
if (ret < 0) {
DRM_ERROR("failed to initialise vblank\n");
goto vblank_fail;
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index c94a4422e0e9..2781e462c1ed 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -384,7 +384,8 @@ static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev,
static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
dma_addr_t *addrs, s32 *pitches,
- int num_planes, u16 w, u16 h, u32 fmt_id)
+ int num_planes, u16 w, u16 h, u32 fmt_id,
+ const s16 *rgb2yuv_coeffs)
{
u32 base = MALIDP500_SE_MEMWRITE_BASE;
u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
@@ -416,6 +417,16 @@ static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h),
MALIDP500_SE_MEMWRITE_OUT_SIZE);
+
+ if (rgb2yuv_coeffs) {
+ int i;
+
+ for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
+ malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
+ MALIDP500_SE_RGB_YUV_COEFFS + i * 4);
+ }
+ }
+
malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL);
return 0;
@@ -658,7 +669,8 @@ static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev,
static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
dma_addr_t *addrs, s32 *pitches,
- int num_planes, u16 w, u16 h, u32 fmt_id)
+ int num_planes, u16 w, u16 h, u32 fmt_id,
+ const s16 *rgb2yuv_coeffs)
{
u32 base = MALIDP550_SE_MEMWRITE_BASE;
u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
@@ -689,6 +701,15 @@ static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN,
MALIDP550_SE_CONTROL);
+ if (rgb2yuv_coeffs) {
+ int i;
+
+ for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
+ malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
+ MALIDP550_SE_RGB_YUV_COEFFS + i * 4);
+ }
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index ad2e96915d44..9fc94c08190f 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -191,7 +191,8 @@ struct malidp_hw {
* @param fmt_id - internal format ID of output buffer
*/
int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs,
- s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id);
+ s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id,
+ const s16 *rgb2yuv_coeffs);
/*
* Disable the writing to memory of the next frame's content.
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index ba6ae66387c9..91472e5e0c8b 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -26,6 +26,8 @@ struct malidp_mw_connector_state {
s32 pitches[2];
u8 format;
u8 n_planes;
+ bool rgb2yuv_initialized;
+ const s16 *rgb2yuv_coeffs;
};
static int malidp_mw_connector_get_modes(struct drm_connector *connector)
@@ -84,7 +86,7 @@ static void malidp_mw_connector_destroy(struct drm_connector *connector)
static struct drm_connector_state *
malidp_mw_connector_duplicate_state(struct drm_connector *connector)
{
- struct malidp_mw_connector_state *mw_state;
+ struct malidp_mw_connector_state *mw_state, *mw_current_state;
if (WARN_ON(!connector->state))
return NULL;
@@ -93,7 +95,10 @@ malidp_mw_connector_duplicate_state(struct drm_connector *connector)
if (!mw_state)
return NULL;
- /* No need to preserve any of our driver-local data */
+ mw_current_state = to_mw_state(connector->state);
+ mw_state->rgb2yuv_coeffs = mw_current_state->rgb2yuv_coeffs;
+ mw_state->rgb2yuv_initialized = mw_current_state->rgb2yuv_initialized;
+
__drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base);
return &mw_state->base;
@@ -108,6 +113,13 @@ static const struct drm_connector_funcs malidp_mw_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
+static const s16 rgb2yuv_coeffs_bt709_limited[MALIDP_COLORADJ_NUM_COEFFS] = {
+ 47, 157, 16,
+ -26, -87, 112,
+ 112, -102, -10,
+ 16, 128, 128
+};
+
static int
malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -157,6 +169,9 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
}
mw_state->n_planes = n_planes;
+ if (fb->format->is_yuv)
+ mw_state->rgb2yuv_coeffs = rgb2yuv_coeffs_bt709_limited;
+
return 0;
}
@@ -239,10 +254,12 @@ void malidp_mw_atomic_commit(struct drm_device *drm,
drm_writeback_queue_job(mw_conn, conn_state->writeback_job);
conn_state->writeback_job = NULL;
-
hwdev->hw->enable_memwrite(hwdev, mw_state->addrs,
mw_state->pitches, mw_state->n_planes,
- fb->width, fb->height, mw_state->format);
+ fb->width, fb->height, mw_state->format,
+ !mw_state->rgb2yuv_initialized ?
+ mw_state->rgb2yuv_coeffs : NULL);
+ mw_state->rgb2yuv_initialized = !!mw_state->rgb2yuv_coeffs;
} else {
DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n");
hwdev->hw->disable_memwrite(hwdev);
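/*
 * Sketch of the one-shot latch implemented above: the coefficient pointer
 * and the rgb2yuv_initialized flag live in the connector state and are
 * carried across duplicate_state(), so the register write happens only on
 * the first commit that needs it; afterwards the commit path sets
 * rgb2yuv_initialized = !!rgb2yuv_coeffs, as in the hunk. The helper name
 * is hypothetical.
 */
static const s16 *
example_coeffs_to_program(struct malidp_mw_connector_state *mw_state)
{
	/* NULL tells enable_memwrite() to leave the coefficients alone. */
	return mw_state->rgb2yuv_initialized ? NULL : mw_state->rgb2yuv_coeffs;
}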
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index 3579d36b2a71..6ffe849774f2 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -205,6 +205,7 @@
#define MALIDP500_SE_BASE 0x00c00
#define MALIDP500_SE_CONTROL 0x00c0c
#define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c
+#define MALIDP500_SE_RGB_YUV_COEFFS 0x00C74
#define MALIDP500_SE_MEMWRITE_BASE 0x00e00
#define MALIDP500_DC_IRQ_BASE 0x00f00
#define MALIDP500_CONFIG_VALID 0x00f00
@@ -238,6 +239,7 @@
#define MALIDP550_SE_CONTROL 0x08010
#define MALIDP550_SE_MEMWRITE_ONESHOT (1 << 7)
#define MALIDP550_SE_MEMWRITE_OUT_SIZE 0x08030
+#define MALIDP550_SE_RGB_YUV_COEFFS 0x08078
#define MALIDP550_SE_MEMWRITE_BASE 0x08100
#define MALIDP550_DC_BASE 0x0c000
#define MALIDP550_DC_CONTROL 0x0c010
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3eb061e11e2e..281cf9cbb44c 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -174,6 +174,11 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
state->crtcs[i].state = NULL;
state->crtcs[i].old_state = NULL;
state->crtcs[i].new_state = NULL;
+
+ if (state->crtcs[i].commit) {
+ drm_crtc_commit_put(state->crtcs[i].commit);
+ state->crtcs[i].commit = NULL;
+ }
}
for (i = 0; i < config->num_total_plane; i++) {
@@ -2067,7 +2072,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
+ if (!drm_drv_uses_atomic_modeset(dev))
return;
list_for_each_entry(plane, &config->plane_list, head) {
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 80be74df7ba6..1bb4c318bdd4 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1408,15 +1408,16 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
void drm_atomic_helper_wait_for_flip_done(struct drm_device *dev,
struct drm_atomic_state *old_state)
{
- struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
int i;
- for_each_new_crtc_in_state(old_state, crtc, new_crtc_state, i) {
- struct drm_crtc_commit *commit = new_crtc_state->commit;
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct drm_crtc_commit *commit = old_state->crtcs[i].commit;
int ret;
- if (!commit)
+ crtc = old_state->crtcs[i].ptr;
+
+ if (!crtc || !commit)
continue;
ret = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
@@ -1934,6 +1935,9 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
drm_crtc_commit_get(commit);
commit->abort_completion = true;
+
+ state->crtcs[i].commit = commit;
+ drm_crtc_commit_get(commit);
}
for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
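/*
 * Sketch of the lifetime rule the two hunks above establish: a
 * drm_crtc_commit stashed in the atomic state must hold its own
 * reference, dropped when the state is cleared, so that
 * wait_for_flip_done() can still reach the commit after new_crtc_state
 * is gone. The helper names are hypothetical; __drm_crtcs_state is the
 * per-CRTC entry type assumed from drm_atomic.h.
 */
static void example_stash_commit(struct __drm_crtcs_state *cs,
				 struct drm_crtc_commit *commit)
{
	drm_crtc_commit_get(commit);	/* reference owned by the state */
	cs->commit = commit;
}

static void example_unstash_commit(struct __drm_crtcs_state *cs)
{
	if (cs->commit) {
		drm_crtc_commit_put(cs->commit);
		cs->commit = NULL;
	}
}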
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index baff50a4c234..df31c3815092 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -63,20 +63,21 @@ static void drm_client_close(struct drm_client_dev *client)
EXPORT_SYMBOL(drm_client_close);
/**
- * drm_client_new - Create a DRM client
+ * drm_client_init - Initialise a DRM client
* @dev: DRM device
* @client: DRM client
* @name: Client name
* @funcs: DRM client functions (optional)
*
+ * This initialises the client and opens a &drm_file. Use drm_client_add() to complete the process.
* The caller needs to hold a reference on @dev before calling this function.
* The client is freed when the &drm_device is unregistered. See drm_client_release().
*
* Returns:
* Zero on success or negative error code on failure.
*/
-int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
- const char *name, const struct drm_client_funcs *funcs)
+int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
+ const char *name, const struct drm_client_funcs *funcs)
{
int ret;
@@ -95,10 +96,6 @@ int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
if (ret)
goto err_put_module;
- mutex_lock(&dev->clientlist_mutex);
- list_add(&client->list, &dev->clientlist);
- mutex_unlock(&dev->clientlist_mutex);
-
drm_dev_get(dev);
return 0;
@@ -109,13 +106,33 @@ err_put_module:
return ret;
}
-EXPORT_SYMBOL(drm_client_new);
+EXPORT_SYMBOL(drm_client_init);
+
+/**
+ * drm_client_add - Add client to the device list
+ * @client: DRM client
+ *
+ * Add the client to the &drm_device client list to activate its callbacks.
+ * @client must be initialized by a call to drm_client_init(). After
+ * drm_client_add() it is no longer permissible to call drm_client_release()
+ * directly (outside the unregister callback); instead, cleanup will happen
+ * automatically on driver unload.
+ */
+void drm_client_add(struct drm_client_dev *client)
+{
+ struct drm_device *dev = client->dev;
+
+ mutex_lock(&dev->clientlist_mutex);
+ list_add(&client->list, &dev->clientlist);
+ mutex_unlock(&dev->clientlist_mutex);
+}
+EXPORT_SYMBOL(drm_client_add);
/**
* drm_client_release - Release DRM client resources
* @client: DRM client
*
- * Releases resources by closing the &drm_file that was opened by drm_client_new().
+ * Releases resources by closing the &drm_file that was opened by drm_client_init().
* It is called automatically if the &drm_client_funcs.unregister callback is _not_ set.
*
* This function should only be called from the unregister callback. An exception
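/*
 * Illustrative sketch of the two-step registration the split above
 * enables, modelled on the fbdev conversions later in this diff. The
 * my_* struct and setup helper are hypothetical; drm_client_init(),
 * drm_client_release() and drm_client_add() are the real entry points.
 */
struct my_client {
	struct drm_client_dev base;
	/* driver-private members ... */
};

static int my_client_setup(struct my_client *client);

static int example_client_register(struct drm_device *dev,
				   struct my_client *client,
				   const struct drm_client_funcs *funcs)
{
	int ret;

	ret = drm_client_init(dev, &client->base, "example", funcs);
	if (ret)
		return ret;

	/* Do any setup that may fail while the client is still private. */
	ret = my_client_setup(client);
	if (ret) {
		drm_client_release(&client->base);
		return ret;
	}

	/* Publish: from here on the device may invoke client callbacks. */
	drm_client_add(&client->base);
	return 0;
}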
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index bae43938c8f6..9cbe8f5c9aca 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -567,9 +567,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
struct drm_mode_crtc *crtc_req = data;
struct drm_crtc *crtc;
struct drm_plane *plane;
- struct drm_connector **connector_set = NULL, *connector;
- struct drm_framebuffer *fb = NULL;
- struct drm_display_mode *mode = NULL;
+ struct drm_connector **connector_set, *connector;
+ struct drm_framebuffer *fb;
+ struct drm_display_mode *mode;
struct drm_mode_set set;
uint32_t __user *set_connectors_ptr;
struct drm_modeset_acquire_ctx ctx;
@@ -598,6 +598,10 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
mutex_lock(&crtc->dev->mode_config.mutex);
drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
retry:
+ connector_set = NULL;
+ fb = NULL;
+ mode = NULL;
+
ret = drm_modeset_lock_all_ctx(crtc->dev, &ctx);
if (ret)
goto out;
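/*
 * Sketch of the retry rule the hunk above fixes: every local that the
 * "out" path releases must be re-initialised after the retry label,
 * otherwise a -EDEADLK backoff retries with stale pointers and drops
 * their references twice. All names below are hypothetical.
 */
static int example_locked_op(struct drm_device *dev)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_framebuffer *fb;
	int ret;

	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
retry:
	fb = NULL;	/* re-init everything the "out" path releases */
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret)
		goto out;
	/* ... look up and take a reference on fb; failures jump to out ... */
out:
	if (fb)
		drm_framebuffer_put(fb);
	if (ret == -EDEADLK) {
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}
	drm_modeset_acquire_fini(&ctx);
	return ret;
}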
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 6f28fe58f169..373bd4c2b698 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -151,7 +151,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
return ret;
}
- if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
+ if (drm_drv_uses_atomic_modeset(dev)) {
ret = drm_atomic_debugfs_init(minor);
if (ret) {
DRM_ERROR("Failed to create atomic debugfs files\n");
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 3c9fc99648b7..ff0bfc65a8c1 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -113,6 +113,9 @@ static const struct edid_quirk {
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
{ "AEO", 0, EDID_QUIRK_FORCE_6BPC },
+ /* BOE model on HP Pavilion 15-n233sl reports 8 bpc, but is a 6 bpc panel */
+ { "BOE", 0x78b, EDID_QUIRK_FORCE_6BPC },
+
/* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
{ "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
@@ -4279,7 +4282,7 @@ static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector,
struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
dc_mask = db[7] & DRM_EDID_YCBCR420_DC_MASK;
- hdmi->y420_dc_modes |= dc_mask;
+ hdmi->y420_dc_modes = dc_mask;
}
static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 9da36a6271d3..9ac1f2e0f064 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -160,7 +160,7 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
fb_helper = &fbdev_cma->fb_helper;
- ret = drm_client_new(dev, &fb_helper->client, "fbdev", NULL);
+ ret = drm_client_init(dev, &fb_helper->client, "fbdev", NULL);
if (ret)
goto err_free;
@@ -169,6 +169,8 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
if (ret)
goto err_client_put;
+ drm_client_add(&fb_helper->client);
+
return fbdev_cma;
err_client_put:
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4b0dd20bccb8..9628dd617826 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1580,6 +1580,25 @@ unlock:
}
EXPORT_SYMBOL(drm_fb_helper_ioctl);
+static bool drm_fb_pixel_format_equal(const struct fb_var_screeninfo *var_1,
+ const struct fb_var_screeninfo *var_2)
+{
+ return var_1->bits_per_pixel == var_2->bits_per_pixel &&
+ var_1->grayscale == var_2->grayscale &&
+ var_1->red.offset == var_2->red.offset &&
+ var_1->red.length == var_2->red.length &&
+ var_1->red.msb_right == var_2->red.msb_right &&
+ var_1->green.offset == var_2->green.offset &&
+ var_1->green.length == var_2->green.length &&
+ var_1->green.msb_right == var_2->green.msb_right &&
+ var_1->blue.offset == var_2->blue.offset &&
+ var_1->blue.length == var_2->blue.length &&
+ var_1->blue.msb_right == var_2->blue.msb_right &&
+ var_1->transp.offset == var_2->transp.offset &&
+ var_1->transp.length == var_2->transp.length &&
+ var_1->transp.msb_right == var_2->transp.msb_right;
+}
+
/**
* drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var
* @var: screeninfo to check
@@ -1590,7 +1609,6 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_framebuffer *fb = fb_helper->fb;
- int depth;
if (var->pixclock != 0 || in_dbg_master())
return -EINVAL;
@@ -1610,72 +1628,15 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}
- switch (var->bits_per_pixel) {
- case 16:
- depth = (var->green.length == 6) ? 16 : 15;
- break;
- case 32:
- depth = (var->transp.length > 0) ? 32 : 24;
- break;
- default:
- depth = var->bits_per_pixel;
- break;
- }
-
- switch (depth) {
- case 8:
- var->red.offset = 0;
- var->green.offset = 0;
- var->blue.offset = 0;
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
- var->transp.length = 0;
- var->transp.offset = 0;
- break;
- case 15:
- var->red.offset = 10;
- var->green.offset = 5;
- var->blue.offset = 0;
- var->red.length = 5;
- var->green.length = 5;
- var->blue.length = 5;
- var->transp.length = 1;
- var->transp.offset = 15;
- break;
- case 16:
- var->red.offset = 11;
- var->green.offset = 5;
- var->blue.offset = 0;
- var->red.length = 5;
- var->green.length = 6;
- var->blue.length = 5;
- var->transp.length = 0;
- var->transp.offset = 0;
- break;
- case 24:
- var->red.offset = 16;
- var->green.offset = 8;
- var->blue.offset = 0;
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
- var->transp.length = 0;
- var->transp.offset = 0;
- break;
- case 32:
- var->red.offset = 16;
- var->green.offset = 8;
- var->blue.offset = 0;
- var->red.length = 8;
- var->green.length = 8;
- var->blue.length = 8;
- var->transp.length = 8;
- var->transp.offset = 24;
- break;
- default:
+ /*
+ * drm fbdev emulation doesn't support changing the pixel format at all,
+ * so reject all pixel format changing requests.
+ */
+ if (!drm_fb_pixel_format_equal(var, &info->var)) {
+ DRM_DEBUG("fbdev emulation doesn't support changing the pixel format\n");
return -EINVAL;
}
+
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_check_var);
@@ -2370,7 +2331,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
{
int c, o;
struct drm_connector *connector;
- const struct drm_connector_helper_funcs *connector_funcs;
int my_score, best_score, score;
struct drm_fb_helper_crtc **crtcs, *crtc;
struct drm_fb_helper_connector *fb_helper_conn;
@@ -2399,8 +2359,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
if (drm_has_preferred_mode(fb_helper_conn, width, height))
my_score++;
- connector_funcs = connector->helper_private;
-
/*
* select a crtc for this connector and then attempt to configure
* remaining connectors
@@ -3221,12 +3179,14 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
if (!fb_helper)
return -ENOMEM;
- ret = drm_client_new(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
+ ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
if (ret) {
kfree(fb_helper);
return ret;
}
+ drm_client_add(&fb_helper->client);
+
fb_helper->preferred_bpp = preferred_bpp;
drm_fbdev_client_hotplug(&fb_helper->client);
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index b54fb78a283c..b82da96ded5c 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -566,14 +566,14 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
lessee_priv->is_master = 1;
lessee_priv->authenticated = 1;
- /* Hook up the fd */
- fd_install(fd, lessee_file);
-
/* Pass fd back to userspace */
DRM_DEBUG_LEASE("Returning fd %d id %d\n", fd, lessee->lessee_id);
cl->fd = fd;
cl->lessee_id = lessee->lessee_id;
+ /* Hook up the fd */
+ fd_install(fd, lessee_file);
+
DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
return 0;
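/*
 * Sketch of the ordering rule behind the reorder above: fd_install()
 * publishes the file to userspace, which may close the fd from another
 * thread immediately, so it must come last, after every field userspace
 * reads has been written. The wrapper name is hypothetical.
 */
static void example_return_lease_fd(struct drm_mode_create_lease *cl,
				    int fd, struct file *lessee_file,
				    u32 lessee_id)
{
	/* 1. Fill in everything userspace will look at ... */
	cl->fd = fd;
	cl->lessee_id = lessee_id;

	/* 2. ... and only then make the fd live. */
	fd_install(fd, lessee_file);
}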
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index b902361dee6e..1d9a9d2fe0e0 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -24,7 +24,6 @@
#include <linux/err.h>
#include <linux/module.h>
-#include <drm/drm_device.h>
#include <drm/drm_crtc.h>
#include <drm/drm_panel.h>
@@ -105,13 +104,6 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
if (panel->connector)
return -EBUSY;
- panel->link = device_link_add(connector->dev->dev, panel->dev, 0);
- if (!panel->link) {
- dev_err(panel->dev, "failed to link panel to %s\n",
- dev_name(connector->dev->dev));
- return -EINVAL;
- }
-
panel->connector = connector;
panel->drm = connector->dev;
@@ -133,8 +125,6 @@ EXPORT_SYMBOL(drm_panel_attach);
*/
int drm_panel_detach(struct drm_panel *panel)
{
- device_link_del(panel->link);
-
panel->connector = NULL;
panel->drm = NULL;
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index adb3cb27d31e..759278fef35a 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -97,6 +97,8 @@ static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
{
int ret;
+ WARN_ON(*fence);
+
*fence = drm_syncobj_fence_get(syncobj);
if (*fence)
return 1;
@@ -743,6 +745,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
for (i = 0; i < count; ++i) {
+ if (entries[i].fence)
+ continue;
+
drm_syncobj_fence_get_or_add_callback(syncobjs[i],
&entries[i].fence,
&entries[i].syncobj_cb,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 9b2720b41571..83c1f46670bf 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -592,8 +592,6 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct component_match *match = NULL;
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-
if (!dev->platform_data) {
struct device_node *core_node;
@@ -655,13 +653,30 @@ static int __init etnaviv_init(void)
for_each_compatible_node(np, NULL, "vivante,gc") {
if (!of_device_is_available(np))
continue;
- pdev = platform_device_register_simple("etnaviv", -1,
- NULL, 0);
- if (IS_ERR(pdev)) {
- ret = PTR_ERR(pdev);
+
+ pdev = platform_device_alloc("etnaviv", -1);
+ if (!pdev) {
+ ret = -ENOMEM;
+ of_node_put(np);
+ goto unregister_platform_driver;
+ }
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+ /*
+ * Apply the same DMA configuration to the virtual etnaviv
+ * device as the GPU we found. This assumes that all Vivante
+ * GPUs in the system share the same DMA constraints.
+ */
+ of_dma_configure(&pdev->dev, np, true);
+
+ ret = platform_device_add(pdev);
+ if (ret) {
+ platform_device_put(pdev);
of_node_put(np);
goto unregister_platform_driver;
}
+
etnaviv_drm = pdev;
of_node_put(np);
break;
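/*
 * Sketch of the alloc/configure/add sequence the hunk above switches to,
 * so the DMA masks can be set before the device goes live. Error
 * handling is condensed; the wrapper name is hypothetical, while the
 * platform_device_*, DMA_BIT_MASK() and of_dma_configure() calls are
 * those used above.
 */
static struct platform_device *example_virtual_gpu_dev(struct device_node *np)
{
	struct platform_device *pdev;

	pdev = platform_device_alloc("etnaviv", -1);
	if (!pdev)
		return NULL;

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
	of_dma_configure(&pdev->dev, np, true);	/* inherit the GPU's constraints */

	if (platform_device_add(pdev)) {
		platform_device_put(pdev);	/* frees the allocated device */
		return NULL;
	}
	return pdev;
}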
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 87f6b5672e11..797d9ee5f15a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -55,37 +55,12 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
unsigned long start, unsigned long size)
{
- struct iommu_domain *domain;
- int ret;
-
- domain = iommu_domain_alloc(priv->dma_dev->bus);
- if (!domain)
- return -ENOMEM;
-
- ret = iommu_get_dma_cookie(domain);
- if (ret)
- goto free_domain;
-
- ret = iommu_dma_init_domain(domain, start, size, NULL);
- if (ret)
- goto put_cookie;
-
- priv->mapping = domain;
+ priv->mapping = iommu_get_domain_for_dev(priv->dma_dev);
return 0;
-
-put_cookie:
- iommu_put_dma_cookie(domain);
-free_domain:
- iommu_domain_free(domain);
- return ret;
}
static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
{
- struct iommu_domain *domain = priv->mapping;
-
- iommu_put_dma_cookie(domain);
- iommu_domain_free(domain);
priv->mapping = NULL;
}
@@ -94,7 +69,9 @@ static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
{
struct iommu_domain *domain = priv->mapping;
- return iommu_attach_device(domain, dev);
+ if (dev != priv->dma_dev)
+ return iommu_attach_device(domain, dev);
+ return 0;
}
static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
@@ -102,7 +79,8 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
{
struct iommu_domain *domain = priv->mapping;
- iommu_detach_device(domain, dev);
+ if (dev != priv->dma_dev)
+ iommu_detach_device(domain, dev);
}
#else
#error Unsupported architecture and IOMMU/DMA-mapping glue code
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
index 5d2f0d548469..250b5e02a314 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/gpu/drm/i2c/tda9950.c
@@ -191,7 +191,8 @@ static irqreturn_t tda9950_irq(int irq, void *data)
break;
}
/* TDA9950 executes all retries for us */
- tx_status |= CEC_TX_STATUS_MAX_RETRIES;
+ if (tx_status != CEC_TX_STATUS_OK)
+ tx_status |= CEC_TX_STATUS_MAX_RETRIES;
cec_transmit_done(priv->adap, tx_status, arb_lost_cnt,
nack_cnt, 0, err_cnt);
break;
@@ -310,7 +311,7 @@ static void tda9950_release(struct tda9950_priv *priv)
/* Wait up to .5s for it to signal non-busy */
do {
csr = tda9950_read(client, REG_CSR);
- if (!(csr & CSR_BUSY) || --timeout)
+ if (!(csr & CSR_BUSY) || !--timeout)
break;
msleep(10);
} while (1);
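/*
 * Sketch of the poll-loop fix above: "--timeout" stays non-zero until
 * the budget is exhausted, so the old condition broke out of the loop on
 * the very first busy read; "!--timeout" breaks only once all retries
 * are used. Hypothetical standalone form of the same loop:
 */
static bool example_wait_not_busy(struct i2c_client *client)
{
	int timeout = 50;	/* 50 * 10ms = .5s, matching the driver */
	u8 csr;

	do {
		csr = tda9950_read(client, REG_CSR);
		if (!(csr & CSR_BUSY) || !--timeout)
			break;
		msleep(10);
	} while (1);

	return !(csr & CSR_BUSY);
}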
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 72afa518edd9..94c1089ecf59 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -3210,6 +3210,7 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
+ MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT);
MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index a45f46d8537f..9ad89e38f6c0 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -32,6 +32,7 @@
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
+#include <linux/sched/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
@@ -1792,16 +1793,21 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
info = (struct kvmgt_guest_info *)handle;
kvm = info->kvm;
- if (kthread)
+ if (kthread) {
+ if (!mmget_not_zero(kvm->mm))
+ return -EFAULT;
use_mm(kvm->mm);
+ }
idx = srcu_read_lock(&kvm->srcu);
ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
kvm_read_guest(kvm, gpa, buf, len);
srcu_read_unlock(&kvm->srcu, idx);
- if (kthread)
+ if (kthread) {
unuse_mm(kvm->mm);
+ mmput(kvm->mm);
+ }
return ret;
}
@@ -1827,6 +1833,8 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
{
struct kvmgt_guest_info *info;
struct kvm *kvm;
+ int idx;
+ bool ret;
if (!handle_valid(handle))
return false;
@@ -1834,8 +1842,11 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
info = (struct kvmgt_guest_info *)handle;
kvm = info->kvm;
- return kvm_is_visible_gfn(kvm, gfn);
+ idx = srcu_read_lock(&kvm->srcu);
+ ret = kvm_is_visible_gfn(kvm, gfn);
+ srcu_read_unlock(&kvm->srcu, idx);
+ return ret;
}
struct intel_gvt_mpt kvmgt_mpt = {
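/*
 * Sketch of the mm-borrowing rule the kvmgt hunk enforces: a kernel
 * thread has no mm of its own, so before use_mm() it must pin the target
 * mm with mmget_not_zero() (the owning process may be exiting) and
 * balance it with mmput() after unuse_mm(). The wrapper is hypothetical,
 * and the srcu locking from the hunk is omitted for brevity.
 */
static int example_copy_from_guest(struct kvm *kvm, gpa_t gpa,
				   void *buf, unsigned long len)
{
	int ret;

	if (!mmget_not_zero(kvm->mm))
		return -EFAULT;	/* process already exited */

	use_mm(kvm->mm);
	ret = kvm_read_guest(kvm, gpa, buf, len);
	unuse_mm(kvm->mm);
	mmput(kvm->mm);

	return ret;
}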
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 994366035364..9bb9a85c992c 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -244,6 +244,34 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
/* set the bit 0:2(Core C-State ) to C0 */
vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
+
+ if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+ vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
+ ~(BIT(0) | BIT(1));
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
+ ~PHY_POWER_GOOD;
+ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
+ ~PHY_POWER_GOOD;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &=
+ ~BIT(30);
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &=
+ ~BIT(30);
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &=
+ ~BXT_PHY_LANE_ENABLED;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |=
+ BXT_PHY_CMNLANE_POWERDOWN_ACK |
+ BXT_PHY_LANE_POWERDOWN_ACK;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &=
+ ~BXT_PHY_LANE_ENABLED;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |=
+ BXT_PHY_CMNLANE_POWERDOWN_ACK |
+ BXT_PHY_LANE_POWERDOWN_ACK;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &=
+ ~BXT_PHY_LANE_ENABLED;
+ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |=
+ BXT_PHY_CMNLANE_POWERDOWN_ACK |
+ BXT_PHY_LANE_POWERDOWN_ACK;
+ }
} else {
#define GVT_GEN8_MMIO_RESET_OFFSET (0x44200)
/* only reset the engine-related registers, so starting with 0x44200
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index fa75a2eead90..b0d3a43ccd03 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -42,8 +42,6 @@
#define DEVICE_TYPE_EFP3 0x20
#define DEVICE_TYPE_EFP4 0x10
-#define DEV_SIZE 38
-
struct opregion_header {
u8 signature[16];
u32 size;
@@ -63,6 +61,10 @@ struct bdb_data_header {
u16 size; /* data size */
} __packed;
+/* To support Windows guests with opregion, hardcode the emulated
+ * bdb header version as '186'; the corresponding child_device_config
+ * length should then be '33' rather than '38'.
+ */
struct efp_child_device_config {
u16 handle;
u16 device_type;
@@ -109,12 +111,6 @@ struct efp_child_device_config {
u8 mipi_bridge_type; /* 171 */
u16 device_class_ext;
u8 dvo_function;
- u8 dp_usb_type_c:1; /* 195 */
- u8 skip6:7;
- u8 dp_usb_type_c_2x_gpio_index; /* 195 */
- u16 dp_usb_type_c_2x_gpio_pin; /* 195 */
- u8 iboost_dp:4; /* 196 */
- u8 iboost_hdmi:4; /* 196 */
} __packed;
struct vbt {
@@ -155,7 +151,7 @@ static void virt_vbt_generation(struct vbt *v)
v->header.bdb_offset = offsetof(struct vbt, bdb_header);
strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
- v->bdb_header.version = 186; /* child_dev_size = 38 */
+ v->bdb_header.version = 186; /* child_dev_size = 33 */
v->bdb_header.header_size = sizeof(v->bdb_header);
v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
@@ -169,11 +165,13 @@ static void virt_vbt_generation(struct vbt *v)
/* child device */
num_child = 4; /* each port has one child */
+ v->general_definitions.child_dev_size =
+ sizeof(struct efp_child_device_config);
v->general_definitions_header.id = BDB_GENERAL_DEFINITIONS;
/* size will include child devices */
v->general_definitions_header.size =
- sizeof(struct bdb_general_definitions) + num_child * DEV_SIZE;
- v->general_definitions.child_dev_size = DEV_SIZE;
+ sizeof(struct bdb_general_definitions) +
+ num_child * v->general_definitions.child_dev_size;
/* portA */
v->child0.handle = DEVICE_TYPE_EFP1;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index a4e8e3cf74fd..c628be05fbfe 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -281,6 +281,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_vgpu_clean_submission(vgpu);
intel_vgpu_clean_display(vgpu);
intel_vgpu_clean_opregion(vgpu);
+ intel_vgpu_reset_ggtt(vgpu, true);
intel_vgpu_clean_gtt(vgpu);
intel_gvt_hypervisor_detach_vgpu(vgpu);
intel_vgpu_free_resource(vgpu);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index f7f2aa71d8d9..a262a64f5625 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -232,6 +232,20 @@ static bool compress_init(struct compress *c)
return true;
}
+static void *compress_next_page(struct drm_i915_error_object *dst)
+{
+ unsigned long page;
+
+ if (dst->page_count >= dst->num_pages)
+ return ERR_PTR(-ENOSPC);
+
+ page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+
+ return dst->pages[dst->page_count++] = (void *)page;
+}
+
static int compress_page(struct compress *c,
void *src,
struct drm_i915_error_object *dst)
@@ -245,19 +259,14 @@ static int compress_page(struct compress *c,
do {
if (zstream->avail_out == 0) {
- unsigned long page;
-
- page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
- if (!page)
- return -ENOMEM;
+ zstream->next_out = compress_next_page(dst);
+ if (IS_ERR(zstream->next_out))
+ return PTR_ERR(zstream->next_out);
- dst->pages[dst->page_count++] = (void *)page;
-
- zstream->next_out = (void *)page;
zstream->avail_out = PAGE_SIZE;
}
- if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK)
+ if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
return -EIO;
} while (zstream->avail_in);
@@ -268,19 +277,42 @@ static int compress_page(struct compress *c,
return 0;
}
-static void compress_fini(struct compress *c,
+static int compress_flush(struct compress *c,
struct drm_i915_error_object *dst)
{
struct z_stream_s *zstream = &c->zstream;
- if (dst) {
- zlib_deflate(zstream, Z_FINISH);
- dst->unused = zstream->avail_out;
- }
+ do {
+ switch (zlib_deflate(zstream, Z_FINISH)) {
+ case Z_OK: /* more space requested */
+ zstream->next_out = compress_next_page(dst);
+ if (IS_ERR(zstream->next_out))
+ return PTR_ERR(zstream->next_out);
+
+ zstream->avail_out = PAGE_SIZE;
+ break;
+
+ case Z_STREAM_END:
+ goto end;
+
+ default: /* any error */
+ return -EIO;
+ }
+ } while (1);
+
+end:
+ memset(zstream->next_out, 0, zstream->avail_out);
+ dst->unused = zstream->avail_out;
+ return 0;
+}
+
+static void compress_fini(struct compress *c,
+ struct drm_i915_error_object *dst)
+{
+ struct z_stream_s *zstream = &c->zstream;
zlib_deflateEnd(zstream);
kfree(zstream->workspace);
-
if (c->tmp)
free_page((unsigned long)c->tmp);
}
@@ -319,6 +351,12 @@ static int compress_page(struct compress *c,
return 0;
}
+static int compress_flush(struct compress *c,
+ struct drm_i915_error_object *dst)
+{
+ return 0;
+}
+
static void compress_fini(struct compress *c,
struct drm_i915_error_object *dst)
{
@@ -917,6 +955,7 @@ i915_error_object_create(struct drm_i915_private *i915,
unsigned long num_pages;
struct sgt_iter iter;
dma_addr_t dma;
+ int ret;
if (!vma)
return NULL;
@@ -930,6 +969,7 @@ i915_error_object_create(struct drm_i915_private *i915,
dst->gtt_offset = vma->node.start;
dst->gtt_size = vma->node.size;
+ dst->num_pages = num_pages;
dst->page_count = 0;
dst->unused = 0;
@@ -938,28 +978,26 @@ i915_error_object_create(struct drm_i915_private *i915,
return NULL;
}
+ ret = -EINVAL;
for_each_sgt_dma(dma, iter, vma->pages) {
void __iomem *s;
- int ret;
ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
ret = compress_page(&compress, (void __force *)s, dst);
io_mapping_unmap_atomic(s);
-
if (ret)
- goto unwind;
+ break;
}
- goto out;
-unwind:
- while (dst->page_count--)
- free_page((unsigned long)dst->pages[dst->page_count]);
- kfree(dst);
- dst = NULL;
+ if (ret || compress_flush(&compress, dst)) {
+ while (dst->page_count--)
+ free_page((unsigned long)dst->pages[dst->page_count]);
+ kfree(dst);
+ dst = NULL;
+ }
-out:
compress_fini(&compress, dst);
ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
return dst;
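/*
 * Sketch of the zlib termination contract behind compress_flush() above:
 * zlib_deflate(..., Z_FINISH) keeps returning Z_OK while it needs more
 * output space and Z_STREAM_END once the stream is complete, so the
 * caller must loop and grow the output until Z_STREAM_END. The page
 * allocation callback is hypothetical.
 */
static int example_deflate_finish(struct z_stream_s *zstream,
				  void *(*next_page)(void *ctx), void *ctx)
{
	for (;;) {
		switch (zlib_deflate(zstream, Z_FINISH)) {
		case Z_OK:	/* more output space requested */
			zstream->next_out = next_page(ctx);
			if (!zstream->next_out)
				return -ENOMEM;
			zstream->avail_out = PAGE_SIZE;
			break;
		case Z_STREAM_END:
			return 0;
		default:	/* any error */
			return -EIO;
		}
	}
}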
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index f893a4e8b783..8710fb18ed74 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -135,6 +135,7 @@ struct i915_gpu_state {
struct drm_i915_error_object {
u64 gtt_offset;
u64 gtt_size;
+ int num_pages;
int page_count;
int unused;
u32 *pages[0];
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 90628a47ae17..29877969310d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -3091,36 +3091,27 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
spin_unlock(&i915->irq_lock);
}
-static void
-gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl,
- u32 *iir)
+static u32
+gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
{
void __iomem * const regs = dev_priv->regs;
+ u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
- return;
+ return 0;
+
+ iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
+ if (likely(iir))
+ raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
- *iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
- if (likely(*iir))
- raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir);
+ return iir;
}
static void
-gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv,
- const u32 master_ctl, const u32 iir)
+gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
{
- if (!(master_ctl & GEN11_GU_MISC_IRQ))
- return;
-
- if (unlikely(!iir)) {
- DRM_ERROR("GU_MISC iir blank!\n");
- return;
- }
-
if (iir & GEN11_GU_MISC_GSE)
intel_opregion_asle_intr(dev_priv);
- else
- DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir);
}
static irqreturn_t gen11_irq_handler(int irq, void *arg)
@@ -3157,12 +3148,12 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
enable_rpm_wakeref_asserts(i915);
}
- gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir);
+ gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
/* Acknowledge and enable interrupts. */
raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
- gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir);
+ gen11_gu_misc_irq_handler(i915, gu_misc_iir);
return IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 6a4d1388ad2d..1df3ce134cd0 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -592,7 +592,6 @@ static const struct intel_device_info intel_cannonlake_info = {
GEN10_FEATURES, \
GEN(11), \
.ddb_size = 2048, \
- .has_csr = 0, \
.has_logical_ring_elsq = 1
static const struct intel_device_info intel_icelake_11_info = {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 4a3c8ee9a973..d2951096bca0 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5079,10 +5079,14 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
mutex_lock(&dev_priv->pcu_lock);
WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
mutex_unlock(&dev_priv->pcu_lock);
- /* wait for pcode to finish disabling IPS, which may take up to 42ms */
+ /*
+ * Wait for PCODE to finish disabling IPS. The BSpec-specified
+ * 42ms timeout value leads to occasional timeouts, so use 100ms
+ * instead.
+ */
if (intel_wait_for_register(dev_priv,
IPS_CTL, IPS_ENABLE, 0,
- 42))
+ 100))
DRM_ERROR("Timed out waiting for IPS disable\n");
} else {
I915_WRITE(IPS_CTL, 0);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index c2f10d899329..443dfaefd7a6 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -181,8 +181,9 @@ struct intel_overlay {
u32 brightness, contrast, saturation;
u32 old_xscale, old_yscale;
/* register access */
- u32 flip_addr;
struct drm_i915_gem_object *reg_bo;
+ struct overlay_registers __iomem *regs;
+ u32 flip_addr;
/* flip handling */
struct i915_gem_active last_flip;
};
@@ -210,29 +211,6 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
}
-static struct overlay_registers __iomem *
-intel_overlay_map_regs(struct intel_overlay *overlay)
-{
- struct drm_i915_private *dev_priv = overlay->i915;
- struct overlay_registers __iomem *regs;
-
- if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
- regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
- else
- regs = io_mapping_map_wc(&dev_priv->ggtt.iomap,
- overlay->flip_addr,
- PAGE_SIZE);
-
- return regs;
-}
-
-static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
- struct overlay_registers __iomem *regs)
-{
- if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
- io_mapping_unmap(regs);
-}
-
static void intel_overlay_submit_request(struct intel_overlay *overlay,
struct i915_request *rq,
i915_gem_retire_fn retire)
@@ -784,13 +762,13 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct drm_i915_gem_object *new_bo,
struct put_image_params *params)
{
- int ret, tmp_width;
- struct overlay_registers __iomem *regs;
- bool scale_changed = false;
+ struct overlay_registers __iomem *regs = overlay->regs;
struct drm_i915_private *dev_priv = overlay->i915;
u32 swidth, swidthsw, sheight, ostride;
enum pipe pipe = overlay->crtc->pipe;
+ bool scale_changed = false;
struct i915_vma *vma;
+ int ret, tmp_width;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
@@ -815,30 +793,19 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (!overlay->active) {
u32 oconfig;
- regs = intel_overlay_map_regs(overlay);
- if (!regs) {
- ret = -ENOMEM;
- goto out_unpin;
- }
+
oconfig = OCONF_CC_OUT_8BIT;
if (IS_GEN4(dev_priv))
oconfig |= OCONF_CSC_MODE_BT709;
oconfig |= pipe == 0 ?
OCONF_PIPE_A : OCONF_PIPE_B;
iowrite32(oconfig, &regs->OCONFIG);
- intel_overlay_unmap_regs(overlay, regs);
ret = intel_overlay_on(overlay);
if (ret != 0)
goto out_unpin;
}
- regs = intel_overlay_map_regs(overlay);
- if (!regs) {
- ret = -ENOMEM;
- goto out_unpin;
- }
-
iowrite32((params->dst_y << 16) | params->dst_x, &regs->DWINPOS);
iowrite32((params->dst_h << 16) | params->dst_w, &regs->DWINSZ);
@@ -882,8 +849,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
iowrite32(overlay_cmd_reg(params), &regs->OCMD);
- intel_overlay_unmap_regs(overlay, regs);
-
ret = intel_overlay_continue(overlay, vma, scale_changed);
if (ret)
goto out_unpin;
@@ -901,7 +866,6 @@ out_pin_section:
int intel_overlay_switch_off(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
- struct overlay_registers __iomem *regs;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -918,9 +882,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
if (ret != 0)
return ret;
- regs = intel_overlay_map_regs(overlay);
- iowrite32(0, &regs->OCMD);
- intel_overlay_unmap_regs(overlay, regs);
+ iowrite32(0, &overlay->regs->OCMD);
return intel_overlay_off(overlay);
}
@@ -1305,7 +1267,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
struct drm_intel_overlay_attrs *attrs = data;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_overlay *overlay;
- struct overlay_registers __iomem *regs;
int ret;
overlay = dev_priv->overlay;
@@ -1345,15 +1306,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
overlay->contrast = attrs->contrast;
overlay->saturation = attrs->saturation;
- regs = intel_overlay_map_regs(overlay);
- if (!regs) {
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- update_reg_attrs(overlay, regs);
-
- intel_overlay_unmap_regs(overlay, regs);
+ update_reg_attrs(overlay, overlay->regs);
if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
if (IS_GEN2(dev_priv))
@@ -1386,12 +1339,47 @@ out_unlock:
return ret;
}
+static int get_registers(struct intel_overlay *overlay, bool use_phys)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_stolen(overlay->i915, PAGE_SIZE);
+ if (obj == NULL)
+ obj = i915_gem_object_create_internal(overlay->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_put_bo;
+ }
+
+ if (use_phys)
+ overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
+ else
+ overlay->flip_addr = i915_ggtt_offset(vma);
+ overlay->regs = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+
+ if (IS_ERR(overlay->regs)) {
+ err = PTR_ERR(overlay->regs);
+ goto err_put_bo;
+ }
+
+ overlay->reg_bo = obj;
+ return 0;
+
+err_put_bo:
+ i915_gem_object_put(obj);
+ return err;
+}
+
void intel_setup_overlay(struct drm_i915_private *dev_priv)
{
struct intel_overlay *overlay;
- struct drm_i915_gem_object *reg_bo;
- struct overlay_registers __iomem *regs;
- struct i915_vma *vma = NULL;
int ret;
if (!HAS_OVERLAY(dev_priv))
@@ -1401,46 +1389,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
if (!overlay)
return;
- mutex_lock(&dev_priv->drm.struct_mutex);
- if (WARN_ON(dev_priv->overlay))
- goto out_free;
-
overlay->i915 = dev_priv;
- reg_bo = NULL;
- if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
- reg_bo = i915_gem_object_create_stolen(dev_priv, PAGE_SIZE);
- if (reg_bo == NULL)
- reg_bo = i915_gem_object_create(dev_priv, PAGE_SIZE);
- if (IS_ERR(reg_bo))
- goto out_free;
- overlay->reg_bo = reg_bo;
-
- if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) {
- ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
- if (ret) {
- DRM_ERROR("failed to attach phys overlay regs\n");
- goto out_free_bo;
- }
- overlay->flip_addr = reg_bo->phys_handle->busaddr;
- } else {
- vma = i915_gem_object_ggtt_pin(reg_bo, NULL,
- 0, PAGE_SIZE, PIN_MAPPABLE);
- if (IS_ERR(vma)) {
- DRM_ERROR("failed to pin overlay register bo\n");
- ret = PTR_ERR(vma);
- goto out_free_bo;
- }
- overlay->flip_addr = i915_ggtt_offset(vma);
-
- ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
- if (ret) {
- DRM_ERROR("failed to move overlay register bo into the GTT\n");
- goto out_unpin_bo;
- }
- }
-
- /* init all values */
overlay->color_key = 0x0101fe;
overlay->color_key_enabled = true;
overlay->brightness = -19;
@@ -1449,44 +1399,51 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
init_request_active(&overlay->last_flip, NULL);
- regs = intel_overlay_map_regs(overlay);
- if (!regs)
- goto out_unpin_bo;
+ mutex_lock(&dev_priv->drm.struct_mutex);
+
+ ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
+ if (ret)
+ goto out_free;
+
+ ret = i915_gem_object_set_to_gtt_domain(overlay->reg_bo, true);
+ if (ret)
+ goto out_reg_bo;
- memset_io(regs, 0, sizeof(struct overlay_registers));
- update_polyphase_filter(regs);
- update_reg_attrs(overlay, regs);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_overlay_unmap_regs(overlay, regs);
+ memset_io(overlay->regs, 0, sizeof(struct overlay_registers));
+ update_polyphase_filter(overlay->regs);
+ update_reg_attrs(overlay, overlay->regs);
dev_priv->overlay = overlay;
- mutex_unlock(&dev_priv->drm.struct_mutex);
- DRM_INFO("initialized overlay support\n");
+ DRM_INFO("Initialized overlay support.\n");
return;
-out_unpin_bo:
- if (vma)
- i915_vma_unpin(vma);
-out_free_bo:
- i915_gem_object_put(reg_bo);
+out_reg_bo:
+ i915_gem_object_put(overlay->reg_bo);
out_free:
mutex_unlock(&dev_priv->drm.struct_mutex);
kfree(overlay);
- return;
}
void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
{
- if (!dev_priv->overlay)
+ struct intel_overlay *overlay;
+
+ overlay = fetch_and_zero(&dev_priv->overlay);
+ if (!overlay)
return;
- /* The bo's should be free'd by the generic code already.
+ /*
+ * The BOs should be freed by the generic code already.
* Furthermore, modesetting teardown happens beforehand so the
- * hardware should be off already */
- WARN_ON(dev_priv->overlay->active);
+ * hardware should be off already.
+ */
+ WARN_ON(overlay->active);
+
+ i915_gem_object_put(overlay->reg_bo);
- i915_gem_object_put(dev_priv->overlay->reg_bo);
- kfree(dev_priv->overlay);
+ kfree(overlay);
}
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
@@ -1498,37 +1455,11 @@ struct intel_overlay_error_state {
u32 isr;
};
-static struct overlay_registers __iomem *
-intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
-{
- struct drm_i915_private *dev_priv = overlay->i915;
- struct overlay_registers __iomem *regs;
-
- if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
- /* Cast to make sparse happy, but it's wc memory anyway, so
- * equivalent to the wc io mapping on X86. */
- regs = (struct overlay_registers __iomem *)
- overlay->reg_bo->phys_handle->vaddr;
- else
- regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.iomap,
- overlay->flip_addr);
-
- return regs;
-}
-
-static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
- struct overlay_registers __iomem *regs)
-{
- if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
- io_mapping_unmap_atomic(regs);
-}
-
struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
{
struct intel_overlay *overlay = dev_priv->overlay;
struct intel_overlay_error_state *error;
- struct overlay_registers __iomem *regs;
if (!overlay || !overlay->active)
return NULL;
@@ -1541,18 +1472,9 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
error->isr = I915_READ(ISR);
error->base = overlay->flip_addr;
- regs = intel_overlay_map_regs_atomic(overlay);
- if (!regs)
- goto err;
-
- memcpy_fromio(&error->regs, regs, sizeof(struct overlay_registers));
- intel_overlay_unmap_regs_atomic(overlay, regs);
+ memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
return error;
-
-err:
- kfree(error);
- return NULL;
}
void
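
The i915 hunks above drop the transient map/unmap helpers in favour of a persistent overlay->regs mapping, and intel_cleanup_overlay() now detaches the overlay with fetch_and_zero() so no later caller can observe a half-torn-down pointer. As a rough illustration of what that helper buys (the real fetch_and_zero() is a type-generic macro in the i915 tree; this pointer-only stand-in is purely illustrative, and like the original it provides no atomicity on its own, so callers still need their own locking):

        #include <stddef.h>

        /* Illustrative stand-in for i915's fetch_and_zero(): read a slot,
         * clear it, and hand back the old value, so exactly one caller
         * ends up owning the teardown of whatever the slot pointed at. */
        static void *fetch_and_zero_ptr(void **slot)
        {
                void *old = *slot;
                *slot = NULL;
                return old;
        }

        /* usage sketch:
         *   overlay = fetch_and_zero_ptr((void **)&dev_priv->overlay);
         *   if (!overlay)
         *           return;    -- late callers see NULL and bail out
         */
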
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 0b976dfd04df..92ecb9bf982c 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -600,7 +600,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
}
mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
- mtk_crtc->planes = devm_kzalloc(dev, mtk_crtc->layer_nr *
+ mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr,
sizeof(struct drm_plane),
GFP_KERNEL);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
index 790d39f816dc..b557687b1964 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
@@ -153,8 +153,8 @@ int msm_dss_parse_clock(struct platform_device *pdev,
return 0;
}
- mp->clk_config = devm_kzalloc(&pdev->dev,
- sizeof(struct dss_clk) * num_clk,
+ mp->clk_config = devm_kcalloc(&pdev->dev,
+ num_clk, sizeof(struct dss_clk),
GFP_KERNEL);
if (!mp->clk_config)
return -ENOMEM;
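
Both allocation hunks above replace devm_kzalloc(dev, n * size, GFP_KERNEL) with devm_kcalloc(dev, n, size, GFP_KERNEL): with the multiplication done inside the allocator, an oversized n can no longer wrap the size computation and hand back an undersized buffer. The guard the calloc-style interface implies looks roughly like this (a userspace sketch, not the kernel's implementation; modern calloc() performs the same check itself):

        #include <stdint.h>
        #include <stdlib.h>

        /* Sketch of the overflow guard behind kcalloc()/devm_kcalloc():
         * refuse the request outright when n * size cannot be represented
         * in a size_t, instead of silently allocating a short buffer. */
        static void *checked_array_alloc(size_t n, size_t size)
        {
                if (size != 0 && n > SIZE_MAX / size)
                        return NULL;            /* n * size would overflow */
                return calloc(n, size);         /* zeroed, like kzalloc() */
        }
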
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 8412119bd940..041e7daf8a33 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -900,9 +900,22 @@ static enum drm_connector_status
nv50_mstc_detect(struct drm_connector *connector, bool force)
{
struct nv50_mstc *mstc = nv50_mstc(connector);
+ enum drm_connector_status conn_status;
+ int ret;
+
if (!mstc->port)
return connector_status_disconnected;
- return drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port);
+
+ ret = pm_runtime_get_sync(connector->dev->dev);
+ if (ret < 0 && ret != -EACCES)
+ return connector_status_disconnected;
+
+ conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr,
+ mstc->port);
+
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ return conn_status;
}
static void
@@ -1123,17 +1136,21 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
int ret;
if (dpcd >= 0x12) {
- ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);
+ /* Even if we're enabling MST, start with disabling the
+ * branching unit to clear any sink-side MST topology state
+ * that wasn't set by us
+ */
+ ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 0);
if (ret < 0)
return ret;
- dpcd &= ~DP_MST_EN;
- if (state)
- dpcd |= DP_MST_EN;
-
- ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);
- if (ret < 0)
- return ret;
+ if (state) {
+ /* Now, start initializing */
+ ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL,
+ DP_MST_EN);
+ if (ret < 0)
+ return ret;
+ }
}
return nvif_mthd(disp, 0, &args, sizeof(args));
@@ -1142,31 +1159,58 @@ nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
int
nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
{
- int ret, state = 0;
+ struct drm_dp_aux *aux;
+ int ret;
+ bool old_state, new_state;
+ u8 mstm_ctrl;
if (!mstm)
return 0;
- if (dpcd[0] >= 0x12) {
- ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
+ mutex_lock(&mstm->mgr.lock);
+
+ old_state = mstm->mgr.mst_state;
+ new_state = old_state;
+ aux = mstm->mgr.aux;
+
+ if (old_state) {
+ /* Just check that the MST hub is still as we expect it */
+ ret = drm_dp_dpcd_readb(aux, DP_MSTM_CTRL, &mstm_ctrl);
+ if (ret < 0 || !(mstm_ctrl & DP_MST_EN)) {
+ DRM_DEBUG_KMS("Hub gone, disabling MST topology\n");
+ new_state = false;
+ }
+ } else if (dpcd[0] >= 0x12) {
+ ret = drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &dpcd[1]);
if (ret < 0)
- return ret;
+ goto probe_error;
if (!(dpcd[1] & DP_MST_CAP))
dpcd[0] = 0x11;
else
- state = allow;
+ new_state = allow;
}
- ret = nv50_mstm_enable(mstm, dpcd[0], state);
+ if (new_state == old_state) {
+ mutex_unlock(&mstm->mgr.lock);
+ return new_state;
+ }
+
+ ret = nv50_mstm_enable(mstm, dpcd[0], new_state);
if (ret)
- return ret;
+ goto probe_error;
+
+ mutex_unlock(&mstm->mgr.lock);
- ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);
+ ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, new_state);
if (ret)
return nv50_mstm_enable(mstm, dpcd[0], 0);
- return mstm->mgr.mst_state;
+ return new_state;
+
+probe_error:
+ mutex_unlock(&mstm->mgr.lock);
+ return ret;
}
static void
@@ -2074,7 +2118,7 @@ nv50_disp_atomic_state_alloc(struct drm_device *dev)
static const struct drm_mode_config_funcs
nv50_disp_func = {
.fb_create = nouveau_user_framebuffer_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
+ .output_poll_changed = nouveau_fbcon_output_poll_changed,
.atomic_check = nv50_disp_atomic_check,
.atomic_commit = nv50_disp_atomic_commit,
.atomic_state_alloc = nv50_disp_atomic_state_alloc,
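
The nv50_mstc_detect() hunk above wraps the DisplayPort probe in a synchronous runtime-PM reference, so the GPU is guaranteed awake for the AUX transaction and allowed to autosuspend again afterwards. The general shape of that bracket, as a kernel-style sketch against the linux/pm_runtime.h API (do_detect() is a placeholder for the real work; note that pm_runtime_get_sync() raises the usage count even when it fails, so careful callers drop the reference on the error path too):

        #include <linux/pm_runtime.h>

        static int do_detect(struct device *dev) { return 0; }  /* placeholder */

        static int detect_with_rpm(struct device *dev)
        {
                int ret;

                ret = pm_runtime_get_sync(dev); /* blocks until resumed */
                if (ret < 0 && ret != -EACCES) {
                        pm_runtime_put_noidle(dev); /* count was bumped anyway */
                        return ret;
                }

                ret = do_detect(dev);

                pm_runtime_mark_last_busy(dev); /* restart autosuspend timer */
                pm_runtime_put_autosuspend(dev);
                return ret;
        }
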
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 51932c72334e..247f72cc4d10 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -409,59 +409,45 @@ static struct nouveau_encoder *
nouveau_connector_ddc_detect(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct nouveau_connector *nv_connector = nouveau_connector(connector);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
- struct nouveau_encoder *nv_encoder = NULL;
+ struct nouveau_encoder *nv_encoder = NULL, *found = NULL;
struct drm_encoder *encoder;
- int i, panel = -ENODEV;
-
- /* eDP panels need powering on by us (if the VBIOS doesn't default it
- * to on) before doing any AUX channel transactions. LVDS panel power
- * is handled by the SOR itself, and not required for LVDS DDC.
- */
- if (nv_connector->type == DCB_CONNECTOR_eDP) {
- panel = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
- if (panel == 0) {
- nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
- msleep(300);
- }
- }
+ int i, ret;
+ bool switcheroo_ddc = false;
drm_connector_for_each_possible_encoder(connector, encoder, i) {
nv_encoder = nouveau_encoder(encoder);
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- int ret = nouveau_dp_detect(nv_encoder);
+ switch (nv_encoder->dcb->type) {
+ case DCB_OUTPUT_DP:
+ ret = nouveau_dp_detect(nv_encoder);
if (ret == NOUVEAU_DP_MST)
return NULL;
- if (ret == NOUVEAU_DP_SST)
- break;
- } else
- if ((vga_switcheroo_handler_flags() &
- VGA_SWITCHEROO_CAN_SWITCH_DDC) &&
- nv_encoder->dcb->type == DCB_OUTPUT_LVDS &&
- nv_encoder->i2c) {
- int ret;
- vga_switcheroo_lock_ddc(dev->pdev);
- ret = nvkm_probe_i2c(nv_encoder->i2c, 0x50);
- vga_switcheroo_unlock_ddc(dev->pdev);
- if (ret)
+ else if (ret == NOUVEAU_DP_SST)
+ found = nv_encoder;
+
+ break;
+ case DCB_OUTPUT_LVDS:
+ switcheroo_ddc = !!(vga_switcheroo_handler_flags() &
+ VGA_SWITCHEROO_CAN_SWITCH_DDC);
+ /* fall-through */
+ default:
+ if (!nv_encoder->i2c)
break;
- } else
- if (nv_encoder->i2c) {
+
+ if (switcheroo_ddc)
+ vga_switcheroo_lock_ddc(dev->pdev);
if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
- break;
+ found = nv_encoder;
+ if (switcheroo_ddc)
+ vga_switcheroo_unlock_ddc(dev->pdev);
+
+ break;
}
+ if (found)
+ break;
}
- /* eDP panel not detected, restore panel power GPIO to previous
- * state to avoid confusing the SOR for other output types.
- */
- if (!nv_encoder && panel == 0)
- nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
-
- return nv_encoder;
+ return found;
}
static struct nouveau_encoder *
@@ -555,12 +541,16 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
nv_connector->edid = NULL;
}
- /* Outputs are only polled while runtime active, so acquiring a
- * runtime PM ref here is unnecessary (and would deadlock upon
- * runtime suspend because it waits for polling to finish).
+ /* Outputs are only polled while runtime active, so resuming the
+ * device here is unnecessary (and would deadlock upon runtime suspend
+ * because it waits for polling to finish). We do, however, want to
+ * prevent the autosuspend timer from elapsing during this operation
+ * if possible.
*/
- if (!drm_kms_helper_is_poll_worker()) {
- ret = pm_runtime_get_sync(connector->dev->dev);
+ if (drm_kms_helper_is_poll_worker()) {
+ pm_runtime_get_noresume(dev->dev);
+ } else {
+ ret = pm_runtime_get_sync(dev->dev);
if (ret < 0 && ret != -EACCES)
return conn_status;
}
@@ -638,10 +628,8 @@ detect_analog:
out:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
- }
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
return conn_status;
}
@@ -1105,6 +1093,26 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
const struct nvif_notify_conn_rep_v0 *rep = notify->data;
const char *name = connector->name;
struct nouveau_encoder *nv_encoder;
+ int ret;
+
+ ret = pm_runtime_get(drm->dev->dev);
+ if (ret == 0) {
+ /* We can't block here if there's a pending PM request
+ * running, as we'll deadlock nouveau_display_fini() when it
+ * calls nvif_put() on our nvif_notify struct. So, simply
+ * defer the hotplug event until the device finishes resuming
+ */
+ NV_DEBUG(drm, "Deferring HPD on %s until runtime resume\n",
+ name);
+ schedule_work(&drm->hpd_work);
+
+ pm_runtime_put_noidle(drm->dev->dev);
+ return NVIF_NOTIFY_KEEP;
+ } else if (ret != 1 && ret != -EACCES) {
+ NV_WARN(drm, "HPD on %s dropped due to RPM failure: %d\n",
+ name, ret);
+ return NVIF_NOTIFY_DROP;
+ }
if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
NV_DEBUG(drm, "service %s\n", name);
@@ -1122,6 +1130,8 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
drm_helper_hpd_irq_event(connector->dev);
}
+ pm_runtime_mark_last_busy(drm->dev->dev);
+ pm_runtime_put_autosuspend(drm->dev->dev);
return NVIF_NOTIFY_KEEP;
}
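
Several of the nouveau hunks above key off the return value of the non-blocking pm_runtime_get(): 1 means the device was already active, 0 means an asynchronous resume has merely been queued, and a negative value is an error (-EACCES meaning runtime PM is disabled, i.e. the device is always powered). A sketch of the triage those handlers perform, with defer_event()/process_event() standing in for schedule_work() and the real event handling:

        #include <linux/pm_runtime.h>

        static void defer_event(struct device *dev) { }         /* placeholder */
        static void process_event(struct device *dev) { }       /* placeholder */

        static int handle_event_nonblocking(struct device *dev)
        {
                int ret = pm_runtime_get(dev);  /* never sleeps */

                if (ret == 0) {
                        /* A resume is in flight; blocking here could deadlock
                         * against the PM core, so defer and drop the ref. */
                        defer_event(dev);
                        pm_runtime_put_noidle(dev);
                        return 0;
                }
                if (ret < 0 && ret != -EACCES) {
                        pm_runtime_put_noidle(dev); /* ref held even on error */
                        return ret;                 /* drop the event */
                }

                process_event(dev);     /* device awake, or PM disabled */
                pm_runtime_mark_last_busy(dev);
                pm_runtime_put_autosuspend(dev);
                return 0;
        }
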
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 139368b31916..540c0cbbfcee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -293,7 +293,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.fb_create = nouveau_user_framebuffer_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
+ .output_poll_changed = nouveau_fbcon_output_poll_changed,
};
@@ -355,8 +355,6 @@ nouveau_display_hpd_work(struct work_struct *work)
pm_runtime_get_sync(drm->dev->dev);
drm_helper_hpd_irq_event(drm->dev);
- /* enable polling for external displays */
- drm_kms_helper_poll_enable(drm->dev);
pm_runtime_mark_last_busy(drm->dev->dev);
pm_runtime_put_sync(drm->dev->dev);
@@ -379,15 +377,29 @@ nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
{
struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
struct acpi_bus_event *info = data;
+ int ret;
if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
- /*
- * This may be the only indication we receive of a
- * connector hotplug on a runtime suspended GPU,
- * schedule hpd_work to check.
- */
- schedule_work(&drm->hpd_work);
+ ret = pm_runtime_get(drm->dev->dev);
+ if (ret == 1 || ret == -EACCES) {
+ /* If the GPU is already awake, or in a state
+ * where we can't wake it up, it can handle
+ * its own hotplug events.
+ */
+ pm_runtime_put_autosuspend(drm->dev->dev);
+ } else if (ret == 0) {
+ /* This may be the only indication we receive
+ * of a connector hotplug on a runtime
+ * suspended GPU, schedule hpd_work to check.
+ */
+ NV_DEBUG(drm, "ACPI requested connector reprobe\n");
+ schedule_work(&drm->hpd_work);
+ pm_runtime_put_noidle(drm->dev->dev);
+ } else {
+ NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n",
+ ret);
+ }
/* acpi-video should not generate keypresses for this */
return NOTIFY_BAD;
@@ -411,6 +423,11 @@ nouveau_display_init(struct drm_device *dev)
if (ret)
return ret;
+ /* enable connector detection and polling for connectors without HPD
+ * support
+ */
+ drm_kms_helper_poll_enable(dev);
+
/* enable hotplug interrupts */
drm_connector_list_iter_begin(dev, &conn_iter);
nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
@@ -425,7 +442,7 @@ nouveau_display_init(struct drm_device *dev)
}
void
-nouveau_display_fini(struct drm_device *dev, bool suspend)
+nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
{
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
@@ -450,6 +467,9 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
}
drm_connector_list_iter_end(&conn_iter);
+ if (!runtime)
+ cancel_work_sync(&drm->hpd_work);
+
drm_kms_helper_poll_disable(dev);
disp->fini(dev);
}
@@ -618,11 +638,11 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
}
}
- nouveau_display_fini(dev, true);
+ nouveau_display_fini(dev, true, runtime);
return 0;
}
- nouveau_display_fini(dev, true);
+ nouveau_display_fini(dev, true, runtime);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 54aa7c3fa42d..ff92b54ce448 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -62,7 +62,7 @@ nouveau_display(struct drm_device *dev)
int nouveau_display_create(struct drm_device *dev);
void nouveau_display_destroy(struct drm_device *dev);
int nouveau_display_init(struct drm_device *dev);
-void nouveau_display_fini(struct drm_device *dev, bool suspend);
+void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime);
int nouveau_display_suspend(struct drm_device *dev, bool runtime);
void nouveau_display_resume(struct drm_device *dev, bool runtime);
int nouveau_display_vblank_enable(struct drm_device *, unsigned int);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index c7ec86d6c3c9..74d2283f2c28 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -230,7 +230,7 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
mutex_unlock(&drm->master.lock);
}
if (ret) {
- NV_ERROR(drm, "Client allocation failed: %d\n", ret);
+ NV_PRINTK(err, cli, "Client allocation failed: %d\n", ret);
goto done;
}
@@ -240,37 +240,37 @@ nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
}, sizeof(struct nv_device_v0),
&cli->device);
if (ret) {
- NV_ERROR(drm, "Device allocation failed: %d\n", ret);
+ NV_PRINTK(err, cli, "Device allocation failed: %d\n", ret);
goto done;
}
ret = nvif_mclass(&cli->device.object, mmus);
if (ret < 0) {
- NV_ERROR(drm, "No supported MMU class\n");
+ NV_PRINTK(err, cli, "No supported MMU class\n");
goto done;
}
ret = nvif_mmu_init(&cli->device.object, mmus[ret].oclass, &cli->mmu);
if (ret) {
- NV_ERROR(drm, "MMU allocation failed: %d\n", ret);
+ NV_PRINTK(err, cli, "MMU allocation failed: %d\n", ret);
goto done;
}
ret = nvif_mclass(&cli->mmu.object, vmms);
if (ret < 0) {
- NV_ERROR(drm, "No supported VMM class\n");
+ NV_PRINTK(err, cli, "No supported VMM class\n");
goto done;
}
ret = nouveau_vmm_init(cli, vmms[ret].oclass, &cli->vmm);
if (ret) {
- NV_ERROR(drm, "VMM allocation failed: %d\n", ret);
+ NV_PRINTK(err, cli, "VMM allocation failed: %d\n", ret);
goto done;
}
ret = nvif_mclass(&cli->mmu.object, mems);
if (ret < 0) {
- NV_ERROR(drm, "No supported MEM class\n");
+ NV_PRINTK(err, cli, "No supported MEM class\n");
goto done;
}
@@ -592,10 +592,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
pm_runtime_allow(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put(dev->dev);
- } else {
- /* enable polling for external displays */
- drm_kms_helper_poll_enable(dev);
}
+
return 0;
fail_dispinit:
@@ -629,7 +627,7 @@ nouveau_drm_unload(struct drm_device *dev)
nouveau_debugfs_fini(drm);
if (dev->mode_config.num_crtc)
- nouveau_display_fini(dev, false);
+ nouveau_display_fini(dev, false, false);
nouveau_display_destroy(dev);
nouveau_bios_takedown(dev);
@@ -835,7 +833,6 @@ nouveau_pmops_runtime_suspend(struct device *dev)
return -EBUSY;
}
- drm_kms_helper_poll_disable(drm_dev);
nouveau_switcheroo_optimus_dsm();
ret = nouveau_do_suspend(drm_dev, true);
pci_save_state(pdev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 844498c4267c..0f64c0a1d4b3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -466,6 +466,7 @@ nouveau_fbcon_set_suspend_work(struct work_struct *work)
console_unlock();
if (state == FBINFO_STATE_RUNNING) {
+ nouveau_fbcon_hotplug_resume(drm->fbcon);
pm_runtime_mark_last_busy(drm->dev->dev);
pm_runtime_put_sync(drm->dev->dev);
}
@@ -487,6 +488,61 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
schedule_work(&drm->fbcon_work);
}
+void
+nouveau_fbcon_output_poll_changed(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_fbdev *fbcon = drm->fbcon;
+ int ret;
+
+ if (!fbcon)
+ return;
+
+ mutex_lock(&fbcon->hotplug_lock);
+
+ ret = pm_runtime_get(dev->dev);
+ if (ret == 1 || ret == -EACCES) {
+ drm_fb_helper_hotplug_event(&fbcon->helper);
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+ } else if (ret == 0) {
+ /* If the GPU was already in the process of suspending before
+ * this event happened, then we can't block here as we'll
+ * deadlock the runtime pmops since they wait for us to
+ * finish. So, just defer this event for when we runtime
+ * resume again. It will be handled by fbcon_work.
+ */
+ NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n");
+ fbcon->hotplug_waiting = true;
+ pm_runtime_put_noidle(drm->dev->dev);
+ } else {
+ DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n",
+ ret);
+ }
+
+ mutex_unlock(&fbcon->hotplug_lock);
+}
+
+void
+nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon)
+{
+ struct nouveau_drm *drm;
+
+ if (!fbcon)
+ return;
+ drm = nouveau_drm(fbcon->helper.dev);
+
+ mutex_lock(&fbcon->hotplug_lock);
+ if (fbcon->hotplug_waiting) {
+ fbcon->hotplug_waiting = false;
+
+ NV_DEBUG(drm, "Handling deferred fbcon HPD events\n");
+ drm_fb_helper_hotplug_event(&fbcon->helper);
+ }
+ mutex_unlock(&fbcon->hotplug_lock);
+}
+
int
nouveau_fbcon_init(struct drm_device *dev)
{
@@ -505,6 +561,7 @@ nouveau_fbcon_init(struct drm_device *dev)
drm->fbcon = fbcon;
INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
+ mutex_init(&fbcon->hotplug_lock);
drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
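
nouveau_fbcon_output_poll_changed() and nouveau_fbcon_hotplug_resume() above form a small defer-and-replay pair: an event that arrives while a runtime suspend is in flight just sets a flag under hotplug_lock, and the resume path fires the handler once if the flag is set. Reduced to its core, with generic names and userspace locking (a sketch of the pattern, not the nouveau API):

        #include <pthread.h>
        #include <stdbool.h>

        struct deferred_event {
                pthread_mutex_t lock;
                bool pending;
        };

        /* Producer: can't act now, just remember that an event happened.
         * Multiple events coalesce into a single replay, which is exactly
         * what a hotplug rescan wants. */
        static void event_defer(struct deferred_event *ev)
        {
                pthread_mutex_lock(&ev->lock);
                ev->pending = true;
                pthread_mutex_unlock(&ev->lock);
        }

        /* Consumer (the resume path): replay at most one deferred event. */
        static bool event_replay(struct deferred_event *ev)
        {
                bool fire;

                pthread_mutex_lock(&ev->lock);
                fire = ev->pending;
                ev->pending = false;
                pthread_mutex_unlock(&ev->lock);
                return fire;    /* caller runs the handler if true */
        }
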
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index a6f192ea3fa6..db9d52047ef8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -41,6 +41,9 @@ struct nouveau_fbdev {
struct nvif_object gdi;
struct nvif_object blit;
struct nvif_object twod;
+
+ struct mutex hotplug_lock;
+ bool hotplug_waiting;
};
void nouveau_fbcon_restore(void);
@@ -68,6 +71,8 @@ void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
void nouveau_fbcon_accel_save_disable(struct drm_device *dev);
void nouveau_fbcon_accel_restore(struct drm_device *dev);
+void nouveau_fbcon_output_poll_changed(struct drm_device *dev);
+void nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon);
extern int nouveau_nofbaccel;
#endif /* __NV50_FBCON_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 3da5a4305aa4..8f1ce4833230 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -46,12 +46,10 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
pr_err("VGA switcheroo: switched nouveau on\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
nouveau_pmops_resume(&pdev->dev);
- drm_kms_helper_poll_enable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
} else {
pr_err("VGA switcheroo: switched nouveau off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- drm_kms_helper_poll_disable(dev);
nouveau_switcheroo_optimus_dsm();
nouveau_pmops_suspend(&pdev->dev);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index 32fa94a9773f..cbd33e87b799 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -275,6 +275,7 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
struct nvkm_outp *outp, *outt, *pair;
struct nvkm_conn *conn;
struct nvkm_head *head;
+ struct nvkm_ior *ior;
struct nvbios_connE connE;
struct dcb_output dcbE;
u8 hpd = 0, ver, hdr;
@@ -399,6 +400,19 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
return ret;
}
+ /* Enforce identity-mapped SOR assignment for panels, which have
+ * certain bits (i.e. backlight controls) wired to a specific SOR.
+ */
+ list_for_each_entry(outp, &disp->outp, head) {
+ if (outp->conn->info.type == DCB_CONNECTOR_LVDS ||
+ outp->conn->info.type == DCB_CONNECTOR_eDP) {
+ ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1);
+ if (!WARN_ON(!ior))
+ ior->identity = true;
+ outp->identity = true;
+ }
+ }
+
i = 0;
list_for_each_entry(head, &disp->head, head)
i = max(i, head->id + 1);
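
The identity-SOR hunk above converts the DCB 'or' bitmask into an IOR index with ffs(outp->info.or) - 1: ffs() returns the 1-based position of the least significant set bit (and 0 for an all-zero word), so subtracting one yields the lowest SOR the output is wired to. A small runnable illustration:

        #include <stdio.h>
        #include <strings.h>    /* ffs() */

        int main(void)
        {
                /* DCB-style masks: bit n set means "can use SOR n". */
                unsigned int masks[] = { 0x1, 0x2, 0x4, 0x6 };

                for (unsigned int i = 0; i < sizeof(masks) / sizeof(masks[0]); i++)
                        printf("mask 0x%x -> first SOR index %d\n",
                               masks[i], ffs(masks[i]) - 1);
                return 0;       /* 0x6 resolves to index 1, its lowest set bit */
        }
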
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index 7c5bed29ffef..5f301e632599 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -28,6 +28,7 @@
#include <subdev/bios.h>
#include <subdev/bios/init.h>
+#include <subdev/gpio.h>
#include <subdev/i2c.h>
#include <nvif/event.h>
@@ -412,14 +413,10 @@ nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
}
static void
-nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior)
+nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
{
struct nvkm_dp *dp = nvkm_dp(outp);
- /* Prevent link from being retrained if sink sends an IRQ. */
- atomic_set(&dp->lt.done, 0);
- ior->dp.nr = 0;
-
/* Execute DisableLT script from DP Info Table. */
nvbios_init(&ior->disp->engine.subdev, dp->info.script[4],
init.outp = &dp->outp.info;
@@ -428,6 +425,16 @@ nvkm_dp_release(struct nvkm_outp *outp, struct nvkm_ior *ior)
);
}
+static void
+nvkm_dp_release(struct nvkm_outp *outp)
+{
+ struct nvkm_dp *dp = nvkm_dp(outp);
+
+ /* Prevent link from being retrained if sink sends an IRQ. */
+ atomic_set(&dp->lt.done, 0);
+ dp->outp.ior->dp.nr = 0;
+}
+
static int
nvkm_dp_acquire(struct nvkm_outp *outp)
{
@@ -491,7 +498,7 @@ done:
return ret;
}
-static void
+static bool
nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
{
struct nvkm_i2c_aux *aux = dp->aux;
@@ -505,7 +512,7 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd,
sizeof(dp->dpcd)))
- return;
+ return true;
}
if (dp->present) {
@@ -515,6 +522,7 @@ nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
}
atomic_set(&dp->lt.done, 0);
+ return false;
}
static int
@@ -555,9 +563,38 @@ nvkm_dp_fini(struct nvkm_outp *outp)
static void
nvkm_dp_init(struct nvkm_outp *outp)
{
+ struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio;
struct nvkm_dp *dp = nvkm_dp(outp);
+
nvkm_notify_put(&dp->outp.conn->hpd);
- nvkm_dp_enable(dp, true);
+
+ /* eDP panels need powering on by us (if the VBIOS doesn't default it
+ * to on) before doing any AUX channel transactions. LVDS panel power
+ * is handled by the SOR itself, and not required for LVDS DDC.
+ */
+ if (dp->outp.conn->info.type == DCB_CONNECTOR_eDP) {
+ int power = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
+ if (power == 0)
+ nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
+
+ /* We delay here unconditionally, even if already powered,
+ * because some laptop panels have a significant resume
+ * delay before the panel begins responding.
+ *
+ * This is likely a bit of a hack, but we have no better idea
+ * for handling this at the moment.
+ */
+ msleep(300);
+
+ /* If the eDP panel can't be detected, we need to restore
+ * the panel power GPIO to avoid breaking another output.
+ */
+ if (!nvkm_dp_enable(dp, true) && power == 0)
+ nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 0);
+ } else {
+ nvkm_dp_enable(dp, true);
+ }
+
nvkm_notify_get(&dp->hpd);
}
@@ -576,6 +613,7 @@ nvkm_dp_func = {
.fini = nvkm_dp_fini,
.acquire = nvkm_dp_acquire,
.release = nvkm_dp_release,
+ .disable = nvkm_dp_disable,
};
static int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index e0b4e0c5704e..19911211a12a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -16,6 +16,7 @@ struct nvkm_ior {
char name[8];
struct list_head head;
+ bool identity;
struct nvkm_ior_state {
struct nvkm_outp *outp;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index f89c7b977aa5..def005dd5fda 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -501,11 +501,11 @@ nv50_disp_super_2_0(struct nv50_disp *disp, struct nvkm_head *head)
nv50_disp_super_ied_off(head, ior, 2);
/* If we're shutting down the OR's only active head, execute
- * the output path's release function.
+ * the output path's disable function.
*/
if (ior->arm.head == (1 << head->id)) {
- if ((outp = ior->arm.outp) && outp->func->release)
- outp->func->release(outp, ior);
+ if ((outp = ior->arm.outp) && outp->func->disable)
+ outp->func->disable(outp, ior);
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index be9e7f8c3b23..c62030c96fba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -93,6 +93,8 @@ nvkm_outp_release(struct nvkm_outp *outp, u8 user)
if (ior) {
outp->acquired &= ~user;
if (!outp->acquired) {
+ if (outp->func->release && outp->ior)
+ outp->func->release(outp);
outp->ior->asy.outp = NULL;
outp->ior = NULL;
}
@@ -127,17 +129,26 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
if (proto == UNKNOWN)
return -ENOSYS;
+ /* Deal with panels requiring identity-mapped SOR assignment. */
+ if (outp->identity) {
+ ior = nvkm_ior_find(outp->disp, SOR, ffs(outp->info.or) - 1);
+ if (WARN_ON(!ior))
+ return -ENOSPC;
+ return nvkm_outp_acquire_ior(outp, user, ior);
+ }
+
/* First preference is to reuse the OR that is currently armed
* on HW, if any, in order to prevent unnecessary switching.
*/
list_for_each_entry(ior, &outp->disp->ior, head) {
- if (!ior->asy.outp && ior->arm.outp == outp)
+ if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp)
return nvkm_outp_acquire_ior(outp, user, ior);
}
/* Failing that, a completely unused OR is the next best thing. */
list_for_each_entry(ior, &outp->disp->ior, head) {
- if (!ior->asy.outp && ior->type == type && !ior->arm.outp &&
+ if (!ior->identity &&
+ !ior->asy.outp && ior->type == type && !ior->arm.outp &&
(ior->func->route.set || ior->id == __ffs(outp->info.or)))
return nvkm_outp_acquire_ior(outp, user, ior);
}
@@ -146,7 +157,7 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
* but will be released during the next modeset.
*/
list_for_each_entry(ior, &outp->disp->ior, head) {
- if (!ior->asy.outp && ior->type == type &&
+ if (!ior->identity && !ior->asy.outp && ior->type == type &&
(ior->func->route.set || ior->id == __ffs(outp->info.or)))
return nvkm_outp_acquire_ior(outp, user, ior);
}
@@ -245,7 +256,6 @@ nvkm_outp_ctor(const struct nvkm_outp_func *func, struct nvkm_disp *disp,
outp->index = index;
outp->info = *dcbE;
outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
- outp->or = ffs(outp->info.or) - 1;
OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x "
"edid %x bus %d head %x",
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
index ea84d7d5741a..6c8aa5cfed9d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
@@ -13,10 +13,10 @@ struct nvkm_outp {
struct dcb_output info;
struct nvkm_i2c_bus *i2c;
- int or;
struct list_head head;
struct nvkm_conn *conn;
+ bool identity;
/* Assembly state. */
#define NVKM_OUTP_PRIV 1
@@ -41,7 +41,8 @@ struct nvkm_outp_func {
void (*init)(struct nvkm_outp *);
void (*fini)(struct nvkm_outp *);
int (*acquire)(struct nvkm_outp *);
- void (*release)(struct nvkm_outp *, struct nvkm_ior *);
+ void (*release)(struct nvkm_outp *);
+ void (*disable)(struct nvkm_outp *, struct nvkm_ior *);
};
#define OUTP_MSG(o,l,f,a...) do { \
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
index b80618e35491..17235e940ca9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
@@ -86,10 +86,8 @@ pmu_load(struct nv50_devinit *init, u8 type, bool post,
struct nvkm_bios *bios = subdev->device->bios;
struct nvbios_pmuR pmu;
- if (!nvbios_pmuRm(bios, type, &pmu)) {
- nvkm_error(subdev, "VBIOS PMU fuc %02x not found\n", type);
+ if (!nvbios_pmuRm(bios, type, &pmu))
return -EINVAL;
- }
if (!post)
return 0;
@@ -124,29 +122,30 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
return -EINVAL;
}
+ /* Upload DEVINIT application from VBIOS onto PMU. */
ret = pmu_load(init, 0x04, post, &exec, &args);
- if (ret)
+ if (ret) {
+ nvkm_error(subdev, "VBIOS PMU/DEVINIT not found\n");
return ret;
+ }
- /* upload first chunk of init data */
+ /* Upload tables required by opcodes in boot scripts. */
if (post) {
- // devinit tables
u32 pmu = pmu_args(init, args + 0x08, 0x08);
u32 img = nvbios_rd16(bios, bit_I.offset + 0x14);
u32 len = nvbios_rd16(bios, bit_I.offset + 0x16);
pmu_data(init, pmu, img, len);
}
- /* upload second chunk of init data */
+ /* Upload boot scripts. */
if (post) {
- // devinit boot scripts
u32 pmu = pmu_args(init, args + 0x08, 0x10);
u32 img = nvbios_rd16(bios, bit_I.offset + 0x18);
u32 len = nvbios_rd16(bios, bit_I.offset + 0x1a);
pmu_data(init, pmu, img, len);
}
- /* execute init tables */
+ /* Execute DEVINIT. */
if (post) {
nvkm_wr32(device, 0x10a040, 0x00005000);
pmu_exec(init, exec);
@@ -157,8 +156,11 @@ gm200_devinit_post(struct nvkm_devinit *base, bool post)
return -ETIMEDOUT;
}
- /* load and execute some other ucode image (bios therm?) */
- return pmu_load(init, 0x01, post, NULL, NULL);
+ /* Optional: Execute PRE_OS application on PMU, which should at
+ * least take care of fans until a full PMU has been loaded.
+ */
+ pmu_load(init, 0x01, post, NULL, NULL);
+ return 0;
}
static const struct nvkm_devinit_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index de269eb482dd..7459def78d50 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -1423,7 +1423,7 @@ nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
void
nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
- if (vmm->func->part && inst) {
+ if (inst && vmm->func->part) {
mutex_lock(&vmm->mutex);
vmm->func->part(vmm, inst);
mutex_unlock(&vmm->mutex);
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c
index a534b225e31b..5fa0441bb6df 100644
--- a/drivers/gpu/drm/pl111/pl111_vexpress.c
+++ b/drivers/gpu/drm/pl111/pl111_vexpress.c
@@ -111,7 +111,8 @@ static int vexpress_muxfpga_probe(struct platform_device *pdev)
}
static const struct of_device_id vexpress_muxfpga_match[] = {
- { .compatible = "arm,vexpress-muxfpga", }
+ { .compatible = "arm,vexpress-muxfpga", },
+ {}
};
static struct platform_driver vexpress_muxfpga_driver = {
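
The one-line pl111 fix above adds the empty sentinel that every of_device_id table needs: the OF core walks the array until it finds an entry whose fields are all zero, so a missing terminator sends the match loop past the end of the table. The same convention, shown as a compilable sketch with a local struct standing in for struct of_device_id:

        #include <stdio.h>
        #include <string.h>

        struct match_id {                /* stand-in for struct of_device_id */
                const char *compatible;
        };

        static const struct match_id match_table[] = {
                { .compatible = "arm,vexpress-muxfpga" },
                { /* sentinel: terminates the walk below */ }
        };

        static const struct match_id *match(const char *compat)
        {
                for (const struct match_id *id = match_table; id->compatible; id++)
                        if (!strcmp(id->compatible, compat))
                                return id;
                return NULL;            /* stopped at the sentinel, not past it */
        }

        int main(void)
        {
                printf("%s\n", match("arm,vexpress-muxfpga") ? "hit" : "miss");
                return 0;
        }
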
diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
index e36004fbe453..2a15f2f9271e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c
+++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c
@@ -81,9 +81,19 @@ static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
int i;
for (i = tcon->dclk_min_div; i <= tcon->dclk_max_div; i++) {
- unsigned long ideal = rate * i;
+ u64 ideal = (u64)rate * i;
unsigned long rounded;
+ /*
+ * ideal has overflowed the max value that can be stored in an
+ * unsigned long, and every clk operation we might do on a
+ * truncated u64 value will give us incorrect results.
+ * Let's just stop there since bigger dividers will result in
+ * the same overflow issue.
+ */
+ if (ideal > ULONG_MAX)
+ goto out;
+
rounded = clk_hw_round_rate(clk_hw_get_parent(hw),
ideal);
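
The sun4i_dclk_round_rate() change above widens rate * i to 64 bits before comparing it against ULONG_MAX; on a 32-bit target the product of two unsigned longs would otherwise wrap silently before any check could see it, and every larger divider would wrap too, hence the early bail-out. A runnable demonstration of what the cast changes (uint32_t models a 32-bit unsigned long; values chosen to overflow):

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint32_t rate = 300000000;      /* 300 MHz rate request */
                uint32_t div  = 120;

                uint32_t wrapped = rate * div;           /* truncates on overflow */
                uint64_t ideal   = (uint64_t)rate * div; /* widen before multiplying */

                printf("32-bit product: %u\n", wrapped);
                printf("64-bit product: %llu (overflows 32-bit: %s)\n",
                       (unsigned long long)ideal,
                       ideal > UINT32_MAX ? "yes" : "no");
                return 0;
        }
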
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index dd19d674055c..8b0cd08034e0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -418,7 +418,6 @@ static const struct of_device_id sun4i_drv_of_table[] = {
{ .compatible = "allwinner,sun8i-a33-display-engine" },
{ .compatible = "allwinner,sun8i-a83t-display-engine" },
{ .compatible = "allwinner,sun8i-h3-display-engine" },
- { .compatible = "allwinner,sun8i-r40-display-engine" },
{ .compatible = "allwinner,sun8i-v3s-display-engine" },
{ .compatible = "allwinner,sun9i-a80-display-engine" },
{ }
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 82502b351aec..a564b5dfe082 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -398,7 +398,6 @@ static struct regmap_config sun8i_hdmi_phy_regmap_config = {
static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = {
.has_phy_clk = true,
- .has_second_pll = true,
.phy_init = &sun8i_hdmi_phy_init_h3,
.phy_disable = &sun8i_hdmi_phy_disable_h3,
.phy_config = &sun8i_hdmi_phy_config_h3,
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index fc3713608f78..cb65b0ed53fd 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -545,22 +545,6 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
.vi_num = 1,
};
-static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
- .ccsc = 0,
- .mod_rate = 297000000,
- .scaler_mask = 0xf,
- .ui_num = 3,
- .vi_num = 1,
-};
-
-static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
- .ccsc = 1,
- .mod_rate = 297000000,
- .scaler_mask = 0x3,
- .ui_num = 1,
- .vi_num = 1,
-};
-
static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
.vi_num = 2,
.ui_num = 1,
@@ -583,14 +567,6 @@ static const struct of_device_id sun8i_mixer_of_table[] = {
.data = &sun8i_h3_mixer0_cfg,
},
{
- .compatible = "allwinner,sun8i-r40-de2-mixer-0",
- .data = &sun8i_r40_mixer0_cfg,
- },
- {
- .compatible = "allwinner,sun8i-r40-de2-mixer-1",
- .data = &sun8i_r40_mixer1_cfg,
- },
- {
.compatible = "allwinner,sun8i-v3s-de2-mixer",
.data = &sun8i_v3s_mixer_cfg,
},
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index 55fe398d8290..d5240b777a8f 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -253,7 +253,6 @@ static int sun8i_tcon_top_remove(struct platform_device *pdev)
/* sun4i_drv uses this list to check if a device node is a TCON TOP */
const struct of_device_id sun8i_tcon_top_of_table[] = {
- { .compatible = "allwinner,sun8i-r40-tcon-top" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index dbb62f6eb48a..dd9ffded223b 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -432,9 +432,11 @@ static void udl_fbdev_destroy(struct drm_device *dev,
{
drm_fb_helper_unregister_fbi(&ufbdev->helper);
drm_fb_helper_fini(&ufbdev->helper);
- drm_framebuffer_unregister_private(&ufbdev->ufb.base);
- drm_framebuffer_cleanup(&ufbdev->ufb.base);
- drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
+ if (ufbdev->ufb.obj) {
+ drm_framebuffer_unregister_private(&ufbdev->ufb.base);
+ drm_framebuffer_cleanup(&ufbdev->ufb.base);
+ drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
+ }
}
int udl_fbdev_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index cfb50fedfa2b..a3275fa66b7b 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -297,6 +297,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
vc4_state->crtc_h);
+ vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
+ vc4_state->y_scaling[0] == VC4_SCALING_NONE);
+
if (num_planes > 1) {
vc4_state->is_yuv = true;
@@ -312,24 +315,17 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
vc4_get_scaling_mode(vc4_state->src_h[1],
vc4_state->crtc_h);
- /* YUV conversion requires that scaling be enabled,
- * even on a plane that's otherwise 1:1. Choose TPZ
- * for simplicity.
+ /* YUV conversion requires that horizontal scaling be enabled,
+ * even on a plane that's otherwise 1:1. Looks like only PPF
+ * works in that case, so let's pick that one.
*/
- if (vc4_state->x_scaling[0] == VC4_SCALING_NONE)
- vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
- if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
- vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
+ if (vc4_state->is_unity)
+ vc4_state->x_scaling[0] = VC4_SCALING_PPF;
} else {
vc4_state->x_scaling[1] = VC4_SCALING_NONE;
vc4_state->y_scaling[1] = VC4_SCALING_NONE;
}
- vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
- vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
- vc4_state->x_scaling[1] == VC4_SCALING_NONE &&
- vc4_state->y_scaling[1] == VC4_SCALING_NONE);
-
/* No configuring scaling on the cursor plane, since it gets
non-vblank-synced updates, and scaling requires
LBM changes which have to be vblank-synced.
@@ -672,7 +668,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
}
- if (!vc4_state->is_unity) {
+ if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
/* LBM Base Address. */
if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 1f134570b759..f0ab6b2313bb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3729,7 +3729,7 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
{
struct vmw_buffer_object *vbo =
container_of(bo, struct vmw_buffer_object, base);
- struct ttm_operation_ctx ctx = { interruptible, true };
+ struct ttm_operation_ctx ctx = { interruptible, false };
int ret;
if (vbo->pin_count > 0)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 23beff5d8e3c..6a712a8d59e9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1512,21 +1512,19 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
struct drm_rect *rects)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_rect bounding_box = {0};
u64 total_pixels = 0, pixel_mem, bb_mem;
int i;
for (i = 0; i < num_rects; i++) {
/*
- * Currently this check is limiting the topology within max
- * texture/screentarget size. This should change in future when
- * user-space support multiple fb with topology.
+ * For STDU, only the individual screen (screen target) size is
+ * limited by the SCREENTARGET_MAX_WIDTH/HEIGHT registers.
*/
- if (rects[i].x1 < 0 || rects[i].y1 < 0 ||
- rects[i].x2 > mode_config->max_width ||
- rects[i].y2 > mode_config->max_height) {
- DRM_ERROR("Invalid GUI layout.\n");
+ if (dev_priv->active_display_unit == vmw_du_screen_target &&
+ (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
+ drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
+ DRM_ERROR("Screen size not supported.\n");
return -EINVAL;
}
@@ -1615,7 +1613,7 @@ static int vmw_kms_check_topology(struct drm_device *dev,
struct drm_connector_state *conn_state;
struct vmw_connector_state *vmw_conn_state;
- if (!new_crtc_state->enable && old_crtc_state->enable) {
+ if (!new_crtc_state->enable) {
rects[i].x1 = 0;
rects[i].y1 = 0;
rects[i].x2 = 0;
@@ -2216,12 +2214,16 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
if (dev_priv->assume_16bpp)
assumed_bpp = 2;
+ max_width = min(max_width, dev_priv->texture_max_width);
+ max_height = min(max_height, dev_priv->texture_max_height);
+
+ /*
+ * For STDU, a mode is additionally limited by the
+ * SVGA_REG_SCREENTARGET_MAX_WIDTH/HEIGHT registers.
+ */
if (dev_priv->active_display_unit == vmw_du_screen_target) {
max_width = min(max_width, dev_priv->stdu_max_width);
- max_width = min(max_width, dev_priv->texture_max_width);
-
max_height = min(max_height, dev_priv->stdu_max_height);
- max_height = min(max_height, dev_priv->texture_max_height);
}
/* Add preferred mode */
@@ -2376,6 +2378,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
+ struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_vmw_update_layout_arg *arg =
(struct drm_vmw_update_layout_arg *)data;
void __user *user_rects;
@@ -2421,6 +2424,21 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
drm_rects[i].y1 = curr_rect.y;
drm_rects[i].x2 = curr_rect.x + curr_rect.w;
drm_rects[i].y2 = curr_rect.y + curr_rect.h;
+
+ /*
+ * Currently this check limits the topology to within
+ * mode_config->max (which actually is the max texture size
+ * supported by the virtual device). This limit is here to address
+ * window managers that create a big framebuffer for the whole
+ * topology.
+ */
+ if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
+ drm_rects[i].x2 > mode_config->max_width ||
+ drm_rects[i].y2 > mode_config->max_height) {
+ DRM_ERROR("Invalid GUI layout.\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
}
ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 93f6b96ca7bb..f30e839f7bfd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1600,31 +1600,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
dev_priv->active_display_unit = vmw_du_screen_target;
- if (dev_priv->capabilities & SVGA_CAP_3D) {
- /*
- * For 3D VMs, display (scanout) buffer size is the smaller of
- * max texture and max STDU
- */
- uint32_t max_width, max_height;
-
- max_width = min(dev_priv->texture_max_width,
- dev_priv->stdu_max_width);
- max_height = min(dev_priv->texture_max_height,
- dev_priv->stdu_max_height);
-
- dev->mode_config.max_width = max_width;
- dev->mode_config.max_height = max_height;
- } else {
- /*
- * Given various display aspect ratios, there's no way to
- * estimate these using prim_bb_mem. So just set these to
- * something arbitrarily large and we will reject any layout
- * that doesn't fit prim_bb_mem later
- */
- dev->mode_config.max_width = 8192;
- dev->mode_config.max_height = 8192;
- }
-
vmw_kms_create_implicit_placement_property(dev_priv, false);
for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index e125233e074b..80a01cd4c051 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1404,22 +1404,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
*srf_out = NULL;
if (for_scanout) {
- uint32_t max_width, max_height;
-
if (!svga3dsurface_is_screen_target_format(format)) {
DRM_ERROR("Invalid Screen Target surface format.");
return -EINVAL;
}
- max_width = min(dev_priv->texture_max_width,
- dev_priv->stdu_max_width);
- max_height = min(dev_priv->texture_max_height,
- dev_priv->stdu_max_height);
-
- if (size.width > max_width || size.height > max_height) {
+ if (size.width > dev_priv->texture_max_width ||
+ size.height > dev_priv->texture_max_height) {
DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u",
size.width, size.height,
- max_width, max_height);
+ dev_priv->texture_max_width,
+ dev_priv->texture_max_height);
return -EINVAL;
}
} else {
@@ -1495,8 +1490,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
srf->res.backup_size += sizeof(SVGA3dDXSOState);
+ /*
+ * Don't set the SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface
+ * with a size greater than the STDU max width/height. This is really a
+ * workaround to support the creation of a big framebuffer requested by
+ * some user-space for the whole topology. That big framebuffer won't
+ * really be used for binding with a screen target, as a separate surface
+ * is created during prepare_fb, so it's safe to omit the
+ * SVGA3D_SURFACE_SCREENTARGET flag.
+ */
if (dev_priv->active_display_unit == vmw_du_screen_target &&
- for_scanout)
+ for_scanout && size.width <= dev_priv->stdu_max_width &&
+ size.height <= dev_priv->stdu_max_height)
srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
/*
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index a96bf46bc483..cf2a18571d48 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -215,6 +215,8 @@ static void vga_switcheroo_enable(void)
return;
client->id = ret | ID_BIT_AUDIO;
+ if (client->ops->gpu_bound)
+ client->ops->gpu_bound(client->pdev, ret);
}
vga_switcheroo_debugfs_init(&vgasr_priv);
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 25b7bd56ae11..1cb41992aaa1 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -335,7 +335,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
- if (usage->hid == (HID_UP_CUSTOM | 0x0003)) {
+ if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
+ usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
/* The fn key on Apple USB keyboards */
set_bit(EV_REP, hi->input->evbit);
hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
@@ -472,6 +473,12 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
.driver_data = APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+ .driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 3da354af7a0a..44564f61e9cc 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1000,7 +1000,7 @@ int hid_open_report(struct hid_device *device)
parser = vzalloc(sizeof(struct hid_parser));
if (!parser) {
ret = -ENOMEM;
- goto err;
+ goto alloc_err;
}
parser->device = device;
@@ -1039,6 +1039,7 @@ int hid_open_report(struct hid_device *device)
hid_err(device, "unbalanced delimiter at end of report description\n");
goto err;
}
+ kfree(parser->collection_stack);
vfree(parser);
device->status |= HID_STAT_PARSED;
return 0;
@@ -1047,6 +1048,8 @@ int hid_open_report(struct hid_device *device)
hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
err:
+ kfree(parser->collection_stack);
+alloc_err:
vfree(parser);
hid_close_report(device);
return ret;
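
The hid_open_report() fix above is a classic ordered-unwind layout: the early vzalloc() failure must skip the kfree(parser->collection_stack) that later failures need, so it jumps to a new alloc_err label placed after err. The structure, as a minimal compilable sketch with generic resources (free(NULL), like kfree(NULL), is a no-op, which is what makes the shared err label safe):

        #include <stdlib.h>

        struct parser {
                void *collection_stack; /* allocated after the parser itself */
        };

        static int open_report(void)
        {
                struct parser *p = calloc(1, sizeof(*p));

                if (!p)
                        goto alloc_err;         /* nothing else to free yet */

                p->collection_stack = malloc(64);
                if (!p->collection_stack)
                        goto err;               /* unwinds in reverse order */

                /* ... parsing work; later failures also jump to err ... */

                free(p->collection_stack);
                free(p);
                return 0;

        err:
                free(p->collection_stack);      /* free(NULL) is harmless */
                free(p);
        alloc_err:
                return -1;
        }
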
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 79bdf0c7e351..bc49909aba8e 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -88,6 +88,7 @@
#define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101
#define USB_VENDOR_ID_APPLE 0x05ac
+#define BT_VENDOR_ID_APPLE 0x004c
#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
#define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e
@@ -157,6 +158,7 @@
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI 0x0267
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI 0x026c
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
@@ -528,9 +530,6 @@
#define I2C_VENDOR_ID_HANTICK 0x0911
#define I2C_PRODUCT_ID_HANTICK_5288 0x5288
-#define I2C_VENDOR_ID_RAYD 0x2386
-#define I2C_PRODUCT_ID_RAYD_3118 0x3118
-
#define USB_VENDOR_ID_HANWANG 0x0b57
#define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000
#define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff
@@ -950,6 +949,7 @@
#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
#define USB_DEVICE_ID_SAITEK_PS1000 0x0621
#define USB_DEVICE_ID_SAITEK_RAT7_OLD 0x0ccb
+#define USB_DEVICE_ID_SAITEK_RAT7_CONTAGION 0x0ccd
#define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7
#define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa
#define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
@@ -976,7 +976,6 @@
#define USB_DEVICE_ID_SIS817_TOUCH 0x0817
#define USB_DEVICE_ID_SIS_TS 0x1013
#define USB_DEVICE_ID_SIS1030_TOUCH 0x1030
-#define USB_DEVICE_ID_SIS10FB_TOUCH 0x10fb
#define USB_VENDOR_ID_SKYCABLE 0x1223
#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 4e94ea3e280a..a481eaf39e88 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1582,6 +1582,7 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid,
input_dev->dev.parent = &hid->dev;
hidinput->input = input_dev;
+ hidinput->application = application;
list_add_tail(&hidinput->list, &hid->inputs);
INIT_LIST_HEAD(&hidinput->reports);
@@ -1677,8 +1678,7 @@ static struct hid_input *hidinput_match_application(struct hid_report *report)
struct hid_input *hidinput;
list_for_each_entry(hidinput, &hid->inputs, list) {
- if (hidinput->report &&
- hidinput->report->application == report->application)
+ if (hidinput->application == report->application)
return hidinput;
}
@@ -1815,6 +1815,7 @@ void hidinput_disconnect(struct hid_device *hid)
input_unregister_device(hidinput->input);
else
input_free_device(hidinput->input);
+ kfree(hidinput->name);
kfree(hidinput);
}
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 40fbb7c52723..da954f3f4da7 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1375,7 +1375,8 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
struct hid_usage *usage,
enum latency_mode latency,
bool surface_switch,
- bool button_switch)
+ bool button_switch,
+ bool *inputmode_found)
{
struct mt_device *td = hid_get_drvdata(hdev);
struct mt_class *cls = &td->mtclass;
@@ -1387,6 +1388,14 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
switch (usage->hid) {
case HID_DG_INPUTMODE:
+ /*
+ * Some elan panels wrongly declare 2 input mode features,
+ * and silently ignore the value we set in the second
+ * field. Skip the second feature and hope for the best.
+ */
+ if (*inputmode_found)
+ return false;
+
if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) {
report_len = hid_report_len(report);
buf = hid_alloc_report_buf(report, GFP_KERNEL);
@@ -1402,6 +1411,7 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
}
field->value[index] = td->inputmode_value;
+ *inputmode_found = true;
return true;
case HID_DG_CONTACTMAX:
@@ -1439,6 +1449,7 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
struct hid_usage *usage;
int i, j;
bool update_report;
+ bool inputmode_found = false;
rep_enum = &hdev->report_enum[HID_FEATURE_REPORT];
list_for_each_entry(rep, &rep_enum->report_list, list) {
@@ -1457,7 +1468,8 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
usage,
latency,
surface_switch,
- button_switch))
+ button_switch,
+ &inputmode_found))
update_report = true;
}
}
@@ -1685,6 +1697,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
*/
hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
+ if (id->group != HID_GROUP_MULTITOUCH_WIN_8)
+ hdev->quirks |= HID_QUIRK_MULTI_INPUT;
+
timer_setup(&td->release_timer, mt_expired_timeout, 0);
ret = hid_parse(hdev);
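
The hid-multitouch change above threads a bool *inputmode_found through the feature walk so that only the first HID_DG_INPUTMODE feature gets programmed and any duplicate is skipped. The pattern reduced to its essentials (a runnable sketch; the usage list is fabricated):

        #include <stdbool.h>
        #include <stdio.h>

        #define INPUTMODE 1

        /* Apply each usage at most once: the caller-owned flag suppresses
         * duplicate INPUTMODE entries, as some panels declare two. */
        static bool apply_usage(int usage, bool *inputmode_found)
        {
                if (usage == INPUTMODE) {
                        if (*inputmode_found)
                                return false;   /* second copy: ignore it */
                        *inputmode_found = true;
                }
                return true;
        }

        int main(void)
        {
                int usages[] = { INPUTMODE, 2, INPUTMODE };
                bool found = false;

                for (int i = 0; i < 3; i++)
                        printf("usage %d -> %s\n", usages[i],
                               apply_usage(usages[i], &found) ? "applied" : "skipped");
                return 0;
        }
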
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
index 39e642686ff0..683861f324e3 100644
--- a/drivers/hid/hid-saitek.c
+++ b/drivers/hid/hid-saitek.c
@@ -183,6 +183,8 @@ static const struct hid_device_id saitek_devices[] = {
.driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
.driver_data = SAITEK_RELEASE_MODE_RAT7 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_CONTAGION),
+ .driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9),
.driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 50af72baa5ca..2b63487057c2 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -579,6 +579,28 @@ void sensor_hub_device_close(struct hid_sensor_hub_device *hsdev)
}
EXPORT_SYMBOL_GPL(sensor_hub_device_close);
+static __u8 *sensor_hub_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ /*
+ * Check if the report descriptor of the Thinkpad Helix 2 has a logical
+ * minimum for the magnetic flux axes greater than the maximum.
+ */
+ if (hdev->product == USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA &&
+ *rsize == 2558 && rdesc[913] == 0x17 && rdesc[914] == 0x40 &&
+ rdesc[915] == 0x81 && rdesc[916] == 0x08 &&
+ rdesc[917] == 0x00 && rdesc[918] == 0x27 &&
+ rdesc[921] == 0x07 && rdesc[922] == 0x00) {
+ /* Sets negative logical minimum for mag x, y and z */
+ rdesc[914] = rdesc[935] = rdesc[956] = 0xc0;
+ rdesc[915] = rdesc[936] = rdesc[957] = 0x7e;
+ rdesc[916] = rdesc[937] = rdesc[958] = 0xf7;
+ rdesc[917] = rdesc[938] = rdesc[959] = 0xff;
+ }
+
+ return rdesc;
+}
+
static int sensor_hub_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@@ -743,6 +765,7 @@ static struct hid_driver sensor_hub_driver = {
.probe = sensor_hub_probe,
.remove = sensor_hub_remove,
.raw_event = sensor_hub_raw_event,
+ .report_fixup = sensor_hub_report_fixup,
#ifdef CONFIG_PM
.suspend = sensor_hub_suspend,
.resume = sensor_hub_resume,
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 2ce194a84868..4e3592e7a3f7 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -47,7 +47,7 @@
/* quirks to control the device */
#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
-#define I2C_HID_QUIRK_RESEND_REPORT_DESCR BIT(2)
+#define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2)
/* flags */
#define I2C_HID_STARTED 0
@@ -169,11 +169,8 @@ static const struct i2c_hid_quirks {
{ USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
- I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
- { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118,
- I2C_HID_QUIRK_RESEND_REPORT_DESCR },
- { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
- I2C_HID_QUIRK_RESEND_REPORT_DESCR },
+ I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
+ I2C_HID_QUIRK_NO_RUNTIME_PM },
{ 0, 0 }
};
@@ -1107,7 +1104,9 @@ static int i2c_hid_probe(struct i2c_client *client,
goto err_mem_free;
}
- pm_runtime_put(&client->dev);
+ if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
+ pm_runtime_put(&client->dev);
+
return 0;
err_mem_free:
@@ -1132,7 +1131,8 @@ static int i2c_hid_remove(struct i2c_client *client)
struct i2c_hid *ihid = i2c_get_clientdata(client);
struct hid_device *hid;
- pm_runtime_get_sync(&client->dev);
+ if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
+ pm_runtime_get_sync(&client->dev);
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
pm_runtime_put_noidle(&client->dev);
@@ -1235,19 +1235,15 @@ static int i2c_hid_resume(struct device *dev)
pm_runtime_enable(dev);
enable_irq(client->irq);
- ret = i2c_hid_hwreset(client);
- if (ret)
- return ret;
- /* RAYDIUM device (2386:3118) need to re-send report descr cmd
- * after resume, after this it will be back normal.
- * otherwise it issues too many incomplete reports.
+ /* Instead of resetting the device, simply power it on. This
+ * solves "incomplete reports" on Raydium devices 2386:3118 and
+ * 2386:4B33 and fixes various SIS touchscreens no longer sending
+ * data after a suspend/resume.
*/
- if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) {
- ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0);
- if (ret)
- return ret;
- }
+ ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+ if (ret)
+ return ret;
if (hid->driver && hid->driver->reset_resume) {
ret = hid->driver->reset_resume(hid);
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index 97869b7410eb..08a8327dfd22 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -29,6 +29,8 @@
#define CNL_Ax_DEVICE_ID 0x9DFC
#define GLK_Ax_DEVICE_ID 0x31A2
#define CNL_H_DEVICE_ID 0xA37C
+#define ICL_MOBILE_DEVICE_ID 0x34FC
+#define SPT_H_DEVICE_ID 0xA135
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 050f9872f5c0..256b3016116c 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -38,6 +38,8 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index ced041899456..f4d08c8ac7f8 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -76,6 +76,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
__u32 version)
{
int ret = 0;
+ unsigned int cur_cpu;
struct vmbus_channel_initiate_contact *msg;
unsigned long flags;
@@ -118,9 +119,10 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
* the CPU attempting to connect may not be CPU 0.
*/
if (version >= VERSION_WIN8_1) {
- msg->target_vcpu =
- hv_cpu_number_to_vp_number(smp_processor_id());
- vmbus_connection.connect_cpu = smp_processor_id();
+ cur_cpu = get_cpu();
+ msg->target_vcpu = hv_cpu_number_to_vp_number(cur_cpu);
+ vmbus_connection.connect_cpu = cur_cpu;
+ put_cpu();
} else {
msg->target_vcpu = 0;
vmbus_connection.connect_cpu = 0;
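
Reading smp_processor_id() twice in preemptible context is racy: the task may migrate between the two reads, so target_vcpu and connect_cpu could end up naming different CPUs. get_cpu()/put_cpu() pins the task for the critical section. The pattern in isolation, with use_cpu() as a hypothetical stand-in consumer:

	unsigned int cpu;

	cpu = get_cpu();	/* disables preemption; no migration from here */
	use_cpu(cpu);		/* every consumer sees the same CPU number */
	put_cpu();		/* re-enables preemption */
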
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index b1b548a21f91..c71cc857b649 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1291,6 +1291,9 @@ static ssize_t vmbus_chan_attr_show(struct kobject *kobj,
if (!attribute->show)
return -EIO;
+ if (chan->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
+
return attribute->show(chan, buf);
}
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 944f5b63aecd..78603b78cf41 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -207,8 +207,6 @@ superio_exit(int ioreg)
#define NUM_FAN 7
-#define TEMP_SOURCE_VIRTUAL 0x1f
-
/* Common and NCT6775 specific data */
/* Voltage min/max registers for nr=7..14 are in bank 5 */
@@ -299,8 +297,9 @@ static const u16 NCT6775_REG_PWM_READ[] = {
static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 };
static const u16 NCT6775_REG_FAN_MIN[] = { 0x3b, 0x3c, 0x3d };
-static const u16 NCT6775_REG_FAN_PULSES[] = { 0x641, 0x642, 0x643, 0x644, 0 };
-static const u16 NCT6775_FAN_PULSE_SHIFT[] = { 0, 0, 0, 0, 0, 0 };
+static const u16 NCT6775_REG_FAN_PULSES[NUM_FAN] = {
+ 0x641, 0x642, 0x643, 0x644 };
+static const u16 NCT6775_FAN_PULSE_SHIFT[NUM_FAN] = { };
static const u16 NCT6775_REG_TEMP[] = {
0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d };
@@ -373,6 +372,7 @@ static const char *const nct6775_temp_label[] = {
};
#define NCT6775_TEMP_MASK 0x001ffffe
+#define NCT6775_VIRT_TEMP_MASK 0x00000000
static const u16 NCT6775_REG_TEMP_ALTERNATE[32] = {
[13] = 0x661,
@@ -425,8 +425,8 @@ static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 };
static const u16 NCT6776_REG_FAN_MIN[] = {
0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c };
-static const u16 NCT6776_REG_FAN_PULSES[] = {
- 0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0 };
+static const u16 NCT6776_REG_FAN_PULSES[NUM_FAN] = {
+ 0x644, 0x645, 0x646, 0x647, 0x648, 0x649 };
static const u16 NCT6776_REG_WEIGHT_DUTY_BASE[] = {
0x13e, 0x23e, 0x33e, 0x83e, 0x93e, 0xa3e };
@@ -461,6 +461,7 @@ static const char *const nct6776_temp_label[] = {
};
#define NCT6776_TEMP_MASK 0x007ffffe
+#define NCT6776_VIRT_TEMP_MASK 0x00000000
static const u16 NCT6776_REG_TEMP_ALTERNATE[32] = {
[14] = 0x401,
@@ -501,9 +502,9 @@ static const s8 NCT6779_BEEP_BITS[] = {
30, 31 }; /* intrusion0, intrusion1 */
static const u16 NCT6779_REG_FAN[] = {
- 0x4b0, 0x4b2, 0x4b4, 0x4b6, 0x4b8, 0x4ba, 0x660 };
-static const u16 NCT6779_REG_FAN_PULSES[] = {
- 0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0 };
+ 0x4c0, 0x4c2, 0x4c4, 0x4c6, 0x4c8, 0x4ca, 0x4ce };
+static const u16 NCT6779_REG_FAN_PULSES[NUM_FAN] = {
+ 0x644, 0x645, 0x646, 0x647, 0x648, 0x649 };
static const u16 NCT6779_REG_CRITICAL_PWM_ENABLE[] = {
0x136, 0x236, 0x336, 0x836, 0x936, 0xa36, 0xb36 };
@@ -559,7 +560,9 @@ static const char *const nct6779_temp_label[] = {
};
#define NCT6779_TEMP_MASK 0x07ffff7e
+#define NCT6779_VIRT_TEMP_MASK 0x00000000
#define NCT6791_TEMP_MASK 0x87ffff7e
+#define NCT6791_VIRT_TEMP_MASK 0x80000000
static const u16 NCT6779_REG_TEMP_ALTERNATE[32]
= { 0x490, 0x491, 0x492, 0x493, 0x494, 0x495, 0, 0,
@@ -638,6 +641,7 @@ static const char *const nct6792_temp_label[] = {
};
#define NCT6792_TEMP_MASK 0x9fffff7e
+#define NCT6792_VIRT_TEMP_MASK 0x80000000
static const char *const nct6793_temp_label[] = {
"",
@@ -675,6 +679,7 @@ static const char *const nct6793_temp_label[] = {
};
#define NCT6793_TEMP_MASK 0xbfff037e
+#define NCT6793_VIRT_TEMP_MASK 0x80000000
static const char *const nct6795_temp_label[] = {
"",
@@ -712,6 +717,7 @@ static const char *const nct6795_temp_label[] = {
};
#define NCT6795_TEMP_MASK 0xbfffff7e
+#define NCT6795_VIRT_TEMP_MASK 0x80000000
static const char *const nct6796_temp_label[] = {
"",
@@ -724,8 +730,8 @@ static const char *const nct6796_temp_label[] = {
"AUXTIN4",
"SMBUSMASTER 0",
"SMBUSMASTER 1",
- "",
- "",
+ "Virtual_TEMP",
+ "Virtual_TEMP",
"",
"",
"",
@@ -748,7 +754,8 @@ static const char *const nct6796_temp_label[] = {
"Virtual_TEMP"
};
-#define NCT6796_TEMP_MASK 0xbfff03fe
+#define NCT6796_TEMP_MASK 0xbfff0ffe
+#define NCT6796_VIRT_TEMP_MASK 0x80000c00
/* NCT6102D/NCT6106D specific data */
@@ -779,8 +786,8 @@ static const u16 NCT6106_REG_TEMP_CONFIG[] = {
static const u16 NCT6106_REG_FAN[] = { 0x20, 0x22, 0x24 };
static const u16 NCT6106_REG_FAN_MIN[] = { 0xe0, 0xe2, 0xe4 };
-static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6, 0, 0 };
-static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4, 0, 0 };
+static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6 };
+static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4 };
static const u8 NCT6106_REG_PWM_MODE[] = { 0xf3, 0xf3, 0xf3 };
static const u8 NCT6106_PWM_MODE_MASK[] = { 0x01, 0x02, 0x04 };
@@ -917,6 +924,11 @@ static unsigned int fan_from_reg16(u16 reg, unsigned int divreg)
return 1350000U / (reg << divreg);
}
+static unsigned int fan_from_reg_rpm(u16 reg, unsigned int divreg)
+{
+ return reg;
+}
+
static u16 fan_to_reg(u32 fan, unsigned int divreg)
{
if (!fan)
@@ -969,6 +981,7 @@ struct nct6775_data {
u16 reg_temp_config[NUM_TEMP];
const char * const *temp_label;
u32 temp_mask;
+ u32 virt_temp_mask;
u16 REG_CONFIG;
u16 REG_VBAT;
@@ -1276,11 +1289,11 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
case nct6795:
case nct6796:
return reg == 0x150 || reg == 0x153 || reg == 0x155 ||
- ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) ||
+ (reg & 0xfff0) == 0x4c0 ||
reg == 0x402 ||
reg == 0x63a || reg == 0x63c || reg == 0x63e ||
reg == 0x640 || reg == 0x642 || reg == 0x64a ||
- reg == 0x64c || reg == 0x660 ||
+ reg == 0x64c ||
reg == 0x73 || reg == 0x75 || reg == 0x77 || reg == 0x79 ||
reg == 0x7b || reg == 0x7d;
}
@@ -1558,7 +1571,7 @@ static void nct6775_update_pwm(struct device *dev)
reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]);
data->pwm_weight_temp_sel[i] = reg & 0x1f;
/* If weight is disabled, report weight source as 0 */
- if (j == 1 && !(reg & 0x80))
+ if (!(reg & 0x80))
data->pwm_weight_temp_sel[i] = 0;
/* Weight temp data */
@@ -1682,9 +1695,13 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
if (data->has_fan_min & BIT(i))
data->fan_min[i] = nct6775_read_value(data,
data->REG_FAN_MIN[i]);
- data->fan_pulses[i] =
- (nct6775_read_value(data, data->REG_FAN_PULSES[i])
- >> data->FAN_PULSE_SHIFT[i]) & 0x03;
+
+ if (data->REG_FAN_PULSES[i]) {
+ data->fan_pulses[i] =
+ (nct6775_read_value(data,
+ data->REG_FAN_PULSES[i])
+ >> data->FAN_PULSE_SHIFT[i]) & 0x03;
+ }
nct6775_select_fan_div(dev, data, i, reg);
}
@@ -3639,6 +3656,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->temp_label = nct6776_temp_label;
data->temp_mask = NCT6776_TEMP_MASK;
+ data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK;
data->REG_VBAT = NCT6106_REG_VBAT;
data->REG_DIODE = NCT6106_REG_DIODE;
@@ -3717,6 +3735,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->temp_label = nct6775_temp_label;
data->temp_mask = NCT6775_TEMP_MASK;
+ data->virt_temp_mask = NCT6775_VIRT_TEMP_MASK;
data->REG_CONFIG = NCT6775_REG_CONFIG;
data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3789,6 +3808,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->temp_label = nct6776_temp_label;
data->temp_mask = NCT6776_TEMP_MASK;
+ data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK;
data->REG_CONFIG = NCT6775_REG_CONFIG;
data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3853,7 +3873,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->ALARM_BITS = NCT6779_ALARM_BITS;
data->BEEP_BITS = NCT6779_BEEP_BITS;
- data->fan_from_reg = fan_from_reg13;
+ data->fan_from_reg = fan_from_reg_rpm;
data->fan_from_reg_min = fan_from_reg13;
data->target_temp_mask = 0xff;
data->tolerance_mask = 0x07;
@@ -3861,6 +3881,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->temp_label = nct6779_temp_label;
data->temp_mask = NCT6779_TEMP_MASK;
+ data->virt_temp_mask = NCT6779_VIRT_TEMP_MASK;
data->REG_CONFIG = NCT6775_REG_CONFIG;
data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3933,7 +3954,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->ALARM_BITS = NCT6791_ALARM_BITS;
data->BEEP_BITS = NCT6779_BEEP_BITS;
- data->fan_from_reg = fan_from_reg13;
+ data->fan_from_reg = fan_from_reg_rpm;
data->fan_from_reg_min = fan_from_reg13;
data->target_temp_mask = 0xff;
data->tolerance_mask = 0x07;
@@ -3944,22 +3965,27 @@ static int nct6775_probe(struct platform_device *pdev)
case nct6791:
data->temp_label = nct6779_temp_label;
data->temp_mask = NCT6791_TEMP_MASK;
+ data->virt_temp_mask = NCT6791_VIRT_TEMP_MASK;
break;
case nct6792:
data->temp_label = nct6792_temp_label;
data->temp_mask = NCT6792_TEMP_MASK;
+ data->virt_temp_mask = NCT6792_VIRT_TEMP_MASK;
break;
case nct6793:
data->temp_label = nct6793_temp_label;
data->temp_mask = NCT6793_TEMP_MASK;
+ data->virt_temp_mask = NCT6793_VIRT_TEMP_MASK;
break;
case nct6795:
data->temp_label = nct6795_temp_label;
data->temp_mask = NCT6795_TEMP_MASK;
+ data->virt_temp_mask = NCT6795_VIRT_TEMP_MASK;
break;
case nct6796:
data->temp_label = nct6796_temp_label;
data->temp_mask = NCT6796_TEMP_MASK;
+ data->virt_temp_mask = NCT6796_VIRT_TEMP_MASK;
break;
}
@@ -4143,7 +4169,7 @@ static int nct6775_probe(struct platform_device *pdev)
* for each fan reflects a different temperature, and there
* are no duplicates.
*/
- if (src != TEMP_SOURCE_VIRTUAL) {
+ if (!(data->virt_temp_mask & BIT(src))) {
if (mask & BIT(src))
continue;
mask |= BIT(src);
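
Replacing the TEMP_SOURCE_VIRTUAL sentinel with a per-chip virt_temp_mask lets each chip flag several source indices as virtual. The deduplication logic above then reduces to roughly this (src_usable() is a hypothetical helper, not driver code):

	/* Accept a source only once, unless the chip marks it as virtual. */
	static bool src_usable(u32 virt_temp_mask, u32 *seen, int src)
	{
		if (virt_temp_mask & BIT(src))
			return true;	/* virtual: duplicates are fine */
		if (*seen & BIT(src))
			return false;	/* real source already claimed */
		*seen |= BIT(src);
		return true;
	}
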
diff --git a/drivers/hwmon/npcm750-pwm-fan.c b/drivers/hwmon/npcm750-pwm-fan.c
index 8474d601aa63..b998f9fbed41 100644
--- a/drivers/hwmon/npcm750-pwm-fan.c
+++ b/drivers/hwmon/npcm750-pwm-fan.c
@@ -908,7 +908,7 @@ static int npcm7xx_en_pwm_fan(struct device *dev,
if (fan_cnt < 1)
return -EINVAL;
- fan_ch = devm_kzalloc(dev, sizeof(*fan_ch) * fan_cnt, GFP_KERNEL);
+ fan_ch = devm_kcalloc(dev, fan_cnt, sizeof(*fan_ch), GFP_KERNEL);
if (!fan_ch)
return -ENOMEM;
diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c
index fb4e4a6bb1f6..be5ba4690895 100644
--- a/drivers/hwmon/raspberrypi-hwmon.c
+++ b/drivers/hwmon/raspberrypi-hwmon.c
@@ -164,3 +164,4 @@ module_platform_driver(rpi_hwmon_driver);
MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
MODULE_DESCRIPTION("Raspberry Pi voltage sensor driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:raspberrypi-hwmon");
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index da962aa2cef5..fc6b7f8b62fb 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -139,7 +139,8 @@ static int intel_th_remove(struct device *dev)
th->thdev[i] = NULL;
}
- th->num_thdevs = lowest;
+ if (lowest >= 0)
+ th->num_thdevs = lowest;
}
if (thdrv->attr_group)
@@ -487,7 +488,7 @@ static const struct intel_th_subdevice {
.flags = IORESOURCE_MEM,
},
{
- .start = TH_MMIO_SW,
+ .start = 1, /* use resource[1] */
.end = 0,
.flags = IORESOURCE_MEM,
},
@@ -580,6 +581,7 @@ intel_th_subdevice_alloc(struct intel_th *th,
struct intel_th_device *thdev;
struct resource res[3];
unsigned int req = 0;
+ bool is64bit = false;
int r, err;
thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
@@ -589,12 +591,18 @@ intel_th_subdevice_alloc(struct intel_th *th,
thdev->drvdata = th->drvdata;
+ for (r = 0; r < th->num_resources; r++)
+ if (th->resource[r].flags & IORESOURCE_MEM_64) {
+ is64bit = true;
+ break;
+ }
+
memcpy(res, subdev->res,
sizeof(struct resource) * subdev->nres);
for (r = 0; r < subdev->nres; r++) {
struct resource *devres = th->resource;
- int bar = TH_MMIO_CONFIG;
+ int bar = 0; /* cut subdevices' MMIO from resource[0] */
/*
* Take .end == 0 to mean 'take the whole bar',
@@ -603,6 +611,8 @@ intel_th_subdevice_alloc(struct intel_th *th,
*/
if (!res[r].end && res[r].flags == IORESOURCE_MEM) {
bar = res[r].start;
+ if (is64bit)
+ bar *= 2;
res[r].start = 0;
res[r].end = resource_size(&devres[bar]) - 1;
}
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index c2e55e5d97f6..1cf6290d6435 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -160,6 +160,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
+ {
+ /* Ice Lake PCH */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
{ 0 },
};
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index 94d94b4a9a0d..18cc324f3ca9 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -34,11 +34,11 @@ static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
- u32 ic_clk = i2c_dw_clk_rate(dev);
const char *mode_str, *fp_str = "";
u32 comp_param1;
u32 sda_falling_time, scl_falling_time;
struct i2c_timings *t = &dev->timings;
+ u32 ic_clk;
int ret;
ret = i2c_dw_acquire_lock(dev);
@@ -53,6 +53,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
/* Calculate SCL timing parameters for standard mode if not set */
if (!dev->ss_hcnt || !dev->ss_lcnt) {
+ ic_clk = i2c_dw_clk_rate(dev);
dev->ss_hcnt =
i2c_dw_scl_hcnt(ic_clk,
4000, /* tHD;STA = tHIGH = 4.0 us */
@@ -89,6 +90,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
* needed also in high speed mode.
*/
if (!dev->fs_hcnt || !dev->fs_lcnt) {
+ ic_clk = i2c_dw_clk_rate(dev);
dev->fs_hcnt =
i2c_dw_scl_hcnt(ic_clk,
600, /* tHD;STA = tHIGH = 0.6 us */
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index 0cf1379f4e80..5c754bf659e2 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -164,7 +164,7 @@ static s32 sch_access(struct i2c_adapter *adap, u16 addr,
* run ~75 kHz instead which should do no harm.
*/
dev_notice(&sch_adapter.dev,
- "Clock divider unitialized. Setting defaults\n");
+ "Clock divider uninitialized. Setting defaults\n");
outw(backbone_speed / (4 * 100), SMBHSTCLK);
}
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 36732eb688a4..9f2eb02481d3 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -367,20 +367,26 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
dma_addr_t rx_dma;
enum geni_se_xfer_mode mode;
unsigned long time_left = XFER_TIMEOUT;
+ void *dma_buf;
gi2c->cur = msg;
- mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO;
+ mode = GENI_SE_FIFO;
+ dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+ if (dma_buf)
+ mode = GENI_SE_DMA;
+
geni_se_select_mode(&gi2c->se, mode);
writel_relaxed(msg->len, gi2c->se.base + SE_I2C_RX_TRANS_LEN);
geni_se_setup_m_cmd(&gi2c->se, I2C_READ, m_param);
if (mode == GENI_SE_DMA) {
int ret;
- ret = geni_se_rx_dma_prep(&gi2c->se, msg->buf, msg->len,
+ ret = geni_se_rx_dma_prep(&gi2c->se, dma_buf, msg->len,
&rx_dma);
if (ret) {
mode = GENI_SE_FIFO;
geni_se_select_mode(&gi2c->se, mode);
+ i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
}
}
@@ -393,6 +399,7 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
if (gi2c->err)
geni_i2c_rx_fsm_rst(gi2c);
geni_se_rx_dma_unprep(&gi2c->se, rx_dma, msg->len);
+ i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
}
return gi2c->err;
}
@@ -403,20 +410,26 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
dma_addr_t tx_dma;
enum geni_se_xfer_mode mode;
unsigned long time_left;
+ void *dma_buf;
gi2c->cur = msg;
- mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO;
+ mode = GENI_SE_FIFO;
+ dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+ if (dma_buf)
+ mode = GENI_SE_DMA;
+
geni_se_select_mode(&gi2c->se, mode);
writel_relaxed(msg->len, gi2c->se.base + SE_I2C_TX_TRANS_LEN);
geni_se_setup_m_cmd(&gi2c->se, I2C_WRITE, m_param);
if (mode == GENI_SE_DMA) {
int ret;
- ret = geni_se_tx_dma_prep(&gi2c->se, msg->buf, msg->len,
+ ret = geni_se_tx_dma_prep(&gi2c->se, dma_buf, msg->len,
&tx_dma);
if (ret) {
mode = GENI_SE_FIFO;
geni_se_select_mode(&gi2c->se, mode);
+ i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
}
}
@@ -432,6 +445,7 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
if (gi2c->err)
geni_i2c_tx_fsm_rst(gi2c);
geni_se_tx_dma_unprep(&gi2c->se, tx_dma, msg->len);
+ i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
}
return gi2c->err;
}
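
i2c_get_dma_safe_msg_buf() returns NULL when the message is below the threshold, in which case the driver should fall back to FIFO/PIO; otherwise it hands back a buffer that is safe to DMA to or from, which must be released with i2c_put_dma_safe_msg_buf(), whose last argument controls whether read data is copied back into msg->buf. The canonical shape, as used in both hunks above:

	u8 *dma_buf;
	bool err = false;

	dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);	/* NULL => use PIO */
	if (dma_buf) {
		/* ... map dma_buf and run the DMA transfer, setting err ... */
		i2c_put_dma_safe_msg_buf(dma_buf, msg, !err);
	}
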
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 52cf42b32f0a..4aa7dde876f3 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -806,8 +806,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
time_left = wait_event_timeout(priv->wait, priv->flags & ID_DONE,
num * adap->timeout);
- if (!time_left) {
+
+ /* cleanup DMA if it couldn't complete properly due to an error */
+ if (priv->dma_direction != DMA_NONE)
rcar_i2c_cleanup_dma(priv);
+
+ if (!time_left) {
rcar_i2c_init(priv);
ret = -ETIMEDOUT;
} else if (priv->flags & ID_NACK) {
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index a01389b85f13..7e9a2bbf5ddc 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -152,6 +152,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
mt_params[3].type = ACPI_TYPE_INTEGER;
mt_params[3].integer.value = len;
mt_params[4].type = ACPI_TYPE_BUFFER;
+ mt_params[4].buffer.length = len;
mt_params[4].buffer.pointer = data->block + 1;
}
break;
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 9ee9a15e7134..9200e349f29e 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -2270,7 +2270,7 @@ EXPORT_SYMBOL(i2c_put_adapter);
*
* Return: NULL if a DMA safe buffer was not obtained. Use msg->buf with PIO.
* Or a valid pointer to be used with DMA. After use, release it by
- * calling i2c_release_dma_safe_msg_buf().
+ * calling i2c_put_dma_safe_msg_buf().
*
* This function must only be called from process context!
*/
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 44a7a255ef74..f9b59d41813f 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1784,7 +1784,7 @@ static int ide_cd_probe(ide_drive_t *drive)
ide_cd_read_toc(drive);
g->fops = &idecd_ops;
g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
- device_add_disk(&drive->gendev, g);
+ device_add_disk(&drive->gendev, g, NULL);
return 0;
out_free_disk:
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index e823394ed543..04e008e8f6f9 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -416,7 +416,7 @@ static int ide_gd_probe(ide_drive_t *drive)
if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
g->flags = GENHD_FL_REMOVABLE;
g->fops = &ide_gd_ops;
- device_add_disk(&drive->gendev, g);
+ device_add_disk(&drive->gendev, g, NULL);
return 0;
out_free_disk:
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 7589f2ad1dae..631360b14ca7 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -187,12 +187,15 @@ static int st_lsm6dsx_set_fifo_odr(struct st_lsm6dsx_sensor *sensor,
int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
{
- u16 fifo_watermark = ~0, cur_watermark, sip = 0, fifo_th_mask;
+ u16 fifo_watermark = ~0, cur_watermark, fifo_th_mask;
struct st_lsm6dsx_hw *hw = sensor->hw;
struct st_lsm6dsx_sensor *cur_sensor;
int i, err, data;
__le16 wdata;
+ if (!hw->sip)
+ return 0;
+
for (i = 0; i < ST_LSM6DSX_ID_MAX; i++) {
cur_sensor = iio_priv(hw->iio_devs[i]);
@@ -203,14 +206,10 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark)
: cur_sensor->watermark;
fifo_watermark = min_t(u16, fifo_watermark, cur_watermark);
- sip += cur_sensor->sip;
}
- if (!sip)
- return 0;
-
- fifo_watermark = max_t(u16, fifo_watermark, sip);
- fifo_watermark = (fifo_watermark / sip) * sip;
+ fifo_watermark = max_t(u16, fifo_watermark, hw->sip);
+ fifo_watermark = (fifo_watermark / hw->sip) * hw->sip;
fifo_watermark = fifo_watermark * hw->settings->fifo_ops.th_wl;
err = regmap_read(hw->regmap, hw->settings->fifo_ops.fifo_th.addr + 1,
diff --git a/drivers/iio/temperature/maxim_thermocouple.c b/drivers/iio/temperature/maxim_thermocouple.c
index 54e383231d1e..c31b9633f32d 100644
--- a/drivers/iio/temperature/maxim_thermocouple.c
+++ b/drivers/iio/temperature/maxim_thermocouple.c
@@ -258,7 +258,6 @@ static int maxim_thermocouple_remove(struct spi_device *spi)
static const struct spi_device_id maxim_thermocouple_id[] = {
{"max6675", MAX6675},
{"max31855", MAX31855},
- {"max31856", MAX31855},
{},
};
MODULE_DEVICE_TABLE(spi, maxim_thermocouple_id);
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 0bee1f4b914e..3208ad6ad540 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -338,6 +338,39 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
}
/**
+ * del_gid - Delete GID table entry
+ *
+ * @ib_dev: IB device whose GID entry to be deleted
+ * @port: Port number of the IB device
+ * @table: GID table of the IB device for a port
+ * @ix: GID entry index to delete
+ *
+ */
+static void del_gid(struct ib_device *ib_dev, u8 port,
+ struct ib_gid_table *table, int ix)
+{
+ struct ib_gid_table_entry *entry;
+
+ lockdep_assert_held(&table->lock);
+
+ pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
+ ib_dev->name, port, ix,
+ table->data_vec[ix]->attr.gid.raw);
+
+ write_lock_irq(&table->rwlock);
+ entry = table->data_vec[ix];
+ entry->state = GID_TABLE_ENTRY_PENDING_DEL;
+ /*
+ * For non RoCE protocol, GID entry slot is ready to use.
+ */
+ if (!rdma_protocol_roce(ib_dev, port))
+ table->data_vec[ix] = NULL;
+ write_unlock_irq(&table->rwlock);
+
+ put_gid_entry_locked(entry);
+}
+
+/**
* add_modify_gid - Add or modify GID table entry
*
* @table: GID table in which GID to be added or modified
@@ -358,7 +391,7 @@ static int add_modify_gid(struct ib_gid_table *table,
* this index.
*/
if (is_gid_entry_valid(table->data_vec[attr->index]))
- put_gid_entry(table->data_vec[attr->index]);
+ del_gid(attr->device, attr->port_num, table, attr->index);
/*
* Some HCA's report multiple GID entries with only one valid GID, and
@@ -386,39 +419,6 @@ done:
return ret;
}
-/**
- * del_gid - Delete GID table entry
- *
- * @ib_dev: IB device whose GID entry to be deleted
- * @port: Port number of the IB device
- * @table: GID table of the IB device for a port
- * @ix: GID entry index to delete
- *
- */
-static void del_gid(struct ib_device *ib_dev, u8 port,
- struct ib_gid_table *table, int ix)
-{
- struct ib_gid_table_entry *entry;
-
- lockdep_assert_held(&table->lock);
-
- pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
- ib_dev->name, port, ix,
- table->data_vec[ix]->attr.gid.raw);
-
- write_lock_irq(&table->rwlock);
- entry = table->data_vec[ix];
- entry->state = GID_TABLE_ENTRY_PENDING_DEL;
- /*
- * For non RoCE protocol, GID entry slot is ready to use.
- */
- if (!rdma_protocol_roce(ib_dev, port))
- table->data_vec[ix] = NULL;
- write_unlock_irq(&table->rwlock);
-
- put_gid_entry_locked(entry);
-}
-
/* rwlock should be read locked, or lock should be held */
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
const struct ib_gid_attr *val, bool default_gid,
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index f72677291b69..a36c94930c31 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -724,6 +724,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
dgid = (union ib_gid *) &addr->sib_addr;
pkey = ntohs(addr->sib_pkey);
+ mutex_lock(&lock);
list_for_each_entry(cur_dev, &dev_list, list) {
for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
if (!rdma_cap_af_ib(cur_dev->device, p))
@@ -750,18 +751,19 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
cma_dev = cur_dev;
sgid = gid;
id_priv->id.port_num = p;
+ goto found;
}
}
}
}
-
- if (!cma_dev)
- return -ENODEV;
+ mutex_unlock(&lock);
+ return -ENODEV;
found:
cma_attach_to_dev(id_priv, cma_dev);
- addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
- memcpy(&addr->sib_addr, &sgid, sizeof sgid);
+ mutex_unlock(&lock);
+ addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
+ memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
return 0;
}
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 6eb64c6f0802..c4118bcd5103 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -882,6 +882,8 @@ static int __uverbs_cleanup_ufile(struct ib_uverbs_file *ufile,
WARN_ON(uverbs_try_lock_object(obj, UVERBS_LOOKUP_WRITE));
if (!uverbs_destroy_uobject(obj, reason))
ret = 0;
+ else
+ atomic_set(&obj->usecnt, 0);
}
return ret;
}
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index faa9e6116b2f..73332b9a25b5 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -46,6 +46,8 @@
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
+
#include <linux/uaccess.h>
#include <rdma/ib.h>
@@ -1120,6 +1122,7 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
if (hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
return -EINVAL;
+ hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucm_cmd_table));
if (hdr.in + sizeof(hdr) > len)
return -EINVAL;
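
array_index_nospec() re-bounds an already range-checked index on the speculative path, closing the Spectre-v1 window where a mispredicted branch could let an out-of-range hdr.cmd index the dispatch table. The idiom, with a hypothetical handler signature:

	#include <linux/nospec.h>

	if (hdr.cmd >= ARRAY_SIZE(cmd_table))
		return -EINVAL;
	/* clamp the index under speculation as well */
	hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(cmd_table));
	ret = cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
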
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ec8fb289621f..01d68ed46c1b 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -44,6 +44,8 @@
#include <linux/module.h>
#include <linux/nsproxy.h>
+#include <linux/nospec.h>
+
#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
@@ -124,6 +126,8 @@ static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);
+static const struct file_operations ucma_fops;
+
static inline struct ucma_context *_ucma_find_context(int id,
struct ucma_file *file)
{
@@ -1581,6 +1585,10 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
f = fdget(cmd.fd);
if (!f.file)
return -ENOENT;
+ if (f.file->f_op != &ucma_fops) {
+ ret = -EINVAL;
+ goto file_put;
+ }
/* Validate current fd and prevent destruction of id. */
ctx = ucma_get_ctx(f.file->private_data, cmd.id);
@@ -1670,6 +1678,7 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
return -EINVAL;
+ hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucma_cmd_table));
if (hdr.in + sizeof(hdr) > len)
return -EINVAL;
@@ -1753,6 +1762,8 @@ static int ucma_close(struct inode *inode, struct file *filp)
mutex_lock(&mut);
if (!ctx->closing) {
mutex_unlock(&mut);
+ ucma_put_ctx(ctx);
+ wait_for_completion(&ctx->comp);
/* rdma_destroy_id ensures that no event handlers are
* inflight for that id before releasing it.
*/
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index a21d5214afc3..e012ca80f9d1 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2027,33 +2027,55 @@ static int modify_qp(struct ib_uverbs_file *file,
if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
cmd->base.cur_qp_state > IB_QPS_ERR) ||
- cmd->base.qp_state > IB_QPS_ERR) {
+ (cmd->base.attr_mask & IB_QP_STATE &&
+ cmd->base.qp_state > IB_QPS_ERR)) {
ret = -EINVAL;
goto release_qp;
}
- attr->qp_state = cmd->base.qp_state;
- attr->cur_qp_state = cmd->base.cur_qp_state;
- attr->path_mtu = cmd->base.path_mtu;
- attr->path_mig_state = cmd->base.path_mig_state;
- attr->qkey = cmd->base.qkey;
- attr->rq_psn = cmd->base.rq_psn;
- attr->sq_psn = cmd->base.sq_psn;
- attr->dest_qp_num = cmd->base.dest_qp_num;
- attr->qp_access_flags = cmd->base.qp_access_flags;
- attr->pkey_index = cmd->base.pkey_index;
- attr->alt_pkey_index = cmd->base.alt_pkey_index;
- attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
- attr->max_rd_atomic = cmd->base.max_rd_atomic;
- attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
- attr->min_rnr_timer = cmd->base.min_rnr_timer;
- attr->port_num = cmd->base.port_num;
- attr->timeout = cmd->base.timeout;
- attr->retry_cnt = cmd->base.retry_cnt;
- attr->rnr_retry = cmd->base.rnr_retry;
- attr->alt_port_num = cmd->base.alt_port_num;
- attr->alt_timeout = cmd->base.alt_timeout;
- attr->rate_limit = cmd->rate_limit;
+ if (cmd->base.attr_mask & IB_QP_STATE)
+ attr->qp_state = cmd->base.qp_state;
+ if (cmd->base.attr_mask & IB_QP_CUR_STATE)
+ attr->cur_qp_state = cmd->base.cur_qp_state;
+ if (cmd->base.attr_mask & IB_QP_PATH_MTU)
+ attr->path_mtu = cmd->base.path_mtu;
+ if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
+ attr->path_mig_state = cmd->base.path_mig_state;
+ if (cmd->base.attr_mask & IB_QP_QKEY)
+ attr->qkey = cmd->base.qkey;
+ if (cmd->base.attr_mask & IB_QP_RQ_PSN)
+ attr->rq_psn = cmd->base.rq_psn;
+ if (cmd->base.attr_mask & IB_QP_SQ_PSN)
+ attr->sq_psn = cmd->base.sq_psn;
+ if (cmd->base.attr_mask & IB_QP_DEST_QPN)
+ attr->dest_qp_num = cmd->base.dest_qp_num;
+ if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS)
+ attr->qp_access_flags = cmd->base.qp_access_flags;
+ if (cmd->base.attr_mask & IB_QP_PKEY_INDEX)
+ attr->pkey_index = cmd->base.pkey_index;
+ if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
+ attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
+ if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
+ attr->max_rd_atomic = cmd->base.max_rd_atomic;
+ if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+ attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
+ if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER)
+ attr->min_rnr_timer = cmd->base.min_rnr_timer;
+ if (cmd->base.attr_mask & IB_QP_PORT)
+ attr->port_num = cmd->base.port_num;
+ if (cmd->base.attr_mask & IB_QP_TIMEOUT)
+ attr->timeout = cmd->base.timeout;
+ if (cmd->base.attr_mask & IB_QP_RETRY_CNT)
+ attr->retry_cnt = cmd->base.retry_cnt;
+ if (cmd->base.attr_mask & IB_QP_RNR_RETRY)
+ attr->rnr_retry = cmd->base.rnr_retry;
+ if (cmd->base.attr_mask & IB_QP_ALT_PATH) {
+ attr->alt_port_num = cmd->base.alt_port_num;
+ attr->alt_timeout = cmd->base.alt_timeout;
+ attr->alt_pkey_index = cmd->base.alt_pkey_index;
+ }
+ if (cmd->base.attr_mask & IB_QP_RATE_LIMIT)
+ attr->rate_limit = cmd->rate_limit;
if (cmd->base.attr_mask & IB_QP_AV)
copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
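
The rewrite above copies each attribute only when its attr_mask bit is set, so fields userspace never initialized are neither range-checked nor passed to the driver. Reduced to one field, the pattern is:

	/* Copy a field only if the caller declared it valid ... */
	if (cmd->base.attr_mask & IB_QP_TIMEOUT)
		attr->timeout = cmd->base.timeout;
	/* ... otherwise attr->timeout stays zeroed and the driver
	 * ignores it because the mask bit is clear. */
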
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 823beca448e1..50152c1b1004 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -440,6 +440,7 @@ static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
list_del(&entry->obj_list);
kfree(entry);
}
+ file->ev_queue.is_closed = 1;
spin_unlock_irq(&file->ev_queue.lock);
uverbs_close_fd(filp);
@@ -1050,7 +1051,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
uverbs_dev->num_comp_vectors = device->num_comp_vectors;
if (ib_uverbs_create_uapi(device, uverbs_dev))
- goto err;
+ goto err_uapi;
cdev_init(&uverbs_dev->cdev, NULL);
uverbs_dev->cdev.owner = THIS_MODULE;
@@ -1077,11 +1078,10 @@ static void ib_uverbs_add_one(struct ib_device *device)
err_class:
device_destroy(uverbs_class, uverbs_dev->cdev.dev);
-
err_cdev:
cdev_del(&uverbs_dev->cdev);
+err_uapi:
clear_bit(devnum, dev_map);
-
err:
if (atomic_dec_and_test(&uverbs_dev->refcount))
ib_uverbs_comp_dev(uverbs_dev);
diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
index 73ea6f0db88f..be854628a7c6 100644
--- a/drivers/infiniband/core/uverbs_uapi.c
+++ b/drivers/infiniband/core/uverbs_uapi.c
@@ -248,6 +248,7 @@ void uverbs_destroy_api(struct uverbs_api *uapi)
kfree(rcu_dereference_protected(*slot, true));
radix_tree_iter_delete(&uapi->radix, &iter, slot);
}
+ kfree(uapi);
}
struct uverbs_api *uverbs_alloc_api(
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index bbfb86eb2d24..bc2b9e038439 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -833,6 +833,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
"Failed to destroy Shadow QP");
return rc;
}
+ bnxt_qplib_free_qp_res(&rdev->qplib_res,
+ &rdev->qp1_sqp->qplib_qp);
mutex_lock(&rdev->qp_lock);
list_del(&rdev->qp1_sqp->list);
atomic_dec(&rdev->qp_count);
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 20b9f31052bf..85cd1a3593d6 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -78,7 +78,7 @@ static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
/* Mutex to protect the list of bnxt_re devices added */
static DEFINE_MUTEX(bnxt_re_dev_lock);
static struct workqueue_struct *bnxt_re_wq;
-static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait);
+static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev);
/* SR-IOV helper functions */
@@ -182,7 +182,7 @@ static void bnxt_re_shutdown(void *p)
if (!rdev)
return;
- bnxt_re_ib_unreg(rdev, false);
+ bnxt_re_ib_unreg(rdev);
}
static void bnxt_re_stop_irq(void *handle)
@@ -251,7 +251,7 @@ static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
/* Driver registration routines used to let the networking driver (bnxt_en)
* to know that the RoCE driver is now installed
*/
-static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait)
+static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
{
struct bnxt_en_dev *en_dev;
int rc;
@@ -260,14 +260,9 @@ static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait)
return -EINVAL;
en_dev = rdev->en_dev;
- /* Acquire rtnl lock if it is not invokded from netdev event */
- if (lock_wait)
- rtnl_lock();
rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
BNXT_ROCE_ULP);
- if (lock_wait)
- rtnl_unlock();
return rc;
}
@@ -281,14 +276,12 @@ static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
en_dev = rdev->en_dev;
- rtnl_lock();
rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
&bnxt_re_ulp_ops, rdev);
- rtnl_unlock();
return rc;
}
-static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait)
+static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
{
struct bnxt_en_dev *en_dev;
int rc;
@@ -298,13 +291,9 @@ static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait)
en_dev = rdev->en_dev;
- if (lock_wait)
- rtnl_lock();
rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);
- if (lock_wait)
- rtnl_unlock();
return rc;
}
@@ -320,7 +309,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());
- rtnl_lock();
num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
rdev->msix_entries,
num_msix_want);
@@ -335,7 +323,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
}
rdev->num_msix = num_msix_got;
done:
- rtnl_unlock();
return rc;
}
@@ -358,24 +345,18 @@ static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
fw_msg->timeout = timeout;
}
-static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id,
- bool lock_wait)
+static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id)
{
struct bnxt_en_dev *en_dev = rdev->en_dev;
struct hwrm_ring_free_input req = {0};
struct hwrm_ring_free_output resp;
struct bnxt_fw_msg fw_msg;
- bool do_unlock = false;
int rc = -EINVAL;
if (!en_dev)
return rc;
memset(&fw_msg, 0, sizeof(fw_msg));
- if (lock_wait) {
- rtnl_lock();
- do_unlock = true;
- }
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
@@ -386,8 +367,6 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id,
if (rc)
dev_err(rdev_to_dev(rdev),
"Failed to free HW ring:%d :%#x", req.ring_id, rc);
- if (do_unlock)
- rtnl_unlock();
return rc;
}
@@ -405,7 +384,6 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
return rc;
memset(&fw_msg, 0, sizeof(fw_msg));
- rtnl_lock();
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
req.enables = 0;
req.page_tbl_addr = cpu_to_le64(dma_arr[0]);
@@ -426,27 +404,21 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
if (!rc)
*fw_ring_id = le16_to_cpu(resp.ring_id);
- rtnl_unlock();
return rc;
}
static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
- u32 fw_stats_ctx_id, bool lock_wait)
+ u32 fw_stats_ctx_id)
{
struct bnxt_en_dev *en_dev = rdev->en_dev;
struct hwrm_stat_ctx_free_input req = {0};
struct bnxt_fw_msg fw_msg;
- bool do_unlock = false;
int rc = -EINVAL;
if (!en_dev)
return rc;
memset(&fw_msg, 0, sizeof(fw_msg));
- if (lock_wait) {
- rtnl_lock();
- do_unlock = true;
- }
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
@@ -457,8 +429,6 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
dev_err(rdev_to_dev(rdev),
"Failed to free HW stats context %#x", rc);
- if (do_unlock)
- rtnl_unlock();
return rc;
}
@@ -478,7 +448,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
return rc;
memset(&fw_msg, 0, sizeof(fw_msg));
- rtnl_lock();
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
req.update_period_ms = cpu_to_le32(1000);
@@ -490,7 +459,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
if (!rc)
*fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);
- rtnl_unlock();
return rc;
}
@@ -929,19 +897,19 @@ fail:
return rc;
}
-static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev, bool lock_wait)
+static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
{
int i;
for (i = 0; i < rdev->num_msix - 1; i++) {
- bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, lock_wait);
+ bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id);
bnxt_qplib_free_nq(&rdev->nq[i]);
}
}
-static void bnxt_re_free_res(struct bnxt_re_dev *rdev, bool lock_wait)
+static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
{
- bnxt_re_free_nq_res(rdev, lock_wait);
+ bnxt_re_free_nq_res(rdev);
if (rdev->qplib_res.dpi_tbl.max) {
bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
@@ -1219,7 +1187,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
return 0;
}
-static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait)
+static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
{
int i, rc;
@@ -1234,28 +1202,27 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait)
cancel_delayed_work(&rdev->worker);
bnxt_re_cleanup_res(rdev);
- bnxt_re_free_res(rdev, lock_wait);
+ bnxt_re_free_res(rdev);
if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
if (rc)
dev_warn(rdev_to_dev(rdev),
"Failed to deinitialize RCFW: %#x", rc);
- bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id,
- lock_wait);
+ bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
- bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, lock_wait);
+ bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
}
if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
- rc = bnxt_re_free_msix(rdev, lock_wait);
+ rc = bnxt_re_free_msix(rdev);
if (rc)
dev_warn(rdev_to_dev(rdev),
"Failed to free MSI-X vectors: %#x", rc);
}
if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
- rc = bnxt_re_unregister_netdev(rdev, lock_wait);
+ rc = bnxt_re_unregister_netdev(rdev);
if (rc)
dev_warn(rdev_to_dev(rdev),
"Failed to unregister with netdev: %#x", rc);
@@ -1276,6 +1243,12 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
{
int i, j, rc;
+ bool locked;
+
+ /* Acquire the rtnl lock throughout this function */
+ rtnl_lock();
+ locked = true;
+
/* Registered a new RoCE device instance to netdev */
rc = bnxt_re_register_netdev(rdev);
if (rc) {
@@ -1374,12 +1347,16 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
}
+ rtnl_unlock();
+ locked = false;
+
/* Register ib dev */
rc = bnxt_re_register_ib(rdev);
if (rc) {
pr_err("Failed to register with IB: %#x\n", rc);
goto fail;
}
+ set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
dev_info(rdev_to_dev(rdev), "Device registered successfully");
for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) {
rc = device_create_file(&rdev->ibdev.dev,
@@ -1395,7 +1372,6 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
goto fail;
}
}
- set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
&rdev->active_width);
set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
@@ -1404,17 +1380,21 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
return 0;
free_sctx:
- bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id, true);
+ bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
free_ctx:
bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
disable_rcfw:
bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
free_ring:
- bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, true);
+ bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
free_rcfw:
bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
fail:
- bnxt_re_ib_unreg(rdev, true);
+ if (!locked)
+ rtnl_lock();
+ bnxt_re_ib_unreg(rdev);
+ rtnl_unlock();
+
return rc;
}
@@ -1567,7 +1547,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
*/
if (atomic_read(&rdev->sched_count) > 0)
goto exit;
- bnxt_re_ib_unreg(rdev, false);
+ bnxt_re_ib_unreg(rdev);
bnxt_re_remove_one(rdev);
bnxt_re_dev_unreg(rdev);
break;
@@ -1646,7 +1626,10 @@ static void __exit bnxt_re_mod_exit(void)
*/
flush_workqueue(bnxt_re_wq);
bnxt_re_dev_stop(rdev);
- bnxt_re_ib_unreg(rdev, true);
+ /* Acquire the rtnl_lock as the L2 resources are freed here */
+ rtnl_lock();
+ bnxt_re_ib_unreg(rdev);
+ rtnl_unlock();
bnxt_re_remove_one(rdev);
bnxt_re_dev_unreg(rdev);
}
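
The net effect of this rework is that the conditional lock_wait plumbing disappears: every caller of bnxt_re_ib_unreg() now takes the rtnl lock itself at one obvious point, as in the module-exit path above:

	/* Callers own the rtnl lock; the helpers assume it is held. */
	rtnl_lock();
	bnxt_re_ib_unreg(rdev);		/* no lock_wait flag to thread through */
	rtnl_unlock();
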
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index e426b990c1dd..6ad0d46ab879 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -196,7 +196,7 @@ static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
struct bnxt_qplib_qp *qp)
{
struct bnxt_qplib_q *rq = &qp->rq;
- struct bnxt_qplib_q *sq = &qp->rq;
+ struct bnxt_qplib_q *sq = &qp->sq;
int rc = 0;
if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index b3203afa3b1d..347fe18b1a41 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1685,6 +1685,12 @@ static void flush_qp(struct c4iw_qp *qhp)
schp = to_c4iw_cq(qhp->ibqp.send_cq);
if (qhp->ibqp.uobject) {
+
+ /* for user qps, qhp->wq.flushed is protected by qhp->mutex */
+ if (qhp->wq.flushed)
+ return;
+
+ qhp->wq.flushed = 1;
t4_set_wq_in_error(&qhp->wq, 0);
t4_set_cq_in_error(&rchp->cq);
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 2c19bf772451..e1668bcc2d13 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -6733,6 +6733,7 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
struct hfi1_devdata *dd = ppd->dd;
struct send_context *sc;
int i;
+ int sc_flags;
if (flags & FREEZE_SELF)
write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
@@ -6743,11 +6744,13 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
/* notify all SDMA engines that they are going into a freeze */
sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
+ sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
+ SCF_LINK_DOWN : 0);
/* do halt pre-handling on all enabled send contexts */
for (i = 0; i < dd->num_send_contexts; i++) {
sc = dd->send_contexts[i].sc;
if (sc && (sc->flags & SCF_ENABLED))
- sc_stop(sc, SCF_FROZEN | SCF_HALTED);
+ sc_stop(sc, sc_flags);
}
/* Send context are frozen. Notify user space */
@@ -10674,6 +10677,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
handle_linkup_change(dd, 1);
+ pio_kernel_linkup(dd);
/*
* After link up, a new link width will have been set.
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index eec83757d55f..6c967dde58e7 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -893,14 +893,11 @@ static int trigger_sbr(struct hfi1_devdata *dd)
}
/*
- * A secondary bus reset (SBR) issues a hot reset to our device.
- * The following routine does a 1s wait after the reset is dropped
- * per PCI Trhfa (recovery time). PCIe 3.0 section 6.6.1 -
- * Conventional Reset, paragraph 3, line 35 also says that a 1s
- * delay after a reset is required. Per spec requirements,
- * the link is either working or not after that point.
+ * This is an end-around to do an SBR during probe time. A new API needs
+ * to be implemented to provide a cleaner interface, but this fixes the
+ * current brokenness.
*/
- return pci_reset_bus(dev);
+ return pci_bridge_secondary_bus_reset(dev->bus->self);
}
/*
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index c2c1cba5b23b..752057647f09 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -86,6 +86,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
unsigned long flags;
int write = 1; /* write sendctrl back */
int flush = 0; /* re-read sendctrl to make sure it is flushed */
+ int i;
spin_lock_irqsave(&dd->sendctrl_lock, flags);
@@ -95,9 +96,13 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
reg |= SEND_CTRL_SEND_ENABLE_SMASK;
/* Fall through */
case PSC_DATA_VL_ENABLE:
+ mask = 0;
+ for (i = 0; i < ARRAY_SIZE(dd->vld); i++)
+ if (!dd->vld[i].mtu)
+ mask |= BIT_ULL(i);
/* Disallow sending on VLs not enabled */
- mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
- SEND_CTRL_UNSUPPORTED_VL_SHIFT;
+ mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
+ SEND_CTRL_UNSUPPORTED_VL_SHIFT;
reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
break;
case PSC_GLOBAL_DISABLE:
@@ -921,20 +926,18 @@ void sc_free(struct send_context *sc)
void sc_disable(struct send_context *sc)
{
u64 reg;
- unsigned long flags;
struct pio_buf *pbuf;
if (!sc)
return;
/* do all steps, even if already disabled */
- spin_lock_irqsave(&sc->alloc_lock, flags);
+ spin_lock_irq(&sc->alloc_lock);
reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
sc->flags &= ~SCF_ENABLED;
sc_wait_for_packet_egress(sc, 1);
write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
- spin_unlock_irqrestore(&sc->alloc_lock, flags);
/*
* Flush any waiters. Once the context is disabled,
@@ -944,7 +947,7 @@ void sc_disable(struct send_context *sc)
* proceed with the flush.
*/
udelay(1);
- spin_lock_irqsave(&sc->release_lock, flags);
+ spin_lock(&sc->release_lock);
if (sc->sr) { /* this context has a shadow ring */
while (sc->sr_tail != sc->sr_head) {
pbuf = &sc->sr[sc->sr_tail].pbuf;
@@ -955,7 +958,8 @@ void sc_disable(struct send_context *sc)
sc->sr_tail = 0;
}
}
- spin_unlock_irqrestore(&sc->release_lock, flags);
+ spin_unlock(&sc->release_lock);
+ spin_unlock_irq(&sc->alloc_lock);
}
/* return SendEgressCtxtStatus.PacketOccupancy */
@@ -1178,11 +1182,39 @@ void pio_kernel_unfreeze(struct hfi1_devdata *dd)
sc = dd->send_contexts[i].sc;
if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
continue;
+ if (sc->flags & SCF_LINK_DOWN)
+ continue;
sc_enable(sc); /* will clear the sc frozen flag */
}
}
+/**
+ * pio_kernel_linkup() - Re-enable send contexts after linkup event
+ * @dd: valid devive data
+ *
+ * When the link goes down, the freeze path is taken. However, a link down
+ * event is different from a freeze because if the send context is re-enabled
+ * whowever is sending data will start sending data again, which will hang
+ * any QP that is sending data.
+ *
+ * The freeze path now looks at the type of event that occurs and takes this
+ * path for link down event.
+ */
+void pio_kernel_linkup(struct hfi1_devdata *dd)
+{
+ struct send_context *sc;
+ int i;
+
+ for (i = 0; i < dd->num_send_contexts; i++) {
+ sc = dd->send_contexts[i].sc;
+ if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER)
+ continue;
+
+ sc_enable(sc); /* will clear the sc link down flag */
+ }
+}
+
/*
* Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
* Returns:
@@ -1382,11 +1414,10 @@ void sc_stop(struct send_context *sc, int flag)
{
unsigned long flags;
- /* mark the context */
- sc->flags |= flag;
-
/* stop buffer allocations */
spin_lock_irqsave(&sc->alloc_lock, flags);
+ /* mark the context */
+ sc->flags |= flag;
sc->flags &= ~SCF_ENABLED;
spin_unlock_irqrestore(&sc->alloc_lock, flags);
wake_up(&sc->halt_wait);
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index 058b08f459ab..aaf372c3e5d6 100644
--- a/drivers/infiniband/hw/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -139,6 +139,7 @@ struct send_context {
#define SCF_IN_FREE 0x02
#define SCF_HALTED 0x04
#define SCF_FROZEN 0x08
+#define SCF_LINK_DOWN 0x10
struct send_context_info {
struct send_context *sc; /* allocated working context */
@@ -306,6 +307,7 @@ void set_pio_integrity(struct send_context *sc);
void pio_reset_all(struct hfi1_devdata *dd);
void pio_freeze(struct hfi1_devdata *dd);
void pio_kernel_unfreeze(struct hfi1_devdata *dd);
+void pio_kernel_linkup(struct hfi1_devdata *dd);
/* global PIO send control operations */
#define PSC_GLOBAL_ENABLE 0
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index a3a7b33196d6..5c88706121c1 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -828,7 +828,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
if (++req->iov_idx == req->data_iovs) {
ret = -EFAULT;
- goto free_txreq;
+ goto free_tx;
}
iovec = &req->iovs[req->iov_idx];
WARN_ON(iovec->offset);
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 13374c727b14..a7c586a5589d 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1582,6 +1582,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
struct hfi1_pportdata *ppd;
struct hfi1_devdata *dd;
u8 sc5;
+ u8 sl;
if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) &&
!(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
@@ -1590,8 +1591,13 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
/* test the mapping for validity */
ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
ppd = ppd_from_ibp(ibp);
- sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
dd = dd_from_ppd(ppd);
+
+ sl = rdma_ah_get_sl(ah_attr);
+ if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
+ return -EINVAL;
+
+ sc5 = ibp->sl_to_sc[sl];
if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
return -EINVAL;
return 0;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ca0f1ee26091..0bbeaaae47e0 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -517,9 +517,11 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->page_size_cap = dev->dev->caps.page_size_cap;
props->max_qp = dev->dev->quotas.qp;
props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
- props->max_send_sge = dev->dev->caps.max_sq_sg;
- props->max_recv_sge = dev->dev->caps.max_rq_sg;
- props->max_sge_rd = MLX4_MAX_SGE_RD;
+ props->max_send_sge =
+ min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
+ props->max_recv_sge =
+ min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
+ props->max_sge_rd = MLX4_MAX_SGE_RD;
props->max_cq = dev->dev->quotas.cq;
props->max_cqe = dev->dev->caps.max_cqes;
props->max_mr = dev->dev->quotas.mpt;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index ac116d63e466..f2f11e652dcd 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -723,6 +723,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
struct devx_obj *obj;
int err;
@@ -754,10 +755,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
if (err)
- goto obj_free;
+ goto obj_destroy;
return 0;
+obj_destroy:
+ mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
obj_free:
kfree(obj);
return err;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 9fb1d9cb9401..e22314837645 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -544,6 +544,9 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
int shrink = 0;
int c;
+ if (!mr->allocated_from_cache)
+ return;
+
c = order2idx(dev, mr->order);
if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
@@ -1647,18 +1650,19 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
umem = NULL;
}
#endif
-
clean_mr(dev, mr);
+ /*
+ * We should unregister the DMA address from the HCA before
+ * remove the DMA mapping.
+ */
+ mlx5_mr_cache_free(dev, mr);
if (umem) {
ib_umem_release(umem);
atomic_sub(npages, &dev->mdev->priv.reg_pages);
}
-
if (!mr->allocated_from_cache)
kfree(mr);
- else
- mlx5_mr_cache_free(dev, mr);
}
int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index ea01b8dd2be6..3d5424f335cb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1027,12 +1027,14 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id,
skb_queue_head_init(&skqueue);
+ netif_tx_lock_bh(p->dev);
spin_lock_irq(&priv->lock);
set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
if (p->neigh)
while ((skb = __skb_dequeue(&p->neigh->queue)))
__skb_queue_tail(&skqueue, skb);
spin_unlock_irq(&priv->lock);
+ netif_tx_unlock_bh(p->dev);
while ((skb = __skb_dequeue(&skqueue))) {
skb->dev = p->dev;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 444d16520506..0b34e909505f 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2951,7 +2951,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
struct srp_rdma_ch *ch;
- int i;
+ int i, j;
u8 status;
shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -2965,8 +2965,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
- for (i = 0; i < target->req_ring_size; ++i) {
- struct srp_request *req = &ch->req_ring[i];
+ for (j = 0; j < target->req_ring_size; ++j) {
+ struct srp_request *req = &ch->req_ring[j];
srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
}
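
The srp_reset_device() change fixes a classic shadowed-loop-index bug: the inner loop reused i, so the sweep over channels stopped after the first one. A compilable illustration with made-up counts:

#include <stdio.h>

int main(void)
{
	int ch_count = 4, ring_size = 3;
	int finished = 0;
	int i, j;

	/* With the inner loop also counting on i, the outer condition
	 * was already exhausted after one channel. Distinct indices
	 * restore the intended ch_count * ring_size passes. */
	for (i = 0; i < ch_count; i++)
		for (j = 0; j < ring_size; j++)
			finished++;

	printf("requests finished: %d (expected %d)\n",
	       finished, ch_count * ring_size);
	return 0;
}
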
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 370206f987f9..f48369d6f3a0 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -564,6 +564,7 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
input_inject_event(&evdev->handle,
event.type, event.code, event.value);
+ cond_resched();
}
out:
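
A single evdev write() may carry an arbitrarily large batch of events, so the loop now yields between injections; the uinput and mousedev hunks below get the same cond_resched() treatment. A userspace analog of the idea (sched_yield() standing in for cond_resched(), the loop body for input_inject_event()):

#include <sched.h>
#include <stdio.h>

int main(void)
{
	int injected = 0;
	int i;

	for (i = 0; i < 1000; i++) {
		injected++;	/* stands in for injecting one event */
		sched_yield();	/* let other tasks run mid-batch */
	}
	printf("injected %d events\n", injected);
	return 0;
}
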
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index cd620e009bad..d4b9db487b16 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -231,6 +231,7 @@ static const struct xpad_device {
{ 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
@@ -530,6 +531,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init1),
XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init2),
+ XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init1),
+ XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init2),
XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c
index 6f62da2909ec..6caee807cafa 100644
--- a/drivers/input/keyboard/atakbd.c
+++ b/drivers/input/keyboard/atakbd.c
@@ -75,8 +75,7 @@ MODULE_LICENSE("GPL");
*/
-static unsigned char atakbd_keycode[0x72] = { /* American layout */
- [0] = KEY_GRAVE,
+static unsigned char atakbd_keycode[0x73] = { /* American layout */
[1] = KEY_ESC,
[2] = KEY_1,
[3] = KEY_2,
@@ -117,9 +116,9 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
[38] = KEY_L,
[39] = KEY_SEMICOLON,
[40] = KEY_APOSTROPHE,
- [41] = KEY_BACKSLASH, /* FIXME, '#' */
+ [41] = KEY_GRAVE,
[42] = KEY_LEFTSHIFT,
- [43] = KEY_GRAVE, /* FIXME: '~' */
+ [43] = KEY_BACKSLASH,
[44] = KEY_Z,
[45] = KEY_X,
[46] = KEY_C,
@@ -145,45 +144,34 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
[66] = KEY_F8,
[67] = KEY_F9,
[68] = KEY_F10,
- [69] = KEY_ESC,
- [70] = KEY_DELETE,
- [71] = KEY_KP7,
- [72] = KEY_KP8,
- [73] = KEY_KP9,
+ [71] = KEY_HOME,
+ [72] = KEY_UP,
[74] = KEY_KPMINUS,
- [75] = KEY_KP4,
- [76] = KEY_KP5,
- [77] = KEY_KP6,
+ [75] = KEY_LEFT,
+ [77] = KEY_RIGHT,
[78] = KEY_KPPLUS,
- [79] = KEY_KP1,
- [80] = KEY_KP2,
- [81] = KEY_KP3,
- [82] = KEY_KP0,
- [83] = KEY_KPDOT,
- [90] = KEY_KPLEFTPAREN,
- [91] = KEY_KPRIGHTPAREN,
- [92] = KEY_KPASTERISK, /* FIXME */
- [93] = KEY_KPASTERISK,
- [94] = KEY_KPPLUS,
- [95] = KEY_HELP,
+ [80] = KEY_DOWN,
+ [82] = KEY_INSERT,
+ [83] = KEY_DELETE,
[96] = KEY_102ND,
- [97] = KEY_KPASTERISK, /* FIXME */
- [98] = KEY_KPSLASH,
+ [97] = KEY_UNDO,
+ [98] = KEY_HELP,
[99] = KEY_KPLEFTPAREN,
[100] = KEY_KPRIGHTPAREN,
[101] = KEY_KPSLASH,
[102] = KEY_KPASTERISK,
- [103] = KEY_UP,
- [104] = KEY_KPASTERISK, /* FIXME */
- [105] = KEY_LEFT,
- [106] = KEY_RIGHT,
- [107] = KEY_KPASTERISK, /* FIXME */
- [108] = KEY_DOWN,
- [109] = KEY_KPASTERISK, /* FIXME */
- [110] = KEY_KPASTERISK, /* FIXME */
- [111] = KEY_KPASTERISK, /* FIXME */
- [112] = KEY_KPASTERISK, /* FIXME */
- [113] = KEY_KPASTERISK /* FIXME */
+ [103] = KEY_KP7,
+ [104] = KEY_KP8,
+ [105] = KEY_KP9,
+ [106] = KEY_KP4,
+ [107] = KEY_KP5,
+ [108] = KEY_KP6,
+ [109] = KEY_KP1,
+ [110] = KEY_KP2,
+ [111] = KEY_KP3,
+ [112] = KEY_KP0,
+ [113] = KEY_KPDOT,
+ [114] = KEY_KPENTER,
};
static struct input_dev *atakbd_dev;
@@ -191,21 +179,15 @@ static struct input_dev *atakbd_dev;
static void atakbd_interrupt(unsigned char scancode, char down)
{
- if (scancode < 0x72) { /* scancodes < 0xf2 are keys */
+ if (scancode < 0x73) { /* scancodes < 0xf3 are keys */
// report raw events here?
scancode = atakbd_keycode[scancode];
- if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */
- input_report_key(atakbd_dev, scancode, 1);
- input_report_key(atakbd_dev, scancode, 0);
- input_sync(atakbd_dev);
- } else {
- input_report_key(atakbd_dev, scancode, down);
- input_sync(atakbd_dev);
- }
- } else /* scancodes >= 0xf2 are mouse data, most likely */
+ input_report_key(atakbd_dev, scancode, down);
+ input_sync(atakbd_dev);
+ } else /* scancodes >= 0xf3 are mouse data, most likely */
printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode);
return;
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 96a887f33698..8ec483e8688b 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -410,7 +410,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
min = abs->minimum;
max = abs->maximum;
- if ((min != 0 || max != 0) && max <= min) {
+ if ((min != 0 || max != 0) && max < min) {
printk(KERN_DEBUG
"%s: invalid abs[%02x] min:%d max:%d\n",
UINPUT_NAME, code, min, max);
@@ -598,6 +598,7 @@ static ssize_t uinput_inject_events(struct uinput_device *udev,
input_event(udev->dev, ev.type, ev.code, ev.value);
bytes += input_event_size();
+ cond_resched();
}
return bytes;
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index f5ae24865355..b0f9d19b3410 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1346,6 +1346,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0611", 0 },
{ "ELAN0612", 0 },
{ "ELAN0618", 0 },
+ { "ELAN061C", 0 },
{ "ELAN061D", 0 },
{ "ELAN0622", 0 },
{ "ELAN1000", 0 },
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 44f57cf6675b..2d95e8d93cc7 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1178,6 +1178,8 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
static const char * const middle_button_pnp_ids[] = {
"LEN2131", /* ThinkPad P52 w/ NFC */
"LEN2132", /* ThinkPad P52 */
+ "LEN2133", /* ThinkPad P72 w/ NFC */
+ "LEN2134", /* ThinkPad P72 */
NULL
};
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index e08228061bcd..412fa71245af 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -707,6 +707,7 @@ static ssize_t mousedev_write(struct file *file, const char __user *buffer,
mousedev_generate_response(client, c);
spin_unlock_irq(&client->packet_lock);
+ cond_resched();
}
kill_fasync(&client->fasync, SIGIO, POLL_IN);
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index b8bc71569349..95a78ccbd847 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -1395,15 +1395,26 @@ static void __init i8042_register_ports(void)
for (i = 0; i < I8042_NUM_PORTS; i++) {
struct serio *serio = i8042_ports[i].serio;
- if (serio) {
- printk(KERN_INFO "serio: %s at %#lx,%#lx irq %d\n",
- serio->name,
- (unsigned long) I8042_DATA_REG,
- (unsigned long) I8042_COMMAND_REG,
- i8042_ports[i].irq);
- serio_register_port(serio);
- device_set_wakeup_capable(&serio->dev, true);
- }
+ if (!serio)
+ continue;
+
+ printk(KERN_INFO "serio: %s at %#lx,%#lx irq %d\n",
+ serio->name,
+ (unsigned long) I8042_DATA_REG,
+ (unsigned long) I8042_COMMAND_REG,
+ i8042_ports[i].irq);
+ serio_register_port(serio);
+ device_set_wakeup_capable(&serio->dev, true);
+
+ /*
+ * On platforms using suspend-to-idle, enable keyboard wakeups
+ * by default so the keyboard can wake the system from sleep.
+ * This matches the keyboard wakeup behavior of many platforms
+ * whose default sleep state is suspend-to-RAM (ACPI S3).
+ */
+ if (pm_suspend_via_s2idle() && i == I8042_KBD_PORT_NO)
+ device_set_wakeup_enable(&serio->dev, true);
}
}
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index 80e69bb8283e..83ac8c128192 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -241,6 +241,9 @@ static int __maybe_unused egalax_ts_suspend(struct device *dev)
struct i2c_client *client = to_i2c_client(dev);
int ret;
+ if (device_may_wakeup(dev))
+ return enable_irq_wake(client->irq);
+
ret = i2c_master_send(client, suspend_cmd, MAX_I2C_DATA_LEN);
return ret > 0 ? 0 : ret;
}
@@ -249,6 +252,9 @@ static int __maybe_unused egalax_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
+ if (device_may_wakeup(dev))
+ return disable_irq_wake(client->irq);
+
return egalax_wake_up_device(client);
}
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 4e04fff23977..bee0dfb7b93b 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -246,7 +246,13 @@ static u16 get_alias(struct device *dev)
/* The callers make sure that get_device_id() does not fail here */
devid = get_device_id(dev);
+
+ /* For ACPI HID devices, simply return the devid unchanged */
+ if (!dev_is_pci(dev))
+ return devid;
+
ivrs_alias = amd_iommu_alias_table[devid];
+
pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
if (ivrs_alias == pci_alias)
@@ -3063,7 +3069,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
return 0;
offset_mask = pte_pgsize - 1;
- __pte = *pte & PM_ADDR_MASK;
+ __pte = __sme_clr(*pte & PM_ADDR_MASK);
return (__pte & ~offset_mask) | (iova & offset_mask);
}
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 5f3f10cf9d9d..bedc801b06a0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2540,9 +2540,9 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
if (dev && dev_is_pci(dev) && info->pasid_supported) {
ret = intel_pasid_alloc_table(dev);
if (ret) {
- __dmar_remove_one_dev_info(info);
- spin_unlock_irqrestore(&device_domain_lock, flags);
- return NULL;
+ pr_warn("No pasid table for %s, pasid disabled\n",
+ dev_name(dev));
+ info->pasid_supported = 0;
}
}
spin_unlock_irqrestore(&device_domain_lock, flags);
diff --git a/drivers/iommu/intel-pasid.h b/drivers/iommu/intel-pasid.h
index 1c05ed6fc5a5..1fb5e12b029a 100644
--- a/drivers/iommu/intel-pasid.h
+++ b/drivers/iommu/intel-pasid.h
@@ -11,7 +11,7 @@
#define __INTEL_PASID_H
#define PASID_MIN 0x1
-#define PASID_MAX 0x100000
+#define PASID_MAX 0x20000
struct pasid_entry {
u64 val;
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 258115b10fa9..ad3e2b97469e 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1241,6 +1241,12 @@ err_unprepare_clocks:
static void rk_iommu_shutdown(struct platform_device *pdev)
{
+ struct rk_iommu *iommu = platform_get_drvdata(pdev);
+ int i = 0, irq;
+
+ while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
+ devm_free_irq(iommu->dev, irq, iommu);
+
pm_runtime_force_suspend(&pdev->dev);
}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 316a57530f6d..c2df341ff6fa 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1439,6 +1439,7 @@ static struct irq_chip its_irq_chip = {
* The consequence of the above is that allocation cost is low, but
* freeing is expensive. We assume that freeing rarely occurs.
*/
+#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
static DEFINE_MUTEX(lpi_range_lock);
static LIST_HEAD(lpi_range_list);
@@ -1625,7 +1626,8 @@ static int __init its_alloc_lpi_tables(void)
{
phys_addr_t paddr;
- lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer);
+ lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
+ ITS_MAX_LPI_NRBITS);
gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
if (!gic_rdists->prop_page) {
pr_err("Failed to allocate PROPBASE\n");
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 439bf90d084d..a872cd720967 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -4,8 +4,7 @@
menuconfig NVM
bool "Open-Channel SSD target support"
- depends on BLOCK && PCI
- select BLK_DEV_NVME
+ depends on BLOCK
help
Say Y here to enable Open-Channel SSDs.
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 60aa7bc5a630..efb976a863d2 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -355,6 +355,11 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
return -EINVAL;
}
+ if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
+ pr_err("nvm: device is incompatible with target L2P type.\n");
+ return -EINVAL;
+ }
+
if (nvm_target_exists(create->tgtname)) {
pr_err("nvm: target name already exists (%s)\n",
create->tgtname);
@@ -598,22 +603,16 @@ static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
- if (rqd->nr_ppas == 1) {
- nvm_ppa_tgt_to_dev(tgt_dev, &rqd->ppa_addr, 1);
- return;
- }
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
- nvm_ppa_tgt_to_dev(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
+ nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
}
static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
- if (rqd->nr_ppas == 1) {
- nvm_ppa_dev_to_tgt(tgt_dev, &rqd->ppa_addr, 1);
- return;
- }
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
- nvm_ppa_dev_to_tgt(tgt_dev, rqd->ppa_list, rqd->nr_ppas);
+ nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
}
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
@@ -712,45 +711,23 @@ static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}
-int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct nvm_chk_meta *meta,
- struct ppa_addr ppa, int nchks)
+static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
{
- struct nvm_dev *dev = tgt_dev->parent;
-
- nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);
-
- return dev->ops->get_chk_meta(tgt_dev->parent, meta,
- (sector_t)ppa.ppa, nchks);
-}
-EXPORT_SYMBOL(nvm_get_chunk_meta);
-
-int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
- int nr_ppas, int type)
-{
- struct nvm_dev *dev = tgt_dev->parent;
- struct nvm_rq rqd;
- int ret;
+ int flags = 0;
- if (nr_ppas > NVM_MAX_VLBA) {
- pr_err("nvm: unable to update all blocks atomically\n");
- return -EINVAL;
- }
+ if (geo->version == NVM_OCSSD_SPEC_20)
+ return 0;
- memset(&rqd, 0, sizeof(struct nvm_rq));
+ if (rqd->is_seq)
+ flags |= geo->pln_mode >> 1;
- nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
- nvm_rq_tgt_to_dev(tgt_dev, &rqd);
+ if (rqd->opcode == NVM_OP_PREAD)
+ flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
+ else if (rqd->opcode == NVM_OP_PWRITE)
+ flags |= NVM_IO_SCRAMBLE_ENABLE;
- ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
- nvm_free_rqd_ppalist(tgt_dev, &rqd);
- if (ret) {
- pr_err("nvm: failed bb mark\n");
- return -EINVAL;
- }
-
- return 0;
+ return flags;
}
-EXPORT_SYMBOL(nvm_set_tgt_bb_tbl);
int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
@@ -763,6 +740,7 @@ int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
nvm_rq_tgt_to_dev(tgt_dev, rqd);
rqd->dev = tgt_dev;
+ rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
/* In case of error, fail with the right address format */
ret = dev->ops->submit_io(dev, rqd);
@@ -783,6 +761,7 @@ int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
nvm_rq_tgt_to_dev(tgt_dev, rqd);
rqd->dev = tgt_dev;
+ rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);
/* In case of error, fail with the right address format */
ret = dev->ops->submit_io_sync(dev, rqd);
@@ -805,27 +784,159 @@ void nvm_end_io(struct nvm_rq *rqd)
}
EXPORT_SYMBOL(nvm_end_io);
+static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+ if (!dev->ops->submit_io_sync)
+ return -ENODEV;
+
+ rqd->flags = nvm_set_flags(&dev->geo, rqd);
+
+ return dev->ops->submit_io_sync(dev, rqd);
+}
+
+static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
+{
+ struct nvm_rq rqd = { NULL };
+ struct bio bio;
+ struct bio_vec bio_vec;
+ struct page *page;
+ int ret;
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ bio_init(&bio, &bio_vec, 1);
+ bio_add_page(&bio, page, PAGE_SIZE, 0);
+ bio_set_op_attrs(&bio, REQ_OP_READ, 0);
+
+ rqd.bio = &bio;
+ rqd.opcode = NVM_OP_PREAD;
+ rqd.is_seq = 1;
+ rqd.nr_ppas = 1;
+ rqd.ppa_addr = generic_to_dev_addr(dev, ppa);
+
+ ret = nvm_submit_io_sync_raw(dev, &rqd);
+ if (ret)
+ return ret;
+
+ __free_page(page);
+
+ return rqd.error;
+}
+
/*
- * folds a bad block list from its plane representation to its virtual
- * block representation. The fold is done in place and reduced size is
- * returned.
- *
- * If any of the planes status are bad or grown bad block, the virtual block
- * is marked bad. If not bad, the first plane state acts as the block state.
+ * Scans a 1.2 chunk's first and last page to determine its state.
+ * If the chunk is found to be open, also scan it to update the write
+ * pointer.
*/
-int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
+static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
+ struct nvm_chk_meta *meta)
{
struct nvm_geo *geo = &dev->geo;
- int blk, offset, pl, blktype;
+ int ret, pg, pl;
- if (nr_blks != geo->num_chk * geo->pln_mode)
- return -EINVAL;
+ /* sense first page */
+ ret = nvm_bb_chunk_sense(dev, ppa);
+ if (ret < 0) /* io error */
+ return ret;
+ else if (ret == 0) /* valid data */
+ meta->state = NVM_CHK_ST_OPEN;
+ else if (ret > 0) {
+ /*
+ * An empty page means the chunk is free; CRC/ECC trouble
+ * means the chunk is open and must be scanned to find the
+ * write pointer; anything else is a real io error.
+ */
+ switch (ret) {
+ case NVM_RSP_ERR_EMPTYPAGE:
+ meta->state = NVM_CHK_ST_FREE;
+ return 0;
+ case NVM_RSP_ERR_FAILCRC:
+ case NVM_RSP_ERR_FAILECC:
+ case NVM_RSP_WARN_HIGHECC:
+ meta->state = NVM_CHK_ST_OPEN;
+ goto scan;
+ default:
+ return -ret; /* other io error */
+ }
+ }
+
+ /* sense last page */
+ ppa.g.pg = geo->num_pg - 1;
+ ppa.g.pl = geo->num_pln - 1;
+
+ ret = nvm_bb_chunk_sense(dev, ppa);
+ if (ret < 0) /* io error */
+ return ret;
+ else if (ret == 0) { /* Chunk fully written */
+ meta->state = NVM_CHK_ST_CLOSED;
+ meta->wp = geo->clba;
+ return 0;
+ } else if (ret > 0) {
+ switch (ret) {
+ case NVM_RSP_ERR_EMPTYPAGE:
+ case NVM_RSP_ERR_FAILCRC:
+ case NVM_RSP_ERR_FAILECC:
+ case NVM_RSP_WARN_HIGHECC:
+ meta->state = NVM_CHK_ST_OPEN;
+ break;
+ default:
+ return -ret; /* other io error */
+ }
+ }
+
+scan:
+ /*
+ * The chunk is open; scan it sequentially to update the write
+ * pointer. We assume that targets write data across all planes
+ * before moving to the next page.
+ */
+ for (pg = 0; pg < geo->num_pg; pg++) {
+ for (pl = 0; pl < geo->num_pln; pl++) {
+ ppa.g.pg = pg;
+ ppa.g.pl = pl;
+
+ ret = nvm_bb_chunk_sense(dev, ppa);
+ if (ret < 0) /* io error */
+ return ret;
+ else if (ret == 0) {
+ meta->wp += geo->ws_min;
+ } else if (ret > 0) {
+ switch (ret) {
+ case NVM_RSP_ERR_EMPTYPAGE:
+ return 0;
+ case NVM_RSP_ERR_FAILCRC:
+ case NVM_RSP_ERR_FAILECC:
+ case NVM_RSP_WARN_HIGHECC:
+ meta->wp += geo->ws_min;
+ break;
+ default:
+ return -ret; /* other io error */
+ }
+ }
+ }
+ }
+
+ return 0;
+}
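
nvm_bb_chunk_scan() infers a 1.2 chunk's state from what sensing its first and last pages returns: valid data means written, an empty-page error means free, and CRC/ECC trouble forces the sequential write-pointer scan. A compilable sketch of the first-page decision (the enum values are illustrative stand-ins, not the spec's codes):

#include <stdio.h>

enum sense { SENSE_OK, SENSE_EMPTY, SENSE_ECC, SENSE_FATAL };
enum chunk_state { CHK_FREE, CHK_OPEN, CHK_NEEDS_SCAN, CHK_ERROR };

static enum chunk_state classify_first_page(enum sense s)
{
	switch (s) {
	case SENSE_OK:    return CHK_OPEN;		/* valid data present */
	case SENSE_EMPTY: return CHK_FREE;		/* never written */
	case SENSE_ECC:   return CHK_NEEDS_SCAN;	/* open; locate write pointer */
	default:          return CHK_ERROR;		/* real I/O error */
	}
}

int main(void)
{
	printf("empty -> %d, ok -> %d\n",
	       classify_first_page(SENSE_EMPTY),
	       classify_first_page(SENSE_OK));
	return 0;
}
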
+
+/*
+ * folds a bad block list from its plane representation to its
+ * chunk representation.
+ *
+ * If any of the planes status are bad or grown bad, the chunk is marked
+ * offline. If not bad, the first plane state acts as the chunk state.
+ */
+static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
+ u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
+{
+ struct nvm_geo *geo = &dev->geo;
+ int ret, blk, pl, offset, blktype;
for (blk = 0; blk < geo->num_chk; blk++) {
offset = blk * geo->pln_mode;
blktype = blks[offset];
- /* Bad blocks on any planes take precedence over other types */
for (pl = 0; pl < geo->pln_mode; pl++) {
if (blks[offset + pl] &
(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
@@ -834,23 +945,124 @@ int nvm_bb_tbl_fold(struct nvm_dev *dev, u8 *blks, int nr_blks)
}
}
- blks[blk] = blktype;
+ ppa.g.blk = blk;
+
+ meta->wp = 0;
+ meta->type = NVM_CHK_TP_W_SEQ;
+ meta->wi = 0;
+ meta->slba = generic_to_dev_addr(dev, ppa).ppa;
+ meta->cnlb = dev->geo.clba;
+
+ if (blktype == NVM_BLK_T_FREE) {
+ ret = nvm_bb_chunk_scan(dev, ppa, meta);
+ if (ret)
+ return ret;
+ } else {
+ meta->state = NVM_CHK_ST_OFFLINE;
+ }
+
+ meta++;
}
- return geo->num_chk;
+ return 0;
+}
+
+static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
+ int nchks, struct nvm_chk_meta *meta)
+{
+ struct nvm_geo *geo = &dev->geo;
+ struct ppa_addr ppa;
+ u8 *blks;
+ int ch, lun, nr_blks;
+ int ret;
+
+ ppa.ppa = slba;
+ ppa = dev_to_generic_addr(dev, ppa);
+
+ if (ppa.g.blk != 0)
+ return -EINVAL;
+
+ if ((nchks % geo->num_chk) != 0)
+ return -EINVAL;
+
+ nr_blks = geo->num_chk * geo->pln_mode;
+
+ blks = kmalloc(nr_blks, GFP_KERNEL);
+ if (!blks)
+ return -ENOMEM;
+
+ for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
+ for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
+ struct ppa_addr ppa_gen, ppa_dev;
+
+ if (!nchks)
+ goto done;
+
+ ppa_gen.ppa = 0;
+ ppa_gen.g.ch = ch;
+ ppa_gen.g.lun = lun;
+ ppa_dev = generic_to_dev_addr(dev, ppa_gen);
+
+ ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
+ if (ret)
+ goto done;
+
+ ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
+ meta);
+ if (ret)
+ goto done;
+
+ meta += geo->num_chk;
+ nchks -= geo->num_chk;
+ }
+ }
+done:
+ kfree(blks);
+ return ret;
}
-EXPORT_SYMBOL(nvm_bb_tbl_fold);
-int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
- u8 *blks)
+int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
+ int nchks, struct nvm_chk_meta *meta)
{
struct nvm_dev *dev = tgt_dev->parent;
nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);
- return dev->ops->get_bb_tbl(dev, ppa, blks);
+ if (dev->geo.version == NVM_OCSSD_SPEC_12)
+ return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);
+
+ return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
+}
+EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);
+
+int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
+ int nr_ppas, int type)
+{
+ struct nvm_dev *dev = tgt_dev->parent;
+ struct nvm_rq rqd;
+ int ret;
+
+ if (dev->geo.version == NVM_OCSSD_SPEC_20)
+ return 0;
+
+ if (nr_ppas > NVM_MAX_VLBA) {
+ pr_err("nvm: unable to update all blocks atomically\n");
+ return -EINVAL;
+ }
+
+ memset(&rqd, 0, sizeof(struct nvm_rq));
+
+ nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
+ nvm_rq_tgt_to_dev(tgt_dev, &rqd);
+
+ ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
+ nvm_free_rqd_ppalist(tgt_dev, &rqd);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
}
-EXPORT_SYMBOL(nvm_get_tgt_bb_tbl);
+EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);
static int nvm_core_init(struct nvm_dev *dev)
{
diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c
index f565a56b898a..c9fa26f95659 100644
--- a/drivers/lightnvm/pblk-cache.c
+++ b/drivers/lightnvm/pblk-cache.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 CNEX Labs
* Initial release: Javier Gonzalez <javier@cnexlabs.com>
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 00984b486fea..6944aac43b01 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 CNEX Labs
* Initial release: Javier Gonzalez <javier@cnexlabs.com>
@@ -16,7 +17,10 @@
*
*/
+#define CREATE_TRACE_POINTS
+
#include "pblk.h"
+#include "pblk-trace.h"
static void pblk_line_mark_bb(struct work_struct *work)
{
@@ -27,12 +31,12 @@ static void pblk_line_mark_bb(struct work_struct *work)
struct ppa_addr *ppa = line_ws->priv;
int ret;
- ret = nvm_set_tgt_bb_tbl(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
+ ret = nvm_set_chunk_meta(dev, ppa, 1, NVM_BLK_T_GRWN_BAD);
if (ret) {
struct pblk_line *line;
int pos;
- line = &pblk->lines[pblk_ppa_to_line(*ppa)];
+ line = pblk_ppa_to_line(pblk, *ppa);
pos = pblk_ppa_to_pos(&dev->geo, *ppa);
pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n",
@@ -80,19 +84,28 @@ static void __pblk_end_io_erase(struct pblk *pblk, struct nvm_rq *rqd)
struct pblk_line *line;
int pos;
- line = &pblk->lines[pblk_ppa_to_line(rqd->ppa_addr)];
+ line = pblk_ppa_to_line(pblk, rqd->ppa_addr);
pos = pblk_ppa_to_pos(geo, rqd->ppa_addr);
chunk = &line->chks[pos];
atomic_dec(&line->left_seblks);
if (rqd->error) {
+ trace_pblk_chunk_reset(pblk_disk_name(pblk),
+ &rqd->ppa_addr, PBLK_CHUNK_RESET_FAILED);
+
chunk->state = NVM_CHK_ST_OFFLINE;
pblk_mark_bb(pblk, line, rqd->ppa_addr);
} else {
+ trace_pblk_chunk_reset(pblk_disk_name(pblk),
+ &rqd->ppa_addr, PBLK_CHUNK_RESET_DONE);
+
chunk->state = NVM_CHK_ST_FREE;
}
+ trace_pblk_chunk_state(pblk_disk_name(pblk), &rqd->ppa_addr,
+ chunk->state);
+
atomic_dec(&pblk->inflight_io);
}
@@ -108,9 +121,9 @@ static void pblk_end_io_erase(struct nvm_rq *rqd)
/*
* Get information for all chunks from the device.
*
- * The caller is responsible for freeing the returned structure
+ * The caller is responsible for freeing (vmalloc) the returned structure
*/
-struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk)
+struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
@@ -122,11 +135,11 @@ struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk)
ppa.ppa = 0;
len = geo->all_chunks * sizeof(*meta);
- meta = kzalloc(len, GFP_KERNEL);
+ meta = vzalloc(len);
if (!meta)
return ERR_PTR(-ENOMEM);
- ret = nvm_get_chunk_meta(dev, meta, ppa, geo->all_chunks);
+ ret = nvm_get_chunk_meta(dev, ppa, geo->all_chunks, meta);
if (ret) {
- kfree(meta);
+ vfree(meta);
return ERR_PTR(-EIO);
@@ -192,7 +205,6 @@ void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
{
struct pblk_line *line;
u64 paddr;
- int line_id;
#ifdef CONFIG_NVM_PBLK_DEBUG
/* Callers must ensure that the ppa points to a device address */
@@ -200,8 +212,7 @@ void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa)
BUG_ON(pblk_ppa_empty(ppa));
#endif
- line_id = pblk_ppa_to_line(ppa);
- line = &pblk->lines[line_id];
+ line = pblk_ppa_to_line(pblk, ppa);
paddr = pblk_dev_ppa_to_line_addr(pblk, ppa);
__pblk_map_invalidate(pblk, line, paddr);
@@ -227,6 +238,33 @@ static void pblk_invalidate_range(struct pblk *pblk, sector_t slba,
spin_unlock(&pblk->trans_lock);
}
+int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
+{
+ struct nvm_tgt_dev *dev = pblk->dev;
+
+ rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
+ &rqd->dma_meta_list);
+ if (!rqd->meta_list)
+ return -ENOMEM;
+
+ if (rqd->nr_ppas == 1)
+ return 0;
+
+ rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
+ rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
+
+ return 0;
+}
+
+void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
+{
+ struct nvm_tgt_dev *dev = pblk->dev;
+
+ if (rqd->meta_list)
+ nvm_dev_dma_free(dev->parent, rqd->meta_list,
+ rqd->dma_meta_list);
+}
+
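
pblk_alloc_rqd_meta() carves two views out of one DMA allocation: per-sector metadata at the start and the PPA list at offset pblk_dma_meta_size, while single-PPA requests keep using the inline address. A userspace sketch of the same single-buffer layout (the size constant is a stand-in):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define META_AREA_SIZE 4096	/* stand-in for pblk_dma_meta_size */

struct rqd_view {
	void *meta_list;	/* start of the shared buffer */
	uint64_t *ppa_list;	/* carved out after the metadata area */
};

static int rqd_meta_alloc(struct rqd_view *v, int nr_ppas)
{
	v->meta_list = calloc(1, META_AREA_SIZE + nr_ppas * sizeof(uint64_t));
	if (!v->meta_list)
		return -1;
	v->ppa_list = (uint64_t *)((char *)v->meta_list + META_AREA_SIZE);
	return 0;
}

int main(void)
{
	struct rqd_view v;

	if (rqd_meta_alloc(&v, 64))
		return 1;
	printf("meta %p, ppas %p\n", v.meta_list, (void *)v.ppa_list);
	free(v.meta_list);	/* one free releases both views */
	return 0;
}
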
/* Caller must guarantee that the request is a valid type */
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
{
@@ -258,7 +296,6 @@ struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
/* Typically used on completion path. Cannot guarantee request consistency */
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
{
- struct nvm_tgt_dev *dev = pblk->dev;
mempool_t *pool;
switch (type) {
@@ -279,9 +316,7 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
return;
}
- if (rqd->meta_list)
- nvm_dev_dma_free(dev->parent, rqd->meta_list,
- rqd->dma_meta_list);
+ pblk_free_rqd_meta(pblk, rqd);
mempool_free(rqd, pool);
}
@@ -409,6 +444,9 @@ struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line)
}
} else {
line->state = PBLK_LINESTATE_CORRUPT;
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
+
line->gc_group = PBLK_LINEGC_NONE;
move_list = &l_mg->corrupt_list;
pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n",
@@ -479,9 +517,30 @@ int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd)
return nvm_submit_io(dev, rqd);
}
+void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd)
+{
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
+
+ int i;
+
+ for (i = 0; i < rqd->nr_ppas; i++) {
+ struct ppa_addr *ppa = &ppa_list[i];
+ struct nvm_chk_meta *chunk = pblk_dev_ppa_to_chunk(pblk, *ppa);
+ u64 caddr = pblk_dev_ppa_to_chunk_addr(pblk, *ppa);
+
+ if (caddr == 0)
+ trace_pblk_chunk_state(pblk_disk_name(pblk),
+ ppa, NVM_CHK_ST_OPEN);
+ else if (caddr == chunk->cnlb)
+ trace_pblk_chunk_state(pblk_disk_name(pblk),
+ ppa, NVM_CHK_ST_CLOSED);
+ }
+}
+
int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
{
struct nvm_tgt_dev *dev = pblk->dev;
+ int ret;
atomic_inc(&pblk->inflight_io);
@@ -490,7 +549,27 @@ int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd)
return NVM_IO_ERR;
#endif
- return nvm_submit_io_sync(dev, rqd);
+ ret = nvm_submit_io_sync(dev, rqd);
+
+ if (trace_pblk_chunk_state_enabled() && !ret &&
+ rqd->opcode == NVM_OP_PWRITE)
+ pblk_check_chunk_state_update(pblk, rqd);
+
+ return ret;
+}
+
+int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd)
+{
+ struct ppa_addr *ppa_list;
+ int ret;
+
+ ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
+
+ pblk_down_chunk(pblk, ppa_list[0]);
+ ret = pblk_submit_io_sync(pblk, rqd);
+ pblk_up_chunk(pblk, ppa_list[0]);
+
+ return ret;
}
static void pblk_bio_map_addr_endio(struct bio *bio)
@@ -621,262 +700,227 @@ u64 pblk_lookup_page(struct pblk *pblk, struct pblk_line *line)
return paddr;
}
-/*
- * Submit emeta to one LUN in the raid line at the time to avoid a deadlock when
- * taking the per LUN semaphore.
- */
-static int pblk_line_submit_emeta_io(struct pblk *pblk, struct pblk_line *line,
- void *emeta_buf, u64 paddr, int dir)
+u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
- void *ppa_list, *meta_list;
- struct bio *bio;
- struct nvm_rq rqd;
- dma_addr_t dma_ppa_list, dma_meta_list;
- int min = pblk->min_write_pgs;
- int left_ppas = lm->emeta_sec[0];
- int id = line->id;
- int rq_ppas, rq_len;
- int cmd_op, bio_op;
- int i, j;
- int ret;
+ int bit;
- if (dir == PBLK_WRITE) {
- bio_op = REQ_OP_WRITE;
- cmd_op = NVM_OP_PWRITE;
- } else if (dir == PBLK_READ) {
- bio_op = REQ_OP_READ;
- cmd_op = NVM_OP_PREAD;
- } else
- return -EINVAL;
+ /* This usually only happens on bad lines */
+ bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
+ if (bit >= lm->blk_per_line)
+ return -1;
- meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
- &dma_meta_list);
- if (!meta_list)
- return -ENOMEM;
+ return bit * geo->ws_opt;
+}
- ppa_list = meta_list + pblk_dma_meta_size;
- dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
+int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line)
+{
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct pblk_line_meta *lm = &pblk->lm;
+ struct bio *bio;
+ struct nvm_rq rqd;
+ u64 paddr = pblk_line_smeta_start(pblk, line);
+ int i, ret;
-next_rq:
memset(&rqd, 0, sizeof(struct nvm_rq));
- rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
- rq_len = rq_ppas * geo->csecs;
+ ret = pblk_alloc_rqd_meta(pblk, &rqd);
+ if (ret)
+ return ret;
- bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
- l_mg->emeta_alloc_type, GFP_KERNEL);
+ bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
- goto free_rqd_dma;
+ goto clear_rqd;
}
bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(bio, bio_op, 0);
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
rqd.bio = bio;
- rqd.meta_list = meta_list;
- rqd.ppa_list = ppa_list;
- rqd.dma_meta_list = dma_meta_list;
- rqd.dma_ppa_list = dma_ppa_list;
- rqd.opcode = cmd_op;
- rqd.nr_ppas = rq_ppas;
-
- if (dir == PBLK_WRITE) {
- struct pblk_sec_meta *meta_list = rqd.meta_list;
-
- rqd.flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
- for (i = 0; i < rqd.nr_ppas; ) {
- spin_lock(&line->lock);
- paddr = __pblk_alloc_page(pblk, line, min);
- spin_unlock(&line->lock);
- for (j = 0; j < min; j++, i++, paddr++) {
- meta_list[i].lba = cpu_to_le64(ADDR_EMPTY);
- rqd.ppa_list[i] =
- addr_to_gen_ppa(pblk, paddr, id);
- }
- }
- } else {
- for (i = 0; i < rqd.nr_ppas; ) {
- struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, id);
- int pos = pblk_ppa_to_pos(geo, ppa);
- int read_type = PBLK_READ_RANDOM;
-
- if (pblk_io_aligned(pblk, rq_ppas))
- read_type = PBLK_READ_SEQUENTIAL;
- rqd.flags = pblk_set_read_mode(pblk, read_type);
-
- while (test_bit(pos, line->blk_bitmap)) {
- paddr += min;
- if (pblk_boundary_paddr_checks(pblk, paddr)) {
- pblk_err(pblk, "corrupt emeta line:%d\n",
- line->id);
- bio_put(bio);
- ret = -EINTR;
- goto free_rqd_dma;
- }
-
- ppa = addr_to_gen_ppa(pblk, paddr, id);
- pos = pblk_ppa_to_pos(geo, ppa);
- }
-
- if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
- pblk_err(pblk, "corrupt emeta line:%d\n",
- line->id);
- bio_put(bio);
- ret = -EINTR;
- goto free_rqd_dma;
- }
+ rqd.opcode = NVM_OP_PREAD;
+ rqd.nr_ppas = lm->smeta_sec;
+ rqd.is_seq = 1;
- for (j = 0; j < min; j++, i++, paddr++)
- rqd.ppa_list[i] =
- addr_to_gen_ppa(pblk, paddr, line->id);
- }
- }
+ for (i = 0; i < lm->smeta_sec; i++, paddr++)
+ rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
ret = pblk_submit_io_sync(pblk, &rqd);
if (ret) {
- pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
+ pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
bio_put(bio);
- goto free_rqd_dma;
+ goto clear_rqd;
}
atomic_dec(&pblk->inflight_io);
- if (rqd.error) {
- if (dir == PBLK_WRITE)
- pblk_log_write_err(pblk, &rqd);
- else
- pblk_log_read_err(pblk, &rqd);
- }
+ if (rqd.error)
+ pblk_log_read_err(pblk, &rqd);
- emeta_buf += rq_len;
- left_ppas -= rq_ppas;
- if (left_ppas)
- goto next_rq;
-free_rqd_dma:
- nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
+clear_rqd:
+ pblk_free_rqd_meta(pblk, &rqd);
return ret;
}
-u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line)
-{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
- struct pblk_line_meta *lm = &pblk->lm;
- int bit;
-
- /* This usually only happens on bad lines */
- bit = find_first_zero_bit(line->blk_bitmap, lm->blk_per_line);
- if (bit >= lm->blk_per_line)
- return -1;
-
- return bit * geo->ws_opt;
-}
-
-static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line,
- u64 paddr, int dir)
+static int pblk_line_smeta_write(struct pblk *pblk, struct pblk_line *line,
+ u64 paddr)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_line_meta *lm = &pblk->lm;
struct bio *bio;
struct nvm_rq rqd;
- __le64 *lba_list = NULL;
+ __le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
+ __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
int i, ret;
- int cmd_op, bio_op;
- int flags;
-
- if (dir == PBLK_WRITE) {
- bio_op = REQ_OP_WRITE;
- cmd_op = NVM_OP_PWRITE;
- flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
- lba_list = emeta_to_lbas(pblk, line->emeta->buf);
- } else if (dir == PBLK_READ_RECOV || dir == PBLK_READ) {
- bio_op = REQ_OP_READ;
- cmd_op = NVM_OP_PREAD;
- flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
- } else
- return -EINVAL;
memset(&rqd, 0, sizeof(struct nvm_rq));
- rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
- &rqd.dma_meta_list);
- if (!rqd.meta_list)
- return -ENOMEM;
-
- rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
- rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
+ ret = pblk_alloc_rqd_meta(pblk, &rqd);
+ if (ret)
+ return ret;
bio = bio_map_kern(dev->q, line->smeta, lm->smeta_len, GFP_KERNEL);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
- goto free_ppa_list;
+ goto clear_rqd;
}
bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(bio, bio_op, 0);
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
rqd.bio = bio;
- rqd.opcode = cmd_op;
- rqd.flags = flags;
+ rqd.opcode = NVM_OP_PWRITE;
rqd.nr_ppas = lm->smeta_sec;
+ rqd.is_seq = 1;
for (i = 0; i < lm->smeta_sec; i++, paddr++) {
struct pblk_sec_meta *meta_list = rqd.meta_list;
rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);
-
- if (dir == PBLK_WRITE) {
- __le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
-
- meta_list[i].lba = lba_list[paddr] = addr_empty;
- }
+ meta_list[i].lba = lba_list[paddr] = addr_empty;
}
- /*
- * This I/O is sent by the write thread when a line is replace. Since
- * the write thread is the only one sending write and erase commands,
- * there is no need to take the LUN semaphore.
- */
- ret = pblk_submit_io_sync(pblk, &rqd);
+ ret = pblk_submit_io_sync_sem(pblk, &rqd);
if (ret) {
pblk_err(pblk, "smeta I/O submission failed: %d\n", ret);
bio_put(bio);
- goto free_ppa_list;
+ goto clear_rqd;
}
atomic_dec(&pblk->inflight_io);
if (rqd.error) {
- if (dir == PBLK_WRITE) {
- pblk_log_write_err(pblk, &rqd);
- ret = 1;
- } else if (dir == PBLK_READ)
- pblk_log_read_err(pblk, &rqd);
+ pblk_log_write_err(pblk, &rqd);
+ ret = -EIO;
}
-free_ppa_list:
- nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
-
+clear_rqd:
+ pblk_free_rqd_meta(pblk, &rqd);
return ret;
}
-int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line)
+int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
+ void *emeta_buf)
{
- u64 bpaddr = pblk_line_smeta_start(pblk, line);
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+ struct pblk_line_meta *lm = &pblk->lm;
+ void *ppa_list, *meta_list;
+ struct bio *bio;
+ struct nvm_rq rqd;
+ u64 paddr = line->emeta_ssec;
+ dma_addr_t dma_ppa_list, dma_meta_list;
+ int min = pblk->min_write_pgs;
+ int left_ppas = lm->emeta_sec[0];
+ int line_id = line->id;
+ int rq_ppas, rq_len;
+ int i, j;
+ int ret;
- return pblk_line_submit_smeta_io(pblk, line, bpaddr, PBLK_READ_RECOV);
-}
+ meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
+ &dma_meta_list);
+ if (!meta_list)
+ return -ENOMEM;
-int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
- void *emeta_buf)
-{
- return pblk_line_submit_emeta_io(pblk, line, emeta_buf,
- line->emeta_ssec, PBLK_READ);
+ ppa_list = meta_list + pblk_dma_meta_size;
+ dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
+
+next_rq:
+ memset(&rqd, 0, sizeof(struct nvm_rq));
+
+ rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
+ rq_len = rq_ppas * geo->csecs;
+
+ bio = pblk_bio_map_addr(pblk, emeta_buf, rq_ppas, rq_len,
+ l_mg->emeta_alloc_type, GFP_KERNEL);
+ if (IS_ERR(bio)) {
+ ret = PTR_ERR(bio);
+ goto free_rqd_dma;
+ }
+
+ bio->bi_iter.bi_sector = 0; /* internal bio */
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
+
+ rqd.bio = bio;
+ rqd.meta_list = meta_list;
+ rqd.ppa_list = ppa_list;
+ rqd.dma_meta_list = dma_meta_list;
+ rqd.dma_ppa_list = dma_ppa_list;
+ rqd.opcode = NVM_OP_PREAD;
+ rqd.nr_ppas = rq_ppas;
+
+ for (i = 0; i < rqd.nr_ppas; ) {
+ struct ppa_addr ppa = addr_to_gen_ppa(pblk, paddr, line_id);
+ int pos = pblk_ppa_to_pos(geo, ppa);
+
+ if (pblk_io_aligned(pblk, rq_ppas))
+ rqd.is_seq = 1;
+
+ while (test_bit(pos, line->blk_bitmap)) {
+ paddr += min;
+ if (pblk_boundary_paddr_checks(pblk, paddr)) {
+ bio_put(bio);
+ ret = -EINTR;
+ goto free_rqd_dma;
+ }
+
+ ppa = addr_to_gen_ppa(pblk, paddr, line_id);
+ pos = pblk_ppa_to_pos(geo, ppa);
+ }
+
+ if (pblk_boundary_paddr_checks(pblk, paddr + min)) {
+ bio_put(bio);
+ ret = -EINTR;
+ goto free_rqd_dma;
+ }
+
+ for (j = 0; j < min; j++, i++, paddr++)
+ rqd.ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line_id);
+ }
+
+ ret = pblk_submit_io_sync(pblk, &rqd);
+ if (ret) {
+ pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
+ bio_put(bio);
+ goto free_rqd_dma;
+ }
+
+ atomic_dec(&pblk->inflight_io);
+
+ if (rqd.error)
+ pblk_log_read_err(pblk, &rqd);
+
+ emeta_buf += rq_len;
+ left_ppas -= rq_ppas;
+ if (left_ppas)
+ goto next_rq;
+
+free_rqd_dma:
+ nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
+ return ret;
}
static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
@@ -885,16 +929,17 @@ static void pblk_setup_e_rq(struct pblk *pblk, struct nvm_rq *rqd,
rqd->opcode = NVM_OP_ERASE;
rqd->ppa_addr = ppa;
rqd->nr_ppas = 1;
- rqd->flags = pblk_set_progr_mode(pblk, PBLK_ERASE);
+ rqd->is_seq = 1;
rqd->bio = NULL;
}
static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
{
- struct nvm_rq rqd;
- int ret = 0;
+ struct nvm_rq rqd = {NULL};
+ int ret;
- memset(&rqd, 0, sizeof(struct nvm_rq));
+ trace_pblk_chunk_reset(pblk_disk_name(pblk), &ppa,
+ PBLK_CHUNK_RESET_START);
pblk_setup_e_rq(pblk, &rqd, ppa);
@@ -902,19 +947,6 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa)
* with writes. Thus, there is no need to take the LUN semaphore.
*/
ret = pblk_submit_io_sync(pblk, &rqd);
- if (ret) {
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
-
- pblk_err(pblk, "could not sync erase line:%d,blk:%d\n",
- pblk_ppa_to_line(ppa),
- pblk_ppa_to_pos(geo, ppa));
-
- rqd.error = ret;
- goto out;
- }
-
-out:
rqd.private = pblk;
__pblk_end_io_erase(pblk, &rqd);
@@ -1008,6 +1040,8 @@ static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
spin_lock(&l_mg->free_lock);
spin_lock(&line->lock);
line->state = PBLK_LINESTATE_BAD;
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
spin_unlock(&line->lock);
list_add_tail(&line->list, &l_mg->bad_list);
@@ -1071,15 +1105,18 @@ static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line,
static int pblk_line_alloc_bitmaps(struct pblk *pblk, struct pblk_line *line)
{
struct pblk_line_meta *lm = &pblk->lm;
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
- line->map_bitmap = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
+ line->map_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
if (!line->map_bitmap)
return -ENOMEM;
+ memset(line->map_bitmap, 0, lm->sec_bitmap_len);
+
/* will be initialized using bb info from map_bitmap */
- line->invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
+ line->invalid_bitmap = mempool_alloc(l_mg->bitmap_pool, GFP_KERNEL);
if (!line->invalid_bitmap) {
- kfree(line->map_bitmap);
+ mempool_free(line->map_bitmap, l_mg->bitmap_pool);
line->map_bitmap = NULL;
return -ENOMEM;
}
@@ -1122,7 +1159,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
line->smeta_ssec = off;
line->cur_sec = off + lm->smeta_sec;
- if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) {
+ if (init && pblk_line_smeta_write(pblk, line, off)) {
pblk_debug(pblk, "line smeta I/O failed. Retry\n");
return 0;
}
@@ -1152,6 +1189,8 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line,
bitmap_weight(line->invalid_bitmap, lm->sec_per_line)) {
spin_lock(&line->lock);
line->state = PBLK_LINESTATE_BAD;
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
spin_unlock(&line->lock);
list_add_tail(&line->list, &l_mg->bad_list);
@@ -1204,6 +1243,8 @@ static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
if (line->state == PBLK_LINESTATE_NEW) {
blk_to_erase = pblk_prepare_new_line(pblk, line);
line->state = PBLK_LINESTATE_FREE;
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
} else {
blk_to_erase = blk_in_line;
}
@@ -1221,6 +1262,8 @@ static int pblk_line_prepare(struct pblk *pblk, struct pblk_line *line)
}
line->state = PBLK_LINESTATE_OPEN;
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
atomic_set(&line->left_eblks, blk_to_erase);
atomic_set(&line->left_seblks, blk_to_erase);
@@ -1265,7 +1308,9 @@ int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
{
- kfree(line->map_bitmap);
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+
+ mempool_free(line->map_bitmap, l_mg->bitmap_pool);
line->map_bitmap = NULL;
line->smeta = NULL;
line->emeta = NULL;
@@ -1283,8 +1328,11 @@ static void pblk_line_reinit(struct pblk_line *line)
void pblk_line_free(struct pblk_line *line)
{
- kfree(line->map_bitmap);
- kfree(line->invalid_bitmap);
+ struct pblk *pblk = line->pblk;
+ struct pblk_line_mgmt *l_mg = &pblk->l_mg;
+
+ mempool_free(line->map_bitmap, l_mg->bitmap_pool);
+ mempool_free(line->invalid_bitmap, l_mg->bitmap_pool);
pblk_line_reinit(line);
}
@@ -1312,6 +1360,8 @@ retry:
if (unlikely(bit >= lm->blk_per_line)) {
spin_lock(&line->lock);
line->state = PBLK_LINESTATE_BAD;
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
spin_unlock(&line->lock);
list_add_tail(&line->list, &l_mg->bad_list);
@@ -1446,12 +1496,32 @@ retry_setup:
return line;
}
+void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa)
+{
+ struct pblk_line *line;
+
+ line = pblk_ppa_to_line(pblk, ppa);
+ kref_put(&line->ref, pblk_line_put_wq);
+}
+
+void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd)
+{
+ struct ppa_addr *ppa_list;
+ int i;
+
+ ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
+
+ for (i = 0; i < rqd->nr_ppas; i++)
+ pblk_ppa_to_line_put(pblk, ppa_list[i]);
+}
+
static void pblk_stop_writes(struct pblk *pblk, struct pblk_line *line)
{
lockdep_assert_held(&pblk->l_mg.free_lock);
pblk_set_space_limit(pblk);
pblk->state = PBLK_STATE_STOPPING;
+ trace_pblk_state(pblk_disk_name(pblk), pblk->state);
}
static void pblk_line_close_meta_sync(struct pblk *pblk)
@@ -1501,6 +1571,7 @@ void __pblk_pipeline_flush(struct pblk *pblk)
return;
}
pblk->state = PBLK_STATE_RECOVERING;
+ trace_pblk_state(pblk_disk_name(pblk), pblk->state);
spin_unlock(&l_mg->free_lock);
pblk_flush_writer(pblk);
@@ -1522,6 +1593,7 @@ void __pblk_pipeline_stop(struct pblk *pblk)
spin_lock(&l_mg->free_lock);
pblk->state = PBLK_STATE_STOPPED;
+ trace_pblk_state(pblk_disk_name(pblk), pblk->state);
l_mg->data_line = NULL;
l_mg->data_next = NULL;
spin_unlock(&l_mg->free_lock);
@@ -1539,13 +1611,14 @@ struct pblk_line *pblk_line_replace_data(struct pblk *pblk)
struct pblk_line *cur, *new = NULL;
unsigned int left_seblks;
- cur = l_mg->data_line;
new = l_mg->data_next;
if (!new)
goto out;
- l_mg->data_line = new;
spin_lock(&l_mg->free_lock);
+ cur = l_mg->data_line;
+ l_mg->data_line = new;
+
pblk_line_setup_metadata(new, l_mg, &pblk->lm);
spin_unlock(&l_mg->free_lock);
@@ -1612,6 +1685,8 @@ static void __pblk_line_put(struct pblk *pblk, struct pblk_line *line)
spin_lock(&line->lock);
WARN_ON(line->state != PBLK_LINESTATE_GC);
line->state = PBLK_LINESTATE_FREE;
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
line->gc_group = PBLK_LINEGC_NONE;
pblk_line_free(line);
@@ -1680,6 +1755,9 @@ int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
rqd->end_io = pblk_end_io_erase;
rqd->private = pblk;
+ trace_pblk_chunk_reset(pblk_disk_name(pblk),
+ &ppa, PBLK_CHUNK_RESET_START);
+
/* The write thread schedules erases so that it minimizes disturbances
* with writes. Thus, there is no need to take the LUN semaphore.
*/
@@ -1689,7 +1767,7 @@ int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa)
struct nvm_geo *geo = &dev->geo;
pblk_err(pblk, "could not async erase line:%d,blk:%d\n",
- pblk_ppa_to_line(ppa),
+ pblk_ppa_to_line_id(ppa),
pblk_ppa_to_pos(geo, ppa));
}
@@ -1741,10 +1819,9 @@ void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
WARN_ON(line->state != PBLK_LINESTATE_OPEN);
line->state = PBLK_LINESTATE_CLOSED;
move_list = pblk_line_gc_list(pblk, line);
-
list_add_tail(&line->list, move_list);
- kfree(line->map_bitmap);
+ mempool_free(line->map_bitmap, l_mg->bitmap_pool);
line->map_bitmap = NULL;
line->smeta = NULL;
line->emeta = NULL;
@@ -1760,6 +1837,9 @@ void pblk_line_close(struct pblk *pblk, struct pblk_line *line)
spin_unlock(&line->lock);
spin_unlock(&l_mg->gc_lock);
+
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
}
void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
@@ -1778,6 +1858,17 @@ void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
wa->pad = cpu_to_le64(atomic64_read(&pblk->pad_wa));
wa->gc = cpu_to_le64(atomic64_read(&pblk->gc_wa));
+ if (le32_to_cpu(emeta_buf->header.identifier) != PBLK_MAGIC) {
+ emeta_buf->header.identifier = cpu_to_le32(PBLK_MAGIC);
+ memcpy(emeta_buf->header.uuid, pblk->instance_uuid, 16);
+ emeta_buf->header.id = cpu_to_le32(line->id);
+ emeta_buf->header.type = cpu_to_le16(line->type);
+ emeta_buf->header.version_major = EMETA_VERSION_MAJOR;
+ emeta_buf->header.version_minor = EMETA_VERSION_MINOR;
+ emeta_buf->header.crc = cpu_to_le32(
+ pblk_calc_meta_header_crc(pblk, &emeta_buf->header));
+ }
+
emeta_buf->nr_valid_lbas = cpu_to_le64(line->nr_valid_lbas);
emeta_buf->crc = cpu_to_le32(pblk_calc_emeta_crc(pblk, emeta_buf));
@@ -1795,8 +1886,6 @@ void pblk_line_close_meta(struct pblk *pblk, struct pblk_line *line)
spin_unlock(&l_mg->close_lock);
pblk_line_should_sync_meta(pblk);
-
-
}
static void pblk_save_lba_list(struct pblk *pblk, struct pblk_line *line)
@@ -1847,8 +1936,7 @@ void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
queue_work(wq, &line_ws->ws);
}
-static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
- int nr_ppas, int pos)
+static void __pblk_down_chunk(struct pblk *pblk, int pos)
{
struct pblk_lun *rlun = &pblk->luns[pos];
int ret;
@@ -1857,13 +1945,6 @@ static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
* Only send one inflight I/O per LUN. Since we map at a page
* granularity, all ppas in the I/O will map to the same LUN
*/
-#ifdef CONFIG_NVM_PBLK_DEBUG
- int i;
-
- for (i = 1; i < nr_ppas; i++)
- WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
- ppa_list[0].a.ch != ppa_list[i].a.ch);
-#endif
ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000));
if (ret == -ETIME || ret == -EINTR)
@@ -1871,21 +1952,21 @@ static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list,
-ret);
}
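
__pblk_down_chunk() keeps at most one inflight write per LUN by taking that LUN's semaphore, giving up after the 30-second down_timeout() above. A minimal userspace analog with POSIX semaphores (structure and timeout are illustrative):

#include <semaphore.h>
#include <time.h>

static sem_t lun_sem;		/* one per LUN: single inflight write */

static int down_with_timeout(sem_t *s, int secs)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += secs;
	return sem_timedwait(s, &ts);	/* mirrors down_timeout() */
}

int main(void)
{
	sem_init(&lun_sem, 0, 1);

	if (down_with_timeout(&lun_sem, 30) == 0) {
		/* submit the write aimed at this LUN here */
		sem_post(&lun_sem);	/* pblk_up_chunk() analog */
	}
	sem_destroy(&lun_sem);
	return 0;
}
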
-void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
+void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
+ int pos = pblk_ppa_to_pos(geo, ppa);
- __pblk_down_page(pblk, ppa_list, nr_ppas, pos);
+ __pblk_down_chunk(pblk, pos);
}
-void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
+void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
unsigned long *lun_bitmap)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
+ int pos = pblk_ppa_to_pos(geo, ppa);
/* If the LUN has been locked for this same request, do not attempt to
* lock it again
@@ -1893,30 +1974,21 @@ void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
if (test_and_set_bit(pos, lun_bitmap))
return;
- __pblk_down_page(pblk, ppa_list, nr_ppas, pos);
+ __pblk_down_chunk(pblk, pos);
}
-void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas)
+void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
struct pblk_lun *rlun;
- int pos = pblk_ppa_to_pos(geo, ppa_list[0]);
-
-#ifdef CONFIG_NVM_PBLK_DEBUG
- int i;
-
- for (i = 1; i < nr_ppas; i++)
- WARN_ON(ppa_list[0].a.lun != ppa_list[i].a.lun ||
- ppa_list[0].a.ch != ppa_list[i].a.ch);
-#endif
+ int pos = pblk_ppa_to_pos(geo, ppa);
rlun = &pblk->luns[pos];
up(&rlun->wr_sem);
}
-void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
- unsigned long *lun_bitmap)
+void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
@@ -2060,8 +2132,7 @@ void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
/* If the L2P entry maps to a line, the reference is valid */
if (!pblk_ppa_empty(ppa) && !pblk_addr_in_cache(ppa)) {
- int line_id = pblk_ppa_to_line(ppa);
- struct pblk_line *line = &pblk->lines[line_id];
+ struct pblk_line *line = pblk_ppa_to_line(pblk, ppa);
kref_get(&line->ref);
}
diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c
index 157c2567c9e8..2fa118c8eb71 100644
--- a/drivers/lightnvm/pblk-gc.c
+++ b/drivers/lightnvm/pblk-gc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 CNEX Labs
* Initial release: Javier Gonzalez <javier@cnexlabs.com>
@@ -16,8 +17,10 @@
*/
#include "pblk.h"
+#include "pblk-trace.h"
#include <linux/delay.h>
+
static void pblk_gc_free_gc_rq(struct pblk_gc_rq *gc_rq)
{
if (gc_rq->data)
@@ -64,6 +67,8 @@ static void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
spin_lock(&line->lock);
WARN_ON(line->state != PBLK_LINESTATE_GC);
line->state = PBLK_LINESTATE_CLOSED;
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
move_list = pblk_line_gc_list(pblk, line);
spin_unlock(&line->lock);
@@ -144,7 +149,7 @@ static __le64 *get_lba_list_from_emeta(struct pblk *pblk,
if (!emeta_buf)
return NULL;
- ret = pblk_line_read_emeta(pblk, line, emeta_buf);
+ ret = pblk_line_emeta_read(pblk, line, emeta_buf);
if (ret) {
pblk_err(pblk, "line %d read emeta failed (%d)\n",
line->id, ret);
@@ -405,6 +410,8 @@ void pblk_gc_free_full_lines(struct pblk *pblk)
spin_lock(&line->lock);
WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
line->state = PBLK_LINESTATE_GC;
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
spin_unlock(&line->lock);
list_del(&line->list);
@@ -451,6 +458,8 @@ next_gc_group:
spin_lock(&line->lock);
WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
line->state = PBLK_LINESTATE_GC;
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
spin_unlock(&line->lock);
list_del(&line->list);
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 537e98f2b24a..13822594647c 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
* Copyright (C) 2016 CNEX Labs
@@ -19,15 +20,31 @@
*/
#include "pblk.h"
+#include "pblk-trace.h"
static unsigned int write_buffer_size;
module_param(write_buffer_size, uint, 0644);
MODULE_PARM_DESC(write_buffer_size, "number of entries in a write buffer");
-static struct kmem_cache *pblk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
- *pblk_w_rq_cache;
-static DECLARE_RWSEM(pblk_lock);
+struct pblk_global_caches {
+ struct kmem_cache *ws;
+ struct kmem_cache *rec;
+ struct kmem_cache *g_rq;
+ struct kmem_cache *w_rq;
+
+ struct kref kref;
+
+ struct mutex mutex; /* Ensures consistency between
+ * caches and kref
+ */
+};
+
+static struct pblk_global_caches pblk_caches = {
+ .mutex = __MUTEX_INITIALIZER(pblk_caches.mutex),
+ .kref = KREF_INIT(0),
+};
+
struct bio_set pblk_bio_set;
static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
@@ -168,36 +185,26 @@ static void pblk_rwb_free(struct pblk *pblk)
if (pblk_rb_tear_down_check(&pblk->rwb))
pblk_err(pblk, "write buffer error on tear down\n");
- pblk_rb_data_free(&pblk->rwb);
- vfree(pblk_rb_entries_ref(&pblk->rwb));
+ pblk_rb_free(&pblk->rwb);
}
static int pblk_rwb_init(struct pblk *pblk)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- struct pblk_rb_entry *entries;
- unsigned long nr_entries, buffer_size;
- unsigned int power_size, power_seg_sz;
- int pgs_in_buffer;
+ unsigned long buffer_size;
+ int pgs_in_buffer, threshold;
- pgs_in_buffer = max(geo->mw_cunits, geo->ws_opt) * geo->all_luns;
+ threshold = geo->mw_cunits * geo->all_luns;
+ pgs_in_buffer = (max(geo->mw_cunits, geo->ws_opt) + geo->ws_opt)
+ * geo->all_luns;
if (write_buffer_size && (write_buffer_size > pgs_in_buffer))
buffer_size = write_buffer_size;
else
buffer_size = pgs_in_buffer;
- nr_entries = pblk_rb_calculate_size(buffer_size);
-
- entries = vzalloc(array_size(nr_entries, sizeof(struct pblk_rb_entry)));
- if (!entries)
- return -ENOMEM;
-
- power_size = get_count_order(nr_entries);
- power_seg_sz = get_count_order(geo->csecs);
-
- return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
+ return pblk_rb_init(&pblk->rwb, buffer_size, threshold, geo->csecs);
}
/* Minimum pages needed within a lun */
@@ -306,53 +313,80 @@ static int pblk_set_addrf(struct pblk *pblk)
return 0;
}
-static int pblk_init_global_caches(struct pblk *pblk)
+static int pblk_create_global_caches(void)
{
- down_write(&pblk_lock);
- pblk_ws_cache = kmem_cache_create("pblk_blk_ws",
+
+ pblk_caches.ws = kmem_cache_create("pblk_blk_ws",
sizeof(struct pblk_line_ws), 0, 0, NULL);
- if (!pblk_ws_cache) {
- up_write(&pblk_lock);
+ if (!pblk_caches.ws)
return -ENOMEM;
- }
- pblk_rec_cache = kmem_cache_create("pblk_rec",
+ pblk_caches.rec = kmem_cache_create("pblk_rec",
sizeof(struct pblk_rec_ctx), 0, 0, NULL);
- if (!pblk_rec_cache) {
- kmem_cache_destroy(pblk_ws_cache);
- up_write(&pblk_lock);
- return -ENOMEM;
- }
+ if (!pblk_caches.rec)
+ goto fail_destroy_ws;
- pblk_g_rq_cache = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
+ pblk_caches.g_rq = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
0, 0, NULL);
- if (!pblk_g_rq_cache) {
- kmem_cache_destroy(pblk_ws_cache);
- kmem_cache_destroy(pblk_rec_cache);
- up_write(&pblk_lock);
- return -ENOMEM;
- }
+ if (!pblk_caches.g_rq)
+ goto fail_destroy_rec;
- pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
+ pblk_caches.w_rq = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
0, 0, NULL);
- if (!pblk_w_rq_cache) {
- kmem_cache_destroy(pblk_ws_cache);
- kmem_cache_destroy(pblk_rec_cache);
- kmem_cache_destroy(pblk_g_rq_cache);
- up_write(&pblk_lock);
- return -ENOMEM;
- }
- up_write(&pblk_lock);
+ if (!pblk_caches.w_rq)
+ goto fail_destroy_g_rq;
return 0;
+
+fail_destroy_g_rq:
+ kmem_cache_destroy(pblk_caches.g_rq);
+fail_destroy_rec:
+ kmem_cache_destroy(pblk_caches.rec);
+fail_destroy_ws:
+ kmem_cache_destroy(pblk_caches.ws);
+
+ return -ENOMEM;
}
-static void pblk_free_global_caches(struct pblk *pblk)
+static int pblk_get_global_caches(void)
{
- kmem_cache_destroy(pblk_ws_cache);
- kmem_cache_destroy(pblk_rec_cache);
- kmem_cache_destroy(pblk_g_rq_cache);
- kmem_cache_destroy(pblk_w_rq_cache);
+ int ret;
+
+ mutex_lock(&pblk_caches.mutex);
+
+ if (kref_read(&pblk_caches.kref) > 0) {
+ kref_get(&pblk_caches.kref);
+ mutex_unlock(&pblk_caches.mutex);
+ return 0;
+ }
+
+ ret = pblk_create_global_caches();
+
+ if (!ret)
+ kref_get(&pblk_caches.kref);
+
+ mutex_unlock(&pblk_caches.mutex);
+
+ return ret;
+}
+
+static void pblk_destroy_global_caches(struct kref *ref)
+{
+ struct pblk_global_caches *c;
+
+ c = container_of(ref, struct pblk_global_caches, kref);
+
+ kmem_cache_destroy(c->ws);
+ kmem_cache_destroy(c->rec);
+ kmem_cache_destroy(c->g_rq);
+ kmem_cache_destroy(c->w_rq);
+}
+
+static void pblk_put_global_caches(void)
+{
+ mutex_lock(&pblk_caches.mutex);
+ kref_put(&pblk_caches.kref, pblk_destroy_global_caches);
+ mutex_unlock(&pblk_caches.mutex);
}
static int pblk_core_init(struct pblk *pblk)
@@ -371,23 +405,19 @@ static int pblk_core_init(struct pblk *pblk)
atomic64_set(&pblk->nr_flush, 0);
pblk->nr_flush_rst = 0;
- pblk->min_write_pgs = geo->ws_opt * (geo->csecs / PAGE_SIZE);
+ pblk->min_write_pgs = geo->ws_opt;
max_write_ppas = pblk->min_write_pgs * geo->all_luns;
pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
+ pblk->max_write_pgs = min_t(int, pblk->max_write_pgs,
+ queue_max_hw_sectors(dev->q) / (geo->csecs >> SECTOR_SHIFT));
pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
- if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
- pblk_err(pblk, "vector list too big(%u > %u)\n",
- pblk->max_write_pgs, PBLK_MAX_REQ_ADDRS);
- return -EINVAL;
- }
-
pblk->pad_dist = kcalloc(pblk->min_write_pgs - 1, sizeof(atomic64_t),
GFP_KERNEL);
if (!pblk->pad_dist)
return -ENOMEM;
- if (pblk_init_global_caches(pblk))
+ if (pblk_get_global_caches())
goto fail_free_pad_dist;
/* Internal bios can be at most the sectors signaled by the device. */
@@ -396,27 +426,27 @@ static int pblk_core_init(struct pblk *pblk)
goto free_global_caches;
ret = mempool_init_slab_pool(&pblk->gen_ws_pool, PBLK_GEN_WS_POOL_SIZE,
- pblk_ws_cache);
+ pblk_caches.ws);
if (ret)
goto free_page_bio_pool;
ret = mempool_init_slab_pool(&pblk->rec_pool, geo->all_luns,
- pblk_rec_cache);
+ pblk_caches.rec);
if (ret)
goto free_gen_ws_pool;
ret = mempool_init_slab_pool(&pblk->r_rq_pool, geo->all_luns,
- pblk_g_rq_cache);
+ pblk_caches.g_rq);
if (ret)
goto free_rec_pool;
ret = mempool_init_slab_pool(&pblk->e_rq_pool, geo->all_luns,
- pblk_g_rq_cache);
+ pblk_caches.g_rq);
if (ret)
goto free_r_rq_pool;
ret = mempool_init_slab_pool(&pblk->w_rq_pool, geo->all_luns,
- pblk_w_rq_cache);
+ pblk_caches.w_rq);
if (ret)
goto free_e_rq_pool;
@@ -462,7 +492,7 @@ free_gen_ws_pool:
free_page_bio_pool:
mempool_exit(&pblk->page_bio_pool);
free_global_caches:
- pblk_free_global_caches(pblk);
+ pblk_put_global_caches();
fail_free_pad_dist:
kfree(pblk->pad_dist);
return -ENOMEM;
@@ -486,7 +516,7 @@ static void pblk_core_free(struct pblk *pblk)
mempool_exit(&pblk->e_rq_pool);
mempool_exit(&pblk->w_rq_pool);
- pblk_free_global_caches(pblk);
+ pblk_put_global_caches();
kfree(pblk->pad_dist);
}
@@ -504,6 +534,9 @@ static void pblk_line_mg_free(struct pblk *pblk)
pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
kfree(l_mg->eline_meta[i]);
}
+
+ mempool_destroy(l_mg->bitmap_pool);
+ kmem_cache_destroy(l_mg->bitmap_cache);
}
static void pblk_line_meta_free(struct pblk_line_mgmt *l_mg,
@@ -540,67 +573,6 @@ static void pblk_lines_free(struct pblk *pblk)
kfree(pblk->lines);
}
-static int pblk_bb_get_tbl(struct nvm_tgt_dev *dev, struct pblk_lun *rlun,
- u8 *blks, int nr_blks)
-{
- struct ppa_addr ppa;
- int ret;
-
- ppa.ppa = 0;
- ppa.g.ch = rlun->bppa.g.ch;
- ppa.g.lun = rlun->bppa.g.lun;
-
- ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
- if (ret)
- return ret;
-
- nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
- if (nr_blks < 0)
- return -EIO;
-
- return 0;
-}
-
-static void *pblk_bb_get_meta(struct pblk *pblk)
-{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
- u8 *meta;
- int i, nr_blks, blk_per_lun;
- int ret;
-
- blk_per_lun = geo->num_chk * geo->pln_mode;
- nr_blks = blk_per_lun * geo->all_luns;
-
- meta = kmalloc(nr_blks, GFP_KERNEL);
- if (!meta)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < geo->all_luns; i++) {
- struct pblk_lun *rlun = &pblk->luns[i];
- u8 *meta_pos = meta + i * blk_per_lun;
-
- ret = pblk_bb_get_tbl(dev, rlun, meta_pos, blk_per_lun);
- if (ret) {
- kfree(meta);
- return ERR_PTR(-EIO);
- }
- }
-
- return meta;
-}
-
-static void *pblk_chunk_get_meta(struct pblk *pblk)
-{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
-
- if (geo->version == NVM_OCSSD_SPEC_12)
- return pblk_bb_get_meta(pblk);
- else
- return pblk_chunk_get_info(pblk);
-}
-
static int pblk_luns_init(struct pblk *pblk)
{
struct nvm_tgt_dev *dev = pblk->dev;
@@ -699,51 +671,7 @@ static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
atomic_set(&pblk->rl.free_user_blocks, nr_free_blks);
}
-static int pblk_setup_line_meta_12(struct pblk *pblk, struct pblk_line *line,
- void *chunk_meta)
-{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
- struct pblk_line_meta *lm = &pblk->lm;
- int i, chk_per_lun, nr_bad_chks = 0;
-
- chk_per_lun = geo->num_chk * geo->pln_mode;
-
- for (i = 0; i < lm->blk_per_line; i++) {
- struct pblk_lun *rlun = &pblk->luns[i];
- struct nvm_chk_meta *chunk;
- int pos = pblk_ppa_to_pos(geo, rlun->bppa);
- u8 *lun_bb_meta = chunk_meta + pos * chk_per_lun;
-
- chunk = &line->chks[pos];
-
- /*
- * In 1.2 spec. chunk state is not persisted by the device. Thus
- * some of the values are reset each time pblk is instantiated,
- * so we have to assume that the block is closed.
- */
- if (lun_bb_meta[line->id] == NVM_BLK_T_FREE)
- chunk->state = NVM_CHK_ST_CLOSED;
- else
- chunk->state = NVM_CHK_ST_OFFLINE;
-
- chunk->type = NVM_CHK_TP_W_SEQ;
- chunk->wi = 0;
- chunk->slba = -1;
- chunk->cnlb = geo->clba;
- chunk->wp = 0;
-
- if (!(chunk->state & NVM_CHK_ST_OFFLINE))
- continue;
-
- set_bit(pos, line->blk_bitmap);
- nr_bad_chks++;
- }
-
- return nr_bad_chks;
-}
-
-static int pblk_setup_line_meta_20(struct pblk *pblk, struct pblk_line *line,
+static int pblk_setup_line_meta_chk(struct pblk *pblk, struct pblk_line *line,
struct nvm_chk_meta *meta)
{
struct nvm_tgt_dev *dev = pblk->dev;
@@ -772,6 +700,9 @@ static int pblk_setup_line_meta_20(struct pblk *pblk, struct pblk_line *line,
chunk->cnlb = chunk_meta->cnlb;
chunk->wp = chunk_meta->wp;
+ trace_pblk_chunk_state(pblk_disk_name(pblk), &ppa,
+ chunk->state);
+
if (chunk->type & NVM_CHK_TP_SZ_SPEC) {
WARN_ONCE(1, "pblk: custom-sized chunks unsupported\n");
continue;
@@ -790,8 +721,6 @@ static int pblk_setup_line_meta_20(struct pblk *pblk, struct pblk_line *line,
static long pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line,
void *chunk_meta, int line_id)
{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
long nr_bad_chks, chk_in_line;
@@ -804,10 +733,7 @@ static long pblk_setup_line_meta(struct pblk *pblk, struct pblk_line *line,
line->vsc = &l_mg->vsc_list[line_id];
spin_lock_init(&line->lock);
- if (geo->version == NVM_OCSSD_SPEC_12)
- nr_bad_chks = pblk_setup_line_meta_12(pblk, line, chunk_meta);
- else
- nr_bad_chks = pblk_setup_line_meta_20(pblk, line, chunk_meta);
+ nr_bad_chks = pblk_setup_line_meta_chk(pblk, line, chunk_meta);
chk_in_line = lm->blk_per_line - nr_bad_chks;
if (nr_bad_chks < 0 || nr_bad_chks > lm->blk_per_line ||
@@ -913,6 +839,17 @@ static int pblk_line_mg_init(struct pblk *pblk)
goto fail_free_smeta;
}
+ l_mg->bitmap_cache = kmem_cache_create("pblk_lm_bitmap",
+ lm->sec_bitmap_len, 0, 0, NULL);
+ if (!l_mg->bitmap_cache)
+ goto fail_free_smeta;
+
+ /* the bitmap pool is used for both valid and map bitmaps */
+ l_mg->bitmap_pool = mempool_create_slab_pool(PBLK_DATA_LINES * 2,
+ l_mg->bitmap_cache);
+ if (!l_mg->bitmap_pool)
+ goto fail_destroy_bitmap_cache;
+
/* emeta allocates three different buffers for managing metadata with
* in-memory and in-media layouts
*/
@@ -965,6 +902,10 @@ fail_free_emeta:
kfree(l_mg->eline_meta[i]->buf);
kfree(l_mg->eline_meta[i]);
}
+
+ mempool_destroy(l_mg->bitmap_pool);
+fail_destroy_bitmap_cache:
+ kmem_cache_destroy(l_mg->bitmap_cache);
fail_free_smeta:
for (i = 0; i < PBLK_DATA_LINES; i++)
kfree(l_mg->sline_meta[i]);
@@ -1058,7 +999,7 @@ static int pblk_lines_init(struct pblk *pblk)
if (ret)
goto fail_free_meta;
- chunk_meta = pblk_chunk_get_meta(pblk);
+ chunk_meta = pblk_get_chunk_meta(pblk);
if (IS_ERR(chunk_meta)) {
ret = PTR_ERR(chunk_meta);
goto fail_free_luns;
@@ -1079,16 +1020,20 @@ static int pblk_lines_init(struct pblk *pblk)
goto fail_free_lines;
nr_free_chks += pblk_setup_line_meta(pblk, line, chunk_meta, i);
+
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
}
if (!nr_free_chks) {
pblk_err(pblk, "too many bad blocks prevent for sane instance\n");
- return -EINTR;
+ ret = -EINTR;
+ goto fail_free_lines;
}
pblk_set_provision(pblk, nr_free_chks);
- kfree(chunk_meta);
+ vfree(chunk_meta);
return 0;
fail_free_lines:
@@ -1165,7 +1110,6 @@ static void pblk_exit(void *private, bool graceful)
{
struct pblk *pblk = private;
- down_write(&pblk_lock);
pblk_gc_exit(pblk, graceful);
pblk_tear_down(pblk, graceful);
@@ -1174,7 +1118,6 @@ static void pblk_exit(void *private, bool graceful)
#endif
pblk_free(pblk);
- up_write(&pblk_lock);
}
static sector_t pblk_capacity(void *private)
@@ -1200,6 +1143,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
pblk->dev = dev;
pblk->disk = tdisk;
pblk->state = PBLK_STATE_RUNNING;
+ trace_pblk_state(pblk_disk_name(pblk), pblk->state);
pblk->gc.gc_enabled = 0;
if (!(geo->version == NVM_OCSSD_SPEC_12 ||
@@ -1210,13 +1154,6 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
return ERR_PTR(-EINVAL);
}
- if (geo->version == NVM_OCSSD_SPEC_12 && geo->dom & NVM_RSP_L2P) {
- pblk_err(pblk, "host-side L2P table not supported. (%x)\n",
- geo->dom);
- kfree(pblk);
- return ERR_PTR(-EINVAL);
- }
-
spin_lock_init(&pblk->resubmit_lock);
spin_lock_init(&pblk->trans_lock);
spin_lock_init(&pblk->lock);
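
The pblk-init.c changes above replace per-instance kmem caches with one shared pblk_global_caches protected by a kref and a mutex: the first instance creates the caches, later instances only take a reference, and the last put tears them down. A minimal userspace sketch of the same get/put discipline, with a pthread mutex and a plain counter standing in for the kernel mutex and kref (demo_* names are hypothetical):

/* Sketch only: models the kref+mutex singleton pattern used above. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_caches {
	void *ws;                 /* stands in for the kmem_cache pointers */
	int refcount;             /* stands in for the kref */
	pthread_mutex_t lock;     /* keeps refcount and caches consistent */
};

static struct demo_caches caches = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

static int demo_get_caches(void)
{
	int ret = 0;

	pthread_mutex_lock(&caches.lock);
	if (caches.refcount == 0) {
		caches.ws = malloc(64);       /* "create" the caches once */
		if (!caches.ws)
			ret = -1;
	}
	if (!ret)
		caches.refcount++;
	pthread_mutex_unlock(&caches.lock);
	return ret;
}

static void demo_put_caches(void)
{
	pthread_mutex_lock(&caches.lock);
	if (--caches.refcount == 0) {         /* last user tears down */
		free(caches.ws);
		caches.ws = NULL;
	}
	pthread_mutex_unlock(&caches.lock);
}

int main(void)
{
	demo_get_caches();    /* first instance creates the caches */
	demo_get_caches();    /* second instance just takes a reference */
	demo_put_caches();
	demo_put_caches();    /* last put frees them */
	printf("caches torn down: %s\n", caches.ws ? "no" : "yes");
	return 0;
}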
diff --git a/drivers/lightnvm/pblk-map.c b/drivers/lightnvm/pblk-map.c
index 953ca31dda68..6dcbd44e3acb 100644
--- a/drivers/lightnvm/pblk-map.c
+++ b/drivers/lightnvm/pblk-map.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 CNEX Labs
* Initial release: Javier Gonzalez <javier@cnexlabs.com>
@@ -79,7 +80,7 @@ static int pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
}
}
- pblk_down_rq(pblk, ppa_list, nr_secs, lun_bitmap);
+ pblk_down_rq(pblk, ppa_list[0], lun_bitmap);
return 0;
}
@@ -88,13 +89,14 @@ void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
unsigned int off)
{
struct pblk_sec_meta *meta_list = rqd->meta_list;
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
unsigned int map_secs;
int min = pblk->min_write_pgs;
int i;
for (i = off; i < rqd->nr_ppas; i += min) {
map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
- if (pblk_map_page_data(pblk, sentry + i, &rqd->ppa_list[i],
+ if (pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
lun_bitmap, &meta_list[i], map_secs)) {
bio_put(rqd->bio);
pblk_free_rqd(pblk, rqd, PBLK_WRITE);
@@ -112,6 +114,7 @@ void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
struct nvm_geo *geo = &dev->geo;
struct pblk_line_meta *lm = &pblk->lm;
struct pblk_sec_meta *meta_list = rqd->meta_list;
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
struct pblk_line *e_line, *d_line;
unsigned int map_secs;
int min = pblk->min_write_pgs;
@@ -119,14 +122,14 @@ void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
for (i = 0; i < rqd->nr_ppas; i += min) {
map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
- if (pblk_map_page_data(pblk, sentry + i, &rqd->ppa_list[i],
+ if (pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
lun_bitmap, &meta_list[i], map_secs)) {
bio_put(rqd->bio);
pblk_free_rqd(pblk, rqd, PBLK_WRITE);
pblk_pipeline_stop(pblk);
}
- erase_lun = pblk_ppa_to_pos(geo, rqd->ppa_list[i]);
+ erase_lun = pblk_ppa_to_pos(geo, ppa_list[i]);
/* line can change after page map. We might also be writing the
* last line.
@@ -141,7 +144,7 @@ void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
set_bit(erase_lun, e_line->erase_bitmap);
atomic_dec(&e_line->left_eblks);
- *erase_ppa = rqd->ppa_list[i];
+ *erase_ppa = ppa_list[i];
erase_ppa->a.blk = e_line->id;
spin_unlock(&e_line->lock);
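
The map-path hunks above stop dereferencing rqd->ppa_list directly and go through nvm_rq_to_ppa_list(), which hides the single-sector special case: one PPA is stored inline in ppa_addr, while larger requests carry a DMA-mapped ppa_list. A hedged sketch of what such an accessor amounts to, using simplified stand-in types rather than the real struct nvm_rq:

#include <stdio.h>

/* Stand-in types: the real struct nvm_rq lives in
 * include/linux/lightnvm.h and carries many more fields. */
struct demo_ppa { unsigned long long ppa; };

struct demo_rq {
	unsigned int nr_ppas;
	struct demo_ppa ppa_addr;	/* used when nr_ppas == 1 */
	struct demo_ppa *ppa_list;	/* used when nr_ppas > 1 */
};

/* Always safe to index 0..nr_ppas-1, whatever the request size. */
static struct demo_ppa *demo_rq_to_ppa_list(struct demo_rq *rqd)
{
	return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
}

int main(void)
{
	struct demo_rq rqd = { .nr_ppas = 1, .ppa_addr = { 0x42 } };

	printf("ppa[0] = 0x%llx\n", demo_rq_to_ppa_list(&rqd)[0].ppa);
	return 0;
}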
diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
index f6eec0212dfc..b1f4b51783f4 100644
--- a/drivers/lightnvm/pblk-rb.c
+++ b/drivers/lightnvm/pblk-rb.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 CNEX Labs
* Initial release: Javier Gonzalez <javier@cnexlabs.com>
@@ -22,7 +23,7 @@
static DECLARE_RWSEM(pblk_rb_lock);
-void pblk_rb_data_free(struct pblk_rb *rb)
+static void pblk_rb_data_free(struct pblk_rb *rb)
{
struct pblk_rb_pages *p, *t;
@@ -35,25 +36,51 @@ void pblk_rb_data_free(struct pblk_rb *rb)
up_write(&pblk_rb_lock);
}
+void pblk_rb_free(struct pblk_rb *rb)
+{
+ pblk_rb_data_free(rb);
+ vfree(rb->entries);
+}
+
+/*
+ * pblk_rb_calculate_size -- calculate the size of the write buffer
+ */
+static unsigned int pblk_rb_calculate_size(unsigned int nr_entries)
+{
+ /* Alloc a write buffer that can at least fit 128 entries */
+ return (1 << max(get_count_order(nr_entries), 7));
+}
+
/*
* Initialize ring buffer. The data and metadata buffers must be previously
* allocated and their size must be a power of two
* (Documentation/core-api/circular-buffers.rst)
*/
-int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
- unsigned int power_size, unsigned int power_seg_sz)
+int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int threshold,
+ unsigned int seg_size)
{
struct pblk *pblk = container_of(rb, struct pblk, rwb);
+ struct pblk_rb_entry *entries;
unsigned int init_entry = 0;
- unsigned int alloc_order = power_size;
unsigned int max_order = MAX_ORDER - 1;
- unsigned int order, iter;
+ unsigned int power_size, power_seg_sz;
+ unsigned int alloc_order, order, iter;
+ unsigned int nr_entries;
+
+ nr_entries = pblk_rb_calculate_size(size);
+ entries = vzalloc(array_size(nr_entries, sizeof(struct pblk_rb_entry)));
+ if (!entries)
+ return -ENOMEM;
+
+ power_size = get_count_order(size);
+ power_seg_sz = get_count_order(seg_size);
down_write(&pblk_rb_lock);
- rb->entries = rb_entry_base;
+ rb->entries = entries;
rb->seg_size = (1 << power_seg_sz);
rb->nr_entries = (1 << power_size);
rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
+ rb->back_thres = threshold;
rb->flush_point = EMPTY_ENTRY;
spin_lock_init(&rb->w_lock);
@@ -61,6 +88,7 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
INIT_LIST_HEAD(&rb->pages);
+ alloc_order = power_size;
if (alloc_order >= max_order) {
order = max_order;
iter = (1 << (alloc_order - max_order));
@@ -79,6 +107,7 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
page_set = kmalloc(sizeof(struct pblk_rb_pages), GFP_KERNEL);
if (!page_set) {
up_write(&pblk_rb_lock);
+ vfree(entries);
return -ENOMEM;
}
@@ -88,6 +117,7 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
kfree(page_set);
pblk_rb_data_free(rb);
up_write(&pblk_rb_lock);
+ vfree(entries);
return -ENOMEM;
}
kaddr = page_address(page_set->pages);
@@ -124,20 +154,6 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
return 0;
}
-/*
- * pblk_rb_calculate_size -- calculate the size of the write buffer
- */
-unsigned int pblk_rb_calculate_size(unsigned int nr_entries)
-{
- /* Alloc a write buffer that can at least fit 128 entries */
- return (1 << max(get_count_order(nr_entries), 7));
-}
-
-void *pblk_rb_entries_ref(struct pblk_rb *rb)
-{
- return rb->entries;
-}
-
static void clean_wctx(struct pblk_w_ctx *w_ctx)
{
int flags;
@@ -168,6 +184,12 @@ static unsigned int pblk_rb_space(struct pblk_rb *rb)
return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries);
}
+unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p,
+ unsigned int nr_entries)
+{
+ return (p + nr_entries) & (rb->nr_entries - 1);
+}
+
/*
* Buffer count is calculated with respect to the submission entry signaling the
* entries that are available to send to the media
@@ -194,8 +216,7 @@ unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries)
subm = READ_ONCE(rb->subm);
/* Commit read means updating submission pointer */
- smp_store_release(&rb->subm,
- (subm + nr_entries) & (rb->nr_entries - 1));
+ smp_store_release(&rb->subm, pblk_rb_ptr_wrap(rb, subm, nr_entries));
return subm;
}
@@ -225,10 +246,10 @@ static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int to_update)
pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa,
entry->cacheline);
- line = &pblk->lines[pblk_ppa_to_line(w_ctx->ppa)];
+ line = pblk_ppa_to_line(pblk, w_ctx->ppa);
kref_put(&line->ref, pblk_line_put);
clean_wctx(w_ctx);
- rb->l2p_update = (rb->l2p_update + 1) & (rb->nr_entries - 1);
+ rb->l2p_update = pblk_rb_ptr_wrap(rb, rb->l2p_update, 1);
}
pblk_rl_out(&pblk->rl, user_io, gc_io);
@@ -385,11 +406,14 @@ static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
{
unsigned int mem;
unsigned int sync;
+ unsigned int threshold;
sync = READ_ONCE(rb->sync);
mem = READ_ONCE(rb->mem);
- if (pblk_rb_ring_space(rb, mem, sync, rb->nr_entries) < nr_entries)
+ threshold = nr_entries + rb->back_thres;
+
+ if (pblk_rb_ring_space(rb, mem, sync, rb->nr_entries) < threshold)
return 0;
if (pblk_rb_update_l2p(rb, nr_entries, mem, sync))
@@ -407,7 +431,7 @@ static int pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
return 0;
/* Protect from read count */
- smp_store_release(&rb->mem, (*pos + nr_entries) & (rb->nr_entries - 1));
+ smp_store_release(&rb->mem, pblk_rb_ptr_wrap(rb, *pos, nr_entries));
return 1;
}
@@ -431,7 +455,7 @@ static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
if (!__pblk_rb_may_write(rb, nr_entries, pos))
return 0;
- mem = (*pos + nr_entries) & (rb->nr_entries - 1);
+ mem = pblk_rb_ptr_wrap(rb, *pos, nr_entries);
*io_ret = NVM_IO_DONE;
if (bio->bi_opf & REQ_PREFLUSH) {
@@ -571,7 +595,7 @@ try:
/* Release flags on context. Protect from writes */
smp_store_release(&entry->w_ctx.flags, flags);
- pos = (pos + 1) & (rb->nr_entries - 1);
+ pos = pblk_rb_ptr_wrap(rb, pos, 1);
}
if (pad) {
@@ -651,7 +675,7 @@ out:
struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos)
{
- unsigned int entry = pos & (rb->nr_entries - 1);
+ unsigned int entry = pblk_rb_ptr_wrap(rb, pos, 0);
return &rb->entries[entry].w_ctx;
}
@@ -697,7 +721,7 @@ unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
}
}
- sync = (sync + nr_entries) & (rb->nr_entries - 1);
+ sync = pblk_rb_ptr_wrap(rb, sync, nr_entries);
/* Protect from counts */
smp_store_release(&rb->sync, sync);
@@ -728,32 +752,6 @@ unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb)
return (submitted < to_flush) ? (to_flush - submitted) : 0;
}
-/*
- * Scan from the current position of the sync pointer to find the entry that
- * corresponds to the given ppa. This is necessary since write requests can be
- * completed out of order. The assumption is that the ppa is close to the sync
- * pointer thus the search will not take long.
- *
- * The caller of this function must guarantee that the sync pointer will no
- * reach the entry while it is using the metadata associated with it. With this
- * assumption in mind, there is no need to take the sync lock.
- */
-struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
- struct ppa_addr *ppa)
-{
- unsigned int sync, subm, count;
- unsigned int i;
-
- sync = READ_ONCE(rb->sync);
- subm = READ_ONCE(rb->subm);
- count = pblk_rb_ring_count(subm, sync, rb->nr_entries);
-
- for (i = 0; i < count; i++)
- sync = (sync + 1) & (rb->nr_entries - 1);
-
- return NULL;
-}
-
int pblk_rb_tear_down_check(struct pblk_rb *rb)
{
struct pblk_rb_entry *entry;
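
Two ideas in the pblk-rb.c changes above are easy to miss: ring pointers now advance through pblk_rb_ptr_wrap(), which wraps with a power-of-two mask instead of a modulo, and __pblk_rb_may_write() refuses a writer unless nr_entries plus a back-pressure threshold (back_thres) slots are free, keeping headroom for GC and padding. A self-contained userspace sketch under those assumptions (a 128-entry ring; all names are illustrative):

#include <stdio.h>

#define RB_ENTRIES 128u   /* must be a power of two, as in pblk_rb_init() */

/* Advance p by n slots, wrapping with a mask instead of a modulo. */
static unsigned int rb_ptr_wrap(unsigned int p, unsigned int n)
{
	return (p + n) & (RB_ENTRIES - 1);
}

/* Free slots between producer (mem) and consumer (sync). */
static unsigned int rb_space(unsigned int mem, unsigned int sync)
{
	return (sync - mem - 1) & (RB_ENTRIES - 1);
}

/* Admit a write only if it leaves back_thres slots of headroom. */
static int rb_may_write(unsigned int mem, unsigned int sync,
			unsigned int nr, unsigned int back_thres)
{
	return rb_space(mem, sync) >= nr + back_thres;
}

int main(void)
{
	unsigned int mem = 120, sync = 10;

	printf("space=%u\n", rb_space(mem, sync));           /* 17 */
	printf("admit 16 (thres 8)? %d\n",
	       rb_may_write(mem, sync, 16, 8));              /* 0: refused */
	printf("wrap 120+16 -> %u\n", rb_ptr_wrap(mem, 16)); /* 8 */
	return 0;
}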
diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
index 5a46d7f9302f..9fba614adeeb 100644
--- a/drivers/lightnvm/pblk-read.c
+++ b/drivers/lightnvm/pblk-read.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 CNEX Labs
* Initial release: Javier Gonzalez <javier@cnexlabs.com>
@@ -43,7 +44,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
unsigned long *read_bitmap)
{
struct pblk_sec_meta *meta_list = rqd->meta_list;
- struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
+ struct ppa_addr ppas[NVM_MAX_VLBA];
int nr_secs = rqd->nr_ppas;
bool advanced_bio = false;
int i, j = 0;
@@ -93,9 +94,7 @@ next:
}
if (pblk_io_aligned(pblk, nr_secs))
- rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
- else
- rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
+ rqd->is_seq = 1;
#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(nr_secs, &pblk->inflight_reads);
@@ -118,10 +117,9 @@ static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd,
if (lba != blba + i) {
#ifdef CONFIG_NVM_PBLK_DEBUG
- struct ppa_addr *p;
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
- p = (nr_lbas == 1) ? &rqd->ppa_list[i] : &rqd->ppa_addr;
- print_ppa(pblk, p, "seq", i);
+ print_ppa(pblk, &ppa_list[i], "seq", i);
#endif
pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
lba, (u64)blba + i);
@@ -150,14 +148,12 @@ static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
if (lba != meta_lba) {
#ifdef CONFIG_NVM_PBLK_DEBUG
- struct ppa_addr *p;
- int nr_ppas = rqd->nr_ppas;
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
- p = (nr_ppas == 1) ? &rqd->ppa_list[j] : &rqd->ppa_addr;
- print_ppa(pblk, p, "seq", j);
+ print_ppa(pblk, &ppa_list[j], "rnd", j);
#endif
pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n",
- lba, meta_lba);
+ meta_lba, lba);
WARN_ON(1);
}
@@ -167,22 +163,6 @@ static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd,
WARN_ONCE(j != rqd->nr_ppas, "pblk: corrupted random request\n");
}
-static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd)
-{
- struct ppa_addr *ppa_list;
- int i;
-
- ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
-
- for (i = 0; i < rqd->nr_ppas; i++) {
- struct ppa_addr ppa = ppa_list[i];
- struct pblk_line *line;
-
- line = &pblk->lines[pblk_ppa_to_line(ppa)];
- kref_put(&line->ref, pblk_line_put_wq);
- }
-}
-
static void pblk_end_user_read(struct bio *bio)
{
#ifdef CONFIG_NVM_PBLK_DEBUG
@@ -210,7 +190,7 @@ static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd,
bio_put(int_bio);
if (put_line)
- pblk_read_put_rqd_kref(pblk, rqd);
+ pblk_rq_to_line_put(pblk, rqd);
#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_add(rqd->nr_ppas, &pblk->sync_reads);
@@ -270,9 +250,9 @@ static void pblk_end_partial_read(struct nvm_rq *rqd)
i = 0;
hole = find_first_zero_bit(read_bitmap, nr_secs);
do {
- int line_id = pblk_ppa_to_line(rqd->ppa_list[i]);
- struct pblk_line *line = &pblk->lines[line_id];
+ struct pblk_line *line;
+ line = pblk_ppa_to_line(pblk, rqd->ppa_list[i]);
kref_put(&line->ref, pblk_line_put);
meta_list[hole].lba = lba_list_media[i];
@@ -344,7 +324,6 @@ static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd,
rqd->bio = new_bio;
rqd->nr_ppas = nr_holes;
- rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
pr_ctx->ppa_ptr = NULL;
pr_ctx->orig_bio = bio;
@@ -438,8 +417,6 @@ retry:
} else {
rqd->ppa_addr = ppa;
}
-
- rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
}
int pblk_submit_read(struct pblk *pblk, struct bio *bio)
@@ -454,13 +431,6 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
DECLARE_BITMAP(read_bitmap, NVM_MAX_VLBA);
int ret = NVM_IO_ERR;
- /* logic error: lba out-of-bounds. Ignore read request */
- if (blba >= pblk->rl.nr_secs || nr_secs > PBLK_MAX_REQ_ADDRS) {
- WARN(1, "pblk: read lba out of bounds (lba:%llu, nr:%d)\n",
- (unsigned long long)blba, nr_secs);
- return NVM_IO_ERR;
- }
-
generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio),
&pblk->disk->part0);
@@ -484,21 +454,13 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
*/
bio_init_idx = pblk_get_bi_idx(bio);
- rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
- &rqd->dma_meta_list);
- if (!rqd->meta_list) {
- pblk_err(pblk, "not able to allocate ppa list\n");
+ if (pblk_alloc_rqd_meta(pblk, rqd))
goto fail_rqd_free;
- }
-
- if (nr_secs > 1) {
- rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
- rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
+ if (nr_secs > 1)
pblk_read_ppalist_rq(pblk, rqd, bio, blba, read_bitmap);
- } else {
+ else
pblk_read_rq(pblk, rqd, bio, blba, read_bitmap);
- }
if (bitmap_full(read_bitmap, nr_secs)) {
atomic_inc(&pblk->inflight_io);
@@ -552,7 +514,7 @@ static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd,
struct pblk_line *line, u64 *lba_list,
u64 *paddr_list_gc, unsigned int nr_secs)
{
- struct ppa_addr ppa_list_l2p[PBLK_MAX_REQ_ADDRS];
+ struct ppa_addr ppa_list_l2p[NVM_MAX_VLBA];
struct ppa_addr ppa_gc;
int valid_secs = 0;
int i;
@@ -625,15 +587,11 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
memset(&rqd, 0, sizeof(struct nvm_rq));
- rqd.meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
- &rqd.dma_meta_list);
- if (!rqd.meta_list)
- return -ENOMEM;
+ ret = pblk_alloc_rqd_meta(pblk, &rqd);
+ if (ret)
+ return ret;
if (gc_rq->nr_secs > 1) {
- rqd.ppa_list = rqd.meta_list + pblk_dma_meta_size;
- rqd.dma_ppa_list = rqd.dma_meta_list + pblk_dma_meta_size;
-
gc_rq->secs_to_gc = read_ppalist_rq_gc(pblk, &rqd, gc_rq->line,
gc_rq->lba_list,
gc_rq->paddr_list,
@@ -654,7 +612,8 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
PBLK_VMALLOC_META, GFP_KERNEL);
if (IS_ERR(bio)) {
pblk_err(pblk, "could not allocate GC bio (%lu)\n",
- PTR_ERR(bio));
+ PTR_ERR(bio));
+ ret = PTR_ERR(bio);
goto err_free_dma;
}
@@ -663,7 +622,6 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
rqd.opcode = NVM_OP_PREAD;
rqd.nr_ppas = gc_rq->secs_to_gc;
- rqd.flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
rqd.bio = bio;
if (pblk_submit_io_sync(pblk, &rqd)) {
@@ -690,12 +648,12 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
#endif
out:
- nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
+ pblk_free_rqd_meta(pblk, &rqd);
return ret;
err_free_bio:
bio_put(bio);
err_free_dma:
- nvm_dev_dma_free(dev->parent, rqd.meta_list, rqd.dma_meta_list);
+ pblk_free_rqd_meta(pblk, &rqd);
return ret;
}
diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
index e232e47e1353..5740b7509bd8 100644
--- a/drivers/lightnvm/pblk-recovery.c
+++ b/drivers/lightnvm/pblk-recovery.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 CNEX Labs
* Initial: Javier Gonzalez <javier@cnexlabs.com>
@@ -15,6 +16,7 @@
*/
#include "pblk.h"
+#include "pblk-trace.h"
int pblk_recov_check_emeta(struct pblk *pblk, struct line_emeta *emeta_buf)
{
@@ -85,15 +87,39 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line)
return 0;
}
-static int pblk_calc_sec_in_line(struct pblk *pblk, struct pblk_line *line)
+static void pblk_update_line_wp(struct pblk *pblk, struct pblk_line *line,
+ u64 written_secs)
+{
+ int i;
+
+ for (i = 0; i < written_secs; i += pblk->min_write_pgs)
+ pblk_alloc_page(pblk, line, pblk->min_write_pgs);
+}
+
+static u64 pblk_sec_in_open_line(struct pblk *pblk, struct pblk_line *line)
{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
struct pblk_line_meta *lm = &pblk->lm;
int nr_bb = bitmap_weight(line->blk_bitmap, lm->blk_per_line);
+ u64 written_secs = 0;
+ int valid_chunks = 0;
+ int i;
+
+ for (i = 0; i < lm->blk_per_line; i++) {
+ struct nvm_chk_meta *chunk = &line->chks[i];
+
+ if (chunk->state & NVM_CHK_ST_OFFLINE)
+ continue;
+
+ written_secs += chunk->wp;
+ valid_chunks++;
+ }
+
+ if (lm->blk_per_line - nr_bb != valid_chunks)
+ pblk_err(pblk, "recovery line %d is bad\n", line->id);
- return lm->sec_per_line - lm->smeta_sec - lm->emeta_sec[0] -
- nr_bb * geo->clba;
+ pblk_update_line_wp(pblk, line, written_secs - lm->smeta_sec);
+
+ return written_secs;
}
struct pblk_recov_alloc {
@@ -105,115 +131,6 @@ struct pblk_recov_alloc {
dma_addr_t dma_meta_list;
};
-static int pblk_recov_read_oob(struct pblk *pblk, struct pblk_line *line,
- struct pblk_recov_alloc p, u64 r_ptr)
-{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
- struct ppa_addr *ppa_list;
- struct pblk_sec_meta *meta_list;
- struct nvm_rq *rqd;
- struct bio *bio;
- void *data;
- dma_addr_t dma_ppa_list, dma_meta_list;
- u64 r_ptr_int;
- int left_ppas;
- int rq_ppas, rq_len;
- int i, j;
- int ret = 0;
-
- ppa_list = p.ppa_list;
- meta_list = p.meta_list;
- rqd = p.rqd;
- data = p.data;
- dma_ppa_list = p.dma_ppa_list;
- dma_meta_list = p.dma_meta_list;
-
- left_ppas = line->cur_sec - r_ptr;
- if (!left_ppas)
- return 0;
-
- r_ptr_int = r_ptr;
-
-next_read_rq:
- memset(rqd, 0, pblk_g_rq_size);
-
- rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
- if (!rq_ppas)
- rq_ppas = pblk->min_write_pgs;
- rq_len = rq_ppas * geo->csecs;
-
- bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
- if (IS_ERR(bio))
- return PTR_ERR(bio);
-
- bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
-
- rqd->bio = bio;
- rqd->opcode = NVM_OP_PREAD;
- rqd->meta_list = meta_list;
- rqd->nr_ppas = rq_ppas;
- rqd->ppa_list = ppa_list;
- rqd->dma_ppa_list = dma_ppa_list;
- rqd->dma_meta_list = dma_meta_list;
-
- if (pblk_io_aligned(pblk, rq_ppas))
- rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
- else
- rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
-
- for (i = 0; i < rqd->nr_ppas; ) {
- struct ppa_addr ppa;
- int pos;
-
- ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
- pos = pblk_ppa_to_pos(geo, ppa);
-
- while (test_bit(pos, line->blk_bitmap)) {
- r_ptr_int += pblk->min_write_pgs;
- ppa = addr_to_gen_ppa(pblk, r_ptr_int, line->id);
- pos = pblk_ppa_to_pos(geo, ppa);
- }
-
- for (j = 0; j < pblk->min_write_pgs; j++, i++, r_ptr_int++)
- rqd->ppa_list[i] =
- addr_to_gen_ppa(pblk, r_ptr_int, line->id);
- }
-
- /* If read fails, more padding is needed */
- ret = pblk_submit_io_sync(pblk, rqd);
- if (ret) {
- pblk_err(pblk, "I/O submission failed: %d\n", ret);
- return ret;
- }
-
- atomic_dec(&pblk->inflight_io);
-
- /* At this point, the read should not fail. If it does, it is a problem
- * we cannot recover from here. Need FTL log.
- */
- if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) {
- pblk_err(pblk, "L2P recovery failed (%d)\n", rqd->error);
- return -EINTR;
- }
-
- for (i = 0; i < rqd->nr_ppas; i++) {
- u64 lba = le64_to_cpu(meta_list[i].lba);
-
- if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
- continue;
-
- pblk_update_map(pblk, lba, rqd->ppa_list[i]);
- }
-
- left_ppas -= rq_ppas;
- if (left_ppas > 0)
- goto next_read_rq;
-
- return 0;
-}
-
static void pblk_recov_complete(struct kref *ref)
{
struct pblk_pad_rq *pad_rq = container_of(ref, struct pblk_pad_rq, ref);
@@ -223,10 +140,11 @@ static void pblk_recov_complete(struct kref *ref)
static void pblk_end_io_recov(struct nvm_rq *rqd)
{
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
struct pblk_pad_rq *pad_rq = rqd->private;
struct pblk *pblk = pad_rq->pblk;
- pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
+ pblk_up_chunk(pblk, ppa_list[0]);
pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
@@ -234,18 +152,17 @@ static void pblk_end_io_recov(struct nvm_rq *rqd)
kref_put(&pad_rq->ref, pblk_recov_complete);
}
-static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line,
- int left_ppas)
+/* pad line using line bitmap. */
+static int pblk_recov_pad_line(struct pblk *pblk, struct pblk_line *line,
+ int left_ppas)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- struct ppa_addr *ppa_list;
struct pblk_sec_meta *meta_list;
struct pblk_pad_rq *pad_rq;
struct nvm_rq *rqd;
struct bio *bio;
void *data;
- dma_addr_t dma_ppa_list, dma_meta_list;
__le64 *lba_list = emeta_to_lbas(pblk, line->emeta->buf);
u64 w_ptr = line->cur_sec;
int left_line_ppas, rq_ppas, rq_len;
@@ -279,20 +196,11 @@ next_pad_rq:
rq_len = rq_ppas * geo->csecs;
- meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
- if (!meta_list) {
- ret = -ENOMEM;
- goto fail_free_pad;
- }
-
- ppa_list = (void *)(meta_list) + pblk_dma_meta_size;
- dma_ppa_list = dma_meta_list + pblk_dma_meta_size;
-
bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
PBLK_VMALLOC_META, GFP_KERNEL);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
- goto fail_free_meta;
+ goto fail_free_pad;
}
bio->bi_iter.bi_sector = 0; /* internal bio */
@@ -300,17 +208,19 @@ next_pad_rq:
rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);
+ ret = pblk_alloc_rqd_meta(pblk, rqd);
+ if (ret)
+ goto fail_free_rqd;
+
rqd->bio = bio;
rqd->opcode = NVM_OP_PWRITE;
- rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
- rqd->meta_list = meta_list;
+ rqd->is_seq = 1;
rqd->nr_ppas = rq_ppas;
- rqd->ppa_list = ppa_list;
- rqd->dma_ppa_list = dma_ppa_list;
- rqd->dma_meta_list = dma_meta_list;
rqd->end_io = pblk_end_io_recov;
rqd->private = pad_rq;
+ meta_list = rqd->meta_list;
+
for (i = 0; i < rqd->nr_ppas; ) {
struct ppa_addr ppa;
int pos;
@@ -338,13 +248,13 @@ next_pad_rq:
}
kref_get(&pad_rq->ref);
- pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);
+ pblk_down_chunk(pblk, rqd->ppa_list[0]);
ret = pblk_submit_io(pblk, rqd);
if (ret) {
pblk_err(pblk, "I/O submission failed: %d\n", ret);
- pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
- goto fail_free_bio;
+ pblk_up_chunk(pblk, rqd->ppa_list[0]);
+ goto fail_free_rqd;
}
left_line_ppas -= rq_ppas;
@@ -368,157 +278,60 @@ free_rq:
kfree(pad_rq);
return ret;
-fail_free_bio:
+fail_free_rqd:
+ pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
bio_put(bio);
-fail_free_meta:
- nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
fail_free_pad:
kfree(pad_rq);
vfree(data);
return ret;
}
-/* When this function is called, it means that not all upper pages have been
- * written in a page that contains valid data. In order to recover this data, we
- * first find the write pointer on the device, then we pad all necessary
- * sectors, and finally attempt to read the valid data
- */
-static int pblk_recov_scan_all_oob(struct pblk *pblk, struct pblk_line *line,
- struct pblk_recov_alloc p)
+static int pblk_pad_distance(struct pblk *pblk, struct pblk_line *line)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
- struct ppa_addr *ppa_list;
- struct pblk_sec_meta *meta_list;
- struct nvm_rq *rqd;
- struct bio *bio;
- void *data;
- dma_addr_t dma_ppa_list, dma_meta_list;
- u64 w_ptr = 0, r_ptr;
- int rq_ppas, rq_len;
- int i, j;
- int ret = 0;
- int rec_round;
- int left_ppas = pblk_calc_sec_in_line(pblk, line) - line->cur_sec;
-
- ppa_list = p.ppa_list;
- meta_list = p.meta_list;
- rqd = p.rqd;
- data = p.data;
- dma_ppa_list = p.dma_ppa_list;
- dma_meta_list = p.dma_meta_list;
-
- /* we could recover up until the line write pointer */
- r_ptr = line->cur_sec;
- rec_round = 0;
-
-next_rq:
- memset(rqd, 0, pblk_g_rq_size);
+ int distance = geo->mw_cunits * geo->all_luns * geo->ws_opt;
- rq_ppas = pblk_calc_secs(pblk, left_ppas, 0);
- if (!rq_ppas)
- rq_ppas = pblk->min_write_pgs;
- rq_len = rq_ppas * geo->csecs;
-
- bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL);
- if (IS_ERR(bio))
- return PTR_ERR(bio);
-
- bio->bi_iter.bi_sector = 0; /* internal bio */
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ return (distance > line->left_msecs) ? line->left_msecs : distance;
+}
- rqd->bio = bio;
- rqd->opcode = NVM_OP_PREAD;
- rqd->meta_list = meta_list;
- rqd->nr_ppas = rq_ppas;
- rqd->ppa_list = ppa_list;
- rqd->dma_ppa_list = dma_ppa_list;
- rqd->dma_meta_list = dma_meta_list;
+static int pblk_line_wp_is_unbalanced(struct pblk *pblk,
+ struct pblk_line *line)
+{
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct pblk_line_meta *lm = &pblk->lm;
+ struct pblk_lun *rlun;
+ struct nvm_chk_meta *chunk;
+ struct ppa_addr ppa;
+ u64 line_wp;
+ int pos, i;
- if (pblk_io_aligned(pblk, rq_ppas))
- rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
- else
- rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
+ rlun = &pblk->luns[0];
+ ppa = rlun->bppa;
+ pos = pblk_ppa_to_pos(geo, ppa);
+ chunk = &line->chks[pos];
- for (i = 0; i < rqd->nr_ppas; ) {
- struct ppa_addr ppa;
- int pos;
+ line_wp = chunk->wp;
- w_ptr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
- ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
+ for (i = 1; i < lm->blk_per_line; i++) {
+ rlun = &pblk->luns[i];
+ ppa = rlun->bppa;
pos = pblk_ppa_to_pos(geo, ppa);
+ chunk = &line->chks[pos];
- while (test_bit(pos, line->blk_bitmap)) {
- w_ptr += pblk->min_write_pgs;
- ppa = addr_to_gen_ppa(pblk, w_ptr, line->id);
- pos = pblk_ppa_to_pos(geo, ppa);
- }
-
- for (j = 0; j < pblk->min_write_pgs; j++, i++, w_ptr++)
- rqd->ppa_list[i] =
- addr_to_gen_ppa(pblk, w_ptr, line->id);
- }
-
- ret = pblk_submit_io_sync(pblk, rqd);
- if (ret) {
- pblk_err(pblk, "I/O submission failed: %d\n", ret);
- return ret;
- }
-
- atomic_dec(&pblk->inflight_io);
-
- /* This should not happen since the read failed during normal recovery,
- * but the media works funny sometimes...
- */
- if (!rec_round++ && !rqd->error) {
- rec_round = 0;
- for (i = 0; i < rqd->nr_ppas; i++, r_ptr++) {
- u64 lba = le64_to_cpu(meta_list[i].lba);
-
- if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
- continue;
-
- pblk_update_map(pblk, lba, rqd->ppa_list[i]);
- }
- }
-
- /* Reached the end of the written line */
- if (rqd->error == NVM_RSP_ERR_EMPTYPAGE) {
- int pad_secs, nr_error_bits, bit;
- int ret;
-
- bit = find_first_bit((void *)&rqd->ppa_status, rqd->nr_ppas);
- nr_error_bits = rqd->nr_ppas - bit;
-
- /* Roll back failed sectors */
- line->cur_sec -= nr_error_bits;
- line->left_msecs += nr_error_bits;
- bitmap_clear(line->map_bitmap, line->cur_sec, nr_error_bits);
-
- pad_secs = pblk_pad_distance(pblk);
- if (pad_secs > line->left_msecs)
- pad_secs = line->left_msecs;
-
- ret = pblk_recov_pad_oob(pblk, line, pad_secs);
- if (ret)
- pblk_err(pblk, "OOB padding failed (err:%d)\n", ret);
-
- ret = pblk_recov_read_oob(pblk, line, p, r_ptr);
- if (ret)
- pblk_err(pblk, "OOB read failed (err:%d)\n", ret);
-
- left_ppas = 0;
+ if (chunk->wp > line_wp)
+ return 1;
+ else if (chunk->wp < line_wp)
+ line_wp = chunk->wp;
}
- left_ppas -= rq_ppas;
- if (left_ppas > 0)
- goto next_rq;
-
- return ret;
+ return 0;
}
static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
- struct pblk_recov_alloc p, int *done)
+ struct pblk_recov_alloc p)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct nvm_geo *geo = &dev->geo;
@@ -528,11 +341,16 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
struct bio *bio;
void *data;
dma_addr_t dma_ppa_list, dma_meta_list;
- u64 paddr;
+ __le64 *lba_list;
+ u64 paddr = 0;
+ bool padded = false;
int rq_ppas, rq_len;
int i, j;
- int ret = 0;
- int left_ppas = pblk_calc_sec_in_line(pblk, line);
+ int ret;
+ u64 left_ppas = pblk_sec_in_open_line(pblk, line);
+
+ if (pblk_line_wp_is_unbalanced(pblk, line))
+ pblk_warn(pblk, "recovering unbalanced line (%d)\n", line->id);
ppa_list = p.ppa_list;
meta_list = p.meta_list;
@@ -541,7 +359,7 @@ static int pblk_recov_scan_oob(struct pblk *pblk, struct pblk_line *line,
dma_ppa_list = p.dma_ppa_list;
dma_meta_list = p.dma_meta_list;
- *done = 1;
+ lba_list = emeta_to_lbas(pblk, line->emeta->buf);
next_rq:
memset(rqd, 0, pblk_g_rq_size);
@@ -567,15 +385,13 @@ next_rq:
rqd->dma_meta_list = dma_meta_list;
if (pblk_io_aligned(pblk, rq_ppas))
- rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_SEQUENTIAL);
- else
- rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM);
+ rqd->is_seq = 1;
+retry_rq:
for (i = 0; i < rqd->nr_ppas; ) {
struct ppa_addr ppa;
int pos;
- paddr = pblk_alloc_page(pblk, line, pblk->min_write_pgs);
ppa = addr_to_gen_ppa(pblk, paddr, line->id);
pos = pblk_ppa_to_pos(geo, ppa);
@@ -585,9 +401,9 @@ next_rq:
pos = pblk_ppa_to_pos(geo, ppa);
}
- for (j = 0; j < pblk->min_write_pgs; j++, i++, paddr++)
+ for (j = 0; j < pblk->min_write_pgs; j++, i++)
rqd->ppa_list[i] =
- addr_to_gen_ppa(pblk, paddr, line->id);
+ addr_to_gen_ppa(pblk, paddr + j, line->id);
}
ret = pblk_submit_io_sync(pblk, rqd);
@@ -599,31 +415,33 @@ next_rq:
atomic_dec(&pblk->inflight_io);
- /* Reached the end of the written line */
+	/* If a read fails, make a best effort by padding the line and retrying */
if (rqd->error) {
- int nr_error_bits, bit;
+ int pad_distance, ret;
- bit = find_first_bit((void *)&rqd->ppa_status, rqd->nr_ppas);
- nr_error_bits = rqd->nr_ppas - bit;
-
- /* Roll back failed sectors */
- line->cur_sec -= nr_error_bits;
- line->left_msecs += nr_error_bits;
- bitmap_clear(line->map_bitmap, line->cur_sec, nr_error_bits);
+ if (padded) {
+ pblk_log_read_err(pblk, rqd);
+ return -EINTR;
+ }
- left_ppas = 0;
- rqd->nr_ppas = bit;
+ pad_distance = pblk_pad_distance(pblk, line);
+ ret = pblk_recov_pad_line(pblk, line, pad_distance);
+ if (ret)
+ return ret;
- if (rqd->error != NVM_RSP_ERR_EMPTYPAGE)
- *done = 0;
+ padded = true;
+ goto retry_rq;
}
for (i = 0; i < rqd->nr_ppas; i++) {
u64 lba = le64_to_cpu(meta_list[i].lba);
+ lba_list[paddr++] = cpu_to_le64(lba);
+
if (lba == ADDR_EMPTY || lba > pblk->rl.nr_secs)
continue;
+ line->nr_valid_lbas++;
pblk_update_map(pblk, lba, rqd->ppa_list[i]);
}
@@ -631,7 +449,11 @@ next_rq:
if (left_ppas > 0)
goto next_rq;
- return ret;
+#ifdef CONFIG_NVM_PBLK_DEBUG
+ WARN_ON(padded && !pblk_line_is_full(line));
+#endif
+
+ return 0;
}
/* Scan line for lbas on out of bound area */
@@ -645,7 +467,7 @@ static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
struct pblk_recov_alloc p;
void *data;
dma_addr_t dma_ppa_list, dma_meta_list;
- int done, ret = 0;
+ int ret = 0;
meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list);
if (!meta_list)
@@ -660,7 +482,8 @@ static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
goto free_meta_list;
}
- rqd = pblk_alloc_rqd(pblk, PBLK_READ);
+ rqd = mempool_alloc(&pblk->r_rq_pool, GFP_KERNEL);
+ memset(rqd, 0, pblk_g_rq_size);
p.ppa_list = ppa_list;
p.meta_list = meta_list;
@@ -669,24 +492,17 @@ static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line)
p.dma_ppa_list = dma_ppa_list;
p.dma_meta_list = dma_meta_list;
- ret = pblk_recov_scan_oob(pblk, line, p, &done);
+ ret = pblk_recov_scan_oob(pblk, line, p);
if (ret) {
- pblk_err(pblk, "could not recover L2P from OOB\n");
+ pblk_err(pblk, "could not recover L2P form OOB\n");
goto out;
}
- if (!done) {
- ret = pblk_recov_scan_all_oob(pblk, line, p);
- if (ret) {
- pblk_err(pblk, "could not recover L2P from OOB\n");
- goto out;
- }
- }
-
if (pblk_line_is_full(line))
pblk_line_recov_close(pblk, line);
out:
+ mempool_free(rqd, &pblk->r_rq_pool);
kfree(data);
free_meta_list:
nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list);
@@ -775,7 +591,7 @@ static void pblk_recov_wa_counters(struct pblk *pblk,
}
static int pblk_line_was_written(struct pblk_line *line,
- struct pblk *pblk)
+ struct pblk *pblk)
{
struct pblk_line_meta *lm = &pblk->lm;
@@ -801,6 +617,18 @@ static int pblk_line_was_written(struct pblk_line *line,
return 1;
}
+static bool pblk_line_is_open(struct pblk *pblk, struct pblk_line *line)
+{
+ struct pblk_line_meta *lm = &pblk->lm;
+ int i;
+
+ for (i = 0; i < lm->blk_per_line; i++)
+ if (line->chks[i].state & NVM_CHK_ST_OPEN)
+ return true;
+
+ return false;
+}
+
struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
{
struct pblk_line_meta *lm = &pblk->lm;
@@ -841,7 +669,7 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
continue;
/* Lines that cannot be read are assumed as not written here */
- if (pblk_line_read_smeta(pblk, line))
+ if (pblk_line_smeta_read(pblk, line))
continue;
crc = pblk_calc_smeta_crc(pblk, smeta_buf);
@@ -911,7 +739,12 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk)
line->emeta = emeta;
memset(line->emeta->buf, 0, lm->emeta_len[0]);
- if (pblk_line_read_emeta(pblk, line, line->emeta->buf)) {
+ if (pblk_line_is_open(pblk, line)) {
+ pblk_recov_l2p_from_oob(pblk, line);
+ goto next;
+ }
+
+ if (pblk_line_emeta_read(pblk, line, line->emeta->buf)) {
pblk_recov_l2p_from_oob(pblk, line);
goto next;
}
@@ -935,6 +768,8 @@ next:
spin_lock(&line->lock);
line->state = PBLK_LINESTATE_CLOSED;
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
move_list = pblk_line_gc_list(pblk, line);
spin_unlock(&line->lock);
@@ -942,26 +777,36 @@ next:
list_move_tail(&line->list, move_list);
spin_unlock(&l_mg->gc_lock);
- kfree(line->map_bitmap);
+ mempool_free(line->map_bitmap, l_mg->bitmap_pool);
line->map_bitmap = NULL;
line->smeta = NULL;
line->emeta = NULL;
} else {
- if (open_lines > 1)
- pblk_err(pblk, "failed to recover L2P\n");
+ spin_lock(&line->lock);
+ line->state = PBLK_LINESTATE_OPEN;
+ spin_unlock(&line->lock);
+
+ line->emeta->mem = 0;
+ atomic_set(&line->emeta->sync, 0);
+
+ trace_pblk_line_state(pblk_disk_name(pblk), line->id,
+ line->state);
- open_lines++;
- line->meta_line = meta_line;
data_line = line;
+ line->meta_line = meta_line;
+
+ open_lines++;
}
}
- spin_lock(&l_mg->free_lock);
if (!open_lines) {
+ spin_lock(&l_mg->free_lock);
WARN_ON_ONCE(!test_and_clear_bit(meta_line,
&l_mg->meta_bitmap));
+ spin_unlock(&l_mg->free_lock);
pblk_line_replace_data(pblk);
} else {
+ spin_lock(&l_mg->free_lock);
/* Allocate next line for preparation */
l_mg->data_next = pblk_line_get(pblk);
if (l_mg->data_next) {
@@ -969,8 +814,8 @@ next:
l_mg->data_next->type = PBLK_LINETYPE_DATA;
is_next = 1;
}
+ spin_unlock(&l_mg->free_lock);
}
- spin_unlock(&l_mg->free_lock);
if (is_next)
pblk_line_erase(pblk, l_mg->data_next);
@@ -998,7 +843,7 @@ int pblk_recov_pad(struct pblk *pblk)
left_msecs = line->left_msecs;
spin_unlock(&l_mg->free_lock);
- ret = pblk_recov_pad_oob(pblk, line, left_msecs);
+ ret = pblk_recov_pad_line(pblk, line, left_msecs);
if (ret) {
pblk_err(pblk, "tear down padding failed (%d)\n", ret);
return ret;
diff --git a/drivers/lightnvm/pblk-rl.c b/drivers/lightnvm/pblk-rl.c
index 6a0616a6fcaf..db55a1c89997 100644
--- a/drivers/lightnvm/pblk-rl.c
+++ b/drivers/lightnvm/pblk-rl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 CNEX Labs
* Initial release: Javier Gonzalez <javier@cnexlabs.com>
@@ -127,7 +128,7 @@ static void __pblk_rl_update_rates(struct pblk_rl *rl,
} else if (free_blocks < rl->high) {
int shift = rl->high_pw - rl->rb_windows_pw;
int user_windows = free_blocks >> shift;
- int user_max = user_windows << PBLK_MAX_REQ_ADDRS_PW;
+ int user_max = user_windows << ilog2(NVM_MAX_VLBA);
rl->rb_user_max = user_max;
rl->rb_gc_max = max - user_max;
@@ -228,7 +229,7 @@ void pblk_rl_init(struct pblk_rl *rl, int budget)
rl->rsv_blocks = min_blocks;
/* This will always be a power-of-2 */
- rb_windows = budget / PBLK_MAX_REQ_ADDRS;
+ rb_windows = budget / NVM_MAX_VLBA;
rl->rb_windows_pw = get_count_order(rb_windows);
/* To start with, all buffer is available to user I/O writers */
diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c
index 9fc3dfa168b4..2d2818155aa8 100644
--- a/drivers/lightnvm/pblk-sysfs.c
+++ b/drivers/lightnvm/pblk-sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 CNEX Labs
* Initial release: Javier Gonzalez <javier@cnexlabs.com>
@@ -262,8 +263,14 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page)
sec_in_line = l_mg->data_line->sec_in_line;
meta_weight = bitmap_weight(&l_mg->meta_bitmap,
PBLK_DATA_LINES);
- map_weight = bitmap_weight(l_mg->data_line->map_bitmap,
+
+ spin_lock(&l_mg->data_line->lock);
+ if (l_mg->data_line->map_bitmap)
+ map_weight = bitmap_weight(l_mg->data_line->map_bitmap,
lm->sec_per_line);
+ else
+ map_weight = 0;
+ spin_unlock(&l_mg->data_line->lock);
}
spin_unlock(&l_mg->free_lock);
@@ -337,7 +344,6 @@ static ssize_t pblk_get_write_amp(u64 user, u64 gc, u64 pad,
{
int sz;
-
sz = snprintf(page, PAGE_SIZE,
"user:%lld gc:%lld pad:%lld WA:",
user, gc, pad);
@@ -349,7 +355,7 @@ static ssize_t pblk_get_write_amp(u64 user, u64 gc, u64 pad,
u32 wa_frac;
wa_int = (user + gc + pad) * 100000;
- wa_int = div_u64(wa_int, user);
+ wa_int = div64_u64(wa_int, user);
wa_int = div_u64_rem(wa_int, 100000, &wa_frac);
sz += snprintf(page + sz, PAGE_SIZE - sz, "%llu.%05u\n",
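
For the write-amplification figure computed above: the ratio WA = (user + gc + pad) / user is kept in integer arithmetic by scaling the numerator by 100000, which yields five fixed decimal places; and since user can exceed 32 bits, the 64-by-64 div64_u64() replaces div_u64(), whose divisor is only 32 bits wide. A worked example of the same arithmetic as a userspace sketch (the counter values are hypothetical):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical counters, in sectors. */
	uint64_t user = 1000000, gc = 150000, pad = 25000;

	/* WA scaled by 100000 to keep five decimal places in integers,
	 * mirroring the pblk_get_write_amp() arithmetic. */
	uint64_t wa_int = ((user + gc + pad) * 100000) / user; /* 117500 */
	uint64_t wa_frac = wa_int % 100000;                    /* 17500 */

	/* Prints "WA: 1.17500": 1.175 sectors hit media per user sector. */
	printf("WA: %" PRIu64 ".%05" PRIu64 "\n", wa_int / 100000, wa_frac);
	return 0;
}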
diff --git a/drivers/lightnvm/pblk-trace.h b/drivers/lightnvm/pblk-trace.h
new file mode 100644
index 000000000000..679e5c458ca6
--- /dev/null
+++ b/drivers/lightnvm/pblk-trace.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pblk
+
+#if !defined(_TRACE_PBLK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PBLK_H
+
+#include <linux/tracepoint.h>
+
+struct ppa_addr;
+
+#define show_chunk_flags(state) __print_flags(state, "", \
+ { NVM_CHK_ST_FREE, "FREE", }, \
+ { NVM_CHK_ST_CLOSED, "CLOSED", }, \
+ { NVM_CHK_ST_OPEN, "OPEN", }, \
+ { NVM_CHK_ST_OFFLINE, "OFFLINE", })
+
+#define show_line_state(state) __print_symbolic(state, \
+ { PBLK_LINESTATE_NEW, "NEW", }, \
+ { PBLK_LINESTATE_FREE, "FREE", }, \
+ { PBLK_LINESTATE_OPEN, "OPEN", }, \
+ { PBLK_LINESTATE_CLOSED, "CLOSED", }, \
+ { PBLK_LINESTATE_GC, "GC", }, \
+ { PBLK_LINESTATE_BAD, "BAD", }, \
+ { PBLK_LINESTATE_CORRUPT, "CORRUPT" })
+
+
+#define show_pblk_state(state) __print_symbolic(state, \
+ { PBLK_STATE_RUNNING, "RUNNING", }, \
+ { PBLK_STATE_STOPPING, "STOPPING", }, \
+ { PBLK_STATE_RECOVERING, "RECOVERING", }, \
+ { PBLK_STATE_STOPPED, "STOPPED" })
+
+#define show_chunk_erase_state(state) __print_symbolic(state, \
+ { PBLK_CHUNK_RESET_START, "START", }, \
+ { PBLK_CHUNK_RESET_DONE, "OK", }, \
+ { PBLK_CHUNK_RESET_FAILED, "FAILED" })
+
+
+TRACE_EVENT(pblk_chunk_reset,
+
+ TP_PROTO(const char *name, struct ppa_addr *ppa, int state),
+
+ TP_ARGS(name, ppa, state),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(u64, ppa)
+ __field(int, state);
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->ppa = ppa->ppa;
+ __entry->state = state;
+ ),
+
+ TP_printk("dev=%s grp=%llu pu=%llu chk=%llu state=%s", __get_str(name),
+ (u64)(((struct ppa_addr *)(&__entry->ppa))->m.grp),
+ (u64)(((struct ppa_addr *)(&__entry->ppa))->m.pu),
+ (u64)(((struct ppa_addr *)(&__entry->ppa))->m.chk),
+ show_chunk_erase_state((int)__entry->state))
+
+);
+
+TRACE_EVENT(pblk_chunk_state,
+
+ TP_PROTO(const char *name, struct ppa_addr *ppa, int state),
+
+ TP_ARGS(name, ppa, state),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(u64, ppa)
+ __field(int, state);
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->ppa = ppa->ppa;
+ __entry->state = state;
+ ),
+
+ TP_printk("dev=%s grp=%llu pu=%llu chk=%llu state=%s", __get_str(name),
+ (u64)(((struct ppa_addr *)(&__entry->ppa))->m.grp),
+ (u64)(((struct ppa_addr *)(&__entry->ppa))->m.pu),
+ (u64)(((struct ppa_addr *)(&__entry->ppa))->m.chk),
+ show_chunk_flags((int)__entry->state))
+
+);
+
+TRACE_EVENT(pblk_line_state,
+
+ TP_PROTO(const char *name, int line, int state),
+
+ TP_ARGS(name, line, state),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(int, line)
+ __field(int, state);
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->line = line;
+ __entry->state = state;
+ ),
+
+ TP_printk("dev=%s line=%d state=%s", __get_str(name),
+ (int)__entry->line,
+ show_line_state((int)__entry->state))
+
+);
+
+TRACE_EVENT(pblk_state,
+
+ TP_PROTO(const char *name, int state),
+
+ TP_ARGS(name, state),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ __field(int, state);
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ __entry->state = state;
+ ),
+
+ TP_printk("dev=%s state=%s", __get_str(name),
+ show_pblk_state((int)__entry->state))
+
+);
+
+#endif /* !defined(_TRACE_PBLK_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../../drivers/lightnvm
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE pblk-trace
+#include <trace/define_trace.h>
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index ee774a86cf1e..fa8726493b39 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2016 CNEX Labs
* Initial release: Javier Gonzalez <javier@cnexlabs.com>
@@ -16,6 +17,7 @@
*/
#include "pblk.h"
+#include "pblk-trace.h"
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
struct pblk_c_ctx *c_ctx)
@@ -81,8 +83,7 @@ static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
#ifdef CONFIG_NVM_PBLK_DEBUG
atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif
-
- pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);
+ pblk_up_rq(pblk, c_ctx->lun_bitmap);
pos = pblk_rb_sync_init(&pblk->rwb, &flags);
if (pos == c_ctx->sentry) {
@@ -106,14 +107,12 @@ retry:
/* Map remaining sectors in chunk, starting from ppa */
static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa)
{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
struct pblk_line *line;
struct ppa_addr map_ppa = *ppa;
u64 paddr;
int done = 0;
- line = &pblk->lines[pblk_ppa_to_line(*ppa)];
+ line = pblk_ppa_to_line(pblk, *ppa);
spin_lock(&line->lock);
while (!done) {
@@ -125,15 +124,7 @@ static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa)
if (!test_and_set_bit(paddr, line->invalid_bitmap))
le32_add_cpu(line->vsc, -1);
- if (geo->version == NVM_OCSSD_SPEC_12) {
- map_ppa.ppa++;
- if (map_ppa.g.pg == geo->num_pg)
- done = 1;
- } else {
- map_ppa.m.sec++;
- if (map_ppa.m.sec == geo->clba)
- done = 1;
- }
+ done = nvm_next_ppa_in_chk(pblk->dev, &map_ppa);
}
line->w_err_gc->has_write_err = 1;
@@ -149,12 +140,11 @@ static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
struct pblk_w_ctx *w_ctx;
struct ppa_addr ppa_l2p;
int flags;
- unsigned int pos, i;
+ unsigned int i;
spin_lock(&pblk->trans_lock);
- pos = sentry;
for (i = 0; i < nr_entries; i++) {
- entry = &rb->entries[pos];
+ entry = &rb->entries[pblk_rb_ptr_wrap(rb, sentry, i)];
w_ctx = &entry->w_ctx;
/* Check if the lba has been overwritten */
@@ -168,13 +158,11 @@ static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
/* Release flags on write context. Protect from writes */
smp_store_release(&w_ctx->flags, flags);
- /* Decrese the reference count to the line as we will
+ /* Decrease the reference count to the line as we will
* re-map these entries
*/
- line = &pblk->lines[pblk_ppa_to_line(w_ctx->ppa)];
+ line = pblk_ppa_to_line(pblk, w_ctx->ppa);
kref_put(&line->ref, pblk_line_put);
-
- pos = (pos + 1) & (rb->nr_entries - 1);
}
spin_unlock(&pblk->trans_lock);
}
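The open-coded wrap arithmetic removed above moves behind pblk_rb_ptr_wrap(), declared in the pblk.h changes later in this patch. Given the power-of-two mask it replaces, the helper plausibly reduces to the following sketch (the real definition lands in pblk-rb.c, which this excerpt does not show):

/* Sketch inferred from the removed "(pos + 1) & (rb->nr_entries - 1)":
 * advance position p by nr_entries, wrapping on the power-of-two
 * ring size. */
unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p,
			      unsigned int nr_entries)
{
	return (p + nr_entries) & (rb->nr_entries - 1);
}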
@@ -208,19 +196,14 @@ static void pblk_submit_rec(struct work_struct *work)
struct pblk *pblk = recovery->pblk;
struct nvm_rq *rqd = recovery->rqd;
struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
- struct ppa_addr *ppa_list;
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
pblk_log_write_err(pblk, rqd);
- if (rqd->nr_ppas == 1)
- ppa_list = &rqd->ppa_addr;
- else
- ppa_list = rqd->ppa_list;
-
pblk_map_remaining(pblk, ppa_list);
pblk_queue_resubmit(pblk, c_ctx);
- pblk_up_rq(pblk, rqd->ppa_list, rqd->nr_ppas, c_ctx->lun_bitmap);
+ pblk_up_rq(pblk, c_ctx->lun_bitmap);
if (c_ctx->nr_padded)
pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
c_ctx->nr_padded);
@@ -257,11 +240,13 @@ static void pblk_end_io_write(struct nvm_rq *rqd)
if (rqd->error) {
pblk_end_w_fail(pblk, rqd);
return;
- }
+ } else {
+ if (trace_pblk_chunk_state_enabled())
+ pblk_check_chunk_state_update(pblk, rqd);
#ifdef CONFIG_NVM_PBLK_DEBUG
- else
WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif
+ }
pblk_complete_write(pblk, rqd, c_ctx);
atomic_dec(&pblk->inflight_io);
@@ -273,14 +258,18 @@ static void pblk_end_io_write_meta(struct nvm_rq *rqd)
struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
struct pblk_line *line = m_ctx->private;
struct pblk_emeta *emeta = line->emeta;
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
int sync;
- pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
+ pblk_up_chunk(pblk, ppa_list[0]);
if (rqd->error) {
pblk_log_write_err(pblk, rqd);
pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id);
line->w_err_gc->has_write_err = 1;
+ } else {
+ if (trace_pblk_chunk_state_enabled())
+ pblk_check_chunk_state_update(pblk, rqd);
}
sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
@@ -294,27 +283,16 @@ static void pblk_end_io_write_meta(struct nvm_rq *rqd)
}
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
- unsigned int nr_secs,
- nvm_end_io_fn(*end_io))
+ unsigned int nr_secs, nvm_end_io_fn(*end_io))
{
- struct nvm_tgt_dev *dev = pblk->dev;
-
/* Setup write request */
rqd->opcode = NVM_OP_PWRITE;
rqd->nr_ppas = nr_secs;
- rqd->flags = pblk_set_progr_mode(pblk, PBLK_WRITE);
+ rqd->is_seq = 1;
rqd->private = pblk;
rqd->end_io = end_io;
- rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
- &rqd->dma_meta_list);
- if (!rqd->meta_list)
- return -ENOMEM;
-
- rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
- rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
-
- return 0;
+ return pblk_alloc_rqd_meta(pblk, rqd);
}
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
@@ -375,6 +353,7 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
struct pblk_emeta *emeta = meta_line->emeta;
+ struct ppa_addr *ppa_list;
struct pblk_g_ctx *m_ctx;
struct bio *bio;
struct nvm_rq *rqd;
@@ -409,22 +388,22 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
if (ret)
goto fail_free_bio;
+ ppa_list = nvm_rq_to_ppa_list(rqd);
for (i = 0; i < rqd->nr_ppas; ) {
spin_lock(&meta_line->lock);
paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
spin_unlock(&meta_line->lock);
for (j = 0; j < rq_ppas; j++, i++, paddr++)
- rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
+ ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
}
+ spin_lock(&l_mg->close_lock);
emeta->mem += rq_len;
- if (emeta->mem >= lm->emeta_len[0]) {
- spin_lock(&l_mg->close_lock);
+ if (emeta->mem >= lm->emeta_len[0])
list_del(&meta_line->list);
- spin_unlock(&l_mg->close_lock);
- }
+ spin_unlock(&l_mg->close_lock);
- pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas);
+ pblk_down_chunk(pblk, ppa_list[0]);
ret = pblk_submit_io(pblk, rqd);
if (ret) {
@@ -435,7 +414,7 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
return NVM_IO_OK;
fail_rollback:
- pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas);
+ pblk_up_chunk(pblk, ppa_list[0]);
spin_lock(&l_mg->close_lock);
pblk_dealloc_page(pblk, meta_line, rq_ppas);
list_add(&meta_line->list, &meta_line->list);
@@ -491,14 +470,15 @@ static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
struct pblk_line *meta_line;
spin_lock(&l_mg->close_lock);
-retry:
if (list_empty(&l_mg->emeta_list)) {
spin_unlock(&l_mg->close_lock);
return NULL;
}
meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
- if (meta_line->emeta->mem >= lm->emeta_len[0])
- goto retry;
+ if (meta_line->emeta->mem >= lm->emeta_len[0]) {
+ spin_unlock(&l_mg->close_lock);
+ return NULL;
+ }
spin_unlock(&l_mg->close_lock);
if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
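Two helpers carry most of this file's cleanups. nvm_rq_to_ppa_list() folds the recurring one-vs-many PPA branch (still visible as removed lines in pblk_submit_rec() above) into a single place, and pblk_alloc_rqd_meta() centralizes the DMA metadata allocation that pblk_alloc_w_rq() used to open-code. The sketches below are inferred from the removed code; the real definitions live in the lightnvm core and pblk-core.c, outside this excerpt:

/* From the removed branch in pblk_submit_rec(): a single-sector
 * request stores its address inline, larger ones use the DMA list. */
static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
{
	return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
}

/* From the removed body of pblk_alloc_w_rq(): one DMA buffer holds
 * the per-sector metadata, with the PPA list carved out behind it. */
static int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;

	rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL,
					   &rqd->dma_meta_list);
	if (!rqd->meta_list)
		return -ENOMEM;

	rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size;
	rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size;
	return 0;
}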
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 4760af7b6499..02bb2e98f8a9 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2015 IT University of Copenhagen (rrpc.h)
* Copyright (C) 2016 CNEX Labs
@@ -37,8 +38,6 @@
#define PBLK_SECTOR (512)
#define PBLK_EXPOSED_PAGE_SIZE (4096)
-#define PBLK_MAX_REQ_ADDRS (64)
-#define PBLK_MAX_REQ_ADDRS_PW (6)
#define PBLK_NR_CLOSE_JOBS (4)
@@ -81,6 +80,12 @@ enum {
PBLK_BLK_ST_CLOSED = 0x2,
};
+enum {
+ PBLK_CHUNK_RESET_START,
+ PBLK_CHUNK_RESET_DONE,
+ PBLK_CHUNK_RESET_FAILED,
+};
+
struct pblk_sec_meta {
u64 reserved;
__le64 lba;
@@ -99,8 +104,8 @@ enum {
PBLK_RL_LOW = 4
};
-#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * PBLK_MAX_REQ_ADDRS)
-#define pblk_dma_ppa_size (sizeof(u64) * PBLK_MAX_REQ_ADDRS)
+#define pblk_dma_meta_size (sizeof(struct pblk_sec_meta) * NVM_MAX_VLBA)
+#define pblk_dma_ppa_size (sizeof(u64) * NVM_MAX_VLBA)
/* write buffer completion context */
struct pblk_c_ctx {
@@ -198,6 +203,11 @@ struct pblk_rb {
* will be 4KB
*/
+ unsigned int back_thres; /* Threshold that shall be maintained by
+ * the backpointer in order to respect
+ * geo->mw_cunits on a per chunk basis
+ */
+
struct list_head pages; /* List of data pages */
spinlock_t w_lock; /* Write lock */
@@ -218,8 +228,8 @@ struct pblk_lun {
struct pblk_gc_rq {
struct pblk_line *line;
void *data;
- u64 paddr_list[PBLK_MAX_REQ_ADDRS];
- u64 lba_list[PBLK_MAX_REQ_ADDRS];
+ u64 paddr_list[NVM_MAX_VLBA];
+ u64 lba_list[NVM_MAX_VLBA];
int nr_secs;
int secs_to_gc;
struct list_head list;
@@ -532,6 +542,10 @@ struct pblk_line_mgmt {
struct pblk_emeta *eline_meta[PBLK_DATA_LINES];
unsigned long meta_bitmap;
+ /* Cache and mempool for map/invalid bitmaps */
+ struct kmem_cache *bitmap_cache;
+ mempool_t *bitmap_pool;
+
/* Helpers for fast bitmap calculations */
unsigned long *bb_template;
unsigned long *bb_aux;
@@ -725,10 +739,8 @@ struct pblk_line_ws {
/*
* pblk ring buffer operations
*/
-int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
- unsigned int power_size, unsigned int power_seg_sz);
-unsigned int pblk_rb_calculate_size(unsigned int nr_entries);
-void *pblk_rb_entries_ref(struct pblk_rb *rb);
+int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int threshold,
+ unsigned int seg_sz);
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
unsigned int nr_entries, unsigned int *pos);
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
@@ -751,8 +763,8 @@ unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries);
-struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
- struct ppa_addr *ppa);
+unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p,
+ unsigned int nr_entries);
void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags);
unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb);
@@ -762,7 +774,7 @@ unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos);
int pblk_rb_tear_down_check(struct pblk_rb *rb);
int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos);
-void pblk_rb_data_free(struct pblk_rb *rb);
+void pblk_rb_free(struct pblk_rb *rb);
ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);
/*
@@ -770,11 +782,13 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);
*/
struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type);
void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type);
+int pblk_alloc_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
+void pblk_free_rqd_meta(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_set_sec_per_write(struct pblk *pblk, int sec_per_write);
int pblk_setup_w_rec_rq(struct pblk *pblk, struct nvm_rq *rqd,
struct pblk_c_ctx *c_ctx);
void pblk_discard(struct pblk *pblk, struct bio *bio);
-struct nvm_chk_meta *pblk_chunk_get_info(struct pblk *pblk);
+struct nvm_chk_meta *pblk_get_chunk_meta(struct pblk *pblk);
struct nvm_chk_meta *pblk_chunk_get_off(struct pblk *pblk,
struct nvm_chk_meta *lp,
struct ppa_addr ppa);
@@ -782,13 +796,17 @@ void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd);
void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd);
+int pblk_submit_io_sync_sem(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line);
+void pblk_check_chunk_state_update(struct pblk *pblk, struct nvm_rq *rqd);
struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data,
unsigned int nr_secs, unsigned int len,
int alloc_type, gfp_t gfp_mask);
struct pblk_line *pblk_line_get(struct pblk *pblk);
struct pblk_line *pblk_line_get_first_data(struct pblk *pblk);
struct pblk_line *pblk_line_replace_data(struct pblk *pblk);
+void pblk_ppa_to_line_put(struct pblk *pblk, struct ppa_addr ppa);
+void pblk_rq_to_line_put(struct pblk *pblk, struct nvm_rq *rqd);
int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line);
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line);
struct pblk_line *pblk_line_get_data(struct pblk *pblk);
@@ -806,8 +824,8 @@ void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
void (*work)(struct work_struct *), gfp_t gfp_mask,
struct workqueue_struct *wq);
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
-int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line);
-int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
+int pblk_line_smeta_read(struct pblk *pblk, struct pblk_line *line);
+int pblk_line_emeta_read(struct pblk *pblk, struct pblk_line *line,
void *emeta_buf);
int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr erase_ppa);
void pblk_line_put(struct kref *ref);
@@ -819,12 +837,11 @@ u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs);
int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail,
unsigned long secs_to_flush);
-void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
-void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
+void pblk_down_rq(struct pblk *pblk, struct ppa_addr ppa,
unsigned long *lun_bitmap);
-void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas);
-void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas,
- unsigned long *lun_bitmap);
+void pblk_down_chunk(struct pblk *pblk, struct ppa_addr ppa);
+void pblk_up_chunk(struct pblk *pblk, struct ppa_addr ppa);
+void pblk_up_rq(struct pblk *pblk, unsigned long *lun_bitmap);
int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
int nr_pages);
void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
@@ -976,17 +993,15 @@ static inline int pblk_line_vsc(struct pblk_line *line)
return le32_to_cpu(*line->vsc);
}
-static inline int pblk_pad_distance(struct pblk *pblk)
+static inline int pblk_ppa_to_line_id(struct ppa_addr p)
{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
-
- return geo->mw_cunits * geo->all_luns * geo->ws_opt;
+ return p.a.blk;
}
-static inline int pblk_ppa_to_line(struct ppa_addr p)
+static inline struct pblk_line *pblk_ppa_to_line(struct pblk *pblk,
+ struct ppa_addr p)
{
- return p.a.blk;
+ return &pblk->lines[pblk_ppa_to_line_id(p)];
}
static inline int pblk_ppa_to_pos(struct nvm_geo *geo, struct ppa_addr p)
@@ -1034,6 +1049,25 @@ static inline struct ppa_addr addr_to_gen_ppa(struct pblk *pblk, u64 paddr,
return ppa;
}
+static inline struct nvm_chk_meta *pblk_dev_ppa_to_chunk(struct pblk *pblk,
+ struct ppa_addr p)
+{
+ struct nvm_tgt_dev *dev = pblk->dev;
+ struct nvm_geo *geo = &dev->geo;
+ struct pblk_line *line = pblk_ppa_to_line(pblk, p);
+ int pos = pblk_ppa_to_pos(geo, p);
+
+ return &line->chks[pos];
+}
+
+static inline u64 pblk_dev_ppa_to_chunk_addr(struct pblk *pblk,
+ struct ppa_addr p)
+{
+ struct nvm_tgt_dev *dev = pblk->dev;
+
+ return dev_to_chunk_addr(dev->parent, &pblk->addrf, p);
+}
+
static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
struct ppa_addr p)
{
@@ -1067,86 +1101,16 @@ static inline u64 pblk_dev_ppa_to_line_addr(struct pblk *pblk,
static inline struct ppa_addr pblk_ppa32_to_ppa64(struct pblk *pblk, u32 ppa32)
{
- struct ppa_addr ppa64;
-
- ppa64.ppa = 0;
-
- if (ppa32 == -1) {
- ppa64.ppa = ADDR_EMPTY;
- } else if (ppa32 & (1U << 31)) {
- ppa64.c.line = ppa32 & ((~0U) >> 1);
- ppa64.c.is_cached = 1;
- } else {
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
-
- if (geo->version == NVM_OCSSD_SPEC_12) {
- struct nvm_addrf_12 *ppaf =
- (struct nvm_addrf_12 *)&pblk->addrf;
-
- ppa64.g.ch = (ppa32 & ppaf->ch_mask) >>
- ppaf->ch_offset;
- ppa64.g.lun = (ppa32 & ppaf->lun_mask) >>
- ppaf->lun_offset;
- ppa64.g.blk = (ppa32 & ppaf->blk_mask) >>
- ppaf->blk_offset;
- ppa64.g.pg = (ppa32 & ppaf->pg_mask) >>
- ppaf->pg_offset;
- ppa64.g.pl = (ppa32 & ppaf->pln_mask) >>
- ppaf->pln_offset;
- ppa64.g.sec = (ppa32 & ppaf->sec_mask) >>
- ppaf->sec_offset;
- } else {
- struct nvm_addrf *lbaf = &pblk->addrf;
-
- ppa64.m.grp = (ppa32 & lbaf->ch_mask) >>
- lbaf->ch_offset;
- ppa64.m.pu = (ppa32 & lbaf->lun_mask) >>
- lbaf->lun_offset;
- ppa64.m.chk = (ppa32 & lbaf->chk_mask) >>
- lbaf->chk_offset;
- ppa64.m.sec = (ppa32 & lbaf->sec_mask) >>
- lbaf->sec_offset;
- }
- }
+ struct nvm_tgt_dev *dev = pblk->dev;
- return ppa64;
+ return nvm_ppa32_to_ppa64(dev->parent, &pblk->addrf, ppa32);
}
static inline u32 pblk_ppa64_to_ppa32(struct pblk *pblk, struct ppa_addr ppa64)
{
- u32 ppa32 = 0;
-
- if (ppa64.ppa == ADDR_EMPTY) {
- ppa32 = ~0U;
- } else if (ppa64.c.is_cached) {
- ppa32 |= ppa64.c.line;
- ppa32 |= 1U << 31;
- } else {
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
-
- if (geo->version == NVM_OCSSD_SPEC_12) {
- struct nvm_addrf_12 *ppaf =
- (struct nvm_addrf_12 *)&pblk->addrf;
-
- ppa32 |= ppa64.g.ch << ppaf->ch_offset;
- ppa32 |= ppa64.g.lun << ppaf->lun_offset;
- ppa32 |= ppa64.g.blk << ppaf->blk_offset;
- ppa32 |= ppa64.g.pg << ppaf->pg_offset;
- ppa32 |= ppa64.g.pl << ppaf->pln_offset;
- ppa32 |= ppa64.g.sec << ppaf->sec_offset;
- } else {
- struct nvm_addrf *lbaf = &pblk->addrf;
-
- ppa32 |= ppa64.m.grp << lbaf->ch_offset;
- ppa32 |= ppa64.m.pu << lbaf->lun_offset;
- ppa32 |= ppa64.m.chk << lbaf->chk_offset;
- ppa32 |= ppa64.m.sec << lbaf->sec_offset;
- }
- }
+ struct nvm_tgt_dev *dev = pblk->dev;
- return ppa32;
+ return nvm_ppa64_to_ppa32(dev->parent, &pblk->addrf, ppa64);
}
static inline struct ppa_addr pblk_trans_map_get(struct pblk *pblk,
@@ -1255,44 +1219,6 @@ static inline u32 pblk_calc_emeta_crc(struct pblk *pblk,
return crc;
}
-static inline int pblk_set_progr_mode(struct pblk *pblk, int type)
-{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
- int flags;
-
- if (geo->version == NVM_OCSSD_SPEC_20)
- return 0;
-
- flags = geo->pln_mode >> 1;
-
- if (type == PBLK_WRITE)
- flags |= NVM_IO_SCRAMBLE_ENABLE;
-
- return flags;
-}
-
-enum {
- PBLK_READ_RANDOM = 0,
- PBLK_READ_SEQUENTIAL = 1,
-};
-
-static inline int pblk_set_read_mode(struct pblk *pblk, int type)
-{
- struct nvm_tgt_dev *dev = pblk->dev;
- struct nvm_geo *geo = &dev->geo;
- int flags;
-
- if (geo->version == NVM_OCSSD_SPEC_20)
- return 0;
-
- flags = NVM_IO_SUSPEND | NVM_IO_SCRAMBLE_ENABLE;
- if (type == PBLK_READ_SEQUENTIAL)
- flags |= geo->pln_mode >> 1;
-
- return flags;
-}
-
static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs)
{
return !(nr_secs % pblk->min_write_pgs);
@@ -1375,9 +1301,7 @@ static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev,
static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
{
struct nvm_tgt_dev *dev = pblk->dev;
- struct ppa_addr *ppa_list;
-
- ppa_list = (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
+ struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
if (pblk_boundary_ppa_checks(dev, ppa_list, rqd->nr_ppas)) {
WARN_ON(1);
@@ -1386,12 +1310,10 @@ static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd)
if (rqd->opcode == NVM_OP_PWRITE) {
struct pblk_line *line;
- struct ppa_addr ppa;
int i;
for (i = 0; i < rqd->nr_ppas; i++) {
- ppa = ppa_list[i];
- line = &pblk->lines[pblk_ppa_to_line(ppa)];
+ line = pblk_ppa_to_line(pblk, ppa_list[i]);
spin_lock(&line->lock);
if (line->state != PBLK_LINESTATE_OPEN) {
@@ -1441,4 +1363,11 @@ static inline void pblk_setup_uuid(struct pblk *pblk)
uuid_le_gen(&uuid);
memcpy(pblk->instance_uuid, uuid.b, 16);
}
+
+static inline char *pblk_disk_name(struct pblk *pblk)
+{
+ struct gendisk *disk = pblk->disk;
+
+ return disk->disk_name;
+}
#endif /* PBLK_H_ */
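One detail worth keeping in mind when reading the ppa32/ppa64 hunk above: the 32-bit translation-map entries overload bit 31 as a cache flag, with ~0U reserved for the empty address, exactly as the removed open-coded converters show; that logic now lives in the lightnvm core as nvm_ppa32_to_ppa64()/nvm_ppa64_to_ppa32(). A reduced sketch of the encoding:

/* Reduced sketch of the retired 32-bit entry encoding:
 *   ~0U          -> ADDR_EMPTY (unmapped)
 *   bit 31 set   -> cache entry, low 31 bits are the buffer line
 *   bit 31 clear -> device address packed via the address format */
static inline u32 ppa32_encode_cached(u32 cacheline)
{
	return cacheline | (1U << 31);
}

static inline int ppa32_is_cached(u32 ppa32)
{
	return ppa32 != ~0U && (ppa32 & (1U << 31));
}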
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 7a28232d868b..5002838ea476 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -484,7 +484,7 @@ int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
int i;
lockdep_assert_held(&c->bucket_lock);
- BUG_ON(!n || n > c->caches_loaded || n > 8);
+ BUG_ON(!n || n > c->caches_loaded || n > MAX_CACHES_PER_SET);
bkey_init(k);
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 83504dd8100a..b61b83bbcfff 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -965,6 +965,7 @@ void bch_prio_write(struct cache *ca);
void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
extern struct workqueue_struct *bcache_wq;
+extern struct workqueue_struct *bch_journal_wq;
extern struct mutex bch_register_lock;
extern struct list_head bch_cache_sets;
@@ -1003,7 +1004,7 @@ void bch_open_buckets_free(struct cache_set *c);
int bch_cache_allocator_start(struct cache *ca);
void bch_debug_exit(void);
-void bch_debug_init(struct kobject *kobj);
+void bch_debug_init(void);
void bch_request_exit(void);
int bch_request_init(void);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index e7d4817681f2..3f4211b5cd33 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -2434,7 +2434,7 @@ static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
struct keybuf *buf = refill->buf;
int ret = MAP_CONTINUE;
- if (bkey_cmp(k, refill->end) >= 0) {
+ if (bkey_cmp(k, refill->end) > 0) {
ret = MAP_DONE;
goto out;
}
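The btree.c change is a one-character semantics fix: bkey_cmp() follows memcmp() conventions, and with ">= 0" a key exactly equal to refill->end ended the scan before that key was added to the keybuf, silently excluding the end of the range. A toy model of the two rules:

#include <stdio.h>
#include <string.h>

/* Toy stand-in: strcmp() models bkey_cmp(), which also returns
 * <0 / 0 / >0. The old ">= 0" test dropped a key equal to the range
 * end; the new "> 0" test keeps it, making the range end inclusive. */
int main(void)
{
	const char *key = "k5", *end = "k5";	/* key == range end */
	int cmp = strcmp(key, end);

	printf("old (cmp >= 0): %s\n", cmp >= 0 ? "stop, key dropped" : "keep");
	printf("new (cmp >  0): %s\n", cmp > 0 ? "stop" : "keep key == end");
	return 0;
}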
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index eca0d496b686..c88cdc4ae4ec 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -345,7 +345,8 @@ do { \
} while (0)
/**
- * closure_return - finish execution of a closure, with destructor
+ * closure_return_with_destructor - finish execution of a closure,
+ * with destructor
*
* Works like closure_return(), except @destructor will be called when all
* outstanding refs on @cl have been dropped; @destructor may be used to safely
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 06da66b2488a..8f448b9c96a1 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -253,7 +253,7 @@ void bch_debug_exit(void)
debugfs_remove_recursive(bcache_debug);
}
-void __init bch_debug_init(struct kobject *kobj)
+void __init bch_debug_init(void)
{
/*
* it is unnecessary to check return value of
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index c809724e6571..956004366699 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -553,7 +553,7 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
for (i = 0; i < KEY_PTRS(k); i++) {
stale = ptr_stale(b->c, k, i);
- btree_bug_on(stale > 96, b,
+ btree_bug_on(stale > BUCKET_GC_GEN_MAX, b,
"key too stale: %i, need_gc %u",
stale, b->c->need_gc);
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 6116bbf870d8..522c7426f3a0 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -485,7 +485,7 @@ static void do_journal_discard(struct cache *ca)
closure_get(&ca->set->cl);
INIT_WORK(&ja->discard_work, journal_discard_work);
- schedule_work(&ja->discard_work);
+ queue_work(bch_journal_wq, &ja->discard_work);
}
}
@@ -592,7 +592,7 @@ static void journal_write_done(struct closure *cl)
: &j->w[0];
__closure_wake_up(&w->wait);
- continue_at_nobarrier(cl, journal_write, system_wq);
+ continue_at_nobarrier(cl, journal_write, bch_journal_wq);
}
static void journal_write_unlock(struct closure *cl)
@@ -627,7 +627,7 @@ static void journal_write_unlocked(struct closure *cl)
spin_unlock(&c->journal.lock);
btree_flush_write(c);
- continue_at(cl, journal_write, system_wq);
+ continue_at(cl, journal_write, bch_journal_wq);
return;
}
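Both journal completions and journal discards move off system_wq onto a dedicated queue, presumably so journal progress cannot stall behind unrelated work items; bcache_init() below creates it with WQ_MEM_RECLAIM, which guarantees a rescuer thread when new workers cannot be spawned under memory pressure. The pattern, reduced to a sketch:

#include <linux/workqueue.h>

static struct workqueue_struct *example_journal_wq;

/* Sketch of the pattern adopted here: a dedicated WQ_MEM_RECLAIM
 * queue whose rescuer keeps journal work moving under reclaim. */
static int example_setup(struct work_struct *discard_work)
{
	example_journal_wq = alloc_workqueue("example_journal",
					     WQ_MEM_RECLAIM, 0);
	if (!example_journal_wq)
		return -ENOMEM;

	queue_work(example_journal_wq, discard_work);	/* not system_wq */
	return 0;
}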
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 51be355a3309..3bf35914bb57 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -395,7 +395,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
* unless the read-ahead request is for metadata (eg, for gfs2).
*/
if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
- !(bio->bi_opf & REQ_META))
+ !(bio->bi_opf & REQ_PRIO))
goto skip;
if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
@@ -850,7 +850,7 @@ static void cached_dev_read_done_bh(struct closure *cl)
bch_mark_cache_accounting(s->iop.c, s->d,
!s->cache_missed, s->iop.bypass);
- trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
+ trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);
if (s->iop.status)
continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
@@ -877,7 +877,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
}
if (!(bio->bi_opf & REQ_RAHEAD) &&
- !(bio->bi_opf & REQ_META) &&
+ !(bio->bi_opf & REQ_PRIO) &&
s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
reada = min_t(sector_t, dc->readahead >> 9,
get_capacity(bio->bi_disk) - bio_end_sector(bio));
@@ -1218,6 +1218,9 @@ static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
{
struct cached_dev *dc = container_of(d, struct cached_dev, disk);
+ if (dc->io_disable)
+ return -EIO;
+
return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}
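The REQ_META to REQ_PRIO switch reflects the split meaning of the two flags: REQ_META only annotates a bio as metadata for accounting and tracing, while REQ_PRIO asks the stack for priority treatment. After this change bcache keys its bypass and readahead decisions off REQ_PRIO, so a submitter that wants its metadata reads cached has to tag them accordingly -- a sketch:

#include <linux/blk_types.h>

/* Sketch: tag a metadata bio so caches that key off REQ_PRIO
 * (as bcache now does) will not bypass it. */
static void tag_metadata_bio(struct bio *bio)
{
	bio->bi_opf |= REQ_META;	/* annotation for accounting */
	bio->bi_opf |= REQ_PRIO;	/* request priority treatment */
}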
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index aa055cfeb099..721bf336ed1a 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -39,6 +39,6 @@ void bch_data_insert(struct closure *cl);
void bch_cached_dev_request_init(struct cached_dev *dc);
void bch_flash_dev_request_init(struct bcache_device *d);
-extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache;
+extern struct kmem_cache *bch_search_cache;
#endif /* _BCACHE_REQUEST_H_ */
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 94c756c66bd7..7bbd670a5a84 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -47,6 +47,7 @@ static int bcache_major;
static DEFINE_IDA(bcache_device_idx);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;
+struct workqueue_struct *bch_journal_wq;
#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
/* limitation of partitions number on single bcache device */
@@ -417,6 +418,7 @@ static int __uuid_write(struct cache_set *c)
{
BKEY_PADDED(key) k;
struct closure cl;
+ struct cache *ca;
closure_init_stack(&cl);
lockdep_assert_held(&bch_register_lock);
@@ -428,6 +430,10 @@ static int __uuid_write(struct cache_set *c)
uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
closure_sync(&cl);
+ /* Only one bucket used for uuid write */
+ ca = PTR_CACHE(c, &k.key, 0);
+ atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);
+
bkey_copy(&c->uuid_bucket, &k.key);
bkey_put(c, &k.key);
return 0;
@@ -642,10 +648,6 @@ static int ioctl_dev(struct block_device *b, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct bcache_device *d = b->bd_disk->private_data;
- struct cached_dev *dc = container_of(d, struct cached_dev, disk);
-
- if (dc->io_disable)
- return -EIO;
return d->ioctl(d, mode, cmd, arg);
}
@@ -1007,6 +1009,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
bch_write_bdev_super(dc, &cl);
closure_sync(&cl);
+ calc_cached_dev_sectors(dc->disk.c);
bcache_device_detach(&dc->disk);
list_move(&dc->list, &uncached_devices);
@@ -1151,11 +1154,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
}
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
- bch_sectors_dirty_init(&dc->disk);
atomic_set(&dc->has_dirty, 1);
bch_writeback_queue(dc);
}
+ bch_sectors_dirty_init(&dc->disk);
+
bch_cached_dev_run(dc);
bcache_device_link(&dc->disk, c, "bdev");
atomic_inc(&c->attached_dev_nr);
@@ -2048,6 +2052,8 @@ static int cache_alloc(struct cache *ca)
size_t free;
size_t btree_buckets;
struct bucket *b;
+ int ret = -ENOMEM;
+ const char *err = NULL;
__module_get(THIS_MODULE);
kobject_init(&ca->kobj, &bch_cache_ktype);
@@ -2065,27 +2071,93 @@ static int cache_alloc(struct cache *ca)
*/
btree_buckets = ca->sb.njournal_buckets ?: 8;
free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
+ if (!free) {
+ ret = -EPERM;
+ err = "ca->sb.nbuckets is too small";
+ goto err_free;
+ }
- if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, GFP_KERNEL) ||
- !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
- !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
- !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
- !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
- !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
- !(ca->buckets = vzalloc(array_size(sizeof(struct bucket),
- ca->sb.nbuckets))) ||
- !(ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t),
- prio_buckets(ca), 2),
- GFP_KERNEL)) ||
- !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)))
- return -ENOMEM;
+ if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets,
+ GFP_KERNEL)) {
+ err = "ca->free[RESERVE_BTREE] alloc failed";
+ goto err_btree_alloc;
+ }
+
+ if (!init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca),
+ GFP_KERNEL)) {
+ err = "ca->free[RESERVE_PRIO] alloc failed";
+ goto err_prio_alloc;
+ }
+
+ if (!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL)) {
+ err = "ca->free[RESERVE_MOVINGGC] alloc failed";
+ goto err_movinggc_alloc;
+ }
+
+ if (!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL)) {
+ err = "ca->free[RESERVE_NONE] alloc failed";
+ goto err_none_alloc;
+ }
+
+ if (!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL)) {
+ err = "ca->free_inc alloc failed";
+ goto err_free_inc_alloc;
+ }
+
+ if (!init_heap(&ca->heap, free << 3, GFP_KERNEL)) {
+ err = "ca->heap alloc failed";
+ goto err_heap_alloc;
+ }
+
+ ca->buckets = vzalloc(array_size(sizeof(struct bucket),
+ ca->sb.nbuckets));
+ if (!ca->buckets) {
+ err = "ca->buckets alloc failed";
+ goto err_buckets_alloc;
+ }
+
+ ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t),
+ prio_buckets(ca), 2),
+ GFP_KERNEL);
+ if (!ca->prio_buckets) {
+ err = "ca->prio_buckets alloc failed";
+ goto err_prio_buckets_alloc;
+ }
+
+ ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca);
+ if (!ca->disk_buckets) {
+ err = "ca->disk_buckets alloc failed";
+ goto err_disk_buckets_alloc;
+ }
ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
for_each_bucket(b, ca)
atomic_set(&b->pin, 0);
-
return 0;
+
+err_disk_buckets_alloc:
+ kfree(ca->prio_buckets);
+err_prio_buckets_alloc:
+ vfree(ca->buckets);
+err_buckets_alloc:
+ free_heap(&ca->heap);
+err_heap_alloc:
+ free_fifo(&ca->free_inc);
+err_free_inc_alloc:
+ free_fifo(&ca->free[RESERVE_NONE]);
+err_none_alloc:
+ free_fifo(&ca->free[RESERVE_MOVINGGC]);
+err_movinggc_alloc:
+ free_fifo(&ca->free[RESERVE_PRIO]);
+err_prio_alloc:
+ free_fifo(&ca->free[RESERVE_BTREE]);
+err_btree_alloc:
+err_free:
+ module_put(THIS_MODULE);
+ if (err)
+ pr_notice("error %s: %s", ca->cache_dev_name, err);
+ return ret;
}
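The rewritten cache_alloc() replaces one giant short-circuited condition with the kernel's standard unwind ladder: each allocation gets a label, and a failure at step N jumps to the label that releases steps N-1..1 in reverse order, which also makes it possible to report exactly which allocation failed. The shape, reduced to two resources:

#include <linux/slab.h>

/* Sketch of the unwind-ladder shape used above: reverse-order
 * cleanup, one label per resource, nothing freed twice or leaked. */
static int alloc_two(void **a, void **b)
{
	*a = kmalloc(64, GFP_KERNEL);
	if (!*a)
		goto err_a;

	*b = kmalloc(64, GFP_KERNEL);
	if (!*b)
		goto err_b;

	return 0;

err_b:
	kfree(*a);
err_a:
	return -ENOMEM;
}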
static int register_cache(struct cache_sb *sb, struct page *sb_page,
@@ -2111,6 +2183,8 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
if (ret == -ENOMEM)
err = "cache_alloc(): -ENOMEM";
+ else if (ret == -EPERM)
+ err = "cache_alloc(): cache device is too small";
else
err = "cache_alloc(): unknown error";
goto err;
@@ -2341,6 +2415,9 @@ static void bcache_exit(void)
kobject_put(bcache_kobj);
if (bcache_wq)
destroy_workqueue(bcache_wq);
+ if (bch_journal_wq)
+ destroy_workqueue(bch_journal_wq);
+
if (bcache_major)
unregister_blkdev(bcache_major, "bcache");
unregister_reboot_notifier(&reboot);
@@ -2370,6 +2447,10 @@ static int __init bcache_init(void)
if (!bcache_wq)
goto err;
+ bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
+ if (!bch_journal_wq)
+ goto err;
+
bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
if (!bcache_kobj)
goto err;
@@ -2378,7 +2459,7 @@ static int __init bcache_init(void)
sysfs_create_files(bcache_kobj, files))
goto err;
- bch_debug_init(bcache_kobj);
+ bch_debug_init();
closure_debug_init();
return 0;
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 150cf4f4cf74..26f035a0c5b9 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -285,6 +285,7 @@ STORE(__cached_dev)
1, WRITEBACK_RATE_UPDATE_SECS_MAX);
d_strtoul(writeback_rate_i_term_inverse);
d_strtoul_nonzero(writeback_rate_p_term_inverse);
+ d_strtoul_nonzero(writeback_rate_minimum);
sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);
@@ -412,6 +413,7 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_writeback_rate_update_seconds,
&sysfs_writeback_rate_i_term_inverse,
&sysfs_writeback_rate_p_term_inverse,
+ &sysfs_writeback_rate_minimum,
&sysfs_writeback_rate_debug,
&sysfs_errors,
&sysfs_io_error_limit,
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 69dddeab124c..5936de71883f 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1455,8 +1455,8 @@ static int __load_mappings(struct dm_cache_metadata *cmd,
if (hints_valid) {
r = dm_array_cursor_next(&cmd->hint_cursor);
if (r) {
- DMERR("dm_array_cursor_next for hint failed");
- goto out;
+ dm_array_cursor_end(&cmd->hint_cursor);
+ hints_valid = false;
}
}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index a53413371725..b29a8327eed1 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3009,8 +3009,13 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
static bool can_resize(struct cache *cache, dm_cblock_t new_size)
{
- if (from_cblock(new_size) > from_cblock(cache->cache_size))
- return true;
+ if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
+ if (cache->sized) {
+ DMERR("%s: unable to extend cache due to missing cache table reload",
+ cache_device_name(cache));
+ return false;
+ }
+ }
/*
* We can't drop a dirty block when shrinking the cache.
@@ -3479,14 +3484,13 @@ static int __init dm_cache_init(void)
int r;
migration_cache = KMEM_CACHE(dm_cache_migration, 0);
- if (!migration_cache) {
- dm_unregister_target(&cache_target);
+ if (!migration_cache)
return -ENOMEM;
- }
r = dm_register_target(&cache_target);
if (r) {
DMERR("cache target registration failed: %d", r);
+ kmem_cache_destroy(migration_cache);
return r;
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f266c81f396f..0481223b1deb 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -332,7 +332,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
int err;
desc->tfm = essiv->hash_tfm;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc->flags = 0;
err = crypto_shash_digest(desc, cc->key, cc->key_size, essiv->salt);
shash_desc_zero(desc);
@@ -606,7 +606,7 @@ static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
int i, r;
desc->tfm = lmk->hash_tfm;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc->flags = 0;
r = crypto_shash_init(desc);
if (r)
@@ -768,7 +768,7 @@ static int crypt_iv_tcw_whitening(struct crypt_config *cc,
/* calculate crc32 for every 32bit part and xor it */
desc->tfm = tcw->crc32_tfm;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc->flags = 0;
for (i = 0; i < 4; i++) {
r = crypto_shash_init(desc);
if (r)
@@ -1251,7 +1251,7 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc,
* requests if driver request queue is full.
*/
skcipher_request_set_callback(ctx->r.req,
- CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
kcryptd_async_done, dmreq_of_req(cc, ctx->r.req));
}
@@ -1268,7 +1268,7 @@ static void crypt_alloc_req_aead(struct crypt_config *cc,
* requests if driver request queue is full.
*/
aead_request_set_callback(ctx->r.req_aead,
- CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead));
}
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 21d126a5078c..32aabe27b37c 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -467,7 +467,9 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
static struct target_type flakey_target = {
.name = "flakey",
.version = {1, 5, 0},
+#ifdef CONFIG_BLK_DEV_ZONED
.features = DM_TARGET_ZONED_HM,
+#endif
.module = THIS_MODULE,
.ctr = flakey_ctr,
.dtr = flakey_dtr,
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 378878599466..e1fa6baf4e8e 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -532,7 +532,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
unsigned j, size;
desc->tfm = ic->journal_mac;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+ desc->flags = 0;
r = crypto_shash_init(desc);
if (unlikely(r)) {
@@ -676,7 +676,7 @@ static void complete_journal_encrypt(struct crypto_async_request *req, int err)
static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
int r;
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
complete_journal_encrypt, comp);
if (likely(encrypt))
r = crypto_skcipher_encrypt(req);
@@ -3462,7 +3462,8 @@ try_smaller_buffer:
r = -ENOMEM;
goto bad;
}
- ic->recalc_tags = kvmalloc((RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size, GFP_KERNEL);
+ ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
+ ic->tag_size, GFP_KERNEL);
if (!ic->recalc_tags) {
ti->error = "Cannot allocate tags for recalculating";
r = -ENOMEM;
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index d10964d41fd7..2f7c44a006c4 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -102,6 +102,7 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
+#ifdef CONFIG_BLK_DEV_ZONED
static int linear_end_io(struct dm_target *ti, struct bio *bio,
blk_status_t *error)
{
@@ -112,6 +113,7 @@ static int linear_end_io(struct dm_target *ti, struct bio *bio,
return DM_ENDIO_DONE;
}
+#endif
static void linear_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
@@ -208,12 +210,16 @@ static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
static struct target_type linear_target = {
.name = "linear",
.version = {1, 4, 0},
+#ifdef CONFIG_BLK_DEV_ZONED
+ .end_io = linear_end_io,
.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
+#else
+ .features = DM_TARGET_PASSES_INTEGRITY,
+#endif
.module = THIS_MODULE,
.ctr = linear_ctr,
.dtr = linear_dtr,
.map = linear_map,
- .end_io = linear_end_io,
.status = linear_status,
.prepare_ioctl = linear_prepare_ioctl,
.iterate_devices = linear_iterate_devices,
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d94ba6f72ff5..419362c2d8ac 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -806,19 +806,19 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
}
static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
- const char *attached_handler_name, char **error)
+ const char **attached_handler_name, char **error)
{
struct request_queue *q = bdev_get_queue(bdev);
int r;
if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
retain:
- if (attached_handler_name) {
+ if (*attached_handler_name) {
/*
* Clear any hw_handler_params associated with a
* handler that isn't already attached.
*/
- if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
+ if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
kfree(m->hw_handler_params);
m->hw_handler_params = NULL;
}
@@ -830,7 +830,8 @@ retain:
* handler instead of the original table passed in.
*/
kfree(m->hw_handler_name);
- m->hw_handler_name = attached_handler_name;
+ m->hw_handler_name = *attached_handler_name;
+ *attached_handler_name = NULL;
}
}
@@ -867,7 +868,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
struct pgpath *p;
struct multipath *m = ti->private;
struct request_queue *q;
- const char *attached_handler_name;
+ const char *attached_handler_name = NULL;
/* we need at least a path arg */
if (as->argc < 1) {
@@ -890,7 +891,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
if (attached_handler_name || m->hw_handler_name) {
INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
- r = setup_scsi_dh(p->path.dev->bdev, m, attached_handler_name, &ti->error);
+ r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
if (r) {
dm_put_device(ti, p->path.dev);
goto bad;
@@ -905,6 +906,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
return p;
bad:
+ kfree(attached_handler_name);
free_pgpath(p);
return ERR_PTR(r);
}
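The mpath fix turns attached_handler_name into a pointer-to-pointer so setup_scsi_dh() can take ownership of the string exactly once: on success it steals the buffer and NULLs the caller's copy, which makes the single kfree() on the bad: path safe whether or not ownership was transferred. The pattern in isolation (hypothetical names):

#include <linux/slab.h>

struct handler {
	char *name;
};

/* Ownership transfer: consume *namep at most once and signal it by
 * NULLing the caller's pointer, so the caller's unconditional
 * kfree(*namep) on its error path is always safe. */
static void take_handler_name(struct handler *h, char **namep)
{
	kfree(h->name);		/* drop any previously owned copy */
	h->name = *namep;	/* steal the allocation */
	*namep = NULL;		/* caller no longer owns it */
}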
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index cae689de75fd..c44925e4e481 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2010-2011 Neil Brown
- * Copyright (C) 2010-2017 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2010-2018 Red Hat, Inc. All rights reserved.
*
* This file is released under the GPL.
*/
@@ -29,9 +29,6 @@
*/
#define MIN_RAID456_JOURNAL_SPACE (4*2048)
-/* Global list of all raid sets */
-static LIST_HEAD(raid_sets);
-
static bool devices_handle_discard_safely = false;
/*
@@ -227,7 +224,6 @@ struct rs_layout {
struct raid_set {
struct dm_target *ti;
- struct list_head list;
uint32_t stripe_cache_entries;
unsigned long ctr_flags;
@@ -273,19 +269,6 @@ static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
mddev->new_chunk_sectors = l->new_chunk_sectors;
}
-/* Find any raid_set in active slot for @rs on global list */
-static struct raid_set *rs_find_active(struct raid_set *rs)
-{
- struct raid_set *r;
- struct mapped_device *md = dm_table_get_md(rs->ti->table);
-
- list_for_each_entry(r, &raid_sets, list)
- if (r != rs && dm_table_get_md(r->ti->table) == md)
- return r;
-
- return NULL;
-}
-
/* raid10 algorithms (i.e. formats) */
#define ALGORITHM_RAID10_DEFAULT 0
#define ALGORITHM_RAID10_NEAR 1
@@ -764,7 +747,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
mddev_init(&rs->md);
- INIT_LIST_HEAD(&rs->list);
rs->raid_disks = raid_devs;
rs->delta_disks = 0;
@@ -782,9 +764,6 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
for (i = 0; i < raid_devs; i++)
md_rdev_init(&rs->dev[i].rdev);
- /* Add @rs to global list. */
- list_add(&rs->list, &raid_sets);
-
/*
* Remaining items to be initialized by further RAID params:
* rs->md.persistent
@@ -797,7 +776,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
return rs;
}
-/* Free all @rs allocations and remove it from global list. */
+/* Free all @rs allocations */
static void raid_set_free(struct raid_set *rs)
{
int i;
@@ -815,8 +794,6 @@ static void raid_set_free(struct raid_set *rs)
dm_put_device(rs->ti, rs->dev[i].data_dev);
}
- list_del(&rs->list);
-
kfree(rs);
}
@@ -2649,7 +2626,7 @@ static int rs_adjust_data_offsets(struct raid_set *rs)
return 0;
}
- /* HM FIXME: get InSync raid_dev? */
+ /* HM FIXME: get In_Sync raid_dev? */
rdev = &rs->dev[0].rdev;
if (rs->delta_disks < 0) {
@@ -3149,6 +3126,11 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
rs_set_new(rs);
} else if (rs_is_recovering(rs)) {
+ /* Rebuild particular devices */
+ if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
+ set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+ rs_setup_recovery(rs, MaxSector);
+ }
/* A recovering raid set may be resized */
; /* skip setup rs */
} else if (rs_is_reshaping(rs)) {
@@ -3242,6 +3224,8 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/* Start raid set read-only and assumed clean to change in raid_resume() */
rs->md.ro = 1;
rs->md.in_sync = 1;
+
+ /* Keep array frozen */
set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
/* Has to be held on running the array */
@@ -3265,7 +3249,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
rs->callbacks.congested_fn = raid_is_congested;
dm_table_add_target_callbacks(ti->table, &rs->callbacks);
- /* If raid4/5/6 journal mode explictely requested (only possible with journal dev) -> set it */
+ /* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
if (r) {
@@ -3350,32 +3334,53 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
-/* Return string describing the current sync action of @mddev */
-static const char *decipher_sync_action(struct mddev *mddev, unsigned long recovery)
+/* Return sync state string for @state */
+enum sync_state { st_frozen, st_reshape, st_resync, st_check, st_repair, st_recover, st_idle };
+static const char *sync_str(enum sync_state state)
+{
+ /* Has to be in above sync_state order! */
+ static const char *sync_strs[] = {
+ "frozen",
+ "reshape",
+ "resync",
+ "check",
+ "repair",
+ "recover",
+ "idle"
+ };
+
+ return __within_range(state, 0, ARRAY_SIZE(sync_strs) - 1) ? sync_strs[state] : "undef";
+};
+
+/* Return enum sync_state for @mddev derived from @recovery flags */
+static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
{
if (test_bit(MD_RECOVERY_FROZEN, &recovery))
- return "frozen";
+ return st_frozen;
- /* The MD sync thread can be done with io but still be running */
+ /* The MD sync thread can be done with io or be interrupted but still be running */
if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
(test_bit(MD_RECOVERY_RUNNING, &recovery) ||
(!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
- return "reshape";
+ return st_reshape;
if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
- return "resync";
- else if (test_bit(MD_RECOVERY_CHECK, &recovery))
- return "check";
- return "repair";
+ return st_resync;
+ if (test_bit(MD_RECOVERY_CHECK, &recovery))
+ return st_check;
+ return st_repair;
}
if (test_bit(MD_RECOVERY_RECOVER, &recovery))
- return "recover";
+ return st_recover;
+
+ if (mddev->reshape_position != MaxSector)
+ return st_reshape;
}
- return "idle";
+ return st_idle;
}
/*
@@ -3409,6 +3414,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
sector_t resync_max_sectors)
{
sector_t r;
+ enum sync_state state;
struct mddev *mddev = &rs->md;
clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
@@ -3419,20 +3425,14 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
} else {
- if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags) &&
- !test_bit(MD_RECOVERY_INTR, &recovery) &&
- (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
- test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
- test_bit(MD_RECOVERY_RUNNING, &recovery)))
- r = mddev->curr_resync_completed;
- else
+ state = decipher_sync_action(mddev, recovery);
+
+ if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery))
r = mddev->recovery_cp;
+ else
+ r = mddev->curr_resync_completed;
- if (r >= resync_max_sectors &&
- (!test_bit(MD_RECOVERY_REQUESTED, &recovery) ||
- (!test_bit(MD_RECOVERY_FROZEN, &recovery) &&
- !test_bit(MD_RECOVERY_NEEDED, &recovery) &&
- !test_bit(MD_RECOVERY_RUNNING, &recovery)))) {
+ if (state == st_idle && r >= resync_max_sectors) {
/*
* Sync complete.
*/
@@ -3440,24 +3440,20 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
if (test_bit(MD_RECOVERY_RECOVER, &recovery))
set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
- } else if (test_bit(MD_RECOVERY_RECOVER, &recovery)) {
+ } else if (state == st_recover)
/*
* In case we are recovering, the array is not in sync
* and health chars should show the recovering legs.
*/
;
-
- } else if (test_bit(MD_RECOVERY_SYNC, &recovery) &&
- !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+ else if (state == st_resync)
/*
* If "resync" is occurring, the raid set
* is or may be out of sync hence the health
* characters shall be 'a'.
*/
set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
-
- } else if (test_bit(MD_RECOVERY_RESHAPE, &recovery) &&
- !test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+ else if (state == st_reshape)
/*
* If "reshape" is occurring, the raid set
* is or may be out of sync hence the health
@@ -3465,7 +3461,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
*/
set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
- } else if (test_bit(MD_RECOVERY_REQUESTED, &recovery)) {
+ else if (state == st_check || state == st_repair)
/*
* If "check" or "repair" is occurring, the raid set has
* undergone an initial sync and the health characters
@@ -3473,12 +3469,12 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
*/
set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
- } else {
+ else {
struct md_rdev *rdev;
/*
* We are idle and recovery is needed, prevent 'A' chars race
- * caused by components still set to in-sync by constrcuctor.
+ * caused by components still set to in-sync by constructor.
*/
if (test_bit(MD_RECOVERY_NEEDED, &recovery))
set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags);
@@ -3542,7 +3538,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
progress = rs_get_progress(rs, recovery, resync_max_sectors);
resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
atomic64_read(&mddev->resync_mismatches) : 0;
- sync_action = decipher_sync_action(&rs->md, recovery);
+ sync_action = sync_str(decipher_sync_action(&rs->md, recovery));
/* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
for (i = 0; i < rs->raid_disks; i++)
@@ -3892,14 +3888,13 @@ static int rs_start_reshape(struct raid_set *rs)
struct mddev *mddev = &rs->md;
struct md_personality *pers = mddev->pers;
+ /* Don't allow the sync thread to work until the table gets reloaded. */
+ set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
+
r = rs_setup_reshape(rs);
if (r)
return r;
- /* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */
- if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
- mddev_resume(mddev);
-
/*
* Check any reshape constraints enforced by the personality
*
@@ -3923,10 +3918,6 @@ static int rs_start_reshape(struct raid_set *rs)
}
}
- /* Suspend because a resume will happen in raid_resume() */
- set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags);
- mddev_suspend(mddev);
-
/*
* Now reshape got set up, update superblocks to
* reflect the fact so that a table reload will
@@ -3947,29 +3938,6 @@ static int raid_preresume(struct dm_target *ti)
if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
return 0;
- if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
- struct raid_set *rs_active = rs_find_active(rs);
-
- if (rs_active) {
- /*
- * In case no rebuilds have been requested
- * and an active table slot exists, copy
- * current resynchonization completed and
- * reshape position pointers across from
- * suspended raid set in the active slot.
- *
- * This resumes the new mapping at current
- * offsets to continue recover/reshape without
- * necessarily redoing a raid set partially or
- * causing data corruption in case of a reshape.
- */
- if (rs_active->md.curr_resync_completed != MaxSector)
- mddev->curr_resync_completed = rs_active->md.curr_resync_completed;
- if (rs_active->md.reshape_position != MaxSector)
- mddev->reshape_position = rs_active->md.reshape_position;
- }
- }
-
/*
* The superblocks need to be updated on disk if the
* array is new or new devices got added (thus zeroed
@@ -4046,7 +4014,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 13, 2},
+ .version = {1, 14, 0},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 72142021b5c9..20b0776e39ef 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -189,6 +189,12 @@ struct dm_pool_metadata {
sector_t data_block_size;
/*
+ * We reserve a section of the metadata for commit overhead.
+ * All reported space does *not* include this.
+ */
+ dm_block_t metadata_reserve;
+
+ /*
* Set if a transaction has to be aborted but the attempt to roll back
* to the previous (good) transaction failed. The only pool metadata
* operation possible in this state is the closing of the device.
@@ -816,6 +822,20 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
return dm_tm_commit(pmd->tm, sblock);
}
+static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
+{
+ int r;
+ dm_block_t total;
+ dm_block_t max_blocks = 4096; /* 16M */
+
+ r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
+ if (r) {
+ DMERR("could not get size of metadata device");
+ pmd->metadata_reserve = max_blocks;
+ } else
+ pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
+}
+
struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool format_device)
@@ -849,6 +869,8 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
return ERR_PTR(r);
}
+ __set_metadata_reserve(pmd);
+
return pmd;
}
@@ -1820,6 +1842,13 @@ int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
down_read(&pmd->root_lock);
if (!pmd->fail_io)
r = dm_sm_get_nr_free(pmd->metadata_sm, result);
+
+ if (!r) {
+ if (*result < pmd->metadata_reserve)
+ *result = 0;
+ else
+ *result -= pmd->metadata_reserve;
+ }
up_read(&pmd->root_lock);
return r;
@@ -1932,8 +1961,11 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_cou
int r = -EINVAL;
down_write(&pmd->root_lock);
- if (!pmd->fail_io)
+ if (!pmd->fail_io) {
r = __resize_space_map(pmd->metadata_sm, new_count);
+ if (!r)
+ __set_metadata_reserve(pmd);
+ }
up_write(&pmd->root_lock);
return r;
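__set_metadata_reserve() holds back 10% of the metadata device, capped at 4096 blocks (16M at the 4K metadata block size), and dm_pool_get_free_metadata_block_count() subtracts the reserve from what it reports. Userspace therefore sees the pool as out of metadata space while enough blocks remain to commit the final transaction cleanly. The sizing rule as a standalone sketch:

#include <linux/kernel.h>
#include <linux/math64.h>

/* Sketch of the reserve rule above: min(total / 10, 4096 blocks).
 * E.g. total = 10000 -> 1000 reserved; total >= 40960 -> 4096. */
static u64 metadata_reserve_blocks(u64 total)
{
	return min_t(u64, div_u64(total, 10), 4096);
}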
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 7bd60a150f8f..aaf1ad481ee8 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -200,7 +200,13 @@ struct dm_thin_new_mapping;
enum pool_mode {
PM_WRITE, /* metadata may be changed */
PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */
+
+ /*
+ * Like READ_ONLY, except may switch back to WRITE on metadata resize. Reported as READ_ONLY.
+ */
+ PM_OUT_OF_METADATA_SPACE,
PM_READ_ONLY, /* metadata may not be changed */
+
PM_FAIL, /* all I/O fails */
};
@@ -1371,7 +1377,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
static void requeue_bios(struct pool *pool);
-static void check_for_space(struct pool *pool)
+static bool is_read_only_pool_mode(enum pool_mode mode)
+{
+ return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY);
+}
+
+static bool is_read_only(struct pool *pool)
+{
+ return is_read_only_pool_mode(get_pool_mode(pool));
+}
+
+static void check_for_metadata_space(struct pool *pool)
+{
+ int r;
+ const char *ooms_reason = NULL;
+ dm_block_t nr_free;
+
+ r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
+ if (r)
+ ooms_reason = "Could not get free metadata blocks";
+ else if (!nr_free)
+ ooms_reason = "No free metadata blocks";
+
+ if (ooms_reason && !is_read_only(pool)) {
+ DMERR("%s", ooms_reason);
+ set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
+ }
+}
+
+static void check_for_data_space(struct pool *pool)
{
int r;
dm_block_t nr_free;
@@ -1397,14 +1431,16 @@ static int commit(struct pool *pool)
{
int r;
- if (get_pool_mode(pool) >= PM_READ_ONLY)
+ if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
return -EINVAL;
r = dm_pool_commit_metadata(pool->pmd);
if (r)
metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
- else
- check_for_space(pool);
+ else {
+ check_for_metadata_space(pool);
+ check_for_data_space(pool);
+ }
return r;
}
@@ -1470,6 +1506,19 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
return r;
}
+ r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
+ if (r) {
+ metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
+ return r;
+ }
+
+ if (!free_blocks) {
+ /* Let's commit before we use up the metadata reserve. */
+ r = commit(pool);
+ if (r)
+ return r;
+ }
+
return 0;
}
@@ -1501,6 +1550,7 @@ static blk_status_t should_error_unserviceable_bio(struct pool *pool)
case PM_OUT_OF_DATA_SPACE:
return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;
+ case PM_OUT_OF_METADATA_SPACE:
case PM_READ_ONLY:
case PM_FAIL:
return BLK_STS_IOERR;
@@ -2464,8 +2514,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
error_retry_list(pool);
break;
+ case PM_OUT_OF_METADATA_SPACE:
case PM_READ_ONLY:
- if (old_mode != new_mode)
+ if (!is_read_only_pool_mode(old_mode))
notify_of_pool_mode_change(pool, "read-only");
dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_read_only;
@@ -3403,6 +3454,10 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
DMINFO("%s: growing the metadata device from %llu to %llu blocks",
dm_device_name(pool->pool_md),
sb_metadata_dev_size, metadata_dev_size);
+
+ if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
+ set_pool_mode(pool, PM_WRITE);
+
r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
if (r) {
metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
@@ -3707,7 +3762,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv,
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
- if (get_pool_mode(pool) >= PM_READ_ONLY) {
+ if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
dm_device_name(pool->pool_md));
return -EOPNOTSUPP;
@@ -3781,6 +3836,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
dm_block_t nr_blocks_data;
dm_block_t nr_blocks_metadata;
dm_block_t held_root;
+ enum pool_mode mode;
char buf[BDEVNAME_SIZE];
char buf2[BDEVNAME_SIZE];
struct pool_c *pt = ti->private;
@@ -3851,9 +3907,10 @@ static void pool_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("- ");
- if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
+ mode = get_pool_mode(pool);
+ if (mode == PM_OUT_OF_DATA_SPACE)
DMEMIT("out_of_data_space ");
- else if (pool->pf.mode == PM_READ_ONLY)
+ else if (is_read_only_pool_mode(mode))
DMEMIT("ro ");
else
DMEMIT("rw ");
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 12decdbd722d..fc65f0dedf7f 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -99,10 +99,26 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
{
struct scatterlist sg;
- sg_init_one(&sg, data, len);
- ahash_request_set_crypt(req, &sg, NULL, len);
-
- return crypto_wait_req(crypto_ahash_update(req), wait);
+ if (likely(!is_vmalloc_addr(data))) {
+ sg_init_one(&sg, data, len);
+ ahash_request_set_crypt(req, &sg, NULL, len);
+ return crypto_wait_req(crypto_ahash_update(req), wait);
+ } else {
+ do {
+ int r;
+ size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data));
+ flush_kernel_vmap_range((void *)data, this_step);
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data));
+ ahash_request_set_crypt(req, &sg, NULL, this_step);
+ r = crypto_wait_req(crypto_ahash_update(req), wait);
+ if (unlikely(r))
+ return r;
+ data += this_step;
+ len -= this_step;
+ } while (len);
+ return 0;
+ }
}
/*
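The vmalloc branch above exists because vmalloc memory is only virtually contiguous: each scatterlist entry may describe at most one physical page, so the buffer is consumed in steps that never cross a page boundary. A stand-alone userspace model of the chunk arithmetic, assuming a 4 KiB page (the walk_chunks() harness is hypothetical):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL
#define offset_in_page(p) ((uintptr_t)(p) & (PAGE_SIZE - 1))

/* Consume a buffer in steps that never cross a PAGE_SIZE boundary,
 * exactly as the loop above does before building each sg entry. */
static void walk_chunks(const char *data, size_t len)
{
	while (len) {
		size_t this_step = PAGE_SIZE - offset_in_page(data);

		if (this_step > len)
			this_step = len;
		printf("chunk: page offset %4lu, %4zu bytes\n",
		       (unsigned long)offset_in_page(data), this_step);
		data += this_step;
		len -= this_step;
	}
}

int main(void)
{
	static char buf[16384];

	walk_chunks(buf + 1920, 9000);	/* deliberately starts mid-page */
	return 0;
}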
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 20f7e4ef5342..45abb54037fc 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1155,12 +1155,14 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
/*
- * The zone descriptors obtained with a zone report indicate
- * zone positions within the target device. The zone descriptors
- * must be remapped to match their position within the dm device.
- * A target may call dm_remap_zone_report after completion of a
- * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained
- * from the target device mapping to the dm device.
+ * The zone descriptors obtained with a zone report indicate zone positions
+ * within the target backing device, regardless of whether that device is a
+ * partition and regardless of the target mapping start sector on the device
+ * or partition. Each zone descriptor's start sector and write pointer
+ * position must be adjusted to match its relative position within the dm
+ * device.
+ * A target may call dm_remap_zone_report() after completion of a
+ * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained from the
+ * backing device.
*/
void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
{
@@ -1171,6 +1173,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
struct blk_zone *zone;
unsigned int nr_rep = 0;
unsigned int ofst;
+ sector_t part_offset;
struct bio_vec bvec;
struct bvec_iter iter;
void *addr;
@@ -1179,6 +1182,15 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
return;
/*
+	 * bio sector was incremented by the request size on completion. Taking
+	 * into account the original request sector, the target start offset on
+	 * the backing device and the target mapping offset (ti->begin), the
+	 * partition start offset of the backing device can be recovered as
+	 * computed below. The partition offset is always 0 if the target uses
+	 * a whole device.
+ */
+ part_offset = bio->bi_iter.bi_sector + ti->begin - (start + bio_end_sector(report_bio));
+
+ /*
* Remap the start sector of the reported zones. For sequential zones,
* also remap the write pointer position.
*/
@@ -1195,6 +1207,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
/* Set zones start sector */
while (hdr->nr_zones && ofst < bvec.bv_len) {
zone = addr + ofst;
+ zone->start -= part_offset;
if (zone->start >= start + ti->len) {
hdr->nr_zones = 0;
break;
@@ -1206,7 +1219,7 @@ void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
else if (zone->cond == BLK_ZONE_COND_EMPTY)
zone->wp = zone->start;
else
- zone->wp = zone->wp + ti->begin - start;
+ zone->wp = zone->wp + ti->begin - start - part_offset;
}
ofst += sizeof(struct blk_zone);
hdr->nr_zones--;
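A worked example of the part_offset computation (all numbers hypothetical): suppose the partition begins at sector 8192 of the disk, the target maps dm sector ti->begin = 0 onto partition sector start = 0, and the report bio covered 256 sectors starting at dm sector 0. On completion the clone's bi_sector has been advanced to the disk-absolute end position, while bio_end_sector(report_bio) is still dm-relative; subtracting the two recovers the partition offset no matter where the report started:

#include <stdio.h>

typedef unsigned long long sector_t;

int main(void)
{
	sector_t part_start = 8192;	/* partition start on the whole disk */
	sector_t ti_begin = 0;		/* target start on the dm device */
	sector_t start = 0;		/* target start on the backing device */
	sector_t dm_sector = 0;		/* dm sector where the report started */
	sector_t nr = 256;		/* sectors covered by the report bio */

	/* Clone completed: bi_sector advanced to the disk-absolute end. */
	sector_t clone_end = part_start + start + (dm_sector - ti_begin) + nr;
	/* bio_end_sector(report_bio): dm-relative end of the original bio. */
	sector_t orig_end = dm_sector + nr;

	sector_t part_offset = clone_end + ti_begin - (start + orig_end);

	printf("part_offset = %llu\n", part_offset);	/* 8192 */
	return 0;
}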
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index ac1cffd2a09b..f3fb5bb8c82a 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -542,7 +542,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
!discard_bio)
continue;
bio_chain(discard_bio, bio);
- bio_clone_blkcg_association(discard_bio, bio);
+ bio_clone_blkg_association(discard_bio, bio);
if (mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(rdev->bdev),
discard_bio, disk_devt(mddev->gendisk),
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
index b5410aeb5fe2..bb41bea950ac 100644
--- a/drivers/media/i2c/mt9v111.c
+++ b/drivers/media/i2c/mt9v111.c
@@ -1159,41 +1159,21 @@ static int mt9v111_probe(struct i2c_client *client)
V4L2_CID_AUTO_WHITE_BALANCE,
0, 1, 1,
V4L2_WHITE_BALANCE_AUTO);
- if (IS_ERR_OR_NULL(mt9v111->auto_awb)) {
- ret = PTR_ERR(mt9v111->auto_awb);
- goto error_free_ctrls;
- }
-
mt9v111->auto_exp = v4l2_ctrl_new_std_menu(&mt9v111->ctrls,
&mt9v111_ctrl_ops,
V4L2_CID_EXPOSURE_AUTO,
V4L2_EXPOSURE_MANUAL,
0, V4L2_EXPOSURE_AUTO);
- if (IS_ERR_OR_NULL(mt9v111->auto_exp)) {
- ret = PTR_ERR(mt9v111->auto_exp);
- goto error_free_ctrls;
- }
-
- /* Initialize timings */
mt9v111->hblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
V4L2_CID_HBLANK,
MT9V111_CORE_R05_MIN_HBLANK,
MT9V111_CORE_R05_MAX_HBLANK, 1,
MT9V111_CORE_R05_DEF_HBLANK);
- if (IS_ERR_OR_NULL(mt9v111->hblank)) {
- ret = PTR_ERR(mt9v111->hblank);
- goto error_free_ctrls;
- }
-
mt9v111->vblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
V4L2_CID_VBLANK,
MT9V111_CORE_R06_MIN_VBLANK,
MT9V111_CORE_R06_MAX_VBLANK, 1,
MT9V111_CORE_R06_DEF_VBLANK);
- if (IS_ERR_OR_NULL(mt9v111->vblank)) {
- ret = PTR_ERR(mt9v111->vblank);
- goto error_free_ctrls;
- }
/* PIXEL_RATE is fixed: just expose it to user space. */
v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
@@ -1201,6 +1181,10 @@ static int mt9v111_probe(struct i2c_client *client)
DIV_ROUND_CLOSEST(mt9v111->sysclk, 2), 1,
DIV_ROUND_CLOSEST(mt9v111->sysclk, 2));
+ if (mt9v111->ctrls.error) {
+ ret = mt9v111->ctrls.error;
+ goto error_free_ctrls;
+ }
mt9v111->sd.ctrl_handler = &mt9v111->ctrls;
/* Start with default configuration: 640x480 UYVY. */
@@ -1226,26 +1210,27 @@ static int mt9v111_probe(struct i2c_client *client)
mt9v111->pad.flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad);
if (ret)
- goto error_free_ctrls;
+ goto error_free_entity;
#endif
ret = mt9v111_chip_probe(mt9v111);
if (ret)
- goto error_free_ctrls;
+ goto error_free_entity;
ret = v4l2_async_register_subdev(&mt9v111->sd);
if (ret)
- goto error_free_ctrls;
+ goto error_free_entity;
return 0;
-error_free_ctrls:
- v4l2_ctrl_handler_free(&mt9v111->ctrls);
-
+error_free_entity:
#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
media_entity_cleanup(&mt9v111->sd.entity);
#endif
+error_free_ctrls:
+ v4l2_ctrl_handler_free(&mt9v111->ctrls);
+
mutex_destroy(&mt9v111->pwr_mutex);
mutex_destroy(&mt9v111->stream_mutex);
@@ -1259,12 +1244,12 @@ static int mt9v111_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(sd);
- v4l2_ctrl_handler_free(&mt9v111->ctrls);
-
#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
media_entity_cleanup(&sd->entity);
#endif
+ v4l2_ctrl_handler_free(&mt9v111->ctrls);
+
mutex_destroy(&mt9v111->pwr_mutex);
mutex_destroy(&mt9v111->stream_mutex);
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 94c1fe0e9787..54fe90acb5b2 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -541,6 +541,8 @@ config VIDEO_CROS_EC_CEC
depends on MFD_CROS_EC
select CEC_CORE
select CEC_NOTIFIER
+ select CHROME_PLATFORMS
+ select CROS_EC_PROTO
---help---
If you say yes here you will get support for the
ChromeOS Embedded Controller's CEC.
diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c
index 729b31891466..a5ae85674ffb 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.c
+++ b/drivers/media/platform/qcom/camss/camss-csid.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
index c832539397d7..12bce391d71f 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n))
#define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n))
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
index bcd0dfd33618..2e65caf1ecae 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#define CSIPHY_3PH_LNn_CFG1(n) (0x000 + 0x100 * (n))
#define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG (BIT(7) | BIT(6))
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index 4559f3b1b38c..008afb85023b 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/media/platform/qcom/camss/camss-ispif.c b/drivers/media/platform/qcom/camss/camss-ispif.c
index 7f269021d08c..1f33b4eb198c 100644
--- a/drivers/media/platform/qcom/camss/camss-ispif.c
+++ b/drivers/media/platform/qcom/camss/camss-ispif.c
@@ -10,6 +10,7 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
@@ -1076,8 +1077,8 @@ int msm_ispif_subdev_init(struct ispif_device *ispif,
else
return -EINVAL;
- ispif->line = kcalloc(ispif->line_num, sizeof(*ispif->line),
- GFP_KERNEL);
+ ispif->line = devm_kcalloc(dev, ispif->line_num, sizeof(*ispif->line),
+ GFP_KERNEL);
if (!ispif->line)
return -ENOMEM;
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
index da3a9fed9f2d..174a36be6f5d 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
@@ -9,6 +9,7 @@
*/
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include "camss-vfe.h"
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
index 4c584bffd179..0dca8bf9281e 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
@@ -9,6 +9,7 @@
*/
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include "camss-vfe.h"
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index dcc0c30ef1b1..669615fff6a0 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -848,17 +848,18 @@ static int camss_probe(struct platform_device *pdev)
return -EINVAL;
}
- camss->csiphy = kcalloc(camss->csiphy_num, sizeof(*camss->csiphy),
- GFP_KERNEL);
+ camss->csiphy = devm_kcalloc(dev, camss->csiphy_num,
+ sizeof(*camss->csiphy), GFP_KERNEL);
if (!camss->csiphy)
return -ENOMEM;
- camss->csid = kcalloc(camss->csid_num, sizeof(*camss->csid),
- GFP_KERNEL);
+ camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid),
+ GFP_KERNEL);
if (!camss->csid)
return -ENOMEM;
- camss->vfe = kcalloc(camss->vfe_num, sizeof(*camss->vfe), GFP_KERNEL);
+ camss->vfe = devm_kcalloc(dev, camss->vfe_num, sizeof(*camss->vfe),
+ GFP_KERNEL);
if (!camss->vfe)
return -ENOMEM;
@@ -993,12 +994,12 @@ static const struct of_device_id camss_dt_match[] = {
MODULE_DEVICE_TABLE(of, camss_dt_match);
-static int camss_runtime_suspend(struct device *dev)
+static int __maybe_unused camss_runtime_suspend(struct device *dev)
{
return 0;
}
-static int camss_runtime_resume(struct device *dev)
+static int __maybe_unused camss_runtime_resume(struct device *dev)
{
return 0;
}
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 666d319d3d1a..1f6c1eefe389 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -402,8 +402,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
if (msg[0].addr == state->af9033_i2c_addr[1])
reg |= 0x100000;
- ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
- msg[0].len - 3);
+ ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
+ &msg[0].buf[3],
+ msg[0].len - 3)
+ : -EOPNOTSUPP;
} else {
/* I2C write */
u8 buf[MAX_XFER_SIZE];
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 127fe6eb91d9..a3ef1f50a4b3 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -115,14 +115,6 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
if (sev == NULL)
return;
- /*
- * If the event has been added to the fh->subscribed list, but its
- * add op has not completed yet elems will be 0, treat this as
- * not being subscribed.
- */
- if (!sev->elems)
- return;
-
/* Increase event sequence number on fh. */
fh->sequence++;
@@ -208,6 +200,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
struct v4l2_subscribed_event *sev, *found_ev;
unsigned long flags;
unsigned i;
+ int ret = 0;
if (sub->type == V4L2_EVENT_ALL)
return -EINVAL;
@@ -225,31 +218,36 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
sev->flags = sub->flags;
sev->fh = fh;
sev->ops = ops;
+ sev->elems = elems;
+
+ mutex_lock(&fh->subscribe_lock);
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
- if (!found_ev)
- list_add(&sev->list, &fh->subscribed);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
if (found_ev) {
+ /* Already listening */
kvfree(sev);
- return 0; /* Already listening */
+ goto out_unlock;
}
if (sev->ops && sev->ops->add) {
- int ret = sev->ops->add(sev, elems);
+ ret = sev->ops->add(sev, elems);
if (ret) {
- sev->ops = NULL;
- v4l2_event_unsubscribe(fh, sub);
- return ret;
+ kvfree(sev);
+ goto out_unlock;
}
}
- /* Mark as ready for use */
- sev->elems = elems;
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ list_add(&sev->list, &fh->subscribed);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
- return 0;
+out_unlock:
+ mutex_unlock(&fh->subscribe_lock);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
@@ -288,6 +286,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
return 0;
}
+ mutex_lock(&fh->subscribe_lock);
+
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
sev = v4l2_event_subscribed(fh, sub->type, sub->id);
@@ -305,6 +305,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
if (sev && sev->ops && sev->ops->del)
sev->ops->del(sev);
+ mutex_unlock(&fh->subscribe_lock);
+
kvfree(sev);
return 0;
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index 3895999bf880..c91a7bd3ecfc 100644
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -45,6 +45,7 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
INIT_LIST_HEAD(&fh->available);
INIT_LIST_HEAD(&fh->subscribed);
fh->sequence = -1;
+ mutex_init(&fh->subscribe_lock);
}
EXPORT_SYMBOL_GPL(v4l2_fh_init);
@@ -90,6 +91,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
return;
v4l_disable_media_source(fh->vdev);
v4l2_event_unsubscribe_all(fh);
+ mutex_destroy(&fh->subscribe_lock);
fh->vdev = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_fh_exit);
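Taken together, the v4l2-event.c and v4l2-fh.c hunks establish a two-lock scheme: the new fh->subscribe_lock is a mutex, so the possibly-sleeping ->add()/->del() callbacks can run under it and subscribe is serialized against unsubscribe, while the fh_lock spinlock still guards only the subscriber list. The entry is added to the list only once ->add() has fully succeeded, which is why the old !sev->elems guard could be dropped. A condensed kernel-style sketch of the resulting ordering (not a drop-in; error paths abbreviated):

static int subscribe_sketch(struct v4l2_fh *fh,
			    struct v4l2_subscribed_event *sev,
			    unsigned int elems)
{
	struct v4l2_subscribed_event *found;
	unsigned long flags;
	int ret = 0;

	mutex_lock(&fh->subscribe_lock);	/* serialize add vs. del */

	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	found = v4l2_event_subscribed(fh, sev->type, sev->id);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

	if (found) {				/* already listening */
		kvfree(sev);
		goto out;
	}

	if (sev->ops && sev->ops->add) {
		ret = sev->ops->add(sev, elems);  /* may sleep; sev not yet visible */
		if (ret) {
			kvfree(sev);
			goto out;
		}
	}

	/* Publish only a fully initialized entry. */
	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
	list_add(&sev->list, &fh->subscribed);
	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
out:
	mutex_unlock(&fh->subscribe_lock);
	return ret;
}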
diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
index 31112f622b88..475e5b3790ed 100644
--- a/drivers/memory/ti-aemif.c
+++ b/drivers/memory/ti-aemif.c
@@ -411,7 +411,7 @@ static int aemif_probe(struct platform_device *pdev)
if (ret < 0)
goto error;
}
- } else {
+ } else if (pdata) {
for (i = 0; i < pdata->num_sub_devices; i++) {
pdata->sub_devices[i].dev.parent = dev;
ret = platform_device_register(&pdata->sub_devices[i]);
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index 716fc8ed31d3..8a02f11076f9 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -2146,7 +2146,7 @@ static int msb_init_disk(struct memstick_dev *card)
set_disk_ro(msb->disk, 1);
msb_start(card);
- device_add_disk(&card->dev, msb->disk);
+ device_add_disk(&card->dev, msb->disk, NULL);
dbg("Disk added");
return 0;
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 5ee932631fae..0cd30dcb6801 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1236,7 +1236,7 @@ static int mspro_block_init_disk(struct memstick_dev *card)
set_capacity(msb->disk, capacity);
dev_dbg(&card->dev, "capacity set %ld\n", capacity);
- device_add_disk(&card->dev, msb->disk);
+ device_add_disk(&card->dev, msb->disk, NULL);
msb->active = 1;
return 0;
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index e11ab12fbdf2..800986a79704 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -528,8 +528,8 @@ static int usbhs_omap_get_dt_pdata(struct device *dev,
}
static const struct of_device_id usbhs_child_match_table[] = {
- { .compatible = "ti,omap-ehci", },
- { .compatible = "ti,omap-ohci", },
+ { .compatible = "ti,ehci-omap", },
+ { .compatible = "ti,ohci-omap3", },
{ }
};
@@ -855,6 +855,7 @@ static struct platform_driver usbhs_omap_driver = {
.pm = &usbhsomap_dev_pm_ops,
.of_match_table = usbhs_omap_dt_ids,
},
+ .probe = usbhs_omap_probe,
.remove = usbhs_omap_remove,
};
@@ -864,9 +865,9 @@ MODULE_ALIAS("platform:" USBHS_DRIVER_NAME);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI");
-static int __init omap_usbhs_drvinit(void)
+static int omap_usbhs_drvinit(void)
{
- return platform_driver_probe(&usbhs_omap_driver, usbhs_omap_probe);
+ return platform_driver_register(&usbhs_omap_driver);
}
/*
@@ -878,7 +879,7 @@ static int __init omap_usbhs_drvinit(void)
*/
fs_initcall_sync(omap_usbhs_drvinit);
-static void __exit omap_usbhs_drvexit(void)
+static void omap_usbhs_drvexit(void)
{
platform_driver_unregister(&usbhs_omap_driver);
}
diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c
index eeb7eef62174..38f90e179927 100644
--- a/drivers/misc/hmc6352.c
+++ b/drivers/misc/hmc6352.c
@@ -27,6 +27,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/sysfs.h>
+#include <linux/nospec.h>
static DEFINE_MUTEX(compass_mutex);
@@ -50,6 +51,7 @@ static int compass_store(struct device *dev, const char *buf, size_t count,
return ret;
if (val >= strlen(map))
return -EINVAL;
+ val = array_index_nospec(val, strlen(map));
mutex_lock(&compass_mutex);
ret = compass_command(c, map[val]);
mutex_unlock(&compass_mutex);
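array_index_nospec() complements, rather than replaces, the bounds check: once val >= strlen(map) has been rejected architecturally, the macro clamps val so a speculatively executed out-of-bounds load cannot leak memory contents (Spectre v1). A kernel-style sketch of the idiom (the lookup() wrapper is hypothetical):

#include <linux/nospec.h>
#include <linux/string.h>
#include <linux/errno.h>

/* Bounds-check first, then sanitize the index against speculation. */
static int lookup(const char *map, unsigned long val)
{
	if (val >= strlen(map))
		return -EINVAL;
	val = array_index_nospec(val, strlen(map));
	return map[val];
}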
diff --git a/drivers/misc/ibmvmc.c b/drivers/misc/ibmvmc.c
index 8f82bb9d11e2..b8aaa684c397 100644
--- a/drivers/misc/ibmvmc.c
+++ b/drivers/misc/ibmvmc.c
@@ -2131,7 +2131,7 @@ static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter)
retrc = plpar_hcall_norets(H_REG_CRQ,
vdev->unit_address,
queue->msg_token, PAGE_SIZE);
- retrc = rc;
+ rc = retrc;
if (rc == H_RESOURCE)
rc = ibmvmc_reset_crq_queue(adapter);
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 7bba62a72921..fc3872fe7b25 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -521,17 +521,15 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
cl = cldev->cl;
+ mutex_lock(&bus->device_lock);
if (cl->state == MEI_FILE_UNINITIALIZED) {
- mutex_lock(&bus->device_lock);
ret = mei_cl_link(cl);
- mutex_unlock(&bus->device_lock);
if (ret)
- return ret;
+ goto out;
/* update pointers */
cl->cldev = cldev;
}
- mutex_lock(&bus->device_lock);
if (mei_cl_is_connected(cl)) {
ret = 0;
goto out;
@@ -616,9 +614,8 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
if (err < 0)
dev_err(bus->dev, "Could not disconnect from the ME client\n");
-out:
mei_cl_bus_module_put(cldev);
-
+out:
/* Flush queues and remove any pending read */
mei_cl_flush_queues(cl, NULL);
mei_cl_unlink(cl);
@@ -876,12 +873,13 @@ static void mei_cl_bus_dev_release(struct device *dev)
mei_me_cl_put(cldev->me_cl);
mei_dev_bus_put(cldev->bus);
+ mei_cl_unlink(cldev->cl);
kfree(cldev->cl);
kfree(cldev);
}
static const struct device_type mei_cl_device_type = {
- .release = mei_cl_bus_dev_release,
+ .release = mei_cl_bus_dev_release,
};
/**
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 4ab6251d418e..ebdcf0b450e2 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -1767,7 +1767,7 @@ out:
}
}
- rets = buf->size;
+ rets = len;
err:
cl_dbg(dev, cl, "rpm: autosuspend\n");
pm_runtime_mark_last_busy(dev->dev);
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 09e233d4c0de..e56f3e72d57a 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1161,15 +1161,18 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
props_res = (struct hbm_props_response *)mei_msg;
- if (props_res->status) {
+ if (props_res->status == MEI_HBMS_CLIENT_NOT_FOUND) {
+ dev_dbg(dev->dev, "hbm: properties response: %d CLIENT_NOT_FOUND\n",
+ props_res->me_addr);
+ } else if (props_res->status) {
dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n",
props_res->status,
mei_hbm_status_str(props_res->status));
return -EPROTO;
+ } else {
+ mei_hbm_me_cl_add(dev, props_res);
}
- mei_hbm_me_cl_add(dev, props_res);
-
/* request property for the next client */
if (mei_hbm_prop_req(dev, props_res->me_addr + 1))
return -EIO;
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index a0b9102c4c6e..c35b5b08bb33 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1371,6 +1371,16 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
if (brq->data.blocks > 1) {
/*
+ * Some SD cards in SPI mode return a CRC error or even lock up
+ * completely when trying to read the last block using a
+ * multiblock read command.
+ */
+ if (mmc_host_is_spi(card->host) && (rq_data_dir(req) == READ) &&
+ (blk_rq_pos(req) + blk_rq_sectors(req) ==
+ get_capacity(md->disk)))
+ brq->data.blocks--;
+
+ /*
* After a read error, we redo the request one sector
* at a time in order to accurately determine which
* sectors can be read successfully.
@@ -2698,7 +2708,7 @@ static int mmc_add_disk(struct mmc_blk_data *md)
int ret;
struct mmc_card *card = md->queue.card;
- device_add_disk(md->parent, md->disk);
+ device_add_disk(md->parent, md->disk, NULL);
md->force_ro.show = force_ro_show;
md->force_ro.store = force_ro_store;
sysfs_attr_init(&md->force_ro.attr);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index abf9e884386c..f57f5de54206 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -235,7 +235,7 @@ int mmc_of_parse(struct mmc_host *host)
host->caps |= MMC_CAP_NEEDS_POLL;
ret = mmc_gpiod_request_cd(host, "cd", 0, true,
- cd_debounce_delay_ms,
+ cd_debounce_delay_ms * 1000,
&cd_gpio_invert);
if (!ret)
dev_info(host->parent, "Got CD GPIO\n");
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 2a833686784b..86803a3a04dc 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -271,7 +271,7 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
if (debounce) {
ret = gpiod_set_debounce(desc, debounce);
if (ret < 0)
- ctx->cd_debounce_delay_ms = debounce;
+ ctx->cd_debounce_delay_ms = debounce / 1000;
}
if (gpio_invert)
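These two hunks are the two halves of one unit-conversion fix: mmc_gpiod_request_cd() hands its debounce argument straight to gpiod_set_debounce(), which expects microseconds, while cd_debounce_delay_ms is kept in milliseconds, hence the * 1000 at the call site and the / 1000 when storing the fallback. A two-line demonstration (values hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int cd_debounce_delay_ms = 200;		 /* DT-provided value */
	unsigned int debounce_us = cd_debounce_delay_ms * 1000;	 /* gpiod_set_debounce() units */

	printf("%u ms -> %u us -> %u ms\n", cd_debounce_delay_ms,
	       debounce_us, debounce_us / 1000);
	return 0;
}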
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index 09cb89645d06..2cfec33178c1 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -517,19 +517,23 @@ static struct mmc_host_ops meson_mx_mmc_ops = {
static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent)
{
struct device_node *slot_node;
+ struct platform_device *pdev;
/*
* TODO: the MMC core framework currently does not support
* controllers with multiple slots properly. So we only register
* the first slot for now
*/
- slot_node = of_find_compatible_node(parent->of_node, NULL, "mmc-slot");
+ slot_node = of_get_compatible_child(parent->of_node, "mmc-slot");
if (!slot_node) {
dev_warn(parent, "no 'mmc-slot' sub-node found\n");
return ERR_PTR(-ENOENT);
}
- return of_platform_device_create(slot_node, NULL, parent);
+ pdev = of_platform_device_create(slot_node, NULL, parent);
+ of_node_put(slot_node);
+
+ return pdev;
}
static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
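There are two fixes in the hunk above: of_get_compatible_child() searches only the direct children (of_find_compatible_node() walks onward through the whole tree, so it could match an unrelated node elsewhere), and the node it returns carries an elevated refcount that must be dropped with of_node_put() even on the success path. A kernel-style sketch of the refcount idiom (names hypothetical):

struct device_node *child;
struct platform_device *pdev;

child = of_get_compatible_child(parent->of_node, "some-compat");
if (!child)
	return ERR_PTR(-ENOENT);

pdev = of_platform_device_create(child, NULL, parent);
of_node_put(child);	/* drop the ref of_get_compatible_child() took */

return pdev;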
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 071693ebfe18..68760d4a5d3d 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2177,6 +2177,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
dma_release_channel(host->tx_chan);
dma_release_channel(host->rx_chan);
+ dev_pm_clear_wake_irq(host->dev);
pm_runtime_dont_use_autosuspend(host->dev);
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index 890f192dedbd..5389c4821882 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -498,7 +498,8 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = {
static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
{
- if (of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible &&
+ if ((of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible ||
+ of_device_get_match_data(&pdev->dev) == &of_rcar_r8a7795_compatible) &&
!soc_device_match(gen3_soc_whitelist))
return -ENODEV;
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index cbfafc453274..270d3c9580c5 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -39,13 +39,23 @@ static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_IN(len, val, 1));
+ SPI_MEM_OP_DATA_IN(len, NULL, 1));
+ void *scratchbuf;
int ret;
+ scratchbuf = kmalloc(len, GFP_KERNEL);
+ if (!scratchbuf)
+ return -ENOMEM;
+
+ op.data.buf.in = scratchbuf;
ret = spi_mem_exec_op(flash->spimem, &op);
if (ret < 0)
dev_err(&flash->spimem->spi->dev, "error %d reading %x\n", ret,
code);
+ else
+ memcpy(val, scratchbuf, len);
+
+ kfree(scratchbuf);
return ret;
}
@@ -56,9 +66,19 @@ static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1),
SPI_MEM_OP_NO_ADDR,
SPI_MEM_OP_NO_DUMMY,
- SPI_MEM_OP_DATA_OUT(len, buf, 1));
+ SPI_MEM_OP_DATA_OUT(len, NULL, 1));
+ void *scratchbuf;
+ int ret;
- return spi_mem_exec_op(flash->spimem, &op);
+ scratchbuf = kmemdup(buf, len, GFP_KERNEL);
+ if (!scratchbuf)
+ return -ENOMEM;
+
+ op.data.buf.out = scratchbuf;
+ ret = spi_mem_exec_op(flash->spimem, &op);
+ kfree(scratchbuf);
+
+ return ret;
}
static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
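The scratch buffers above exist because spi-mem requires DMA-able data buffers, while spi-nor callers may pass pointers that are not (on-stack register buffers, for instance). Reads therefore bounce device data through kmalloc'd memory before copying it out, and writes duplicate the caller's buffer first. A kernel-style sketch of the write-side idiom (the helper name is hypothetical):

static int dma_safe_reg_write(struct spi_mem *spimem, struct spi_mem_op *op,
			      const void *buf, size_t len)
{
	void *scratch = kmemdup(buf, len, GFP_KERNEL);	/* DMA-safe copy */
	int ret;

	if (!scratch)
		return -ENOMEM;

	op->data.buf.out = scratch;
	ret = spi_mem_exec_op(spimem, op);
	kfree(scratch);

	return ret;
}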
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 29c0bfd74e8a..b0d44f9214b0 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -27,6 +27,7 @@
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
@@ -45,6 +46,8 @@ static void blktrans_dev_release(struct kref *kref)
dev->disk->private_data = NULL;
blk_cleanup_queue(dev->rq);
+ blk_mq_free_tag_set(dev->tag_set);
+ kfree(dev->tag_set);
put_disk(dev->disk);
list_del(&dev->list);
kfree(dev);
@@ -134,28 +137,39 @@ int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
-static void mtd_blktrans_work(struct work_struct *work)
+static struct request *mtd_next_request(struct mtd_blktrans_dev *dev)
+{
+ struct request *rq;
+
+ rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist);
+ if (rq) {
+ list_del_init(&rq->queuelist);
+ blk_mq_start_request(rq);
+ return rq;
+ }
+
+ return NULL;
+}
+
+static void mtd_blktrans_work(struct mtd_blktrans_dev *dev)
+ __releases(&dev->queue_lock)
+ __acquires(&dev->queue_lock)
{
- struct mtd_blktrans_dev *dev =
- container_of(work, struct mtd_blktrans_dev, work);
struct mtd_blktrans_ops *tr = dev->tr;
- struct request_queue *rq = dev->rq;
struct request *req = NULL;
int background_done = 0;
- spin_lock_irq(rq->queue_lock);
-
while (1) {
blk_status_t res;
dev->bg_stop = false;
- if (!req && !(req = blk_fetch_request(rq))) {
+ if (!req && !(req = mtd_next_request(dev))) {
if (tr->background && !background_done) {
- spin_unlock_irq(rq->queue_lock);
+ spin_unlock_irq(&dev->queue_lock);
mutex_lock(&dev->lock);
tr->background(dev);
mutex_unlock(&dev->lock);
- spin_lock_irq(rq->queue_lock);
+ spin_lock_irq(&dev->queue_lock);
/*
* Do background processing just once per idle
* period.
@@ -166,35 +180,39 @@ static void mtd_blktrans_work(struct work_struct *work)
break;
}
- spin_unlock_irq(rq->queue_lock);
+ spin_unlock_irq(&dev->queue_lock);
mutex_lock(&dev->lock);
res = do_blktrans_request(dev->tr, dev, req);
mutex_unlock(&dev->lock);
- spin_lock_irq(rq->queue_lock);
-
- if (!__blk_end_request_cur(req, res))
+ if (!blk_update_request(req, res, blk_rq_cur_bytes(req))) {
+ __blk_mq_end_request(req, res);
req = NULL;
+ }
background_done = 0;
+ spin_lock_irq(&dev->queue_lock);
}
-
- spin_unlock_irq(rq->queue_lock);
}
-static void mtd_blktrans_request(struct request_queue *rq)
+static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
{
struct mtd_blktrans_dev *dev;
- struct request *req = NULL;
- dev = rq->queuedata;
+ dev = hctx->queue->queuedata;
+ if (!dev) {
+ blk_mq_start_request(bd->rq);
+ return BLK_STS_IOERR;
+ }
+
+ spin_lock_irq(&dev->queue_lock);
+ list_add_tail(&bd->rq->queuelist, &dev->rq_list);
+ mtd_blktrans_work(dev);
+ spin_unlock_irq(&dev->queue_lock);
- if (!dev)
- while ((req = blk_fetch_request(rq)) != NULL)
- __blk_end_request_all(req, BLK_STS_IOERR);
- else
- queue_work(dev->wq, &dev->work);
+ return BLK_STS_OK;
}
static int blktrans_open(struct block_device *bdev, fmode_t mode)
@@ -329,6 +347,10 @@ static const struct block_device_operations mtd_block_ops = {
.getgeo = blktrans_getgeo,
};
+static const struct blk_mq_ops mtd_mq_ops = {
+ .queue_rq = mtd_queue_rq,
+};
+
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
struct mtd_blktrans_ops *tr = new->tr;
@@ -416,11 +438,20 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
/* Create the request queue */
spin_lock_init(&new->queue_lock);
- new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);
+ INIT_LIST_HEAD(&new->rq_list);
- if (!new->rq)
+ new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL);
+ if (!new->tag_set)
goto error3;
+ new->rq = blk_mq_init_sq_queue(new->tag_set, &mtd_mq_ops, 2,
+ BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
+ if (IS_ERR(new->rq)) {
+ ret = PTR_ERR(new->rq);
+ new->rq = NULL;
+ goto error4;
+ }
+
if (tr->flush)
blk_queue_write_cache(new->rq, true, false);
@@ -437,17 +468,10 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
gd->queue = new->rq;
- /* Create processing workqueue */
- new->wq = alloc_workqueue("%s%d", 0, 0,
- tr->name, new->mtd->index);
- if (!new->wq)
- goto error4;
- INIT_WORK(&new->work, mtd_blktrans_work);
-
if (new->readonly)
set_disk_ro(gd, 1);
- device_add_disk(&new->mtd->dev, gd);
+ device_add_disk(&new->mtd->dev, gd, NULL);
if (new->disk_attributes) {
ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
@@ -456,7 +480,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
}
return 0;
error4:
- blk_cleanup_queue(new->rq);
+ kfree(new->tag_set);
error3:
put_disk(new->disk);
error2:
@@ -481,15 +505,17 @@ int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
/* Stop new requests to arrive */
del_gendisk(old->disk);
- /* Stop workqueue. This will perform any pending request. */
- destroy_workqueue(old->wq);
-
/* Kill current requests */
spin_lock_irqsave(&old->queue_lock, flags);
old->rq->queuedata = NULL;
- blk_start_queue(old->rq);
spin_unlock_irqrestore(&old->queue_lock, flags);
+ /* freeze+quiesce queue to ensure all requests are flushed */
+ blk_mq_freeze_queue(old->rq);
+ blk_mq_quiesce_queue(old->rq);
+ blk_mq_unquiesce_queue(old->rq);
+ blk_mq_unfreeze_queue(old->rq);
+
/* If the device is currently open, tell trans driver to close it,
then put mtd device, and don't touch it again */
mutex_lock(&old->lock);
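The shape of the blk-mq conversion above: ->queue_rq() only parks the request on a driver-private list under queue_lock and drains the list inline, BLK_MQ_F_BLOCKING permits the handler to sleep (do_blktrans_request() runs under a mutex), and a queue depth of 2 keeps one request in flight while a second is queued. The allocation half of the pattern, as a kernel-style sketch (error handling abbreviated):

static const struct blk_mq_ops mtd_mq_ops = {
	.queue_rq = mtd_queue_rq,	/* list_add_tail() + inline drain */
};

new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL);
if (!new->tag_set)
	return -ENOMEM;

/* One hardware queue, depth 2, requests may merge, handler may sleep. */
new->rq = blk_mq_init_sq_queue(new->tag_set, &mtd_mq_ops, 2,
			       BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
if (IS_ERR(new->rq)) {
	kfree(new->tag_set);
	return PTR_ERR(new->rq);
}
new->rq->queuedata = new;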
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 52e2cb35fc79..99c460facd5e 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -873,8 +873,11 @@ static int mtd_part_of_parse(struct mtd_info *master,
int ret, err = 0;
np = mtd_get_of_node(master);
- if (!mtd_is_partition(master))
+ if (mtd_is_partition(master))
+ of_node_get(np);
+ else
np = of_get_child_by_name(np, "partitions");
+
of_property_for_each_string(np, "compatible", prop, compat) {
parser = mtd_part_get_compatible_parser(compat);
if (!parser)
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index 67b2065e7a19..b864b93dd289 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -596,6 +596,12 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
}
iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
+ /*
+ * The ->setup_dma() hook kicks DMA by using the data/command
+ * interface, which belongs to a different AXI port from the
+ * register interface. Read back the register to avoid a race.
+ */
+ ioread32(denali->reg + DMA_ENABLE);
denali_reset_irq(denali);
denali->setup_dma(denali, dma_addr, page, write);
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 7af4d6213ee5..bc2ef5209783 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -1547,7 +1547,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
for (op_id = 0; op_id < subop->ninstrs; op_id++) {
unsigned int offset, naddrs;
const u8 *addrs;
- int len = nand_subop_get_data_len(subop, op_id);
+ int len;
instr = &subop->instrs[op_id];
@@ -1593,6 +1593,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
nfc_op->ndcb[0] |=
NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
NDCB0_LEN_OVRD;
+ len = nand_subop_get_data_len(subop, op_id);
nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
}
nfc_op->data_delay_ns = instr->delay_ns;
@@ -1606,6 +1607,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
nfc_op->ndcb[0] |=
NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
NDCB0_LEN_OVRD;
+ len = nand_subop_get_data_len(subop, op_id);
nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
}
nfc_op->data_delay_ns = instr->delay_ns;
diff --git a/drivers/mux/adgs1408.c b/drivers/mux/adgs1408.c
index 0f7cf54e3234..89096f10f4c4 100644
--- a/drivers/mux/adgs1408.c
+++ b/drivers/mux/adgs1408.c
@@ -128,4 +128,4 @@ module_spi_driver(adgs1408_driver);
MODULE_AUTHOR("Mircea Caprioru <mircea.caprioru@analog.com>");
MODULE_DESCRIPTION("Analog Devices ADGS1408 MUX driver");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 9375cef22420..3d27616d9c85 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -283,8 +283,12 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCFINDIPDDPRT:
spin_lock_bh(&ipddp_route_lock);
rp = __ipddp_find_route(&rcp);
- if (rp)
- memcpy(&rcp2, rp, sizeof(rcp2));
+ if (rp) {
+ memset(&rcp2, 0, sizeof(rcp2));
+ rcp2.ip = rp->ip;
+ rcp2.at = rp->at;
+ rcp2.flags = rp->flags;
+ }
spin_unlock_bh(&ipddp_route_lock);
if (rp) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a764a83f99da..ee28ec9e0aba 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -210,6 +210,7 @@ static void bond_get_stats(struct net_device *bond_dev,
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
int mod);
+static void bond_netdev_notify_work(struct work_struct *work);
/*---------------------------- General routines -----------------------------*/
@@ -971,16 +972,13 @@ static void bond_poll_controller(struct net_device *bond_dev)
struct slave *slave = NULL;
struct list_head *iter;
struct ad_info ad_info;
- struct netpoll_info *ni;
- const struct net_device_ops *ops;
if (BOND_MODE(bond) == BOND_MODE_8023AD)
if (bond_3ad_get_active_agg_info(bond, &ad_info))
return;
bond_for_each_slave_rcu(bond, slave, iter) {
- ops = slave->dev->netdev_ops;
- if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller)
+ if (!bond_slave_is_up(slave))
continue;
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -992,11 +990,7 @@ static void bond_poll_controller(struct net_device *bond_dev)
continue;
}
- ni = rcu_dereference_bh(slave->dev->npinfo);
- if (down_trylock(&ni->dev_lock))
- continue;
- ops->ndo_poll_controller(slave->dev);
- up(&ni->dev_lock);
+ netpoll_poll_dev(slave->dev);
}
}
@@ -1177,9 +1171,27 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
}
}
- /* don't change skb->dev for link-local packets */
- if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+	/* Link-local multicast packets should be passed to the
+	 * stack on the link they arrive on, as well as being passed
+	 * to the bond-master device. They are mostly useful when the
+	 * stack receives them on the link on which they arrive
+	 * (e.g. LLDP), but they must also be available on the master.
+	 * Use cases include (but are not limited to): LLDP agents
+	 * that must be able to operate both on enslaved interfaces
+	 * as well as on bonds themselves; linux bridges that must be
+	 * able to process/pass BPDUs from attached bonds when any
+	 * kind of STP version is enabled on the network.
+	 */
+ if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
+ struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+ if (nskb) {
+ nskb->dev = bond->dev;
+ nskb->queue_mapping = 0;
+ netif_rx(nskb);
+ }
return RX_HANDLER_PASS;
+ }
if (bond_should_deliver_exact_match(skb, slave, bond))
return RX_HANDLER_EXACT;
@@ -1276,6 +1288,8 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
return NULL;
}
}
+ INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
+
return slave;
}
@@ -1283,6 +1297,7 @@ static void bond_free_slave(struct slave *slave)
{
struct bonding *bond = bond_get_bond_by_slave(slave);
+ cancel_delayed_work_sync(&slave->notify_work);
if (BOND_MODE(bond) == BOND_MODE_8023AD)
kfree(SLAVE_AD_INFO(slave));
@@ -1304,39 +1319,26 @@ static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
info->link_failure_count = slave->link_failure_count;
}
-static void bond_netdev_notify(struct net_device *dev,
- struct netdev_bonding_info *info)
-{
- rtnl_lock();
- netdev_bonding_info_change(dev, info);
- rtnl_unlock();
-}
-
static void bond_netdev_notify_work(struct work_struct *_work)
{
- struct netdev_notify_work *w =
- container_of(_work, struct netdev_notify_work, work.work);
+ struct slave *slave = container_of(_work, struct slave,
+ notify_work.work);
+
+ if (rtnl_trylock()) {
+ struct netdev_bonding_info binfo;
- bond_netdev_notify(w->dev, &w->bonding_info);
- dev_put(w->dev);
- kfree(w);
+ bond_fill_ifslave(slave, &binfo.slave);
+ bond_fill_ifbond(slave->bond, &binfo.master);
+ netdev_bonding_info_change(slave->dev, &binfo);
+ rtnl_unlock();
+ } else {
+ queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
+ }
}
void bond_queue_slave_event(struct slave *slave)
{
- struct bonding *bond = slave->bond;
- struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
-
- if (!nnw)
- return;
-
- dev_hold(slave->dev);
- nnw->dev = slave->dev;
- bond_fill_ifslave(slave, &nnw->bonding_info.slave);
- bond_fill_ifbond(bond, &nnw->bonding_info.master);
- INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
-
- queue_delayed_work(slave->bond->wq, &nnw->work, 0);
+ queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
}
void bond_lower_state_changed(struct slave *slave)
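Moving the delayed work into struct slave removes the per-event allocation, but it also forces a locking change: bond_free_slave() now runs cancel_delayed_work_sync(), typically with RTNL already held, so the work handler must never block on rtnl_lock() or the two would deadlock. Hence rtnl_trylock() with a one-jiffy requeue on contention, as sketched here (do_notification() is a hypothetical stand-in for the two fill calls plus netdev_bonding_info_change()):

static void notify_work_sketch(struct work_struct *_work)
{
	struct slave *slave = container_of(_work, struct slave,
					   notify_work.work);

	if (!rtnl_trylock()) {
		/* RTNL contended: retry in a jiffy instead of sleeping,
		 * so cancel_delayed_work_sync() callers holding RTNL
		 * cannot deadlock against this handler. */
		queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
		return;
	}

	do_notification(slave);
	rtnl_unlock();
}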
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index d93c790bfbe8..ad534b90ef21 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1107,7 +1107,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
b53_get_vlan_entry(dev, vid, vl);
vl->members |= BIT(port);
- if (untagged)
+ if (untagged && !dsa_is_cpu_port(ds, port))
vl->untag |= BIT(port);
else
vl->untag &= ~BIT(port);
@@ -1149,7 +1149,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
pvid = 0;
}
- if (untagged)
+ if (untagged && !dsa_is_cpu_port(ds, port))
vl->untag &= ~(BIT(port));
b53_set_vlan_entry(dev, vid, vl);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index e0066adcd2f3..fc8b48adf38b 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -703,7 +703,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- unsigned int port;
int ret;
ret = bcm_sf2_sw_rst(priv);
@@ -715,14 +714,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
if (priv->hw_params.num_gphy == 1)
bcm_sf2_gphy_enable_set(ds, true);
- for (port = 0; port < DSA_MAX_PORTS; port++) {
- if (dsa_is_user_port(ds, port))
- bcm_sf2_port_setup(ds, port, NULL);
- else if (dsa_is_cpu_port(ds, port))
- bcm_sf2_imp_setup(ds, port);
- }
-
- bcm_sf2_enable_acb(ds);
+ ds->ops->setup(ds);
return 0;
}
@@ -1173,10 +1165,10 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
- /* Disable all ports and interrupts */
priv->wol_ports_mask = 0;
- bcm_sf2_sw_suspend(priv->dev->ds);
dsa_unregister_switch(priv->dev->ds);
+ /* Disable all ports and interrupts */
+ bcm_sf2_sw_suspend(priv->dev->ds);
bcm_sf2_mdio_unregister(priv);
return 0;
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 7c791c1da4b9..bef01331266f 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -128,7 +128,7 @@
#define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION 0x7000
#define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION BIT(7)
#define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION BIT(6)
-#define MV88E6XXX_G1_ATU_OP_MISS_VIOLTATION BIT(5)
+#define MV88E6XXX_G1_ATU_OP_MISS_VIOLATION BIT(5)
#define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION BIT(4)
/* Offset 0x0C: ATU Data Register */
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 307410898fc9..5200e4bdce93 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -349,7 +349,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
chip->ports[entry.portvec].atu_member_violation++;
}
- if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
+ if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
dev_err_ratelimited(chip->dev,
"ATU miss violation for %pM portvec %x\n",
entry.mac, entry.portvec);
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 17f12c18d225..7635c38e77dd 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -459,12 +459,12 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
cqe = &admin_queue->cq.entries[head_masked];
/* Go over all the completions */
- while ((cqe->acq_common_descriptor.flags &
+ while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
/* Do not read the rest of the completion entry before the
* phase bit was validated
*/
- rmb();
+ dma_rmb();
ena_com_handle_single_admin_completion(admin_queue, cqe);
head_masked++;
@@ -627,17 +627,10 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
mmio_read_reg |= mmio_read->seq_num &
ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
- /* make sure read_resp->req_id get updated before the hw can write
- * there
- */
- wmb();
-
- writel_relaxed(mmio_read_reg,
- ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
+ writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
- mmiowb();
for (i = 0; i < timeout; i++) {
- if (read_resp->req_id == mmio_read->seq_num)
+ if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
break;
udelay(1);
@@ -1796,8 +1789,13 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
aenq_common = &aenq_e->aenq_common_desc;
/* Go over all the events */
- while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
- phase) {
+ while ((READ_ONCE(aenq_common->flags) &
+ ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+ /* Make sure the phase bit (ownership) is as expected before
+ * reading the rest of the descriptor.
+ */
+ dma_rmb();
+
pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
aenq_common->group, aenq_common->syndrom,
(u64)aenq_common->timestamp_low +
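Each of these hunks installs the same device-to-CPU ordering discipline: the phase (ownership) flag is loaded with READ_ONCE() so the compiler can neither tear nor hoist the read, and dma_rmb() then orders that load before any load from the descriptor body the device DMA-ed in. The skeleton of the pattern, as a kernel-style sketch (descriptor and helper names hypothetical):

/* Poll completions that the device publishes by flipping a phase bit. */
while ((READ_ONCE(cqe->flags) & PHASE_MASK) == expected_phase) {
	/* The payload may only be read after the phase bit has been
	 * validated; dma_rmb() forbids the CPU reordering the loads. */
	dma_rmb();

	handle_completion(cqe);
	cqe = next_entry(&head, &expected_phase);
}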
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index ea149c134e15..2b3ff0c20155 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -51,6 +51,11 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
if (desc_phase != expected_phase)
return NULL;
+ /* Make sure we read the rest of the descriptor after the phase bit
+ * has been read
+ */
+ dma_rmb();
+
return cdesc;
}
@@ -240,11 +245,11 @@ static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_L4_PROTO_IDX_SHIFT;
ena_rx_ctx->l3_csum_err =
- (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT;
+ !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L3_CSUM_ERR_SHIFT);
ena_rx_ctx->l4_csum_err =
- (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT;
+ !!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
ena_rx_ctx->hash = cdesc->hash;
ena_rx_ctx->frag =
(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
@@ -493,6 +498,7 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
if (cdesc_phase != expected_phase)
return -EAGAIN;
+ dma_rmb();
if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
pr_err("Invalid req id %d\n", cdesc->req_id);
return -EINVAL;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 6fdc753d9483..2f7657227cfe 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -107,8 +107,7 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
return io_sq->q_depth - 1 - cnt;
}
-static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
- bool relaxed)
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
u16 tail;
@@ -117,10 +116,7 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
io_sq->qid, tail);
- if (relaxed)
- writel_relaxed(tail, io_sq->db_addr);
- else
- writel(tail, io_sq->db_addr);
+ writel(tail, io_sq->db_addr);
return 0;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index c673ac2df65b..d906293ce07d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -76,7 +76,7 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
-static void ena_destroy_device(struct ena_adapter *adapter);
+static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev)
@@ -461,7 +461,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
return -ENOMEM;
}
- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
+ dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
u64_stats_update_begin(&rx_ring->syncp);
@@ -478,7 +478,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
rx_info->page_offset = 0;
ena_buf = &rx_info->ena_buf;
ena_buf->paddr = dma;
- ena_buf->len = PAGE_SIZE;
+ ena_buf->len = ENA_PAGE_SIZE;
return 0;
}
@@ -495,7 +495,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
return;
}
- dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
+ dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
DMA_FROM_DEVICE);
__free_page(page);
@@ -551,14 +551,9 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
rx_ring->qid, i, num);
}
- if (likely(i)) {
- /* Add memory barrier to make sure the desc were written before
- * issue a doorbell
- */
- wmb();
- ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true);
- mmiowb();
- }
+ /* ena_com_write_sq_doorbell issues a wmb() */
+ if (likely(i))
+ ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
rx_ring->next_to_use = next_to_use;
@@ -916,10 +911,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
do {
dma_unmap_page(rx_ring->dev,
dma_unmap_addr(&rx_info->ena_buf, paddr),
- PAGE_SIZE, DMA_FROM_DEVICE);
+ ENA_PAGE_SIZE, DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
- rx_info->page_offset, len, PAGE_SIZE);
+ rx_info->page_offset, len, ENA_PAGE_SIZE);
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"rx skb updated. len %d. data_len %d\n",
@@ -1580,8 +1575,6 @@ static int ena_up_complete(struct ena_adapter *adapter)
if (rc)
return rc;
- ena_init_napi(adapter);
-
ena_change_mtu(adapter->netdev, adapter->netdev->mtu);
ena_refill_all_rx_bufs(adapter);
@@ -1735,6 +1728,13 @@ static int ena_up(struct ena_adapter *adapter)
ena_setup_io_intr(adapter);
+	/* napi poll functions should be initialized before running
+	 * request_irq(), to handle a rare condition where a pending
+	 * interrupt fires the ISR immediately while the poll function
+	 * is not yet set, leading to a NULL dereference.
+	 */
+ ena_init_napi(adapter);
+
rc = ena_request_io_irq(adapter);
if (rc)
goto err_req_irq;
@@ -1900,7 +1900,7 @@ static int ena_close(struct net_device *netdev)
"Destroy failure, restarting device\n");
ena_dump_stats_to_dmesg(adapter);
/* rtnl lock already obtained in dev_ioctl() layer */
- ena_destroy_device(adapter);
+ ena_destroy_device(adapter, false);
ena_restore_device(adapter);
}
@@ -2112,12 +2112,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
tx_ring->ring_size);
- /* This WMB is aimed to:
- * 1 - perform smp barrier before reading next_to_completion
- * 2 - make sure the desc were written before trigger DB
- */
- wmb();
-
/* stop the queue when no more space available, the packet can have up
* to sgl_size + 2. one for the meta descriptor and one for header
* (if the header is larger than tx_max_header_size).
@@ -2136,10 +2130,11 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
* stop the queue but meanwhile clean_tx_irq updates
* next_to_completion and terminates.
* The queue will remain stopped forever.
- * To solve this issue this function perform rmb, check
- * the wakeup condition and wake up the queue if needed.
+	 * To solve this issue add an mb() to make sure that the
+	 * netif_tx_stop_queue() write is visible before checking whether
+	 * there is additional space in the queue.
*/
- smp_rmb();
+ smp_mb();
if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
> ENA_TX_WAKEUP_THRESH) {
@@ -2151,8 +2146,10 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
if (netif_xmit_stopped(txq) || !skb->xmit_more) {
- /* trigger the dma engine */
- ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false);
+ /* trigger the dma engine. ena_com_write_sq_doorbell()
+ * has a mb
+ */
+ ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->tx_stats.doorbells++;
u64_stats_update_end(&tx_ring->syncp);
@@ -2193,25 +2190,6 @@ error_drop_packet:
return NETDEV_TX_OK;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ena_netpoll(struct net_device *netdev)
-{
- struct ena_adapter *adapter = netdev_priv(netdev);
- int i;
-
- /* Dont schedule NAPI if the driver is in the middle of reset
- * or netdev is down.
- */
-
- if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
- test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
- return;
-
- for (i = 0; i < adapter->num_queues; i++)
- napi_schedule(&adapter->ena_napi[i].napi);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
@@ -2377,9 +2355,6 @@ static const struct net_device_ops ena_netdev_ops = {
.ndo_change_mtu = ena_change_mtu,
.ndo_set_mac_address = NULL,
.ndo_validate_addr = eth_validate_addr,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ena_netpoll,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
};
static int ena_device_validate_params(struct ena_adapter *adapter,
@@ -2550,12 +2525,15 @@ err_disable_msix:
return rc;
}
-static void ena_destroy_device(struct ena_adapter *adapter)
+static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
{
struct net_device *netdev = adapter->netdev;
struct ena_com_dev *ena_dev = adapter->ena_dev;
bool dev_up;
+ if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+ return;
+
netif_carrier_off(netdev);
del_timer_sync(&adapter->timer_service);
@@ -2563,7 +2541,8 @@ static void ena_destroy_device(struct ena_adapter *adapter)
dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
adapter->dev_up_before_reset = dev_up;
- ena_com_set_admin_running_state(ena_dev, false);
+ if (!graceful)
+ ena_com_set_admin_running_state(ena_dev, false);
if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
ena_down(adapter);
@@ -2591,6 +2570,7 @@ static void ena_destroy_device(struct ena_adapter *adapter)
adapter->reset_reason = ENA_REGS_RESET_NORMAL;
clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+ clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
}
static int ena_restore_device(struct ena_adapter *adapter)
@@ -2635,6 +2615,7 @@ static int ena_restore_device(struct ena_adapter *adapter)
}
}
+ set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
dev_err(&pdev->dev, "Device reset completed successfully\n");
@@ -2643,7 +2624,11 @@ err_disable_msix:
ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter);
err_device_destroy:
+ ena_com_abort_admin_commands(ena_dev);
+ ena_com_wait_for_abort_completion(ena_dev);
ena_com_admin_destroy(ena_dev);
+ ena_com_mmio_reg_read_request_destroy(ena_dev);
+ ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
err:
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
@@ -2665,7 +2650,7 @@ static void ena_fw_reset_device(struct work_struct *work)
return;
}
rtnl_lock();
- ena_destroy_device(adapter);
+ ena_destroy_device(adapter, false);
ena_restore_device(adapter);
rtnl_unlock();
}
@@ -3123,15 +3108,8 @@ err_rss_init:
static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
- int release_bars;
+ int release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
- if (ena_dev->mem_bar)
- devm_iounmap(&pdev->dev, ena_dev->mem_bar);
-
- if (ena_dev->reg_bar)
- devm_iounmap(&pdev->dev, ena_dev->reg_bar);
-
- release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
pci_release_selected_regions(pdev, release_bars);
}
@@ -3409,30 +3387,24 @@ static void ena_remove(struct pci_dev *pdev)
netdev->rx_cpu_rmap = NULL;
}
#endif /* CONFIG_RFS_ACCEL */
-
- unregister_netdev(netdev);
del_timer_sync(&adapter->timer_service);
cancel_work_sync(&adapter->reset_task);
- /* Reset the device only if the device is running. */
- if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
- ena_com_dev_reset(ena_dev, adapter->reset_reason);
+ unregister_netdev(netdev);
- ena_free_mgmnt_irq(adapter);
+ /* If the device is running, make sure it is reset so that it
+ * issues no further events.
+ */
+ if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
- ena_disable_msix(adapter);
+ rtnl_lock();
+ ena_destroy_device(adapter, true);
+ rtnl_unlock();
free_netdev(netdev);
- ena_com_mmio_reg_read_request_destroy(ena_dev);
-
- ena_com_abort_admin_commands(ena_dev);
-
- ena_com_wait_for_abort_completion(ena_dev);
-
- ena_com_admin_destroy(ena_dev);
-
ena_com_rss_destroy(ena_dev);
ena_com_delete_debug_area(ena_dev);
@@ -3467,7 +3439,7 @@ static int ena_suspend(struct pci_dev *pdev, pm_message_t state)
"ignoring device reset request as the device is being suspended\n");
clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
- ena_destroy_device(adapter);
+ ena_destroy_device(adapter, true);
rtnl_unlock();
return 0;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index f1972b5ab650..7c7ae56c52cf 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -355,4 +355,15 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
int ena_get_sset_count(struct net_device *netdev, int sset);
+/* The ENA buffer length field is 16 bits long, so when PAGE_SIZE == 64kB the
+ * driver would pass 0.
+ * Since the maximum packet size the ENA handles is ~9kB, limit the buffer
+ * length to 16kB.
+ */
+#if PAGE_SIZE > SZ_16K
+#define ENA_PAGE_SIZE SZ_16K
+#else
+#define ENA_PAGE_SIZE PAGE_SIZE
+#endif
+
#endif /* !(ENA_H) */
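The arithmetic behind the cap is worth seeing once: 64kB does not fit in a 16-bit length field, so it wraps to 0. A standalone C demonstration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t len = (uint16_t)(64 * 1024);	/* 0x10000 truncated */

		printf("%u\n", len);	/* prints 0 -- hence the 16kB cap */
		return 0;
	}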
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 116997a8b593..00332a1ea84b 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -1031,6 +1031,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
int i, ret;
unsigned long esar_base;
unsigned char *esar;
+ const char *desc;
if (dec_lance_debug && version_printed++ == 0)
printk(version);
@@ -1216,19 +1217,20 @@ static int dec_lance_probe(struct device *bdev, const int type)
*/
switch (type) {
case ASIC_LANCE:
- printk("%s: IOASIC onboard LANCE", name);
+ desc = "IOASIC onboard LANCE";
break;
case PMAD_LANCE:
- printk("%s: PMAD-AA", name);
+ desc = "PMAD-AA";
break;
case PMAX_LANCE:
- printk("%s: PMAX onboard LANCE", name);
+ desc = "PMAX onboard LANCE";
break;
}
for (i = 0; i < 6; i++)
dev->dev_addr[i] = esar[i * 4];
- printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
+ printk("%s: %s, addr = %pM, irq = %d\n",
+ name, desc, dev->dev_addr, dev->irq);
dev->netdev_ops = &lance_netdev_ops;
dev->watchdog_timeo = 5*HZ;
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 024998d6d8c6..6a8e2567f2bd 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -154,7 +154,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
static void bmac_set_timeout(struct net_device *dev);
static void bmac_tx_timeout(struct timer_list *t);
-static int bmac_output(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev);
static void bmac_start(struct net_device *dev);
#define DBDMA_SET(x) ( ((x) | (x) << 16) )
@@ -1456,7 +1456,7 @@ bmac_start(struct net_device *dev)
spin_unlock_irqrestore(&bp->lock, flags);
}
-static int
+static netdev_tx_t
bmac_output(struct sk_buff *skb, struct net_device *dev)
{
struct bmac_data *bp = netdev_priv(dev);
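This int-to-netdev_tx_t conversion repeats below for mace, macmace, ep93xx, mac89x0, ether1, lib82596 and sun3_82586; all of them bring the handlers in line with the prototype .ndo_start_xmit expects. A minimal sketch of the required shape (the body is illustrative):

	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
					      struct net_device *dev)
	{
		/* ... queue skb to the hardware ... */
		return NETDEV_TX_OK;	/* netdev_tx_t, not int */
	}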
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index 0b5429d76bcf..68b9ee489489 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -78,7 +78,7 @@ struct mace_data {
static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static void mace_reset(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
@@ -525,7 +525,7 @@ static inline void mace_set_timeout(struct net_device *dev)
mp->timeout_active = 1;
}
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct dbdma_regs __iomem *td = mp->tx_dma;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 137cbb470af2..376f2c2613e7 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -89,7 +89,7 @@ struct mace_frame {
static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static void mace_reset(struct net_device *dev);
@@ -444,7 +444,7 @@ static int mace_close(struct net_device *dev)
* Transmit a frame
*/
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
struct mace_data *mp = netdev_priv(dev);
unsigned long flags;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index b5f1f62e8e25..d1e1a0ba8615 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -225,9 +225,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
}
/* for single fragment packets use build_skb() */
- if (buff->is_eop) {
+ if (buff->is_eop &&
+ buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
skb = build_skb(page_address(buff->page),
- buff->len + AQ_SKB_ALIGN);
+ AQ_CFG_RX_FRAME_MAX);
if (unlikely(!skb)) {
err = -ENOMEM;
goto err_exit;
@@ -247,18 +248,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
buff->len - ETH_HLEN,
SKB_TRUESIZE(buff->len - ETH_HLEN));
- for (i = 1U, next_ = buff->next,
- buff_ = &self->buff_ring[next_]; true;
- next_ = buff_->next,
- buff_ = &self->buff_ring[next_], ++i) {
- skb_add_rx_frag(skb, i, buff_->page, 0,
- buff_->len,
- SKB_TRUESIZE(buff->len -
- ETH_HLEN));
- buff_->is_cleaned = 1;
-
- if (buff_->is_eop)
- break;
+ if (!buff->is_eop) {
+ for (i = 1U, next_ = buff->next,
+ buff_ = &self->buff_ring[next_];
+ true; next_ = buff_->next,
+ buff_ = &self->buff_ring[next_], ++i) {
+ skb_add_rx_frag(skb, i,
+ buff_->page, 0,
+ buff_->len,
+ SKB_TRUESIZE(buff->len -
+ ETH_HLEN));
+ buff_->is_cleaned = 1;
+
+ if (buff_->is_eop)
+ break;
+ }
}
}
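The new length check encodes build_skb()'s tail-room rule: the buffer handed over must still have space at its end for struct skb_shared_info, otherwise the frag path has to be taken. A hedged sketch, with FRAME_MAX standing in for the driver's AQ_CFG_RX_FRAME_MAX:

	static bool fits_build_skb(unsigned int len)
	{
		/* build_skb(data, frag_size) reserves the buffer tail for
		 * struct skb_shared_info, so the payload must fit in what
		 * remains.
		 */
		unsigned int room = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

		return len <= FRAME_MAX - room;	/* else take the frag path */
	}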
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 147045757b10..c57238fce863 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1069,9 +1069,6 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
u32 reg;
- /* Stop monitoring MPD interrupt */
- intrl2_0_mask_set(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
-
/* Disable RXCHK, active filters and Broadcom tag matching */
reg = rxchk_readl(priv, RXCHK_CONTROL);
reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
@@ -1081,6 +1078,17 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
/* Clear the MagicPacket detection logic */
mpd_enable_set(priv, false);
+ reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
+ if (reg & INTRL2_0_MPD)
+ netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
+
+ if (reg & INTRL2_0_BRCM_MATCH_TAG) {
+ reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
+ RXCHK_BRCM_TAG_MATCH_MASK;
+ netdev_info(priv->netdev,
+ "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
+ }
+
netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}
@@ -1105,7 +1113,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
struct bcm_sysport_priv *priv = netdev_priv(dev);
struct bcm_sysport_tx_ring *txr;
unsigned int ring, ring_bit;
- u32 reg;
priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
@@ -1131,16 +1138,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
bcm_sysport_tx_reclaim_all(priv);
- if (priv->irq0_stat & INTRL2_0_MPD)
- netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
-
- if (priv->irq0_stat & INTRL2_0_BRCM_MATCH_TAG) {
- reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
- RXCHK_BRCM_TAG_MATCH_MASK;
- netdev_info(priv->netdev,
- "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
- }
-
if (!priv->is_lite)
goto out;
@@ -2641,9 +2638,6 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
/* UniMAC receive needs to be turned on */
umac_enable_set(priv, CMD_RX_EN, 1);
- /* Enable the interrupt wake-up source */
- intrl2_0_mask_clear(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
-
netif_dbg(priv, wol, ndev, "entered WOL mode\n");
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 71362b7f6040..fcc2328bb0d9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12894,19 +12894,6 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void poll_bnx2x(struct net_device *dev)
-{
- struct bnx2x *bp = netdev_priv(dev);
- int i;
-
- for_each_eth_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
- napi_schedule(&bnx2x_fp(bp, fp->index, napi));
- }
-}
-#endif
-
static int bnx2x_validate_addr(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -13113,9 +13100,6 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_tx_timeout = bnx2x_tx_timeout,
.ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = poll_bnx2x,
-#endif
.ndo_setup_tc = __bnx2x_setup_tc,
#ifdef CONFIG_BNX2X_SRIOV
.ndo_set_vf_mac = bnx2x_set_vf_mac,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index cecbb1d1f587..e2d92548226a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1884,8 +1884,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
tx_pkts++;
/* return full budget so NAPI will complete. */
- if (unlikely(tx_pkts > bp->tx_wake_thresh))
+ if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
rx_pkts = budget;
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ break;
+ }
} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
if (likely(budget))
rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
@@ -1913,7 +1916,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
}
raw_cons = NEXT_RAW_CMP(raw_cons);
- if (rx_pkts == budget)
+ if (rx_pkts && rx_pkts == budget)
break;
}
@@ -2027,8 +2030,12 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
while (1) {
work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
- if (work_done >= budget)
+ if (work_done >= budget) {
+ if (!budget)
+ BNXT_CP_DB_REARM(cpr->cp_doorbell,
+ cpr->cp_raw_cons);
break;
+ }
if (!bnxt_has_work(bp, cpr)) {
if (napi_complete_done(napi, work_done))
@@ -3010,10 +3017,11 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
{
struct pci_dev *pdev = bp->pdev;
- dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
- bp->hwrm_cmd_resp_dma_addr);
-
- bp->hwrm_cmd_resp_addr = NULL;
+ if (bp->hwrm_cmd_resp_addr) {
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
+ bp->hwrm_cmd_resp_dma_addr);
+ bp->hwrm_cmd_resp_addr = NULL;
+ }
}
static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
@@ -4643,7 +4651,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
enables |= ring_grps ?
FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
- enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+ enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
req->num_rx_rings = cpu_to_le16(rx_rings);
req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@@ -7672,21 +7680,6 @@ static void bnxt_tx_timeout(struct net_device *dev)
bnxt_queue_sp_work(bp);
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void bnxt_poll_controller(struct net_device *dev)
-{
- struct bnxt *bp = netdev_priv(dev);
- int i;
-
- /* Only process tx rings/combined rings in netpoll mode. */
- for (i = 0; i < bp->tx_nr_rings; i++) {
- struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
-
- napi_schedule(&txr->bnapi->napi);
- }
-}
-#endif
-
static void bnxt_timer(struct timer_list *t)
{
struct bnxt *bp = from_timer(bp, t, timer);
@@ -8027,7 +8020,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
if (ether_addr_equal(addr->sa_data, dev->dev_addr))
return 0;
- rc = bnxt_approve_mac(bp, addr->sa_data);
+ rc = bnxt_approve_mac(bp, addr->sa_data, true);
if (rc)
return rc;
@@ -8520,9 +8513,6 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
.ndo_set_vf_trust = bnxt_set_vf_trust,
#endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = bnxt_poll_controller,
-#endif
.ndo_setup_tc = bnxt_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = bnxt_rx_flow_steer,
@@ -8632,7 +8622,7 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
*max_tx = hw_resc->max_tx_rings;
*max_rx = hw_resc->max_rx_rings;
*max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
- hw_resc->max_irqs);
+ hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp));
*max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
max_ring_grps = hw_resc->max_hw_ring_grps;
if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
@@ -8827,14 +8817,19 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
} else {
#ifdef CONFIG_BNXT_SRIOV
struct bnxt_vf_info *vf = &bp->vf;
+ bool strict_approval = true;
if (is_valid_ether_addr(vf->mac_addr)) {
/* overwrite netdev dev_addr with admin VF MAC */
memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+ /* An older PF driver or firmware may not approve this
+ * correctly.
+ */
+ strict_approval = false;
} else {
eth_hw_addr_random(bp->dev);
}
- rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
+ rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
#endif
}
return rc;
@@ -9063,6 +9058,7 @@ init_err_cleanup_tc:
bnxt_clear_int_mode(bp);
init_err_pci_clean:
+ bnxt_free_hwrm_resources(bp);
bnxt_cleanup_pci(bp);
init_err_free:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index ddc98c359488..a85d2be986af 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -98,13 +98,13 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
for (i = 0; i < max_tc; i++) {
- u8 qidx;
+ u8 qidx = bp->tc_to_qidx[i];
req.enables |= cpu_to_le32(
- QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);
+ QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID <<
+ qidx);
memset(&cos2bw, 0, sizeof(cos2bw));
- qidx = bp->tc_to_qidx[i];
cos2bw.queue_id = bp->q_info[qidx].queue_id;
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
cos2bw.tsa =
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index f3b9fbcc705b..790c684f08ab 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -46,6 +46,9 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
}
}
+ if (i == ARRAY_SIZE(nvm_params))
+ return -EOPNOTSUPP;
+
if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
idx = bp->pf.port_id;
else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index fcd085a9853a..3962f6fd543c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -1104,7 +1104,7 @@ update_vf_mac_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
}
-int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
+int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
struct hwrm_func_vf_cfg_input req = {0};
int rc = 0;
@@ -1122,12 +1122,13 @@ int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
mac_done:
- if (rc) {
+ if (rc && strict) {
rc = -EADDRNOTAVAIL;
netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
mac);
+ return rc;
}
- return rc;
+ return 0;
}
#else
@@ -1144,7 +1145,7 @@ void bnxt_update_vf_mac(struct bnxt *bp)
{
}
-int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
+int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
{
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index e9b20cd19881..2eed9eda1195 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -39,5 +39,5 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
void bnxt_sriov_disable(struct bnxt *);
void bnxt_hwrm_exec_fwd_req(struct bnxt *);
void bnxt_update_vf_mac(struct bnxt *);
-int bnxt_approve_mac(struct bnxt *, u8 *);
+int bnxt_approve_mac(struct bnxt *, u8 *, bool);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 092c817f8f11..e1594c9df4c6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -75,17 +75,23 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
return 0;
}
-static void bnxt_tc_parse_vlan(struct bnxt *bp,
- struct bnxt_tc_actions *actions,
- const struct tc_action *tc_act)
+static int bnxt_tc_parse_vlan(struct bnxt *bp,
+ struct bnxt_tc_actions *actions,
+ const struct tc_action *tc_act)
{
- if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
+ switch (tcf_vlan_action(tc_act)) {
+ case TCA_VLAN_ACT_POP:
actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
- } else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
+ break;
+ case TCA_VLAN_ACT_PUSH:
actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
+ break;
+ default:
+ return -EOPNOTSUPP;
}
+ return 0;
}
static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
@@ -134,7 +140,9 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
/* Push/pop VLAN */
if (is_tcf_vlan(tc_act)) {
- bnxt_tc_parse_vlan(bp, actions, tc_act);
+ rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
+ if (rc)
+ return rc;
continue;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 4241ae928d4a..34af5f1569c8 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -321,9 +321,12 @@ int bcmgenet_mii_probe(struct net_device *dev)
phydev->advertising = phydev->supported;
/* The internal PHY has its link interrupts routed to the
- * Ethernet MAC ISRs
+ * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
+ * that prevents the signaling of link UP interrupts when
+ * the link operates at 10Mbps, so fall back to polling for
+ * those versions of GENET.
*/
- if (priv->internal_phy)
+ if (priv->internal_phy && !GENET_IS_V5(priv))
dev->phydev->irq = PHY_IGNORE_INTERRUPT;
return 0;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 16e4ef7d7185..58b9744c4058 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -2160,6 +2160,7 @@ static void macb_configure_dma(struct macb *bp)
else
dmacfg &= ~GEM_BIT(TXCOEN);
+ dmacfg &= ~GEM_BIT(ADDR64);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (bp->hw_dma_cap & HW_DMA_CAP_64B)
dmacfg |= GEM_BIT(ADDR64);
@@ -3837,6 +3838,13 @@ static const struct macb_config at91sam9260_config = {
.init = macb_init,
};
+static const struct macb_config sama5d3macb_config = {
+ .caps = MACB_CAPS_SG_DISABLED
+ | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+};
+
static const struct macb_config pc302gem_config = {
.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
.dma_burst_length = 16,
@@ -3904,6 +3912,7 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,gem", .data = &pc302gem_config },
{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
+ { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
{ .compatible = "cdns,emac", .data = &emac_config },
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index a19172dbe6be..c34ea385fe4a 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -2159,6 +2159,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EPERM;
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_SET_QSET_PARAMS)
+ return -EINVAL;
if (t.qset_idx >= SGE_QSETS)
return -EINVAL;
if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
@@ -2258,6 +2260,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_GET_QSET_PARAMS)
+ return -EINVAL;
+
/* Display qsets for all ports when offload enabled */
if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
q1 = 0;
@@ -2303,6 +2308,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EBUSY;
if (copy_from_user(&edata, useraddr, sizeof(edata)))
return -EFAULT;
+ if (edata.cmd != CHELSIO_SET_QSET_NUM)
+ return -EINVAL;
if (edata.val < 1 ||
(edata.val > 1 && !(adapter->flags & USING_MSIX)))
return -EINVAL;
@@ -2343,6 +2350,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EPERM;
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_LOAD_FW)
+ return -EINVAL;
/* Check t.len sanity ? */
fw_data = memdup_user(useraddr + sizeof(t), t.len);
if (IS_ERR(fw_data))
@@ -2366,6 +2375,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EBUSY;
if (copy_from_user(&m, useraddr, sizeof(m)))
return -EFAULT;
+ if (m.cmd != CHELSIO_SETMTUTAB)
+ return -EINVAL;
if (m.nmtus != NMTUS)
return -EINVAL;
if (m.mtus[0] < 81) /* accommodate SACK */
@@ -2407,6 +2418,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EBUSY;
if (copy_from_user(&m, useraddr, sizeof(m)))
return -EFAULT;
+ if (m.cmd != CHELSIO_SET_PM)
+ return -EINVAL;
if (!is_power_of_2(m.rx_pg_sz) ||
!is_power_of_2(m.tx_pg_sz))
return -EINVAL; /* not power of 2 */
@@ -2440,6 +2453,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EIO; /* need the memory controllers */
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_GET_MEM)
+ return -EINVAL;
if ((t.addr & 7) || (t.len & 7))
return -EINVAL;
if (t.mem_id == MEM_CM)
@@ -2492,6 +2507,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EAGAIN;
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_SET_TRACE_FILTER)
+ return -EINVAL;
tp = (const struct trace_params *)&t.sip;
if (t.config_tx)
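Every cxgb3 hunk above is the same hardening: after copy_from_user(), reject a request whose embedded cmd field does not name the sub-ioctl being handled, so one request type can never be parsed as another. The shape of the check, schematically (the struct name is assumed from the cxgb3 ioctl header):

	static int handle_set_qset_params(void __user *useraddr)
	{
		struct ch_qset_params t;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.cmd != CHELSIO_SET_QSET_PARAMS)	/* type confusion guard */
			return -EINVAL;
		/* ... handle the now-validated request ... */
		return 0;
	}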
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index b8f75a22fb6c..f152da1ce046 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -753,7 +753,6 @@ struct cpl_abort_req_rss {
};
struct cpl_abort_req_rss6 {
- WR_HDR;
union opcode_tid ot;
__be32 srqidx_status;
};
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index e2a702996db4..13dfdfca49fc 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -332,7 +332,7 @@ static int ep93xx_poll(struct napi_struct *napi, int budget)
return rx;
}
-static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ep93xx_priv *ep = netdev_priv(dev);
struct ep93xx_tdesc *txd;
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 3f8fe8fd79cc..6324e80960c3 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -113,7 +113,7 @@ struct net_local {
/* Index to functions, as function prototypes. */
static int net_open(struct net_device *dev);
-static int net_send_packet(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t net_interrupt(int irq, void *dev_id);
static void set_multicast_list(struct net_device *dev);
static void net_rx(struct net_device *dev);
@@ -324,7 +324,7 @@ net_open(struct net_device *dev)
return 0;
}
-static int
+static netdev_tx_t
net_send_packet(struct sk_buff *skb, struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index ff92ab1daeb8..1e9d882c04ef 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -4500,7 +4500,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
port_res->max_vfs += le16_to_cpu(pcie->num_vfs);
}
}
- return status;
+ goto err;
}
pcie = be_get_pcie_desc(resp->func_param, desc_count,
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 74d122616e76..534787291b44 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4002,8 +4002,6 @@ static int be_enable_vxlan_offloads(struct be_adapter *adapter)
netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL;
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
- netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
be16_to_cpu(port));
@@ -4025,8 +4023,6 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
adapter->vxlan_port = 0;
netdev->hw_enc_features = 0;
- netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
- netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
@@ -5320,6 +5316,7 @@ static void be_netdev_init(struct net_device *netdev)
struct be_adapter *adapter = netdev_priv(netdev);
netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX;
if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 4778b663653e..bf80855dd0dd 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -452,6 +452,10 @@ struct bufdesc_ex {
* initialisation.
*/
#define FEC_QUIRK_MIB_CLEAR (1 << 15)
+/* Only the i.MX25/i.MX27/i.MX28 controllers support the FRBR and FRSR
+ * registers; those FIFO receive registers are reserved on other platforms.
+ */
+#define FEC_QUIRK_HAS_FRREG (1 << 16)
struct bufdesc_prop {
int qid;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 2708297e7795..7b98bb75ba8a 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -91,14 +91,16 @@ static struct platform_device_id fec_devtype[] = {
.driver_data = 0,
}, {
.name = "imx25-fec",
- .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR,
+ .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
+ FEC_QUIRK_HAS_FRREG,
}, {
.name = "imx27-fec",
- .driver_data = FEC_QUIRK_MIB_CLEAR,
+ .driver_data = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
}, {
.name = "imx28-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
- FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
+ FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
+ FEC_QUIRK_HAS_FRREG,
}, {
.name = "imx6q-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
@@ -1158,7 +1160,7 @@ static void fec_enet_timeout_work(struct work_struct *work)
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_restart(ndev);
- netif_wake_queue(ndev);
+ netif_tx_wake_all_queues(ndev);
netif_tx_unlock_bh(ndev);
napi_enable(&fep->napi);
}
@@ -1273,7 +1275,7 @@ skb_done:
/* Since we have freed up a buffer, the ring is no longer full
*/
- if (netif_queue_stopped(ndev)) {
+ if (netif_tx_queue_stopped(nq)) {
entries_free = fec_enet_get_free_txdesc_num(txq);
if (entries_free >= txq->tx_wake_threshold)
netif_tx_wake_queue(nq);
@@ -1746,7 +1748,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_restart(ndev);
- netif_wake_queue(ndev);
+ netif_tx_wake_all_queues(ndev);
netif_tx_unlock_bh(ndev);
napi_enable(&fep->napi);
}
@@ -2164,7 +2166,13 @@ static void fec_enet_get_regs(struct net_device *ndev,
memset(buf, 0, regs->len);
for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
- off = fec_enet_register_offset[i] / 4;
+ off = fec_enet_register_offset[i];
+
+ if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
+ !(fep->quirks & FEC_QUIRK_HAS_FRREG))
+ continue;
+
+ off >>= 2;
buf[off] = readl(&theregs[off]);
}
}
@@ -2247,7 +2255,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
fec_restart(ndev);
- netif_wake_queue(ndev);
+ netif_tx_wake_all_queues(ndev);
netif_tx_unlock_bh(ndev);
napi_enable(&fep->napi);
}
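The three netif_wake_queue() -> netif_tx_wake_all_queues() substitutions matter because the FEC can expose several TX queues and netif_wake_queue() only restarts queue 0. The contrast, assuming every queue was stopped across fec_restart():

	netif_wake_queue(ndev);			/* wakes only TX queue 0 */
	netif_tx_wake_all_queues(ndev);		/* wakes all TX queues */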
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index a051e582d541..79d03f8ee7b1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -84,7 +84,7 @@ static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
if (cb->type == DESC_TYPE_SKB)
dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring));
- else
+ else if (cb->length)
dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring));
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index f56855e63c96..28e907831b0e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -40,9 +40,9 @@
#define SKB_TMP_LEN(SKB) \
(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
-static void fill_v2_desc(struct hnae_ring *ring, void *priv,
- int size, dma_addr_t dma, int frag_end,
- int buf_num, enum hns_desc_type type, int mtu)
+static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
+ int send_sz, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu)
{
struct hnae_desc *desc = &ring->desc[ring->next_to_use];
struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -64,7 +64,7 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
desc_cb->type = type;
desc->addr = cpu_to_le64(dma);
- desc->tx.send_size = cpu_to_le16((u16)size);
+ desc->tx.send_size = cpu_to_le16((u16)send_sz);
/* config bd buffer end */
hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
@@ -133,6 +133,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
ring_ptr_move_fw(ring, next_to_use);
}
+static void fill_v2_desc(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu)
+{
+ fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
+ buf_num, type, mtu);
+}
+
static const struct acpi_device_id hns_enet_acpi_match[] = {
{ "HISI00C1", 0 },
{ "HISI00C2", 0 },
@@ -289,15 +297,15 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
/* when the frag size is bigger than hardware, split this frag */
for (k = 0; k < frag_buf_num; k++)
- fill_v2_desc(ring, priv,
- (k == frag_buf_num - 1) ?
+ fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
+ (k == frag_buf_num - 1) ?
sizeoflast : BD_MAX_SEND_SIZE,
- dma + BD_MAX_SEND_SIZE * k,
- frag_end && (k == frag_buf_num - 1) ? 1 : 0,
- buf_num,
- (type == DESC_TYPE_SKB && !k) ?
+ dma + BD_MAX_SEND_SIZE * k,
+ frag_end && (k == frag_buf_num - 1) ? 1 : 0,
+ buf_num,
+ (type == DESC_TYPE_SKB && !k) ?
DESC_TYPE_SKB : DESC_TYPE_PAGE,
- mtu);
+ mtu);
}
netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
@@ -1495,21 +1503,6 @@ static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
return phy_mii_ioctl(phy_dev, ifr, cmd);
}
-/* use only for netconsole to poll with the device without interrupt */
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hns_nic_poll_controller(struct net_device *ndev)
-{
- struct hns_nic_priv *priv = netdev_priv(ndev);
- unsigned long flags;
- int i;
-
- local_irq_save(flags);
- for (i = 0; i < priv->ae_handle->q_num * 2; i++)
- napi_schedule(&priv->ring_data[i].napi);
- local_irq_restore(flags);
-}
-#endif
-
static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
@@ -1962,9 +1955,6 @@ static const struct net_device_ops hns_nic_netdev_ops = {
.ndo_set_features = hns_nic_set_features,
.ndo_fix_features = hns_nic_fix_features,
.ndo_get_stats64 = hns_nic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = hns_nic_poll_controller,
-#endif
.ndo_set_rx_mode = hns_nic_set_rx_mode,
.ndo_select_queue = hns_nic_select_queue,
};
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index c8c7ad2eff77..9b5a68b65432 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -2634,7 +2634,7 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
/* Wait for link to drop */
time = jiffies + (HZ / 10);
do {
- if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
+ if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
break;
if (!in_interrupt())
schedule_timeout_interruptible(1);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 09e9da10b786..4a8f82938ed5 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -789,23 +789,6 @@ static void hinic_get_stats64(struct net_device *netdev,
stats->tx_errors = nic_tx_stats->tx_dropped;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hinic_netpoll(struct net_device *netdev)
-{
- struct hinic_dev *nic_dev = netdev_priv(netdev);
- int i, num_qps;
-
- num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
- for (i = 0; i < num_qps; i++) {
- struct hinic_txq *txq = &nic_dev->txqs[i];
- struct hinic_rxq *rxq = &nic_dev->rxqs[i];
-
- napi_schedule(&txq->napi);
- napi_schedule(&rxq->napi);
- }
-}
-#endif
-
static const struct net_device_ops hinic_netdev_ops = {
.ndo_open = hinic_open,
.ndo_stop = hinic_close,
@@ -818,9 +801,6 @@ static const struct net_device_ops hinic_netdev_ops = {
.ndo_start_xmit = hinic_xmit_frame,
.ndo_tx_timeout = hinic_tx_timeout,
.ndo_get_stats64 = hinic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = hinic_netpoll,
-#endif
};
static void netdev_features_init(struct net_device *netdev)
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
index dc983450354b..35f6291a3672 100644
--- a/drivers/net/ethernet/i825xx/ether1.c
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -64,7 +64,8 @@ static unsigned int net_debug = NET_DEBUG;
#define RX_AREA_END 0x0fc00
static int ether1_open(struct net_device *dev);
-static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t ether1_sendpacket(struct sk_buff *skb,
+ struct net_device *dev);
static irqreturn_t ether1_interrupt(int irq, void *dev_id);
static int ether1_close(struct net_device *dev);
static void ether1_setmulticastlist(struct net_device *dev);
@@ -667,7 +668,7 @@ ether1_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
-static int
+static netdev_tx_t
ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
{
int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr;
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index f00a1dc2128c..2f7ae118217f 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -347,7 +347,7 @@ static const char init_setup[] =
0x7f /* *multi IA */ };
static int i596_open(struct net_device *dev);
-static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
@@ -966,7 +966,7 @@ static void i596_tx_timeout (struct net_device *dev)
}
-static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct i596_private *lp = netdev_priv(dev);
struct tx_cmd *tx_cmd;
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 8bb15a8c2a40..1a86184d44c0 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -121,7 +121,8 @@ static int sun3_82586_probe1(struct net_device *dev,int ioaddr);
static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id);
static int sun3_82586_open(struct net_device *dev);
static int sun3_82586_close(struct net_device *dev);
-static int sun3_82586_send_packet(struct sk_buff *,struct net_device *);
+static netdev_tx_t sun3_82586_send_packet(struct sk_buff *,
+ struct net_device *);
static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void sun3_82586_timeout(struct net_device *dev);
@@ -1002,7 +1003,8 @@ static void sun3_82586_timeout(struct net_device *dev)
* send frame
*/
-static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
{
int len,i;
#ifndef NO_NOPCOMMANDS
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index ba580bfae512..03f64f40b2a3 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -921,17 +921,6 @@ static int ehea_poll(struct napi_struct *napi, int budget)
return rx;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ehea_netpoll(struct net_device *dev)
-{
- struct ehea_port *port = netdev_priv(dev);
- int i;
-
- for (i = 0; i < port->num_def_qps; i++)
- napi_schedule(&port->port_res[i].napi);
-}
-#endif
-
static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
struct ehea_port_res *pr = param;
@@ -2953,9 +2942,6 @@ static const struct net_device_ops ehea_netdev_ops = {
.ndo_open = ehea_open,
.ndo_stop = ehea_stop,
.ndo_start_xmit = ehea_start_xmit,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ehea_netpoll,
-#endif
.ndo_get_stats64 = ehea_get_stats64,
.ndo_set_mac_address = ehea_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 372664686309..129f4e9f38da 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2677,12 +2677,17 @@ static int emac_init_phy(struct emac_instance *dev)
if (of_phy_is_fixed_link(np)) {
int res = emac_dt_mdio_probe(dev);
- if (!res) {
- res = of_phy_register_fixed_link(np);
- if (res)
- mdiobus_unregister(dev->mii_bus);
+ if (res)
+ return res;
+
+ res = of_phy_register_fixed_link(np);
+ dev->phy_dev = of_phy_find_device(np);
+ if (res || !dev->phy_dev) {
+ mdiobus_unregister(dev->mii_bus);
+ return res ? res : -EINVAL;
}
- return res;
+ emac_adjust_link(dev->ndev);
+ put_device(&dev->phy_dev->mdio.dev);
}
return 0;
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 4f0daf67b18d..699ef942b615 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2207,19 +2207,6 @@ restart_poll:
return frames_processed;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ibmvnic_netpoll_controller(struct net_device *dev)
-{
- struct ibmvnic_adapter *adapter = netdev_priv(dev);
- int i;
-
- replenish_pools(netdev_priv(dev));
- for (i = 0; i < adapter->req_rx_queues; i++)
- ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
- adapter->rx_scrq[i]);
-}
-#endif
-
static int wait_for_reset(struct ibmvnic_adapter *adapter)
{
int rc, ret;
@@ -2292,9 +2279,6 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
.ndo_set_mac_address = ibmvnic_set_mac,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = ibmvnic_tx_timeout,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ibmvnic_netpoll_controller,
-#endif
.ndo_change_mtu = ibmvnic_change_mtu,
.ndo_features_check = ibmvnic_features_check,
};
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index a903a0ba45e1..7d42582ed48d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -504,9 +504,6 @@ void fm10k_update_stats(struct fm10k_intfc *interface);
void fm10k_service_event_schedule(struct fm10k_intfc *interface);
void fm10k_macvlan_schedule(struct fm10k_intfc *interface);
void fm10k_update_rx_drop_en(struct fm10k_intfc *interface);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-void fm10k_netpoll(struct net_device *netdev);
-#endif
/* Netdev */
struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 929f538d28bc..538a8467f434 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1648,9 +1648,6 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_udp_tunnel_del = fm10k_udp_tunnel_del,
.ndo_dfwd_add_station = fm10k_dfwd_add_station,
.ndo_dfwd_del_station = fm10k_dfwd_del_station,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = fm10k_netpoll,
-#endif
.ndo_features_check = fm10k_features_check,
};
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 15071e4adb98..c859ababeed5 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1210,28 +1210,6 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * fm10k_netpoll - A Polling 'interrupt' handler
- * @netdev: network interface device structure
- *
- * This is used by netconsole to send skbs without having to re-enable
- * interrupts. It's not called while the normal interrupt routine is executing.
- **/
-void fm10k_netpoll(struct net_device *netdev)
-{
- struct fm10k_intfc *interface = netdev_priv(netdev);
- int i;
-
- /* if interface is down do nothing */
- if (test_bit(__FM10K_DOWN, interface->state))
- return;
-
- for (i = 0; i < interface->num_q_vectors; i++)
- fm10k_msix_clean_rings(0, interface->q_vector[i]);
-}
-
-#endif
#define FM10K_ERR_MSG(type) case (type): error = #type; break
static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
struct fm10k_fault *fault)
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 5906c1c1d19d..fef6d892ed4c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -396,29 +396,6 @@ static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * i40evf_netpoll - A Polling 'interrupt' handler
- * @netdev: network interface device structure
- *
- * This is used by netconsole to send skbs without having to re-enable
- * interrupts. It's not called while the normal interrupt routine is executing.
- **/
-static void i40evf_netpoll(struct net_device *netdev)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
- int i;
-
- /* if interface is down do nothing */
- if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state))
- return;
-
- for (i = 0; i < q_vectors; i++)
- i40evf_msix_clean_rings(0, &adapter->q_vectors[i]);
-}
-
-#endif
/**
* i40evf_irq_affinity_notify - Callback for affinity changes
* @notify: context as to what irq was changed
@@ -3229,9 +3206,6 @@ static const struct net_device_ops i40evf_netdev_ops = {
.ndo_features_check = i40evf_features_check,
.ndo_fix_features = i40evf_fix_features,
.ndo_set_features = i40evf_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = i40evf_netpoll,
-#endif
.ndo_setup_tc = i40evf_setup_tc,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index f1e80eed2fd6..3f047bb43348 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -4806,30 +4806,6 @@ void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
stats->rx_length_errors = vsi_stats->rx_length_errors;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * ice_netpoll - polling "interrupt" handler
- * @netdev: network interface device structure
- *
- * Used by netconsole to send skbs without having to re-enable interrupts.
- * This is not called in the normal interrupt path.
- */
-static void ice_netpoll(struct net_device *netdev)
-{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
- int i;
-
- if (test_bit(__ICE_DOWN, vsi->state) ||
- !test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- return;
-
- for (i = 0; i < vsi->num_q_vectors; i++)
- ice_msix_clean_rings(0, vsi->q_vectors[i]);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
/**
* ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
* @vsi: VSI having NAPI disabled
@@ -5497,9 +5473,6 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ice_netpoll,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
.ndo_set_features = ice_set_features,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a32c576c1e65..0796cef96fa3 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -205,10 +205,6 @@ static struct notifier_block dca_notifier = {
.priority = 0
};
#endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* for netdump / net console */
-static void igb_netpoll(struct net_device *);
-#endif
#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
@@ -2881,9 +2877,6 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
.ndo_set_vf_trust = igb_ndo_set_vf_trust,
.ndo_get_vf_config = igb_ndo_get_vf_config,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = igb_netpoll,
-#endif
.ndo_fix_features = igb_fix_features,
.ndo_set_features = igb_set_features,
.ndo_fdb_add = igb_ndo_fdb_add,
@@ -9053,29 +9046,6 @@ static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
return 0;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void igb_netpoll(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
- struct igb_q_vector *q_vector;
- int i;
-
- for (i = 0; i < adapter->num_q_vectors; i++) {
- q_vector = adapter->q_vector[i];
- if (adapter->flags & IGB_FLAG_HAS_MSIX)
- wr32(E1000_EIMC, q_vector->eims_value);
- else
- igb_irq_disable(adapter);
- napi_schedule(&q_vector->napi);
- }
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
/**
* igb_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index d3e72d0f66ef..7722153c4ac2 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -81,11 +81,6 @@ static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* for netdump / net console */
-static void ixgb_netpoll(struct net_device *dev);
-#endif
-
static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
enum pci_channel_state state);
static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
@@ -348,9 +343,6 @@ static const struct net_device_ops ixgb_netdev_ops = {
.ndo_tx_timeout = ixgb_tx_timeout,
.ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ixgb_netpoll,
-#endif
.ndo_fix_features = ixgb_fix_features,
.ndo_set_features = ixgb_set_features,
};
@@ -2195,23 +2187,6 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-
-static void ixgb_netpoll(struct net_device *dev)
-{
- struct ixgb_adapter *adapter = netdev_priv(dev);
-
- disable_irq(adapter->pdev->irq);
- ixgb_intr(adapter->pdev->irq, dev);
- enable_irq(adapter->pdev->irq);
-}
-#endif
-
/**
* ixgb_io_error_detected - called when PCI error is detected
* @pdev: pointer to pci device with error
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 9a23d33a47ed..6cdd58d9d461 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3196,11 +3196,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
return budget;
/* all work done, exit the polling mode */
- napi_complete_done(napi, work_done);
- if (adapter->rx_itr_setting & 1)
- ixgbe_set_itr(q_vector);
- if (!test_bit(__IXGBE_DOWN, &adapter->state))
- ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
+ if (likely(napi_complete_done(napi, work_done))) {
+ if (adapter->rx_itr_setting & 1)
+ ixgbe_set_itr(q_vector);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ ixgbe_irq_enable_queues(adapter,
+ BIT_ULL(q_vector->v_idx));
+ }
return min(work_done, budget - 1);
}
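The guard follows the NAPI contract: napi_complete_done() can return false (for instance while busy polling keeps the instance scheduled), and interrupts must then stay masked. A hedged sketch of the resulting poll shape; do_work() and rearm_irq() are stand-ins:

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int work_done = do_work(napi, budget);	/* illustrative */

		if (work_done < budget && napi_complete_done(napi, work_done))
			rearm_irq(napi);	/* only when truly completed */

		return work_done;
	}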
@@ -8768,28 +8770,6 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev)
return err;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void ixgbe_netpoll(struct net_device *netdev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int i;
-
- /* if interface is down do nothing */
- if (test_bit(__IXGBE_DOWN, &adapter->state))
- return;
-
- /* loop through and schedule all active queues */
- for (i = 0; i < adapter->num_q_vectors; i++)
- ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
-}
-
-#endif
-
static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
struct ixgbe_ring *ring)
{
@@ -10251,9 +10231,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
.ndo_get_stats64 = ixgbe_get_stats64,
.ndo_setup_tc = __ixgbe_setup_tc,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ixgbe_netpoll,
-#endif
#ifdef IXGBE_FCOE
.ndo_select_queue = ixgbe_select_queue,
.ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index d86446d202d5..5a228582423b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4233,24 +4233,6 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void ixgbevf_netpoll(struct net_device *netdev)
-{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- int i;
-
- /* if interface is down do nothing */
- if (test_bit(__IXGBEVF_DOWN, &adapter->state))
- return;
- for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4482,9 +4464,6 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
.ndo_tx_timeout = ixgbevf_tx_timeout,
.ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ixgbevf_netpoll,
-#endif
.ndo_features_check = ixgbevf_features_check,
.ndo_bpf = ixgbevf_xdp,
};
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 7a637b51c7d2..e08301d833e2 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -274,6 +274,7 @@ ltq_etop_hw_init(struct net_device *dev)
struct ltq_etop_chan *ch = &priv->ch[i];
ch->idx = ch->dma.nr = i;
+ ch->dma.dev = &priv->pdev->dev;
if (IS_TX(i)) {
ltq_dma_alloc_tx(&ch->dma);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index bc80a678abc3..b4ed7d394d07 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1890,8 +1890,8 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
if (!data || !(rx_desc->buf_phys_addr))
continue;
- dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
- MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
+ dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(data);
}
}
@@ -2008,8 +2008,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
skb_add_rx_frag(rxq->skb, frag_num, page,
frag_offset, frag_size,
PAGE_SIZE);
- dma_unmap_single(dev->dev.parent, phys_addr,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_page(dev->dev.parent, phys_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
rxq->left_size -= frag_size;
}
} else {
@@ -2039,9 +2039,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
frag_offset, frag_size,
PAGE_SIZE);
- dma_unmap_single(dev->dev.parent, phys_addr,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
+ dma_unmap_page(dev->dev.parent, phys_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
rxq->left_size -= frag_size;
}
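
Note: mvneta maps its receive buffers with dma_map_page(), so releasing them with dma_unmap_single() mismatched the mapping type (and trips CONFIG_DMA_API_DEBUG). The pairing rule, sketched with an assumed PAGE_SIZE buffer:

    /* A DMA mapping must be released by the call matching its creation:
     * dma_map_page() pairs with dma_unmap_page(), never dma_unmap_single().
     */
    dma_addr_t pa = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
    if (dma_mapping_error(dev, pa))
            return -ENOMEM;
    /* ... device DMAs the frame into the page ... */
    dma_unmap_page(dev, pa, PAGE_SIZE, DMA_FROM_DEVICE);
    __free_page(page);
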
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 28500417843e..a74002b43b51 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -58,6 +58,8 @@ static struct {
*/
static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
const struct phylink_link_state *state);
+static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
+ phy_interface_t interface, struct phy_device *phy);
/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE 0
@@ -1723,7 +1725,7 @@ static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
}
/* Set Tx descriptors fields relevant for CSUM calculation */
-static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
+static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
int ip_hdr_len, int l4_proto)
{
u32 command;
@@ -2598,14 +2600,15 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
if (skb->ip_summed == CHECKSUM_PARTIAL) {
int ip_hdr_len = 0;
u8 l4_proto;
+ __be16 l3_proto = vlan_get_protocol(skb);
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (l3_proto == htons(ETH_P_IP)) {
struct iphdr *ip4h = ip_hdr(skb);
/* Calculate IPv4 checksum and L4 checksum */
ip_hdr_len = ip4h->ihl;
l4_proto = ip4h->protocol;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (l3_proto == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = ipv6_hdr(skb);
/* Read l4_protocol from one of IPv6 extra headers */
@@ -2617,7 +2620,7 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
}
return mvpp2_txq_desc_csum(skb_network_offset(skb),
- skb->protocol, ip_hdr_len, l4_proto);
+ l3_proto, ip_hdr_len, l4_proto);
}
return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
@@ -3053,10 +3056,12 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
}
- cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
- if (cause_tx) {
- cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
- mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
+ if (port->has_tx_irqs) {
+ cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+ if (cause_tx) {
+ cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
+ mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
+ }
}
/* Process RX packets */
@@ -3142,6 +3147,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
mvpp22_mode_reconfigure(port);
if (port->phylink) {
+ netif_carrier_off(port->dev);
phylink_start(port->phylink);
} else {
/* Phylink isn't used as of now for ACPI, so the MAC has to be
@@ -3150,9 +3156,10 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
*/
struct phylink_link_state state = {
.interface = port->phy_interface,
- .link = 1,
};
mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state);
+ mvpp2_mac_link_up(port->dev, MLO_AN_INBAND, port->phy_interface,
+ NULL);
}
netif_tx_start_all_queues(port->dev);
@@ -4495,10 +4502,6 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
return;
}
- netif_tx_stop_all_queues(port->dev);
- if (!port->has_phy)
- netif_carrier_off(port->dev);
-
/* Make sure the port is disabled when reconfiguring the mode */
mvpp2_port_disable(port);
@@ -4523,16 +4526,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
mvpp2_port_loopback_set(port, state);
- /* If the port already was up, make sure it's still in the same state */
- if (state->link || !port->has_phy) {
- mvpp2_port_enable(port);
-
- mvpp2_egress_enable(port);
- mvpp2_ingress_enable(port);
- if (!port->has_phy)
- netif_carrier_on(dev);
- netif_tx_wake_all_queues(dev);
- }
+ mvpp2_port_enable(port);
}
static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
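
Note: for VLAN-tagged traffic skb->protocol carries ETH_P_8021Q rather than the encapsulated L3 ethertype, so the TX checksum descriptor was built for the wrong protocol. vlan_get_protocol() (from <linux/if_vlan.h>) looks through the tag; a sketch of its use:

    __be16 l3_proto = vlan_get_protocol(skb);   /* real L3 ethertype */

    if (l3_proto == htons(ETH_P_IP)) {
            /* IPv4: offload the IP header and L4 checksums */
    } else if (l3_proto == htons(ETH_P_IPV6)) {
            /* IPv6: no header checksum, offload the L4 checksum only */
    }
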
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 6785661d1a72..fe49384eba48 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1286,20 +1286,6 @@ out:
mutex_unlock(&mdev->state_lock);
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void mlx4_en_netpoll(struct net_device *dev)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_cq *cq;
- int i;
-
- for (i = 0; i < priv->tx_ring_num[TX]; i++) {
- cq = priv->tx_cq[TX][i];
- napi_schedule(&cq->napi);
- }
-}
-#endif
-
static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
{
u64 reg_id;
@@ -2946,9 +2932,6 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_tx_timeout = mlx4_en_tx_timeout,
.ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = mlx4_en_netpoll,
-#endif
.ndo_set_features = mlx4_en_set_features,
.ndo_fix_features = mlx4_en_fix_features,
.ndo_setup_tc = __mlx4_en_setup_tc,
@@ -2983,9 +2966,6 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
.ndo_get_vf_stats = mlx4_en_get_vf_stats,
.ndo_get_vf_config = mlx4_en_get_vf_config,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = mlx4_en_netpoll,
-#endif
.ndo_set_features = mlx4_en_set_features,
.ndo_fix_features = mlx4_en_fix_features,
.ndo_setup_tc = __mlx4_en_setup_tc,
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 1f3372c1802e..2df92dbd38e1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -240,7 +240,8 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
struct mlx4_dev *dev = &priv->dev;
struct mlx4_eq *eq = &priv->eq_table.eq[vec];
- if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
+ if (!cpumask_available(eq->affinity_mask) ||
+ cpumask_empty(eq->affinity_mask))
return;
hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
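
Note: cpumask_var_t is a pointer only under CONFIG_CPUMASK_OFFSTACK; otherwise it is an on-stack array and `!eq->affinity_mask` degenerates into an always-false address test. cpumask_available() abstracts both representations:

    /* Portable NULL-check for a cpumask_var_t: a real pointer test with
     * CONFIG_CPUMASK_OFFSTACK, compile-time true otherwise.
     */
    if (!cpumask_available(eq->affinity_mask) ||
        cpumask_empty(eq->affinity_mask))
            return;
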
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d2d59444f562..6a046030e873 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -260,47 +260,34 @@ static const struct devlink_param mlx4_devlink_params[] = {
NULL, NULL, NULL),
};
-static void mlx4_devlink_set_init_value(struct devlink *devlink, u32 param_id,
- union devlink_param_value init_val)
-{
- struct mlx4_priv *priv = devlink_priv(devlink);
- struct mlx4_dev *dev = &priv->dev;
- int err;
-
- err = devlink_param_driverinit_value_set(devlink, param_id, init_val);
- if (err)
- mlx4_warn(dev,
- "devlink set parameter %u value failed (err = %d)",
- param_id, err);
-}
-
static void mlx4_devlink_set_params_init_values(struct devlink *devlink)
{
union devlink_param_value value;
value.vbool = !!mlx4_internal_err_reset;
- mlx4_devlink_set_init_value(devlink,
- DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
- value);
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ value);
value.vu32 = 1UL << log_num_mac;
- mlx4_devlink_set_init_value(devlink,
- DEVLINK_PARAM_GENERIC_ID_MAX_MACS, value);
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ value);
value.vbool = enable_64b_cqe_eqe;
- mlx4_devlink_set_init_value(devlink,
- MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
- value);
+ devlink_param_driverinit_value_set(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ value);
value.vbool = enable_4k_uar;
- mlx4_devlink_set_init_value(devlink,
- MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
- value);
+ devlink_param_driverinit_value_set(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+ value);
value.vbool = false;
- mlx4_devlink_set_init_value(devlink,
- DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
- value);
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ value);
}
static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 3ce14d42ddc8..a53736c26c0c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -206,7 +206,7 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
u8 own;
do {
- own = ent->lay->status_own;
+ own = READ_ONCE(ent->lay->status_own);
if (!(own & CMD_OWNER_HW)) {
ent->ret = 0;
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index b994b80d5714..37ba7c78859d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -132,11 +132,11 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
delayed_event_start(priv);
dev_ctx->context = intf->add(dev);
- set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
- if (intf->attach)
- set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
-
if (dev_ctx->context) {
+ set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
+ if (intf->attach)
+ set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
+
spin_lock_irq(&priv->ctx_lock);
list_add_tail(&dev_ctx->list, &priv->ctx_list);
@@ -211,12 +211,17 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
if (intf->attach) {
if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
goto out;
- intf->attach(dev, dev_ctx->context);
+ if (intf->attach(dev, dev_ctx->context))
+ goto out;
+
set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
} else {
if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
goto out;
dev_ctx->context = intf->add(dev);
+ if (!dev_ctx->context)
+ goto out;
+
set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
}
@@ -391,16 +396,17 @@ void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
}
}
-static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
+static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
{
- return (u16)((dev->pdev->bus->number << 8) |
+ return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
+ (dev->pdev->bus->number << 8) |
PCI_SLOT(dev->pdev->devfn));
}
/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
- u16 pci_id = mlx5_gen_pci_id(dev);
+ u32 pci_id = mlx5_gen_pci_id(dev);
struct mlx5_core_dev *res = NULL;
struct mlx5_core_dev *tmp_dev;
struct mlx5_priv *priv;
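
Note: ports of one adapter are paired by a synthesized PCI id; omitting the PCI domain (segment) lets devices on different domains with identical bus/slot numbers collide, and the domain does not fit in the old u16. The packing, as plain C:

    /* [31:16] domain, [15:8] bus, [7:0] slot -- needs 32 bits because
     * PCI domain numbers are 16 bits on their own.
     */
    static u32 gen_pci_id(u16 domain, u8 bus, u8 slot)
    {
            return ((u32)domain << 16) | ((u32)bus << 8) | slot;
    }
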
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index db2cfcd21d43..0f189f873859 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -54,6 +54,7 @@
#include "en_stats.h"
#include "en/fs.h"
+extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;
#define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index bbf69e859b78..1431232c9a09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -16,6 +16,8 @@ struct mlx5e_tc_table {
DECLARE_HASHTABLE(mod_hdr_tbl, 8);
DECLARE_HASHTABLE(hairpin_tbl, 8);
+
+ struct notifier_block netdevice_nb;
};
struct mlx5e_flow_table {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
index eddd7702680b..e88340e196f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
@@ -183,12 +183,13 @@ static const struct tlsdev_ops mlx5e_tls_ops = {
void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
{
- u32 caps = mlx5_accel_tls_device_caps(priv->mdev);
struct net_device *netdev = priv->netdev;
+ u32 caps;
if (!mlx5_accel_is_tls_device(priv->mdev))
return;
+ caps = mlx5_accel_tls_device_caps(priv->mdev);
if (caps & MLX5_ACCEL_TLS_TX) {
netdev->features |= NETIF_F_HW_TLS_TX;
netdev->hw_features |= NETIF_F_HW_TLS_TX;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 75bb981e00b7..41cde926cdab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -191,7 +191,7 @@ set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
{
if (psrc_m) {
MLX5E_FTE_SET(headers_c, udp_sport, 0xffff);
- MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_v));
+ MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
}
if (pdst_m) {
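
Note: the en_fs_ethtool hunk is a copy-paste fix: the UDP source-port value was written into the criteria (mask) buffer headers_c a second time instead of into the value buffer headers_v. Flow-steering entries are (mask, value) pairs, and each half must land in its own buffer:

    MLX5E_FTE_SET(headers_c, udp_sport, 0xffff);        /* match mask  */
    MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v)); /* match value */
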
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5a7939e70190..f291d1bf1558 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4315,23 +4315,7 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Fake "interrupt" called by netpoll (eg netconsole) to send skbs without
- * reenabling interrupts.
- */
-static void mlx5e_netpoll(struct net_device *dev)
-{
- struct mlx5e_priv *priv = netdev_priv(dev);
- struct mlx5e_channels *chs = &priv->channels;
-
- int i;
-
- for (i = 0; i < chs->num; i++)
- napi_schedule(&chs->c[i]->napi);
-}
-#endif
-
-static const struct net_device_ops mlx5e_netdev_ops = {
+const struct net_device_ops mlx5e_netdev_ops = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
.ndo_start_xmit = mlx5e_xmit,
@@ -4356,9 +4340,6 @@ static const struct net_device_ops mlx5e_netdev_ops = {
#ifdef CONFIG_MLX5_EN_ARFS
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = mlx5e_netpoll,
-#endif
#ifdef CONFIG_MLX5_ESWITCH
/* SRIOV E-Switch NDOs */
.ndo_set_vf_mac = mlx5e_set_vf_mac,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 15d8ae28c040..00172dee5339 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -432,10 +432,9 @@ static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq)
static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
struct mlx5_wq_cyc *wq,
- u16 pi, u16 frag_pi)
+ u16 pi, u16 nnops)
{
struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi];
- u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
edge_wi = wi + nnops;
@@ -454,15 +453,14 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5e_umr_wqe *umr_wqe;
u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
- u16 pi, frag_pi;
+ u16 pi, contig_wqebbs_room;
int err;
int i;
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
-
- if (unlikely(frag_pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_frag_size(wq))) {
- mlx5e_fill_icosq_frag_edge(sq, wq, pi, frag_pi);
+ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs_room < MLX5E_UMR_WQEBBS)) {
+ mlx5e_fill_icosq_frag_edge(sq, wq, pi, contig_wqebbs_room);
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9fed54017659..85796727093e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1368,6 +1368,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
*match_level = MLX5_MATCH_L2;
}
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
}
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
@@ -2946,14 +2949,71 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
return 0;
}
+static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
+ struct mlx5e_priv *peer_priv)
+{
+ struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
+ struct mlx5e_hairpin_entry *hpe;
+ u16 peer_vhca_id;
+ int bkt;
+
+ if (!same_hw_devs(priv, peer_priv))
+ return;
+
+ peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
+
+ hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
+ if (hpe->peer_vhca_id == peer_vhca_id)
+ hpe->hp->pair->peer_gone = true;
+ }
+}
+
+static int mlx5e_tc_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct mlx5e_flow_steering *fs;
+ struct mlx5e_priv *peer_priv;
+ struct mlx5e_tc_table *tc;
+ struct mlx5e_priv *priv;
+
+ if (ndev->netdev_ops != &mlx5e_netdev_ops ||
+ event != NETDEV_UNREGISTER ||
+ ndev->reg_state == NETREG_REGISTERED)
+ return NOTIFY_DONE;
+
+ tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
+ fs = container_of(tc, struct mlx5e_flow_steering, tc);
+ priv = container_of(fs, struct mlx5e_priv, fs);
+ peer_priv = netdev_priv(ndev);
+ if (priv == peer_priv ||
+ !(priv->netdev->features & NETIF_F_HW_TC))
+ return NOTIFY_DONE;
+
+ mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
+
+ return NOTIFY_DONE;
+}
+
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
+ int err;
hash_init(tc->mod_hdr_tbl);
hash_init(tc->hairpin_tbl);
- return rhashtable_init(&tc->ht, &tc_ht_params);
+ err = rhashtable_init(&tc->ht, &tc_ht_params);
+ if (err)
+ return err;
+
+ tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
+ if (register_netdevice_notifier(&tc->netdevice_nb)) {
+ tc->netdevice_nb.notifier_call = NULL;
+ mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
+ }
+
+ return err;
}
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
@@ -2969,6 +3029,9 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
+ if (tc->netdevice_nb.notifier_call)
+ unregister_netdevice_notifier(&tc->netdevice_nb);
+
rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
if (!IS_ERR_OR_NULL(tc->t)) {
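
Note: hairpin flows keep SQs on a peer device, so when the peer's netdev unregisters those queues must be flagged as gone before anyone touches them again; the driver hears about this through a netdevice notifier. A minimal registration sketch with illustrative foo_* names:

    static int foo_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
    {
            struct net_device *ndev = netdev_notifier_info_to_dev(ptr);

            if (event != NETDEV_UNREGISTER)
                    return NOTIFY_DONE;
            /* ... mark state that depends on ndev as dead ... */
            return NOTIFY_DONE;
    }

    static struct notifier_block foo_nb = {
            .notifier_call = foo_netdev_event,
    };
    /* register_netdevice_notifier(&foo_nb) on init,
     * unregister_netdevice_notifier(&foo_nb) on cleanup.
     */
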
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index ae73ea992845..6dacaeba2fbf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -290,10 +290,9 @@ dma_unmap_wqe_err:
static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
struct mlx5_wq_cyc *wq,
- u16 pi, u16 frag_pi)
+ u16 pi, u16 nnops)
{
struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
- u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi;
edge_wi = wi + nnops;
@@ -348,8 +347,8 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe_info *wi;
struct mlx5e_sq_stats *stats = sq->stats;
+ u16 headlen, ihs, contig_wqebbs_room;
u16 ds_cnt, ds_cnt_inl = 0;
- u16 headlen, ihs, frag_pi;
u8 num_wqebbs, opcode;
u32 num_bytes;
int num_dma;
@@ -386,9 +385,9 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
- frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
- if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
- mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
+ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs_room < num_wqebbs)) {
+ mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
}
@@ -636,7 +635,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe_info *wi;
struct mlx5e_sq_stats *stats = sq->stats;
- u16 headlen, ihs, pi, frag_pi;
+ u16 headlen, ihs, pi, contig_wqebbs_room;
u16 ds_cnt, ds_cnt_inl = 0;
u8 num_wqebbs, opcode;
u32 num_bytes;
@@ -672,13 +671,14 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
- frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc);
- if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) {
+ pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+ contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+ if (unlikely(contig_wqebbs_room < num_wqebbs)) {
+ mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi);
}
- mlx5i_sq_fetch_wqe(sq, &wqe, &pi);
+ mlx5i_sq_fetch_wqe(sq, &wqe, pi);
/* fill wqe */
wi = &sq->db.wqe_info[pi];
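
Note: the old fragment arithmetic only worked when a WQE never spanned fragments of the work queue's backing buffer; the new helper instead reports how many WQE basic blocks are contiguous from the producer index and pads the tail with NOPs when the next WQE would not fit. The room computation in plain C, assuming a power-of-two fragment size:

    /* Slots left in the current fragment starting at index ix: the last
     * index of the fragment is ix with its low bits saturated.
     */
    static unsigned int contig_room(unsigned int ix, unsigned int frag_sz)
    {
            unsigned int last = ix | (frag_sz - 1);

            return last - ix + 1;
    }

    /* if (contig_room(pi, frag_sz) < num_wqebbs) -> fill edge with NOPs */
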
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 48864f4988a4..c1e1a16a9b07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -273,7 +273,7 @@ static void eq_pf_process(struct mlx5_eq *eq)
case MLX5_PFAULT_SUBTYPE_WQE:
/* WQE based event */
pfault->type =
- be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24;
+ (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7;
pfault->token =
be32_to_cpu(pf_eqe->wqe.token);
pfault->wqe.wq_num =
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 2b252cde5cc2..ea7dedc2d5ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -2000,7 +2000,7 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
u32 max_guarantee = 0;
int i;
- for (i = 0; i <= esw->total_vports; i++) {
+ for (i = 0; i < esw->total_vports; i++) {
evport = &esw->vports[i];
if (!evport->enabled || evport->info.min_rate < max_guarantee)
continue;
@@ -2020,7 +2020,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
int err;
int i;
- for (i = 0; i <= esw->total_vports; i++) {
+ for (i = 0; i < esw->total_vports; i++) {
evport = &esw->vports[i];
if (!evport->enabled)
continue;
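
Note: both vport loops walked one slot past the end of the array: esw->vports has total_vports entries, so `<=` read uninitialized memory into the min-rate computation. The canonical bound:

    /* N elements means valid indices 0 .. N-1 */
    for (i = 0; i < esw->total_vports; i++)
            /* ... use esw->vports[i] ... */;
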
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index f72b5c9dcfe9..3028e8d90920 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -663,6 +663,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
if (err)
goto miss_rule_err;
+ kvfree(flow_group_in);
return 0;
miss_rule_err:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 5645a4facad2..b8ee9101c506 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -245,7 +245,7 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
return ERR_PTR(res);
}
- /* Context will be freed by wait func after completion */
+ /* Context should be freed by the caller after completion. */
return context;
}
@@ -418,10 +418,8 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
cmd.flags = htonl(flags);
context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
- if (IS_ERR(context)) {
- err = PTR_ERR(context);
- goto out;
- }
+ if (IS_ERR(context))
+ return PTR_ERR(context);
err = mlx5_fpga_ipsec_cmd_wait(context);
if (err)
@@ -435,6 +433,7 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
}
out:
+ kfree(context);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index f418541af7cf..37d114c668b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1578,6 +1578,33 @@ static u64 matched_fgs_get_version(struct list_head *match_head)
return version;
}
+static struct fs_fte *
+lookup_fte_locked(struct mlx5_flow_group *g,
+ u32 *match_value,
+ bool take_write)
+{
+ struct fs_fte *fte_tmp;
+
+ if (take_write)
+ nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+ else
+ nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
+ fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
+ rhash_fte);
+ if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
+ fte_tmp = NULL;
+ goto out;
+ }
+
+ nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+out:
+ if (take_write)
+ up_write_ref_node(&g->node);
+ else
+ up_read_ref_node(&g->node);
+ return fte_tmp;
+}
+
static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
struct list_head *match_head,
@@ -1600,10 +1627,6 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
if (IS_ERR(fte))
return ERR_PTR(-ENOMEM);
- list_for_each_entry(iter, match_head, list) {
- nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT);
- }
-
search_again_locked:
version = matched_fgs_get_version(match_head);
/* Try to find a fg that already contains a matching fte */
@@ -1611,20 +1634,9 @@ search_again_locked:
struct fs_fte *fte_tmp;
g = iter->g;
- fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value,
- rhash_fte);
- if (!fte_tmp || !tree_get_node(&fte_tmp->node))
+ fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
+ if (!fte_tmp)
continue;
-
- nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
- if (!take_write) {
- list_for_each_entry(iter, match_head, list)
- up_read_ref_node(&iter->g->node);
- } else {
- list_for_each_entry(iter, match_head, list)
- up_write_ref_node(&iter->g->node);
- }
-
rule = add_rule_fg(g, spec->match_value,
flow_act, dest, dest_num, fte_tmp);
up_write_ref_node(&fte_tmp->node);
@@ -1633,19 +1645,6 @@ search_again_locked:
return rule;
}
- /* No group with matching fte found. Try to add a new fte to any
- * matching fg.
- */
-
- if (!take_write) {
- list_for_each_entry(iter, match_head, list)
- up_read_ref_node(&iter->g->node);
- list_for_each_entry(iter, match_head, list)
- nested_down_write_ref_node(&iter->g->node,
- FS_LOCK_PARENT);
- take_write = true;
- }
-
/* Check the ft version, for case that new flow group
* was added while the fgs weren't locked
*/
@@ -1657,27 +1656,30 @@ search_again_locked:
/* Check the fgs version, for case the new FTE with the
* same values was added while the fgs weren't locked
*/
- if (version != matched_fgs_get_version(match_head))
+ if (version != matched_fgs_get_version(match_head)) {
+ take_write = true;
goto search_again_locked;
+ }
list_for_each_entry(iter, match_head, list) {
g = iter->g;
if (!g->node.active)
continue;
+
+ nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+
err = insert_fte(g, fte);
if (err) {
+ up_write_ref_node(&g->node);
if (err == -ENOSPC)
continue;
- list_for_each_entry(iter, match_head, list)
- up_write_ref_node(&iter->g->node);
kmem_cache_free(steering->ftes_cache, fte);
return ERR_PTR(err);
}
nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
- list_for_each_entry(iter, match_head, list)
- up_write_ref_node(&iter->g->node);
+ up_write_ref_node(&g->node);
rule = add_rule_fg(g, spec->match_value,
flow_act, dest, dest_num, fte);
up_write_ref_node(&fte->node);
@@ -1686,8 +1688,6 @@ search_again_locked:
}
rule = ERR_PTR(-ENOENT);
out:
- list_for_each_entry(iter, match_head, list)
- up_write_ref_node(&iter->g->node);
kmem_cache_free(steering->ftes_cache, fte);
return rule;
}
@@ -1726,6 +1726,8 @@ search_again_locked:
if (err) {
if (take_write)
up_write_ref_node(&ft->node);
+ else
+ up_read_ref_node(&ft->node);
return ERR_PTR(err);
}
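
Note: the fs_core rework drops the up-front read-lock over every candidate flow group in favour of locking one group at a time and re-validating a version counter before falling back to insertion. A userspace analogue of the optimistic probe-then-insert pattern, with hypothetical htab_find()/htab_insert() helpers:

    for (;;) {
            pthread_rwlock_rdlock(&tbl->lock);  /* cheap, shared probe */
            e = htab_find(tbl, key);
            seen = tbl->version;
            pthread_rwlock_unlock(&tbl->lock);
            if (e)
                    return e;

            pthread_rwlock_wrlock(&tbl->lock);  /* exclusive insert path */
            if (tbl->version != seen) {         /* changed under us: retry */
                    pthread_rwlock_unlock(&tbl->lock);
                    continue;
            }
            e = htab_insert(tbl, key);
            tbl->version++;
            pthread_rwlock_unlock(&tbl->lock);
            return e;
    }
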
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index d39b0b7011b2..9f39aeca863f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -331,9 +331,17 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
add_timer(&health->timer);
}
-void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
{
struct mlx5_core_health *health = &dev->priv.health;
+ unsigned long flags;
+
+ if (disable_health) {
+ spin_lock_irqsave(&health->wq_lock, flags);
+ set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+ set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
+ spin_unlock_irqrestore(&health->wq_lock, flags);
+ }
del_timer_sync(&health->timer);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index 08eac92fc26c..0982c579ec74 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -109,12 +109,11 @@ struct mlx5i_tx_wqe {
static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
struct mlx5i_tx_wqe **wqe,
- u16 *pi)
+ u16 pi)
{
struct mlx5_wq_cyc *wq = &sq->wq;
- *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- *wqe = mlx5_wq_cyc_get_wqe(wq, *pi);
+ *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
memset(*wqe, 0, sizeof(**wqe));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index cf3e4a659052..b5e9f664fc66 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -878,8 +878,10 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
priv->numa_node = dev_to_node(&dev->pdev->dev);
priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
- if (!priv->dbg_root)
+ if (!priv->dbg_root) {
+ dev_err(&pdev->dev, "Cannot create debugfs dir, aborting\n");
return -ENOMEM;
+ }
err = mlx5_pci_enable_device(dev);
if (err) {
@@ -928,7 +930,7 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
pci_clear_master(dev->pdev);
release_bar(dev->pdev);
mlx5_pci_disable_device(dev);
- debugfs_remove(priv->dbg_root);
+ debugfs_remove_recursive(priv->dbg_root);
}
static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
@@ -1286,7 +1288,7 @@ err_cleanup_once:
mlx5_cleanup_once(dev);
err_stop_poll:
- mlx5_stop_health_poll(dev);
+ mlx5_stop_health_poll(dev, boot);
if (mlx5_cmd_teardown_hca(dev)) {
dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
goto out_err;
@@ -1346,7 +1348,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_free_irq_vectors(dev);
if (cleanup)
mlx5_cleanup_once(dev);
- mlx5_stop_health_poll(dev);
+ mlx5_stop_health_poll(dev, cleanup);
err = mlx5_cmd_teardown_hca(dev);
if (err) {
dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
@@ -1608,7 +1610,7 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
* with the HCA, so the health poll is no longer needed.
*/
mlx5_drain_health_wq(dev);
- mlx5_stop_health_poll(dev);
+ mlx5_stop_health_poll(dev, false);
ret = mlx5_cmd_force_teardown_hca(dev);
if (ret) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index dae1c5c5d27c..a1ee9a8a769e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -475,7 +475,8 @@ static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)
for (i = 0; i < hp->num_channels; i++) {
mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[i]);
- mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+ if (!hp->peer_gone)
+ mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
}
}
@@ -509,7 +510,7 @@ static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
- if (next_state == MLX5_RQC_STATE_RDY) {
+ if (next_state == MLX5_SQC_STATE_RDY) {
MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq);
MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca);
}
@@ -567,6 +568,8 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
MLX5_RQC_STATE_RST, 0, 0);
/* unset peer SQs */
+ if (hp->peer_gone)
+ return;
for (i = 0; i < hp->num_channels; i++)
mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
MLX5_SQC_STATE_RST, 0, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index c8c315eb5128..ddca327e8950 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -39,11 +39,6 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
return (u32)wq->fbc.sz_m1 + 1;
}
-u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
-{
- return (u32)wq->fbc.frag_sz_m1 + 1;
-}
-
u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
{
return wq->fbc.sz_m1 + 1;
@@ -138,7 +133,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
- u32 sq_strides_offset;
+ u16 sq_strides_offset;
u32 rq_pg_remainder;
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index 2bd4c3184eba..b1293d153a58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -80,7 +80,6 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_cyc *wq,
struct mlx5_wq_ctrl *wq_ctrl);
u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
-u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
@@ -140,11 +139,6 @@ static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
return ctr & wq->fbc.sz_m1;
}
-static inline u16 mlx5_wq_cyc_ctr2fragix(struct mlx5_wq_cyc *wq, u16 ctr)
-{
- return ctr & wq->fbc.frag_sz_m1;
-}
-
static inline u16 mlx5_wq_cyc_get_head(struct mlx5_wq_cyc *wq)
{
return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr);
@@ -160,6 +154,11 @@ static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
}
+static inline u16 mlx5_wq_cyc_get_contig_wqebbs(struct mlx5_wq_cyc *wq, u16 ix)
+{
+ return mlx5_frag_buf_get_idx_last_contig_stride(&wq->fbc, ix) - ix + 1;
+}
+
static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
{
int equal = (cc1 == cc2);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 81533d7f395c..937d0ace699a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1055,6 +1055,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
err_driver_init:
mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
+ mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
if (!reload)
devlink_unregister(devlink);
@@ -1088,6 +1089,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
if (mlxsw_core->driver->fini)
mlxsw_core->driver->fini(mlxsw_core);
mlxsw_thermal_fini(mlxsw_core->thermal);
+ mlxsw_hwmon_fini(mlxsw_core->hwmon);
if (!reload)
devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 655ddd204ab2..c35be477856f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -359,6 +359,10 @@ static inline int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
return 0;
}
+static inline void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
+{
+}
+
#endif
struct mlxsw_thermal;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index f6cf2896d337..e04e8162aa14 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -303,8 +303,7 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
struct device *hwmon_dev;
int err;
- mlxsw_hwmon = devm_kzalloc(mlxsw_bus_info->dev, sizeof(*mlxsw_hwmon),
- GFP_KERNEL);
+ mlxsw_hwmon = kzalloc(sizeof(*mlxsw_hwmon), GFP_KERNEL);
if (!mlxsw_hwmon)
return -ENOMEM;
mlxsw_hwmon->core = mlxsw_core;
@@ -321,10 +320,9 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group;
mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs;
- hwmon_dev = devm_hwmon_device_register_with_groups(mlxsw_bus_info->dev,
- "mlxsw",
- mlxsw_hwmon,
- mlxsw_hwmon->groups);
+ hwmon_dev = hwmon_device_register_with_groups(mlxsw_bus_info->dev,
+ "mlxsw", mlxsw_hwmon,
+ mlxsw_hwmon->groups);
if (IS_ERR(hwmon_dev)) {
err = PTR_ERR(hwmon_dev);
goto err_hwmon_register;
@@ -337,5 +335,12 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core,
err_hwmon_register:
err_fans_init:
err_temp_init:
+ kfree(mlxsw_hwmon);
return err;
}
+
+void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon)
+{
+ hwmon_device_unregister(mlxsw_hwmon->hwmon_dev);
+ kfree(mlxsw_hwmon);
+}
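
Note: the hwmon object moves off devm_* allocation, presumably so it can be torn down symmetrically in the core's unregister and error paths rather than only when the underlying bus device is released. The manual lifecycle, sketched:

    hw = kzalloc(sizeof(*hw), GFP_KERNEL);
    if (!hw)
            return -ENOMEM;
    hw->hwmon_dev = hwmon_device_register_with_groups(dev, "foo",
                                                      hw, hw->groups);
    if (IS_ERR(hw->hwmon_dev)) {
            err = PTR_ERR(hw->hwmon_dev);
            kfree(hw);                  /* error paths must free too */
            return err;
    }

    /* fini, mirroring init in reverse order: */
    hwmon_device_unregister(hw->hwmon_dev);
    kfree(hw);
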
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 4d271fb3de3d..5890fdfd62c3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -718,14 +718,17 @@ static void mlxsw_pci_eq_tasklet(unsigned long data)
memset(&active_cqns, 0, sizeof(active_cqns));
while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
- u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);
- switch (event_type) {
- case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
+ /* Command interface completion events are always received on
+ * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events
+ * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
+ */
+ switch (q->num) {
+ case MLXSW_PCI_EQ_ASYNC_NUM:
mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
q->u.eq.ev_cmd_count++;
break;
- case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
+ case MLXSW_PCI_EQ_COMP_NUM:
cqn = mlxsw_pci_eqe_cqn_get(eqe);
set_bit(cqn, active_cqns);
cq_handle = true;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 930700413b1d..30bb2c533cec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -44,8 +44,8 @@
#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
#define MLXSW_SP1_FWREV_MAJOR 13
-#define MLXSW_SP1_FWREV_MINOR 1702
-#define MLXSW_SP1_FWREV_SUBMINOR 6
+#define MLXSW_SP1_FWREV_MINOR 1703
+#define MLXSW_SP1_FWREV_SUBMINOR 4
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -4845,6 +4845,8 @@ static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
upper_dev = info->upper_dev;
if (info->linking)
break;
+ if (is_vlan_dev(upper_dev))
+ mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
if (netif_is_macvlan(upper_dev))
mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
break;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 4327487553c5..3589432d1643 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -337,14 +337,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
MLXSW_SP_SB_CM(1500, 9, 0),
MLXSW_SP_SB_CM(1500, 9, 0),
MLXSW_SP_SB_CM(1500, 9, 0),
- MLXSW_SP_SB_CM(0, 0, 0),
- MLXSW_SP_SB_CM(0, 0, 0),
- MLXSW_SP_SB_CM(0, 0, 0),
- MLXSW_SP_SB_CM(0, 0, 0),
- MLXSW_SP_SB_CM(0, 0, 0),
- MLXSW_SP_SB_CM(0, 0, 0),
- MLXSW_SP_SB_CM(0, 0, 0),
- MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 140000, 15),
+ MLXSW_SP_SB_CM(0, 140000, 15),
+ MLXSW_SP_SB_CM(0, 140000, 15),
+ MLXSW_SP_SB_CM(0, 140000, 15),
+ MLXSW_SP_SB_CM(0, 140000, 15),
+ MLXSW_SP_SB_CM(0, 140000, 15),
+ MLXSW_SP_SB_CM(0, 140000, 15),
+ MLXSW_SP_SB_CM(0, 140000, 15),
MLXSW_SP_SB_CM(1, 0xff, 0),
};
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index e7dce79ff2c9..001b5f714c1b 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -2850,7 +2850,7 @@ static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
lan743x_hardware_cleanup(adapter);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
{
return bitrev16(crc16(0xFFFF, buf, len));
@@ -3016,7 +3016,7 @@ static int lan743x_pm_resume(struct device *dev)
static const struct dev_pm_ops lan743x_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
};
-#endif /*CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
static const struct pci_device_id lan743x_pcidev_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
@@ -3028,7 +3028,7 @@ static struct pci_driver lan743x_pcidev_driver = {
.id_table = lan743x_pcidev_tbl,
.probe = lan743x_pcidev_probe,
.remove = lan743x_pcidev_remove,
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
.driver.pm = &lan743x_pm_ops,
#endif
.shutdown = lan743x_pcidev_shutdown,
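
Note: the suspend/resume callbacks are consumed by SET_SYSTEM_SLEEP_PM_OPS(), which expands to nothing unless CONFIG_PM_SLEEP is set; guarding them with the broader CONFIG_PM leaves them defined-but-unused (and warning) when only runtime PM is enabled. The guards must match the macro:

    #ifdef CONFIG_PM_SLEEP
    static int foo_pm_suspend(struct device *dev) { /* ... */ return 0; }
    static int foo_pm_resume(struct device *dev)  { /* ... */ return 0; }

    static const struct dev_pm_ops foo_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(foo_pm_suspend, foo_pm_resume)
    };
    #endif /* CONFIG_PM_SLEEP */
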
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 1a4f2bb48ead..ed4e298cd823 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -133,9 +133,9 @@ static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot)
{
unsigned int val, timeout = 10;
- /* Wait for the issued mac table command to be completed, or timeout.
- * When the command read from ANA_TABLES_MACACCESS is
- * MACACCESS_CMD_IDLE, the issued command completed successfully.
+ /* Wait for the issued vlan table command to be completed, or timeout.
+ * When the command read from ANA_TABLES_VLANACCESS is
+ * VLANACCESS_CMD_IDLE, the issued command completed successfully.
*/
do {
val = ocelot_read(ocelot, ANA_TABLES_VLANACCESS);
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 26bb3b18f3be..3cdf63e35b53 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -91,7 +91,7 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
struct sk_buff *skb;
struct net_device *dev;
u32 *buf;
- int sz, len;
+ int sz, len, buf_len;
u32 ifh[4];
u32 val;
struct frame_info info;
@@ -116,14 +116,20 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
err = -ENOMEM;
break;
}
- buf = (u32 *)skb_put(skb, info.len);
+ buf_len = info.len - ETH_FCS_LEN;
+ buf = (u32 *)skb_put(skb, buf_len);
len = 0;
do {
sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
*buf++ = val;
len += sz;
- } while ((sz == 4) && (len < info.len));
+ } while (len < buf_len);
+
+ /* Read the FCS and discard it */
+ sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
+ /* Update the statistics if part of the FCS was read before */
+ len -= ETH_FCS_LEN - sz;
if (sz < 0) {
err = sz;
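
Note: the extraction handler copied info.len bytes into the skb, but that length includes the 4-byte FCS, so every received frame carried its CRC as trailing payload. The fix keeps info.len - ETH_FCS_LEN bytes and pops the CRC words out of the extraction queue without storing them:

    buf_len = info.len - ETH_FCS_LEN;       /* bytes that go in the skb */
    buf = (u32 *)skb_put(skb, buf_len);
    /* copy buf_len bytes from the queue, then read and discard the
     * remaining FCS word(s)
     */
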
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 9044496803e6..7a1e9cd9cc62 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -52,6 +52,7 @@
#define NFP_FL_TUNNEL_CSUM cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT cpu_to_be16(0x0800)
+#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS IP_TUNNEL_INFO_TX
#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \
NFP_FL_TUNNEL_KEY | \
NFP_FL_TUNNEL_GENEVE_OPT)
@@ -428,12 +429,14 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
switch (off) {
case offsetof(struct iphdr, daddr):
- set_ip_addr->ipv4_dst_mask = mask;
- set_ip_addr->ipv4_dst = exact;
+ set_ip_addr->ipv4_dst_mask |= mask;
+ set_ip_addr->ipv4_dst &= ~mask;
+ set_ip_addr->ipv4_dst |= exact & mask;
break;
case offsetof(struct iphdr, saddr):
- set_ip_addr->ipv4_src_mask = mask;
- set_ip_addr->ipv4_src = exact;
+ set_ip_addr->ipv4_src_mask |= mask;
+ set_ip_addr->ipv4_src &= ~mask;
+ set_ip_addr->ipv4_src |= exact & mask;
break;
default:
return -EOPNOTSUPP;
@@ -447,11 +450,12 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
}
static void
-nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask,
+nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
struct nfp_fl_set_ipv6_addr *ip6)
{
- ip6->ipv6[idx % 4].mask = mask;
- ip6->ipv6[idx % 4].exact = exact;
+ ip6->ipv6[word].mask |= mask;
+ ip6->ipv6[word].exact &= ~mask;
+ ip6->ipv6[word].exact |= exact & mask;
ip6->reserved = cpu_to_be16(0);
ip6->head.jump_id = opcode_tag;
@@ -464,6 +468,7 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
struct nfp_fl_set_ipv6_addr *ip_src)
{
__be32 exact, mask;
+ u8 word;
/* We are expecting tcf_pedit to return a big endian value */
mask = (__force __be32)~tcf_pedit_mask(action, idx);
@@ -472,17 +477,20 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
if (exact & ~mask)
return -EOPNOTSUPP;
- if (off < offsetof(struct ipv6hdr, saddr))
+ if (off < offsetof(struct ipv6hdr, saddr)) {
return -EOPNOTSUPP;
- else if (off < offsetof(struct ipv6hdr, daddr))
- nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, idx,
+ } else if (off < offsetof(struct ipv6hdr, daddr)) {
+ word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
+ nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
exact, mask, ip_src);
- else if (off < offsetof(struct ipv6hdr, daddr) +
- sizeof(struct in6_addr))
- nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, idx,
+ } else if (off < offsetof(struct ipv6hdr, daddr) +
+ sizeof(struct in6_addr)) {
+ word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
+ nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
exact, mask, ip_dst);
- else
+ } else {
return -EOPNOTSUPP;
+ }
return 0;
}
@@ -540,7 +548,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
struct nfp_fl_set_eth set_eth;
enum pedit_header_type htype;
int idx, nkeys, err;
- size_t act_size;
+ size_t act_size = 0;
u32 offset, cmd;
u8 ip_proto = 0;
@@ -598,7 +606,9 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
act_size = sizeof(set_eth);
memcpy(nfp_action, &set_eth, act_size);
*a_len += act_size;
- } else if (set_ip_addr.head.len_lw) {
+ }
+ if (set_ip_addr.head.len_lw) {
+ nfp_action += act_size;
act_size = sizeof(set_ip_addr);
memcpy(nfp_action, &set_ip_addr, act_size);
*a_len += act_size;
@@ -606,10 +616,12 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
nfp_fl_csum_l4_to_flag(ip_proto);
- } else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
+ }
+ if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
/* TC compiles set src and dst IPv6 address as a single action,
* the hardware requires this to be 2 separate actions.
*/
+ nfp_action += act_size;
act_size = sizeof(set_ip6_src);
memcpy(nfp_action, &set_ip6_src, act_size);
*a_len += act_size;
@@ -622,6 +634,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_ip6_dst.head.len_lw) {
+ nfp_action += act_size;
act_size = sizeof(set_ip6_dst);
memcpy(nfp_action, &set_ip6_dst, act_size);
*a_len += act_size;
@@ -629,13 +642,16 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
} else if (set_ip6_src.head.len_lw) {
+ nfp_action += act_size;
act_size = sizeof(set_ip6_src);
memcpy(nfp_action, &set_ip6_src, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
- } else if (set_tport.head.len_lw) {
+ }
+ if (set_tport.head.len_lw) {
+ nfp_action += act_size;
act_size = sizeof(set_tport);
memcpy(nfp_action, &set_tport, act_size);
*a_len += act_size;
@@ -741,11 +757,16 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
nfp_fl_push_vlan(psh_v, a);
*a_len += sizeof(struct nfp_fl_push_vlan);
} else if (is_tcf_tunnel_set(a)) {
+ struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
struct nfp_repr *repr = netdev_priv(netdev);
+
*tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
if (*tun_type == NFP_FL_TUNNEL_NONE)
return -EOPNOTSUPP;
+ if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
+ return -EOPNOTSUPP;
+
/* Pre-tunnel action is required for tunnel encap.
* This checks for next hop entries on NFP.
* If none, the packet falls back before applying other actions.
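
Note: a single pedit action can carry several keys that hit the same 32-bit header word; plain assignment let the last key win and silently dropped the earlier ones. The fix accumulates instead, which in isolation is just mask algebra:

    /* Merge one (exact, mask) key into the accumulated field. */
    static void merge_key(u32 *acc_exact, u32 *acc_mask, u32 exact, u32 mask)
    {
            *acc_mask  |= mask;             /* widen the touched bits   */
            *acc_exact &= ~mask;            /* clear stale bits beneath */
            *acc_exact |= exact & mask;     /* set the new value        */
    }
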
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 85f8209bf007..81d941ab895c 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -70,6 +70,7 @@ struct nfp_app;
#define NFP_FL_FEATS_GENEVE BIT(0)
#define NFP_FL_NBI_MTU_SETTING BIT(1)
#define NFP_FL_FEATS_GENEVE_OPT BIT(2)
+#define NFP_FL_FEATS_VLAN_PCP BIT(3)
#define NFP_FL_FEATS_LAG BIT(31)
struct nfp_fl_mask_id {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index a0c72f277faa..17acb8cc6044 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -56,7 +56,7 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
FLOW_DISSECTOR_KEY_VLAN,
target);
/* Populate the tci field. */
- if (flow_vlan->vlan_id) {
+ if (flow_vlan->vlan_id || flow_vlan->vlan_priority) {
tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
flow_vlan->vlan_priority) |
FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 2edab01c3beb..bd19624f10cf 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -192,6 +192,17 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
key_size += sizeof(struct nfp_flower_mac_mpls);
}
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_dissector_key_vlan *flow_vlan;
+
+ flow_vlan = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ flow->mask);
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
+ flow_vlan->vlan_priority)
+ return -EOPNOTSUPP;
+ }
+
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 253bdaef1505..c6d29fdbb880 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2077,14 +2077,17 @@ nfp_ctrl_rx_one(struct nfp_net *nn, struct nfp_net_dp *dp,
return true;
}
-static void nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
+static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
{
struct nfp_net_rx_ring *rx_ring = r_vec->rx_ring;
struct nfp_net *nn = r_vec->nfp_net;
struct nfp_net_dp *dp = &nn->dp;
+ unsigned int budget = 512;
- while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring))
+ while (nfp_ctrl_rx_one(nn, dp, r_vec, rx_ring) && budget--)
continue;
+
+ return budget;
}
static void nfp_ctrl_poll(unsigned long arg)
@@ -2096,9 +2099,13 @@ static void nfp_ctrl_poll(unsigned long arg)
__nfp_ctrl_tx_queued(r_vec);
spin_unlock_bh(&r_vec->lock);
- nfp_ctrl_rx(r_vec);
-
- nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+ if (nfp_ctrl_rx(r_vec)) {
+ nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
+ } else {
+ tasklet_schedule(&r_vec->tasklet);
+ nn_dp_warn(&r_vec->nfp_net->dp,
+ "control message budget exceeded!\n");
+ }
}
/* Setup and Configuration
@@ -3146,21 +3153,6 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void nfp_net_netpoll(struct net_device *netdev)
-{
- struct nfp_net *nn = netdev_priv(netdev);
- int i;
-
- /* nfp_net's NAPIs are statically allocated so even if there is a race
- * with reconfig path this will simply try to schedule some disabled
- * NAPI instances.
- */
- for (i = 0; i < nn->dp.num_stack_tx_rings; i++)
- napi_schedule_irqoff(&nn->r_vecs[i].napi);
-}
-#endif
-
static void nfp_net_stat64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -3519,9 +3511,6 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_get_stats64 = nfp_net_stat64,
.ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = nfp_net_netpoll,
-#endif
.ndo_set_vf_mac = nfp_app_set_vf_mac,
.ndo_set_vf_vlan = nfp_app_set_vf_vlan,
.ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
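
Note: the control-message tasklet used to loop until the RX ring drained, which can starve the CPU under a message flood. It now services at most 512 messages per run and, when the budget runs out, reschedules itself instead of unmasking the interrupt. The bounded-poll shape, with illustrative names:

    static void foo_ctrl_poll(unsigned long arg)
    {
            struct foo_vec *v = (struct foo_vec *)arg;
            unsigned int budget = 512;

            while (budget && foo_rx_one(v))
                    budget--;

            if (budget)
                    foo_irq_unmask(v);              /* ring drained: re-arm IRQ */
            else
                    tasklet_schedule(&v->tasklet);  /* come back later */
    }
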
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 69aa7fc392c5..59c70be22a84 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -72,9 +72,6 @@ static void netxen_schedule_work(struct netxen_adapter *adapter,
work_func_t func, int delay);
static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
static int netxen_nic_poll(struct napi_struct *napi, int budget);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netxen_nic_poll_controller(struct net_device *netdev);
-#endif
static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
@@ -581,9 +578,6 @@ static const struct net_device_ops netxen_netdev_ops = {
.ndo_tx_timeout = netxen_tx_timeout,
.ndo_fix_features = netxen_fix_features,
.ndo_set_features = netxen_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = netxen_nic_poll_controller,
-#endif
};
static inline bool netxen_function_zero(struct pci_dev *pdev)
@@ -2402,23 +2396,6 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
return work_done;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void netxen_nic_poll_controller(struct net_device *netdev)
-{
- int ring;
- struct nx_host_sds_ring *sds_ring;
- struct netxen_adapter *adapter = netdev_priv(netdev);
- struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
-
- disable_irq(adapter->irq);
- for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
- netxen_intr(adapter->irq, sds_ring);
- }
- enable_irq(adapter->irq);
-}
-#endif
-
static int
nx_incr_dev_ref_cnt(struct netxen_adapter *adapter)
{
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 6bb76e6d3c14..f5459de6d60a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -190,10 +190,8 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
static void
qed_dcbx_set_params(struct qed_dcbx_results *p_data,
- struct qed_hw_info *p_info,
- bool enable,
- u8 prio,
- u8 tc,
+ struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ bool enable, u8 prio, u8 tc,
enum dcbx_protocol_type type,
enum qed_pci_personality personality)
{
@@ -206,19 +204,30 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
else
p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
+ /* Do not add vlan tag 0 when DCB is enabled and port in UFP/OV mode */
+ if ((test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits) ||
+ test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)))
+ p_data->arr[type].dont_add_vlan0 = true;
+
/* QM reconf data */
- if (p_info->personality == personality)
- qed_hw_info_set_offload_tc(p_info, tc);
+ if (p_hwfn->hw_info.personality == personality)
+ qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc);
+
+ /* Configure dcbx vlan priority in doorbell block for roce EDPM */
+ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
+ type == DCBX_PROTOCOL_ROCE) {
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1);
+ }
}
/* Update app protocol data and hw_info fields with the TLV info */
static void
qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
- struct qed_hwfn *p_hwfn,
- bool enable,
- u8 prio, u8 tc, enum dcbx_protocol_type type)
+ struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ bool enable, u8 prio, u8 tc,
+ enum dcbx_protocol_type type)
{
- struct qed_hw_info *p_info = &p_hwfn->hw_info;
enum qed_pci_personality personality;
enum dcbx_protocol_type id;
int i;
@@ -231,7 +240,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
personality = qed_dcbx_app_update[i].personality;
- qed_dcbx_set_params(p_data, p_info, enable,
+ qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable,
prio, tc, type, personality);
}
}
@@ -265,7 +274,7 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
* reconfiguring QM. Get protocol specific data for PF update ramrod command.
*/
static int
-qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
+qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
struct qed_dcbx_results *p_data,
struct dcbx_app_priority_entry *p_tbl,
u32 pri_tc_tbl, int count, u8 dcbx_version)
@@ -309,7 +318,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
enable = true;
}
- qed_dcbx_update_app_info(p_data, p_hwfn, enable,
+ qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
priority, tc, type);
}
}
@@ -331,7 +340,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
continue;
enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
- qed_dcbx_update_app_info(p_data, p_hwfn, enable,
+ qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
priority, tc, type);
}
@@ -341,7 +350,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
/* Parse app TLV's to update TC information in hw_info structure for
* reconfiguring QM. Get protocol specific data for PF update ramrod command.
*/
-static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
+static int
+qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct dcbx_app_priority_feature *p_app;
struct dcbx_app_priority_entry *p_tbl;
@@ -365,7 +375,7 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
p_info = &p_hwfn->hw_info;
num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
- rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
+ rc = qed_dcbx_process_tlv(p_hwfn, p_ptt, &data, p_tbl, pri_tc_tbl,
num_entries, dcbx_version);
if (rc)
return rc;
@@ -891,7 +901,7 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
return rc;
if (type == QED_DCBX_OPERATIONAL_MIB) {
- rc = qed_dcbx_process_mib_info(p_hwfn);
+ rc = qed_dcbx_process_mib_info(p_hwfn, p_ptt);
if (!rc) {
/* reconfigure tcs of QM queues according
* to negotiation results
@@ -954,6 +964,7 @@ static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
p_data->dcb_enable_flag = p_src->arr[type].enable;
p_data->dcb_priority = p_src->arr[type].priority;
p_data->dcb_tc = p_src->arr[type].tc;
+ p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0;
}
/* Set pf update ramrod command params */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index a4d688c04e18..01f253ea4b22 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -55,6 +55,7 @@ struct qed_dcbx_app_data {
u8 update; /* Update indication */
u8 priority; /* Priority */
u8 tc; /* Traffic Class */
+ bool dont_add_vlan0; /* Do not insert a vlan tag with id 0 */
};
#define QED_DCBX_VERSION_DISABLED 0
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 016ca8a7ec8a..97f073fd3725 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1706,7 +1706,7 @@ static int qed_vf_start(struct qed_hwfn *p_hwfn,
int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
{
struct qed_load_req_params load_req_params;
- u32 load_code, param, drv_mb_param;
+ u32 load_code, resp, param, drv_mb_param;
bool b_default_mtu = true;
struct qed_hwfn *p_hwfn;
int rc = 0, mfw_rc, i;
@@ -1852,6 +1852,19 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
if (IS_PF(cdev)) {
p_hwfn = QED_LEADING_HWFN(cdev);
+
+ /* Get pre-negotiated values for stag, bandwidth etc. */
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
+ drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET;
+ rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_GET_OEM_UPDATES,
+ drv_mb_param, &resp, &param);
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "Failed to send GET_OEM_UPDATES attention request\n");
+
drv_mb_param = STORM_FW_VERSION;
rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 8faceb691657..a71382687ef2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -11987,6 +11987,7 @@ struct public_global {
u32 running_bundle_id;
s32 external_temperature;
u32 mdump_reason;
+ u64 reserved;
u32 data_ptr;
u32 data_size;
};
@@ -12414,6 +12415,7 @@ struct public_drv_mb {
#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000
#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000
+#define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000
#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
@@ -12541,6 +12543,9 @@ struct public_drv_mb {
#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1
#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0
+
#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
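
The new DRV_MSG_CODE_GET_OEM_UPDATES opcode and its DUMMY_OEM_UPDATES field follow the mailbox convention used throughout these headers: *_MASK is the in-place mask and *_OFFSET the bit position (compare the QED_MFW_GET_FIELD() use in the qed_dcbx.c hunk above). A hedged sketch of the matching write-side helper; the helper name is hypothetical, and qed_dev.c above simply open-codes the shift for this one-bit field:

static inline u32 mfw_set_field(u32 word, u32 mask, u32 offset, u32 val)
{
	return (word & ~mask) | ((val << offset) & mask);
}

/* e.g. the request built in qed_hw_init():
 *   drv_mb_param = mfw_set_field(0, DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK,
 *                                DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET, 1);
 */
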
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index af3a28ec04eb..0f0aba793352 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -228,7 +228,7 @@ static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
(GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
- QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Ireelevant)",
+ QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));
out:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 17f3dfa2cc94..e860bdf0f752 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -1710,7 +1710,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
cm_info->local_ip[0] = ntohl(iph->daddr);
cm_info->remote_ip[0] = ntohl(iph->saddr);
- cm_info->ip_version = TCP_IPV4;
+ cm_info->ip_version = QED_TCP_IPV4;
ip_hlen = (iph->ihl) * sizeof(u32);
*payload_len = ntohs(iph->tot_len) - ip_hlen;
@@ -1730,7 +1730,7 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
cm_info->remote_ip[i] =
ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
}
- cm_info->ip_version = TCP_IPV6;
+ cm_info->ip_version = QED_TCP_IPV6;
ip_hlen = sizeof(*ip6h);
*payload_len = ntohs(ip6h->payload_len);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 5d37ec7e9b0b..58c7eb9d8e1b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1581,13 +1581,29 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
FUNC_MF_CFG_OV_STAG_MASK;
p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
- if ((p_hwfn->hw_info.hw_mode & BIT(MODE_MF_SD)) &&
- (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET)) {
- qed_wr(p_hwfn, p_ptt,
- NIG_REG_LLH_FUNC_TAG_VALUE, p_hwfn->hw_info.ovlan);
+ if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) {
+ if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
+ p_hwfn->hw_info.ovlan);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
+
+ /* Configure DB to add external vlan to EDPM packets */
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
+ p_hwfn->hw_info.ovlan);
+ } else {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
+ }
+
qed_sp_pf_update_stag(p_hwfn);
}
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
+ p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
+
/* Acknowledge the MFW */
qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
&resp, &param);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index be941cfaa2d4..c71391b9c757 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -228,7 +228,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
num_cons, "Toggle");
if (rc) {
DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
- "Failed to allocate toogle bits, rc = %d\n", rc);
+ "Failed to allocate toggle bits, rc = %d\n", rc);
goto free_cq_map;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index f736f70956fd..2440970882c4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -216,6 +216,12 @@
0x00c000UL
#define DORQ_REG_IFEN \
0x100040UL
+#define DORQ_REG_TAG1_OVRD_MODE \
+ 0x1008b4UL
+#define DORQ_REG_PF_PCP_BB_K2 \
+ 0x1008c4UL
+#define DORQ_REG_PF_EXT_VID_BB_K2 \
+ 0x1008c8UL
#define DORQ_REG_DB_DROP_REASON \
0x100a2cUL
#define DORQ_REG_DB_DROP_DETAILS \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index 7d7a64c55ff1..f9167d1354bb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -140,23 +140,16 @@ static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
{
- enum roce_flavor flavor;
-
switch (roce_mode) {
case ROCE_V1:
- flavor = PLAIN_ROCE;
- break;
+ return PLAIN_ROCE;
case ROCE_V2_IPV4:
- flavor = RROCE_IPV4;
- break;
+ return RROCE_IPV4;
case ROCE_V2_IPV6:
- flavor = ROCE_V2_IPV6;
- break;
+ return RROCE_IPV6;
default:
- flavor = MAX_ROCE_MODE;
- break;
+ return MAX_ROCE_FLAVOR;
}
- return flavor;
}
static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
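
The qed_roce.c rewrite is more than style: the old body assigned ROCE_V2_IPV6 and MAX_ROCE_MODE, which are roce_mode values, to a roce_flavor variable, and C accepts such cross-enum assignments silently. Returning per case puts each flavor next to its mode and makes the mismatch visible in review (and to compilers with enum-conversion warnings). A reduced illustration; the enum names mirror the driver's, but the definitions here are illustrative only:

enum roce_mode   { ROCE_V1, ROCE_V2_IPV4, ROCE_V2_IPV6, MAX_ROCE_MODE };
enum roce_flavor { PLAIN_ROCE, RROCE_IPV4, RROCE_IPV6, MAX_ROCE_FLAVOR };

static enum roce_flavor to_flavor(enum roce_mode mode)
{
	switch (mode) {
	case ROCE_V1:      return PLAIN_ROCE;
	case ROCE_V2_IPV4: return RROCE_IPV4;
	case ROCE_V2_IPV6: return RROCE_IPV6;       /* was: ROCE_V2_IPV6 */
	default:           return MAX_ROCE_FLAVOR;  /* was: MAX_ROCE_MODE */
	}
}
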
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 8de644b4721e..77b6248ad3b9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -154,7 +154,7 @@ qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
struct qed_tunnel_info *p_src)
{
- enum tunnel_clss type;
+ int type;
p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 3d4269659820..be118d057b92 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -413,7 +413,6 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
}
if (!p_iov->b_pre_fp_hsi &&
- ETH_HSI_VER_MINOR &&
(resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
DP_INFO(p_hwfn,
"PF is using older fastpath HSI; %02x.%02x is configured\n",
@@ -572,7 +571,7 @@ free_p_iov:
static void
__qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
struct qed_tunn_update_type *p_src,
- enum qed_tunn_clss mask, u8 *p_cls)
+ enum qed_tunn_mode mask, u8 *p_cls)
{
if (p_src->b_update_mode) {
p_req->tun_mode_update_mask |= BIT(mask);
@@ -587,7 +586,7 @@ __qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
static void
qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
struct qed_tunn_update_type *p_src,
- enum qed_tunn_clss mask,
+ enum qed_tunn_mode mask,
u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
u8 *p_update_port, u16 *p_udp_port)
{
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index b48f76182049..10b075bc5959 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -380,8 +380,6 @@ static void fm93c56a_select(struct ql3_adapter *qdev)
qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
- ql_write_nvram_reg(qdev, spir,
- ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}
/*
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 81312924df14..0c443ea98479 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1800,7 +1800,8 @@ struct qlcnic_hardware_ops {
int (*config_loopback) (struct qlcnic_adapter *, u8);
int (*clear_loopback) (struct qlcnic_adapter *, u8);
int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
- void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
+ void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr,
+ u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
int (*get_board_info) (struct qlcnic_adapter *);
void (*set_mac_filter_count) (struct qlcnic_adapter *);
void (*free_mac_list) (struct qlcnic_adapter *);
@@ -2064,9 +2065,10 @@ static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
}
static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
- u64 *addr, u16 id)
+ u64 *addr, u16 vlan,
+ struct qlcnic_host_tx_ring *tx_ring)
{
- adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
+ adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring);
}
static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 569d54ededec..a79d84f99102 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2135,7 +2135,8 @@ out:
}
void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
- u16 vlan_id)
+ u16 vlan_id,
+ struct qlcnic_host_tx_ring *tx_ring)
{
u8 mac[ETH_ALEN];
memcpy(&mac, addr, ETH_ALEN);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index b75a81246856..73fe2f64491d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -550,7 +550,8 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
-void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+ u16 vlan, struct qlcnic_host_tx_ring *ring);
int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 4bb33af8e2b3..56a3bd9e37dc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -173,7 +173,8 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
struct net_device *netdev);
void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
- u64 *uaddr, u16 vlan_id);
+ u64 *uaddr, u16 vlan_id,
+ struct qlcnic_host_tx_ring *tx_ring);
int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
struct ethtool_coalesce *);
int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 84dd83031a1b..9647578cbe6a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -268,13 +268,12 @@ static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
}
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
- u16 vlan_id)
+ u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
{
struct cmd_desc_type0 *hwdesc;
struct qlcnic_nic_req *req;
struct qlcnic_mac_req *mac_req;
struct qlcnic_vlan_req *vlan_req;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
u32 producer;
u64 word;
@@ -301,7 +300,8 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
struct cmd_desc_type0 *first_desc,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct qlcnic_host_tx_ring *tx_ring)
{
struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
@@ -335,7 +335,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
tmp_fil->vlan_id == vlan_id) {
if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
qlcnic_change_filter(adapter, &src_addr,
- vlan_id);
+ vlan_id, tx_ring);
tmp_fil->ftime = jiffies;
return;
}
@@ -350,7 +350,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
if (!fil)
return;
- qlcnic_change_filter(adapter, &src_addr, vlan_id);
+ qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
fil->ftime = jiffies;
fil->vlan_id = vlan_id;
memcpy(fil->faddr, &src_addr, ETH_ALEN);
@@ -766,7 +766,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
if (adapter->drv_mac_learn)
- qlcnic_send_filter(adapter, first_desc, skb);
+ qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
tx_ring->tx_stats.tx_bytes += skb->len;
tx_ring->tx_stats.xmit_called++;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 2d38d1ac2aae..dbd48012224f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -59,9 +59,6 @@ static int qlcnic_close(struct net_device *netdev);
static void qlcnic_tx_timeout(struct net_device *netdev);
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void qlcnic_poll_controller(struct net_device *netdev);
-#endif
static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
@@ -545,9 +542,6 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_udp_tunnel_add = qlcnic_add_vxlan_port,
.ndo_udp_tunnel_del = qlcnic_del_vxlan_port,
.ndo_features_check = qlcnic_features_check,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = qlcnic_poll_controller,
-#endif
#ifdef CONFIG_QLCNIC_SRIOV
.ndo_set_vf_mac = qlcnic_sriov_set_vf_mac,
.ndo_set_vf_rate = qlcnic_sriov_set_vf_tx_rate,
@@ -3200,45 +3194,6 @@ static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
return IRQ_HANDLED;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void qlcnic_poll_controller(struct net_device *netdev)
-{
- struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_recv_context *recv_ctx;
- struct qlcnic_host_tx_ring *tx_ring;
- int ring;
-
- if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
- return;
-
- recv_ctx = adapter->recv_ctx;
-
- for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
- sds_ring = &recv_ctx->sds_rings[ring];
- qlcnic_disable_sds_intr(adapter, sds_ring);
- napi_schedule(&sds_ring->napi);
- }
-
- if (adapter->flags & QLCNIC_MSIX_ENABLED) {
- /* Only Multi-Tx queue capable devices need to
- * schedule NAPI for TX rings
- */
- if ((qlcnic_83xx_check(adapter) &&
- (adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
- (qlcnic_82xx_check(adapter) &&
- !qlcnic_check_multi_tx(adapter)))
- return;
-
- for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
- tx_ring = &adapter->tx_ring[ring];
- qlcnic_disable_tx_intr(adapter, tx_ring);
- napi_schedule(&tx_ring->napi);
- }
- }
-}
-#endif
-
static void
qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
{
diff --git a/drivers/net/ethernet/qualcomm/qca_7k.c b/drivers/net/ethernet/qualcomm/qca_7k.c
index ffe7a16bdfc8..6c8543fb90c0 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k.c
+++ b/drivers/net/ethernet/qualcomm/qca_7k.c
@@ -45,34 +45,33 @@ qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result)
{
__be16 rx_data;
__be16 tx_data;
- struct spi_transfer *transfer;
- struct spi_message *msg;
+ struct spi_transfer transfer[2];
+ struct spi_message msg;
int ret;
+ memset(transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
tx_data = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_INTERNAL | reg);
+ *result = 0;
+
+ transfer[0].tx_buf = &tx_data;
+ transfer[0].len = QCASPI_CMD_LEN;
+ transfer[1].rx_buf = &rx_data;
+ transfer[1].len = QCASPI_CMD_LEN;
+
+ spi_message_add_tail(&transfer[0], &msg);
if (qca->legacy_mode) {
- msg = &qca->spi_msg1;
- transfer = &qca->spi_xfer1;
- transfer->tx_buf = &tx_data;
- transfer->rx_buf = NULL;
- transfer->len = QCASPI_CMD_LEN;
- spi_sync(qca->spi_dev, msg);
- } else {
- msg = &qca->spi_msg2;
- transfer = &qca->spi_xfer2[0];
- transfer->tx_buf = &tx_data;
- transfer->rx_buf = NULL;
- transfer->len = QCASPI_CMD_LEN;
- transfer = &qca->spi_xfer2[1];
+ spi_sync(qca->spi_dev, &msg);
+ spi_message_init(&msg);
}
- transfer->tx_buf = NULL;
- transfer->rx_buf = &rx_data;
- transfer->len = QCASPI_CMD_LEN;
- ret = spi_sync(qca->spi_dev, msg);
+ spi_message_add_tail(&transfer[1], &msg);
+ ret = spi_sync(qca->spi_dev, &msg);
if (!ret)
- ret = msg->status;
+ ret = msg.status;
if (ret)
qcaspi_spi_error(qca);
@@ -86,35 +85,32 @@ int
qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value)
{
__be16 tx_data[2];
- struct spi_transfer *transfer;
- struct spi_message *msg;
+ struct spi_transfer transfer[2];
+ struct spi_message msg;
int ret;
+ memset(&transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
tx_data[0] = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_INTERNAL | reg);
tx_data[1] = cpu_to_be16(value);
+ transfer[0].tx_buf = &tx_data[0];
+ transfer[0].len = QCASPI_CMD_LEN;
+ transfer[1].tx_buf = &tx_data[1];
+ transfer[1].len = QCASPI_CMD_LEN;
+
+ spi_message_add_tail(&transfer[0], &msg);
if (qca->legacy_mode) {
- msg = &qca->spi_msg1;
- transfer = &qca->spi_xfer1;
- transfer->tx_buf = &tx_data[0];
- transfer->rx_buf = NULL;
- transfer->len = QCASPI_CMD_LEN;
- spi_sync(qca->spi_dev, msg);
- } else {
- msg = &qca->spi_msg2;
- transfer = &qca->spi_xfer2[0];
- transfer->tx_buf = &tx_data[0];
- transfer->rx_buf = NULL;
- transfer->len = QCASPI_CMD_LEN;
- transfer = &qca->spi_xfer2[1];
+ spi_sync(qca->spi_dev, &msg);
+ spi_message_init(&msg);
}
- transfer->tx_buf = &tx_data[1];
- transfer->rx_buf = NULL;
- transfer->len = QCASPI_CMD_LEN;
- ret = spi_sync(qca->spi_dev, msg);
+ spi_message_add_tail(&transfer[1], &msg);
+ ret = spi_sync(qca->spi_dev, &msg);
if (!ret)
- ret = msg->status;
+ ret = msg.status;
if (ret)
qcaspi_spi_error(qca);
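
Both qca_7k.c accessors now build their SPI message on the stack instead of reusing the spi_msg1/spi_msg2 scratch structures cached in struct qcaspi (removed from qca_spi.h further down), which makes every call self-contained and re-entrant. The same idiom as a free-standing, hedged sketch (designated initializers zero the unset spi_transfer fields, so no memset is needed):

#include <linux/spi/spi.h>

/* Minimal command-then-read round trip in the new style. */
static int spi_cmd_then_read(struct spi_device *spi, __be16 *cmd,
			     void *rx, unsigned int rx_len)
{
	struct spi_transfer xfer[2] = {
		{ .tx_buf = cmd, .len = sizeof(*cmd) },
		{ .rx_buf = rx,  .len = rx_len },
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer[0], &msg);
	spi_message_add_tail(&xfer[1], &msg);

	return spi_sync(spi, &msg);	/* sleeps; not for atomic context */
}

In legacy mode the hunks above instead send the command transfer as its own message and re-run spi_message_init() before queueing the data transfer, since the second transfer must go out as a separate message.
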
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 206f0266463e..66b775d462fd 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -99,22 +99,24 @@ static u32
qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len)
{
__be16 cmd;
- struct spi_message *msg = &qca->spi_msg2;
- struct spi_transfer *transfer = &qca->spi_xfer2[0];
+ struct spi_message msg;
+ struct spi_transfer transfer[2];
int ret;
+ memset(&transfer, 0, sizeof(transfer));
+ spi_message_init(&msg);
+
cmd = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL);
- transfer->tx_buf = &cmd;
- transfer->rx_buf = NULL;
- transfer->len = QCASPI_CMD_LEN;
- transfer = &qca->spi_xfer2[1];
- transfer->tx_buf = src;
- transfer->rx_buf = NULL;
- transfer->len = len;
+ transfer[0].tx_buf = &cmd;
+ transfer[0].len = QCASPI_CMD_LEN;
+ transfer[1].tx_buf = src;
+ transfer[1].len = len;
- ret = spi_sync(qca->spi_dev, msg);
+ spi_message_add_tail(&transfer[0], &msg);
+ spi_message_add_tail(&transfer[1], &msg);
+ ret = spi_sync(qca->spi_dev, &msg);
- if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) {
+ if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
qcaspi_spi_error(qca);
return 0;
}
@@ -125,17 +127,20 @@ qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len)
static u32
qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len)
{
- struct spi_message *msg = &qca->spi_msg1;
- struct spi_transfer *transfer = &qca->spi_xfer1;
+ struct spi_message msg;
+ struct spi_transfer transfer;
int ret;
- transfer->tx_buf = src;
- transfer->rx_buf = NULL;
- transfer->len = len;
+ memset(&transfer, 0, sizeof(transfer));
+ spi_message_init(&msg);
+
+ transfer.tx_buf = src;
+ transfer.len = len;
- ret = spi_sync(qca->spi_dev, msg);
+ spi_message_add_tail(&transfer, &msg);
+ ret = spi_sync(qca->spi_dev, &msg);
- if (ret || (msg->actual_length != len)) {
+ if (ret || (msg.actual_length != len)) {
qcaspi_spi_error(qca);
return 0;
}
@@ -146,23 +151,25 @@ qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len)
static u32
qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len)
{
- struct spi_message *msg = &qca->spi_msg2;
+ struct spi_message msg;
__be16 cmd;
- struct spi_transfer *transfer = &qca->spi_xfer2[0];
+ struct spi_transfer transfer[2];
int ret;
+ memset(&transfer, 0, sizeof(transfer));
+ spi_message_init(&msg);
+
cmd = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL);
- transfer->tx_buf = &cmd;
- transfer->rx_buf = NULL;
- transfer->len = QCASPI_CMD_LEN;
- transfer = &qca->spi_xfer2[1];
- transfer->tx_buf = NULL;
- transfer->rx_buf = dst;
- transfer->len = len;
+ transfer[0].tx_buf = &cmd;
+ transfer[0].len = QCASPI_CMD_LEN;
+ transfer[1].rx_buf = dst;
+ transfer[1].len = len;
- ret = spi_sync(qca->spi_dev, msg);
+ spi_message_add_tail(&transfer[0], &msg);
+ spi_message_add_tail(&transfer[1], &msg);
+ ret = spi_sync(qca->spi_dev, &msg);
- if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) {
+ if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
qcaspi_spi_error(qca);
return 0;
}
@@ -173,17 +180,20 @@ qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len)
static u32
qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len)
{
- struct spi_message *msg = &qca->spi_msg1;
- struct spi_transfer *transfer = &qca->spi_xfer1;
+ struct spi_message msg;
+ struct spi_transfer transfer;
int ret;
- transfer->tx_buf = NULL;
- transfer->rx_buf = dst;
- transfer->len = len;
+ memset(&transfer, 0, sizeof(transfer));
+ spi_message_init(&msg);
- ret = spi_sync(qca->spi_dev, msg);
+ transfer.rx_buf = dst;
+ transfer.len = len;
- if (ret || (msg->actual_length != len)) {
+ spi_message_add_tail(&transfer, &msg);
+ ret = spi_sync(qca->spi_dev, &msg);
+
+ if (ret || (msg.actual_length != len)) {
qcaspi_spi_error(qca);
return 0;
}
@@ -195,19 +205,23 @@ static int
qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd)
{
__be16 tx_data;
- struct spi_message *msg = &qca->spi_msg1;
- struct spi_transfer *transfer = &qca->spi_xfer1;
+ struct spi_message msg;
+ struct spi_transfer transfer;
int ret;
+ memset(&transfer, 0, sizeof(transfer));
+
+ spi_message_init(&msg);
+
tx_data = cpu_to_be16(cmd);
- transfer->len = sizeof(tx_data);
- transfer->tx_buf = &tx_data;
- transfer->rx_buf = NULL;
+ transfer.len = sizeof(cmd);
+ transfer.tx_buf = &tx_data;
+ spi_message_add_tail(&transfer, &msg);
- ret = spi_sync(qca->spi_dev, msg);
+ ret = spi_sync(qca->spi_dev, &msg);
if (!ret)
- ret = msg->status;
+ ret = msg.status;
if (ret)
qcaspi_spi_error(qca);
@@ -835,16 +849,6 @@ qcaspi_netdev_setup(struct net_device *dev)
qca = netdev_priv(dev);
memset(qca, 0, sizeof(struct qcaspi));
- memset(&qca->spi_xfer1, 0, sizeof(struct spi_transfer));
- memset(&qca->spi_xfer2, 0, sizeof(struct spi_transfer) * 2);
-
- spi_message_init(&qca->spi_msg1);
- spi_message_add_tail(&qca->spi_xfer1, &qca->spi_msg1);
-
- spi_message_init(&qca->spi_msg2);
- spi_message_add_tail(&qca->spi_xfer2[0], &qca->spi_msg2);
- spi_message_add_tail(&qca->spi_xfer2[1], &qca->spi_msg2);
-
memset(&qca->txr, 0, sizeof(qca->txr));
qca->txr.count = TX_RING_MAX_LEN;
}
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index fc4beb1b32d1..fc0e98726b36 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -83,11 +83,6 @@ struct qcaspi {
struct tx_ring txr;
struct qcaspi_stats stats;
- struct spi_message spi_msg1;
- struct spi_message spi_msg2;
- struct spi_transfer spi_xfer1;
- struct spi_transfer spi_xfer2[2];
-
u8 *rx_buffer;
u32 buffer_size;
u8 sync;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 7fd86d40a337..11167abe5934 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -113,7 +113,7 @@ rmnet_map_ingress_handler(struct sk_buff *skb,
struct sk_buff *skbn;
if (skb->dev->type == ARPHRD_ETHER) {
- if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_KERNEL)) {
+ if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
kfree_skb(skb);
return;
}
@@ -147,7 +147,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
}
if (skb_headroom(skb) < required_headroom) {
- if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL))
+ if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
return -ENOMEM;
}
@@ -189,6 +189,9 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
if (!skb)
goto done;
+ if (skb->pkt_type == PACKET_LOOPBACK)
+ return RX_HANDLER_PASS;
+
dev = skb->dev;
port = rmnet_get_port(dev);
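
The rmnet changes are two independent RX-path fixes: the handlers run in non-sleeping contexts (the rx_handler runs under softirq), so pskb_expand_head() must use GFP_ATOMIC, and looped-back packets are now passed through rather than de-muxed a second time. The headroom rule as a hedged sketch:

/* Hedged sketch: never sleep when growing headroom in the RX path.
 * pskb_expand_head() returns 0 or a negative errno.
 */
static int ensure_headroom_atomic(struct sk_buff *skb, unsigned int need)
{
	if (skb_headroom(skb) >= need)
		return 0;

	return pskb_expand_head(skb, need, 0, GFP_ATOMIC);
}
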
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index b08d51bf7a20..2c350099b83c 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -13,6 +13,7 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
@@ -631,7 +632,7 @@ struct rtl8169_tc_offsets {
};
enum rtl_flag {
- RTL_FLAG_TASK_ENABLED,
+ RTL_FLAG_TASK_ENABLED = 0,
RTL_FLAG_TASK_SLOW_PENDING,
RTL_FLAG_TASK_RESET_PENDING,
RTL_FLAG_MAX
@@ -665,6 +666,7 @@ struct rtl8169_private {
u16 event_slow;
const struct rtl_coalesce_info *coalesce_info;
+ struct clk *clk;
struct mdio_ops {
void (*write)(struct rtl8169_private *, int, int);
@@ -4069,6 +4071,14 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
phy_speed_up(dev->phydev);
genphy_soft_reset(dev->phydev);
+
+ /* It was reported that several chips end up with 10MBit/Half on a
+ * 1GBit link after resuming from S3. For whatever reason the PHY on
+ * these chips doesn't properly start a renegotiation when soft-reset.
+ * Explicitly requesting a renegotiation fixes this.
+ */
+ if (dev->phydev->autoneg == AUTONEG_ENABLE)
+ phy_restart_aneg(dev->phydev);
}
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
@@ -4272,8 +4282,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_24:
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
+ case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36:
+ case RTL_GIGA_MAC_VER_38:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_51:
@@ -4525,9 +4535,14 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
{
- /* Set DMA burst size and Interframe Gap Time */
- RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) |
- (InterFrameGap << TxInterFrameGapShift));
+ u32 val = TX_DMA_BURST << TxDMAShift |
+ InterFrameGap << TxInterFrameGapShift;
+
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_39)
+ val |= TXCFG_AUTO_FIFO;
+
+ RTL_W32(tp, TxConfig, val);
}
static void rtl_set_rx_max_size(struct rtl8169_private *tp)
@@ -4634,13 +4649,13 @@ static void rtl_hw_start(struct rtl8169_private *tp)
rtl_set_rx_max_size(tp);
rtl_set_rx_tx_desc_registers(tp);
- rtl_set_tx_config_registers(tp);
RTL_W8(tp, Cfg9346, Cfg9346_Lock);
/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
RTL_R8(tp, IntrMask);
RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_init_rxcfg(tp);
+ rtl_set_tx_config_registers(tp);
rtl_set_rx_mode(tp->dev);
/* no early-rx interrupts */
@@ -4775,12 +4790,14 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
{
if (enable) {
- RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
+ RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
} else {
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
}
+
+ udelay(10);
}
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
@@ -5020,7 +5037,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
rtl_disable_clock_request(tp);
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
/* Adjust EEE LED frequency */
@@ -5054,7 +5070,6 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
rtl_disable_clock_request(tp);
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
@@ -5099,8 +5114,6 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
static void rtl_hw_start_8168g(struct rtl8169_private *tp)
{
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5198,8 +5211,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5282,8 +5293,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
{
rtl8168ep_stop_cmac(tp);
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
-
rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC);
@@ -5605,7 +5614,6 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
/* Force LAN exit from ASPM if Rx/Tx are not idle */
RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
- RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
@@ -5625,6 +5633,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
static void rtl_hw_start_8106(struct rtl8169_private *tp)
{
+ rtl_hw_aspm_clkreq_enable(tp, false);
+
/* Force LAN exit from ASPM if Rx/Tx are not idle */
RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
@@ -5633,6 +5643,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
rtl_pcie_state_l2l3_enable(tp, false);
+ rtl_hw_aspm_clkreq_enable(tp, true);
}
static void rtl_hw_start_8101(struct rtl8169_private *tp)
@@ -6538,17 +6549,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
struct net_device *dev = tp->dev;
u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
- int work_done= 0;
+ int work_done;
u16 status;
status = rtl_get_events(tp);
rtl_ack_events(tp, status & ~tp->event_slow);
- if (status & RTL_EVENT_NAPI_RX)
- work_done = rtl_rx(dev, tp, (u32) budget);
+ work_done = rtl_rx(dev, tp, (u32) budget);
- if (status & RTL_EVENT_NAPI_TX)
- rtl_tx(dev, tp);
+ rtl_tx(dev, tp);
if (status & tp->event_slow) {
enable_mask &= ~tp->event_slow;
@@ -6655,7 +6664,8 @@ static int rtl8169_close(struct net_device *dev)
rtl8169_update_counters(tp);
rtl_lock_work(tp);
- clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+ /* Clear all task flags */
+ bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
rtl8169_down(dev);
rtl_unlock_work(tp);
@@ -6838,7 +6848,9 @@ static void rtl8169_net_suspend(struct net_device *dev)
rtl_lock_work(tp);
napi_disable(&tp->napi);
- clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+ /* Clear all task flags */
+ bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
+
rtl_unlock_work(tp);
rtl_pll_power_down(tp);
@@ -6850,8 +6862,10 @@ static int rtl8169_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
rtl8169_net_suspend(dev);
+ clk_disable_unprepare(tp->clk);
return 0;
}
@@ -6879,6 +6893,9 @@ static int rtl8169_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ clk_prepare_enable(tp->clk);
if (netif_running(dev))
__rtl8169_resume(dev);
@@ -7074,20 +7091,12 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
{
unsigned int flags;
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06:
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
RTL_W8(tp, Cfg9346, Cfg9346_Lock);
flags = PCI_IRQ_LEGACY;
- break;
- case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_40:
- /* This version was reported to have issues with resume
- * from suspend when using MSI-X
- */
- flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
- break;
- default:
+ } else {
flags = PCI_IRQ_ALL_TYPES;
}
@@ -7254,6 +7263,11 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
}
}
+static void rtl_disable_clk(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
@@ -7274,6 +7288,32 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
tp->supports_gmii = cfg->has_gmii;
+ /* Get the *optional* external "ether_clk" used on some boards */
+ tp->clk = devm_clk_get(&pdev->dev, "ether_clk");
+ if (IS_ERR(tp->clk)) {
+ rc = PTR_ERR(tp->clk);
+ if (rc == -ENOENT) {
+ /* clk-core allows NULL (for suspend / resume) */
+ tp->clk = NULL;
+ } else if (rc == -EPROBE_DEFER) {
+ return rc;
+ } else {
+ dev_err(&pdev->dev, "failed to get clk: %d\n", rc);
+ return rc;
+ }
+ } else {
+ rc = clk_prepare_enable(tp->clk);
+ if (rc) {
+ dev_err(&pdev->dev, "failed to enable clk: %d\n", rc);
+ return rc;
+ }
+
+ rc = devm_add_action_or_reset(&pdev->dev, rtl_disable_clk,
+ tp->clk);
+ if (rc)
+ return rc;
+ }
+
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
if (rc < 0) {
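
The r8169 probe path gains an optional external "ether_clk" (used on some boards to gate the chip's reference clock): -ENOENT collapses to a NULL clk because the clk API treats NULL as a no-op clock, -EPROBE_DEFER propagates so the probe retries once the clock provider appears, and devm_add_action_or_reset() ties clk_disable_unprepare() to device teardown. The shape of that idiom as a hedged sketch; newer kernels offer devm_clk_get_optional(), which folds away the -ENOENT branch:

#include <linux/clk.h>

static int probe_optional_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, "ether_clk");

	if (IS_ERR(clk)) {
		int err = PTR_ERR(clk);

		if (err != -ENOENT)	/* real failure or -EPROBE_DEFER */
			return err;
		clk = NULL;		/* absent: clk_*() accept NULL */
	}

	*out = clk;
	return clk_prepare_enable(clk);	/* no-op when clk is NULL */
}
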
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index f3f7477043ce..bb0ebdfd4459 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Renesas device configuration
#
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile
index a05102a7df02..f21ab8c02af0 100644
--- a/drivers/net/ethernet/renesas/Makefile
+++ b/drivers/net/ethernet/renesas/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Renesas device drivers.
#
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 1470fc12282b..9b6bf557a2f5 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -428,6 +428,7 @@ enum EIS_BIT {
EIS_CULF1 = 0x00000080,
EIS_TFFF = 0x00000100,
EIS_QFS = 0x00010000,
+ EIS_RESERVED = (GENMASK(31, 17) | GENMASK(15, 11)),
};
/* RIC0 */
@@ -472,6 +473,7 @@ enum RIS0_BIT {
RIS0_FRF15 = 0x00008000,
RIS0_FRF16 = 0x00010000,
RIS0_FRF17 = 0x00020000,
+ RIS0_RESERVED = GENMASK(31, 18),
};
/* RIC1 */
@@ -528,6 +530,7 @@ enum RIS2_BIT {
RIS2_QFF16 = 0x00010000,
RIS2_QFF17 = 0x00020000,
RIS2_RFFF = 0x80000000,
+ RIS2_RESERVED = GENMASK(30, 18),
};
/* TIC */
@@ -544,6 +547,7 @@ enum TIS_BIT {
TIS_FTF1 = 0x00000002, /* Undocumented? */
TIS_TFUF = 0x00000100,
TIS_TFWF = 0x00000200,
+ TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4))
};
/* ISS */
@@ -617,6 +621,7 @@ enum GIC_BIT {
enum GIS_BIT {
GIS_PTCF = 0x00000001, /* Undocumented? */
GIS_PTMF = 0x00000004,
+ GIS_RESERVED = GENMASK(15, 10),
};
/* GIE (R-Car Gen3 only) */
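
These *_RESERVED masks back a single idiom in ravb_main.c and ravb_ptp.c below: the interrupt-status registers clear a flag when 0 is written and retain it when 1 is written, and the fix stops writing 1 to reserved bits, which must be written as 0. Folding the per-register reserved mask into the complement satisfies both rules at once; a hedged model:

/* 0 = clear this flag (and the mandated value for reserved bits),
 * 1 = leave the flag set.
 */
static inline u32 irq_clear_mask(u32 flags_to_clear, u32 reserved)
{
	return ~(flags_to_clear | reserved);
}

/* e.g. ravb_write(ndev, irq_clear_mask(TIS_TFUF, TIS_RESERVED), TIS); */
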
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index aff5516b781e..d6f753925352 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -739,10 +739,11 @@ static void ravb_error_interrupt(struct net_device *ndev)
u32 eis, ris2;
eis = ravb_read(ndev, EIS);
- ravb_write(ndev, ~EIS_QFS, EIS);
+ ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
if (eis & EIS_QFS) {
ris2 = ravb_read(ndev, RIS2);
- ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);
+ ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
+ RIS2);
/* Receive Descriptor Empty int */
if (ris2 & RIS2_QFF0)
@@ -795,7 +796,7 @@ static bool ravb_timestamp_interrupt(struct net_device *ndev)
u32 tis = ravb_read(ndev, TIS);
if (tis & TIS_TFUF) {
- ravb_write(ndev, ~TIS_TFUF, TIS);
+ ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
ravb_get_tx_tstamp(ndev);
return true;
}
@@ -930,7 +931,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
/* Processing RX Descriptor Ring */
if (ris0 & mask) {
/* Clear RX interrupt */
- ravb_write(ndev, ~mask, RIS0);
+ ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
if (ravb_rx(ndev, &quota, q))
goto out;
}
@@ -938,7 +939,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
if (tis & mask) {
spin_lock_irqsave(&priv->lock, flags);
/* Clear TX interrupt */
- ravb_write(ndev, ~mask, TIS);
+ ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
ravb_tx_free(ndev, q, true);
netif_wake_subqueue(ndev, q);
mmiowb();
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index eede70ec37f8..dce2a40a31e3 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -1,13 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/* PTP 1588 clock using the Renesas Ethernet AVB
*
* Copyright (C) 2013-2015 Renesas Electronics Corporation
* Copyright (C) 2015 Renesas Solutions Corp.
* Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#include "ravb.h"
@@ -319,7 +315,7 @@ void ravb_ptp_interrupt(struct net_device *ndev)
}
}
- ravb_write(ndev, ~gis, GIS);
+ ravb_write(ndev, ~(gis | GIS_RESERVED), GIS);
}
void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index c5bc124b41a9..d1bb73bf9914 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -77,7 +77,8 @@ static void ether3_setmulticastlist(struct net_device *dev);
static int ether3_rx(struct net_device *dev, unsigned int maxcnt);
static void ether3_tx(struct net_device *dev);
static int ether3_open (struct net_device *dev);
-static int ether3_sendpacket (struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t ether3_sendpacket(struct sk_buff *skb,
+ struct net_device *dev);
static irqreturn_t ether3_interrupt (int irq, void *dev_id);
static int ether3_close (struct net_device *dev);
static void ether3_setmulticastlist (struct net_device *dev);
@@ -481,7 +482,7 @@ static void ether3_timeout(struct net_device *dev)
/*
* Transmit a packet
*/
-static int
+static netdev_tx_t
ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
{
unsigned long flags;
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 573691bc3b71..70cce63a6081 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -578,7 +578,8 @@ static inline int sgiseeq_reset(struct net_device *dev)
return 0;
}
-static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct sgiseeq_private *sp = netdev_priv(dev);
struct hpc3_ethregs *hregs = sp->hregs;
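
The seeq and sgi conversions are prototype fixes only: .ndo_start_xmit is declared as returning netdev_tx_t, and returning a plain int defeats the __bitwise type checking that sparse applies. The contract the new signatures encode, as a hedged sketch (ring_has_room() is a hypothetical helper):

#include <linux/netdevice.h>

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	if (!ring_has_room(dev)) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;	/* skb not consumed; core retries */
	}

	/* ...hand the skb to hardware... */
	return NETDEV_TX_OK;		/* skb ownership transferred */
}
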
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 330233286e78..3d0dd39c289e 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2208,29 +2208,6 @@ static void efx_fini_napi(struct efx_nic *efx)
/**************************************************************************
*
- * Kernel netpoll interface
- *
- *************************************************************************/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-
-/* Although in the common case interrupts will be disabled, this is not
- * guaranteed. However, all our work happens inside the NAPI callback,
- * so no locking is required.
- */
-static void efx_netpoll(struct net_device *net_dev)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_channel *channel;
-
- efx_for_each_channel(channel, efx)
- efx_schedule_channel(channel);
-}
-
-#endif
-
-/**************************************************************************
- *
* Kernel net device interface
*
*************************************************************************/
@@ -2509,9 +2486,6 @@ static const struct net_device_ops efx_netdev_ops = {
#endif
.ndo_get_phys_port_id = efx_get_phys_port_id,
.ndo_get_phys_port_name = efx_get_phys_port_name,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = efx_netpoll,
-#endif
.ndo_setup_tc = efx_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs,
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index dd5530a4f8c8..03e2455c502e 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2054,29 +2054,6 @@ static void ef4_fini_napi(struct ef4_nic *efx)
/**************************************************************************
*
- * Kernel netpoll interface
- *
- *************************************************************************/
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-
-/* Although in the common case interrupts will be disabled, this is not
- * guaranteed. However, all our work happens inside the NAPI callback,
- * so no locking is required.
- */
-static void ef4_netpoll(struct net_device *net_dev)
-{
- struct ef4_nic *efx = netdev_priv(net_dev);
- struct ef4_channel *channel;
-
- ef4_for_each_channel(channel, efx)
- ef4_schedule_channel(channel);
-}
-
-#endif
-
-/**************************************************************************
- *
* Kernel net device interface
*
*************************************************************************/
@@ -2250,9 +2227,6 @@ static const struct net_device_ops ef4_netdev_ops = {
.ndo_set_mac_address = ef4_set_mac_address,
.ndo_set_rx_mode = ef4_set_rx_mode,
.ndo_set_features = ef4_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = ef4_netpoll,
-#endif
.ndo_setup_tc = ef4_setup_tc,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = ef4_filter_rfs,
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 18d533fdf14c..3140999642ba 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -99,7 +99,7 @@ struct ioc3_private {
static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void ioc3_set_multicast_list(struct net_device *dev);
-static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void ioc3_timeout(struct net_device *dev);
static inline unsigned int ioc3_hash(const unsigned char *addr);
static inline void ioc3_stop(struct ioc3_private *ip);
@@ -1390,7 +1390,7 @@ static struct pci_driver ioc3_driver = {
.remove = ioc3_remove_one,
};
-static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned long data;
struct ioc3_private *ip = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index ea55abd62ec7..703fbbefea44 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -697,7 +697,7 @@ static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb)
/*
* Transmit a packet (called by the kernel)
*/
-static int meth_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t meth_tx(struct sk_buff *skb, struct net_device *dev)
{
struct meth_private *priv = netdev_priv(dev);
unsigned long flags;
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 7aa5ebb6766c..4289ccb26e4e 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -735,8 +735,11 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
u16 idx = dring->tail;
struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
- if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD))
+ if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
+ /* reading the register clears the irq */
+ netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
break;
+ }
/* This barrier is needed to keep us from reading
* any other fields out of the netsec_de until we have
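
The netsec fix targets a stray RX interrupt that fires while the next descriptor is still hardware-owned: reading NETSEC_REG_NRM_RX_PKTCNT acknowledges the latched IRQ as a side effect, so the value is read and deliberately discarded before breaking out of the loop. The idiom, hedged (the wrapper name is hypothetical):

static void netsec_ack_rx_irq(struct netsec_priv *priv)
{
	/* read-to-ack: reading the counter clears the latched RX IRQ,
	 * so the value is intentionally discarded */
	(void)netsec_read(priv, NETSEC_REG_NRM_RX_PKTCNT);
}
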
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 1854f270ad66..b1b305f8f414 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -258,10 +258,10 @@ struct stmmac_safety_stats {
#define MAX_DMA_RIWT 0xff
#define MIN_DMA_RIWT 0x20
/* Tx coalesce parameters */
-#define STMMAC_COAL_TX_TIMER 40000
+#define STMMAC_COAL_TX_TIMER 1000
#define STMMAC_MAX_COAL_TX_TICK 100000
#define STMMAC_TX_MAX_FRAMES 256
-#define STMMAC_TX_FRAMES 64
+#define STMMAC_TX_FRAMES 25
/* Packets types */
enum packets_types {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index c0a855b7ab3b..63e1064b27a2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -48,6 +48,8 @@ struct stmmac_tx_info {
/* Frequently used values are kept adjacent for cache effect */
struct stmmac_tx_queue {
+ u32 tx_count_frames;
+ struct timer_list txtimer;
u32 queue_index;
struct stmmac_priv *priv_data;
struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
@@ -73,7 +75,14 @@ struct stmmac_rx_queue {
u32 rx_zeroc_thresh;
dma_addr_t dma_rx_phy;
u32 rx_tail_addr;
+};
+
+struct stmmac_channel {
struct napi_struct napi ____cacheline_aligned_in_smp;
+ struct stmmac_priv *priv_data;
+ u32 index;
+ int has_rx;
+ int has_tx;
};
struct stmmac_tc_entry {
@@ -109,14 +118,12 @@ struct stmmac_pps_cfg {
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
- u32 tx_count_frames;
u32 tx_coal_frames;
u32 tx_coal_timer;
int tx_coalesce;
int hwts_tx_en;
bool tx_path_in_lpi_mode;
- struct timer_list txtimer;
bool tso;
unsigned int dma_buf_sz;
@@ -137,6 +144,9 @@ struct stmmac_priv {
/* TX Queue */
struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
+ /* Generic channel for NAPI */
+ struct stmmac_channel channel[STMMAC_CH_MAX];
+
bool oldlink;
int speed;
int oldduplex;
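
stmmac previously embedded its only NAPI instance in struct stmmac_rx_queue, which forced the poller to scan all TX queues on every run; the new struct stmmac_channel pairs one napi_struct with per-channel has_rx/has_tx flags so RX and TX completion for a DMA channel land in the same poller. The poll side recovers the channel with container_of(), roughly as below; this is a hedged sketch, and the real poll function in this series differs in bookkeeping details:

static int stmmac_napi_poll_sketch(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, napi);
	struct stmmac_priv *priv = ch->priv_data;
	int done = 0;

	if (ch->has_tx)
		stmmac_tx_clean(priv, budget, ch->index);	/* bounded by budget */
	if (ch->has_rx)
		done = stmmac_rx(priv, budget, ch->index);

	if (done < budget && napi_complete_done(napi, done))
		stmmac_enable_dma_irq(priv, priv->ioaddr, ch->index);

	return done;
}

stmmac_main.c below wires exactly this split: stmmac_napi_check() schedules ch->napi for RX, TX, or both, and stmmac_tx_clean() now takes a budget so TX work is bounded per poll.
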
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 9f458bb16f2a..75896d6ba6e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -148,12 +148,14 @@ static void stmmac_verify_args(void)
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+ u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
u32 queue;
- for (queue = 0; queue < rx_queues_cnt; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
- napi_disable(&rx_q->napi);
+ napi_disable(&ch->napi);
}
}
@@ -164,12 +166,14 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv)
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+ u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
u32 queue;
- for (queue = 0; queue < rx_queues_cnt; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
- napi_enable(&rx_q->napi);
+ napi_enable(&ch->napi);
}
}
@@ -1843,18 +1847,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
* @queue: TX queue index
* Description: it reclaims the transmit resources after transmission completes.
*/
-static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
+static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
unsigned int bytes_compl = 0, pkts_compl = 0;
- unsigned int entry;
+ unsigned int entry, count = 0;
- netif_tx_lock(priv->dev);
+ __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
priv->xstats.tx_clean++;
entry = tx_q->dirty_tx;
- while (entry != tx_q->cur_tx) {
+ while ((entry != tx_q->cur_tx) && (count < budget)) {
struct sk_buff *skb = tx_q->tx_skbuff[entry];
struct dma_desc *p;
int status;
@@ -1870,6 +1874,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
if (unlikely(status & tx_dma_own))
break;
+ count++;
+
/* Make sure descriptor fields are read after reading
* the own bit.
*/
@@ -1937,7 +1943,10 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
stmmac_enable_eee_mode(priv);
mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}
- netif_tx_unlock(priv->dev);
+
+ __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
+
+ return count;
}
/**
@@ -2020,6 +2029,33 @@ static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
return false;
}
+static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
+{
+ int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
+ &priv->xstats, chan);
+ struct stmmac_channel *ch = &priv->channel[chan];
+ bool needs_work = false;
+
+ if ((status & handle_rx) && ch->has_rx) {
+ needs_work = true;
+ } else {
+ status &= ~handle_rx;
+ }
+
+ if ((status & handle_tx) && ch->has_tx) {
+ needs_work = true;
+ } else {
+ status &= ~handle_tx;
+ }
+
+ if (needs_work && napi_schedule_prep(&ch->napi)) {
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
+ __napi_schedule(&ch->napi);
+ }
+
+ return status;
+}
+
/**
* stmmac_dma_interrupt - DMA ISR
* @priv: driver private structure
@@ -2034,57 +2070,14 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
u32 channels_to_check = tx_channel_count > rx_channel_count ?
tx_channel_count : rx_channel_count;
u32 chan;
- bool poll_scheduled = false;
int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
/* Make sure we never check beyond our status buffer. */
if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
channels_to_check = ARRAY_SIZE(status);
- /* Each DMA channel can be used for rx and tx simultaneously, yet
- * napi_struct is embedded in struct stmmac_rx_queue rather than in a
- * stmmac_channel struct.
- * Because of this, stmmac_poll currently checks (and possibly wakes)
- * all tx queues rather than just a single tx queue.
- */
for (chan = 0; chan < channels_to_check; chan++)
- status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
- &priv->xstats, chan);
-
- for (chan = 0; chan < rx_channel_count; chan++) {
- if (likely(status[chan] & handle_rx)) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
-
- if (likely(napi_schedule_prep(&rx_q->napi))) {
- stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
- __napi_schedule(&rx_q->napi);
- poll_scheduled = true;
- }
- }
- }
-
- /* If we scheduled poll, we already know that tx queues will be checked.
- * If we didn't schedule poll, see if any DMA channel (used by tx) has a
- * completed transmission, if so, call stmmac_poll (once).
- */
- if (!poll_scheduled) {
- for (chan = 0; chan < tx_channel_count; chan++) {
- if (status[chan] & handle_tx) {
- /* It doesn't matter what rx queue we choose
- * here. We use 0 since it always exists.
- */
- struct stmmac_rx_queue *rx_q =
- &priv->rx_queue[0];
-
- if (likely(napi_schedule_prep(&rx_q->napi))) {
- stmmac_disable_dma_irq(priv,
- priv->ioaddr, chan);
- __napi_schedule(&rx_q->napi);
- }
- break;
- }
- }
- }
+ status[chan] = stmmac_napi_check(priv, chan);
for (chan = 0; chan < tx_channel_count; chan++) {
if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
@@ -2220,8 +2213,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
- tx_q->tx_tail_addr = tx_q->dma_tx_phy +
- (DMA_TX_SIZE * sizeof(struct dma_desc));
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy;
stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
tx_q->tx_tail_addr, chan);
}
@@ -2233,6 +2225,13 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
return ret;
}
+static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
+}
+
/**
* stmmac_tx_timer - mitigation sw timer for tx.
* @data: data pointer
@@ -2241,13 +2240,14 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
*/
static void stmmac_tx_timer(struct timer_list *t)
{
- struct stmmac_priv *priv = from_timer(priv, t, txtimer);
- u32 tx_queues_count = priv->plat->tx_queues_to_use;
- u32 queue;
+ struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
+ struct stmmac_priv *priv = tx_q->priv_data;
+ struct stmmac_channel *ch;
+
+ ch = &priv->channel[tx_q->queue_index];
- /* let's scan all the tx queues */
- for (queue = 0; queue < tx_queues_count; queue++)
- stmmac_tx_clean(priv, queue);
+ if (likely(napi_schedule_prep(&ch->napi)))
+ __napi_schedule(&ch->napi);
}
/**
@@ -2260,11 +2260,17 @@ static void stmmac_tx_timer(struct timer_list *t)
*/
static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
{
+ u32 tx_channel_count = priv->plat->tx_queues_to_use;
+ u32 chan;
+
priv->tx_coal_frames = STMMAC_TX_FRAMES;
priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
- timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
- priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
- add_timer(&priv->txtimer);
+
+ for (chan = 0; chan < tx_channel_count; chan++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+
+ timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
+ }
}
static void stmmac_set_rings_length(struct stmmac_priv *priv)
@@ -2592,6 +2598,7 @@ static void stmmac_hw_teardown(struct net_device *dev)
static int stmmac_open(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 chan;
int ret;
stmmac_check_ether_addr(priv);
@@ -2688,7 +2695,9 @@ irq_error:
if (dev->phydev)
phy_stop(dev->phydev);
- del_timer_sync(&priv->txtimer);
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ del_timer_sync(&priv->tx_queue[chan].txtimer);
+
stmmac_hw_teardown(dev);
init_error:
free_dma_desc_resources(priv);
@@ -2708,6 +2717,7 @@ dma_desc_error:
static int stmmac_release(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 chan;
if (priv->eee_enabled)
del_timer_sync(&priv->eee_ctrl_timer);
@@ -2722,7 +2732,8 @@ static int stmmac_release(struct net_device *dev)
stmmac_disable_all_queues(priv);
- del_timer_sync(&priv->txtimer);
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ del_timer_sync(&priv->tx_queue[chan].txtimer);
/* Free the IRQ lines */
free_irq(dev->irq, dev);
@@ -2936,14 +2947,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
priv->xstats.tx_tso_nfrags += nfrags;
/* Manage tx mitigation */
- priv->tx_count_frames += nfrags + 1;
- if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
- mod_timer(&priv->txtimer,
- STMMAC_COAL_TIMER(priv->tx_coal_timer));
- } else {
- priv->tx_count_frames = 0;
+ tx_q->tx_count_frames += nfrags + 1;
+ if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
+ tx_q->tx_count_frames = 0;
+ } else {
+ stmmac_tx_timer_arm(priv, queue);
}
skb_tx_timestamp(skb);
@@ -2992,6 +3002,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
return NETDEV_TX_OK;
@@ -3146,14 +3157,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* This approach takes care about the fragments: desc is the first
* element in case of no SG.
*/
- priv->tx_count_frames += nfrags + 1;
- if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
- mod_timer(&priv->txtimer,
- STMMAC_COAL_TIMER(priv->tx_coal_timer));
- } else {
- priv->tx_count_frames = 0;
+ tx_q->tx_count_frames += nfrags + 1;
+ if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
+ tx_q->tx_count_frames = 0;
+ } else {
+ stmmac_tx_timer_arm(priv, queue);
}
skb_tx_timestamp(skb);
@@ -3199,6 +3209,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
stmmac_enable_dma_transmission(priv, priv->ioaddr);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
return NETDEV_TX_OK;
@@ -3319,6 +3331,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_channel *ch = &priv->channel[queue];
unsigned int entry = rx_q->cur_rx;
int coe = priv->hw->rx_csum;
unsigned int next_entry;
@@ -3491,7 +3504,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
- napi_gro_receive(&rx_q->napi, skb);
+ napi_gro_receive(&ch->napi, skb);
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
@@ -3514,27 +3527,33 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
* Description :
* To look at the incoming frames and clear the tx resources.
*/
-static int stmmac_poll(struct napi_struct *napi, int budget)
+static int stmmac_napi_poll(struct napi_struct *napi, int budget)
{
- struct stmmac_rx_queue *rx_q =
- container_of(napi, struct stmmac_rx_queue, napi);
- struct stmmac_priv *priv = rx_q->priv_data;
- u32 tx_count = priv->plat->tx_queues_to_use;
- u32 chan = rx_q->queue_index;
- int work_done = 0;
- u32 queue;
+ struct stmmac_channel *ch =
+ container_of(napi, struct stmmac_channel, napi);
+ struct stmmac_priv *priv = ch->priv_data;
+ int work_done = 0, work_rem = budget;
+ u32 chan = ch->index;
priv->xstats.napi_poll++;
- /* check all the queues */
- for (queue = 0; queue < tx_count; queue++)
- stmmac_tx_clean(priv, queue);
+ if (ch->has_tx) {
+ int done = stmmac_tx_clean(priv, work_rem, chan);
- work_done = stmmac_rx(priv, budget, rx_q->queue_index);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
- stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+ work_done += done;
+ work_rem -= done;
+ }
+
+ if (ch->has_rx) {
+ int done = stmmac_rx(priv, work_rem, chan);
+
+ work_done += done;
+ work_rem -= done;
}
+
+ if (work_done < budget && napi_complete_done(napi, work_done))
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+
return work_done;
}
@@ -4198,8 +4217,8 @@ int stmmac_dvr_probe(struct device *device,
{
struct net_device *ndev = NULL;
struct stmmac_priv *priv;
+ u32 queue, maxq;
int ret = 0;
- u32 queue;
ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
MTL_MAX_TX_QUEUES,
@@ -4322,11 +4341,22 @@ int stmmac_dvr_probe(struct device *device,
"Enable RX Mitigation via HW Watchdog Timer\n");
}
- for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ /* Setup channels NAPI */
+ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
- netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
- (8 * priv->plat->rx_queues_to_use));
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
+
+ ch->priv_data = priv;
+ ch->index = queue;
+
+ if (queue < priv->plat->rx_queues_to_use)
+ ch->has_rx = true;
+ if (queue < priv->plat->tx_queues_to_use)
+ ch->has_tx = true;
+
+ netif_napi_add(ndev, &ch->napi, stmmac_napi_poll,
+ NAPI_POLL_WEIGHT);
}
mutex_init(&priv->lock);
@@ -4372,10 +4402,10 @@ error_netdev_register:
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
error_mdio_register:
- for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
- netif_napi_del(&rx_q->napi);
+ netif_napi_del(&ch->napi);
}
error_hw_init:
destroy_workqueue(priv->wq);
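
Editor's note: the hunks above convert stmmac from one NAPI instance per RX
queue to one NAPI instance per DMA channel. A minimal sketch of the
per-channel container they assume (field names are inferred from the
ch->... accesses in the hunks; the real definition lives in stmmac.h and
may carry more members):

    /* one NAPI context per DMA channel, shared by its RX and TX queues */
    struct stmmac_channel {
            struct napi_struct napi;   /* polled by stmmac_napi_poll()   */
            void *priv_data;           /* back-pointer to stmmac_priv    */
            u32 index;                 /* DMA channel number             */
            bool has_rx;               /* channel backs an RX queue      */
            bool has_tx;               /* channel backs a TX queue       */
    };

With this layout, stmmac_napi_check() can schedule a single poller for
whichever direction raised the interrupt, and stmmac_napi_poll() splits the
budget between TX cleaning and RX processing on the same channel.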
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3609c7b696c7..2b800ce1d5bf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -67,7 +67,7 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins)
* Description:
* This function validates the number of Unicast address entries supported
* by a particular Synopsys 10/100/1000 controller. The Synopsys controller
- * supports 1, 32, 64, or 128 Unicast filter entries for it's Unicast filter
+ * supports 1..32, 64, or 128 Unicast filter entries for its Unicast filter
* logic. This function validates a valid, supported configuration is
* selected, and defaults to 1 Unicast address if an unsupported
* configuration is selected.
@@ -77,8 +77,7 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
int x = ucast_entries;
switch (x) {
- case 1:
- case 32:
+ case 1 ... 32:
case 64:
case 128:
break;
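
Editor's note: the "case 1 ... 32:" form used above is the GCC/Clang
case-range extension, common throughout kernel code. A standalone
illustration of the construct (hypothetical values, not driver code):

    static int is_supported(int entries)
    {
            switch (entries) {
            case 1 ... 32:          /* any value from 1 to 32 inclusive */
            case 64:
            case 128:
                    return 1;
            default:
                    return 0;
            }
    }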
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 9263d638bd6d..f932923f7d56 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -41,6 +41,7 @@ config TI_DAVINCI_MDIO
config TI_DAVINCI_CPDMA
tristate "TI DaVinci CPDMA Support"
depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
+ select GENERIC_ALLOCATOR
---help---
This driver supports TI's DaVinci CPDMA dma engine.
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 2bdfb39215e9..d8ba512f166a 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -835,7 +835,7 @@ static void w5100_tx_work(struct work_struct *work)
w5100_tx_skb(priv->ndev, skb);
}
-static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
{
struct w5100_priv *priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 56ae573001e8..80fdbff67d82 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -365,7 +365,7 @@ static void w5300_tx_timeout(struct net_device *ndev)
netif_wake_queue(ndev);
}
-static int w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
{
struct w5300_priv *priv = netdev_priv(ndev);
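
Editor's note: both wiznet changes fix the .ndo_start_xmit prototype. The
hook must return netdev_tx_t rather than int, so sparse and the compiler
can catch errno-style returns leaking into the TX path. A minimal
conforming skeleton (hypothetical driver, not the wiznet code):

    static netdev_tx_t example_start_xmit(struct sk_buff *skb,
                                          struct net_device *ndev)
    {
            /* hand skb to the hardware queue here */
            return NETDEV_TX_OK;    /* or NETDEV_TX_BUSY to requeue */
    }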
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 6acb6b5718b9..493cd382b8aa 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -830,12 +830,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (IS_ERR(rt))
return PTR_ERR(rt);
- if (skb_dst(skb)) {
- int mtu = dst_mtu(&rt->dst) - GENEVE_IPV4_HLEN -
- info->options_len;
-
- skb_dst_update_pmtu(skb, mtu);
- }
+ skb_tunnel_check_pmtu(skb, &rt->dst,
+ GENEVE_IPV4_HLEN + info->options_len);
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->collect_md) {
@@ -876,11 +872,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (IS_ERR(dst))
return PTR_ERR(dst);
- if (skb_dst(skb)) {
- int mtu = dst_mtu(dst) - GENEVE_IPV6_HLEN - info->options_len;
-
- skb_dst_update_pmtu(skb, mtu);
- }
+ skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len);
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->collect_md) {
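
Editor's note: here (and in the vxlan hunks further down) the open-coded
PMTU update is folded into skb_tunnel_check_pmtu(). Judging purely from the
code it replaces, the helper behaves roughly like this sketch; the real
helper may do more (e.g. GSO-aware handling):

    /* sketch inferred from the removed pattern, not the actual helper */
    static inline void tunnel_check_pmtu_sketch(struct sk_buff *skb,
                                                struct dst_entry *dst,
                                                int headroom)
    {
            if (skb_dst(skb))
                    skb_dst_update_pmtu(skb, dst_mtu(dst) - headroom);
    }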
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 16ec7af6ab7b..ba9df430fca6 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -966,6 +966,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
sizeof(struct yamdrv_ioctl_mcs));
if (IS_ERR(ym))
return PTR_ERR(ym);
+ if (ym->cmd != SIOCYAMSMCS)
+ return -EINVAL;
if (ym->bitrate > YAM_MAXBITRATE) {
kfree(ym);
return -EINVAL;
@@ -981,6 +983,8 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (copy_from_user(&yi, ifr->ifr_data, sizeof(struct yamdrv_ioctl_cfg)))
return -EFAULT;
+ if (yi.cmd != SIOCYAMSCFG)
+ return -EINVAL;
if ((yi.cfg.mask & YAM_IOBASE) && netif_running(dev))
return -EINVAL; /* Cannot change this parameter when up */
if ((yi.cfg.mask & YAM_IRQ) && netif_running(dev))
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 31c3d77b4733..fe01e141c8f8 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1203,6 +1203,9 @@ static void netvsc_send_vf(struct net_device *ndev,
net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+ netdev_info(ndev, "VF slot %u %s\n",
+ net_device_ctx->vf_serial,
+ net_device_ctx->vf_alloc ? "added" : "removed");
}
static void netvsc_receive_inband(struct net_device *ndev,
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 70921bbe0e28..3af6d8d15233 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1894,20 +1894,6 @@ out_unlock:
rtnl_unlock();
}
-static struct net_device *get_netvsc_bymac(const u8 *mac)
-{
- struct net_device_context *ndev_ctx;
-
- list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
- struct net_device *dev = hv_get_drvdata(ndev_ctx->device_ctx);
-
- if (ether_addr_equal(mac, dev->perm_addr))
- return dev;
- }
-
- return NULL;
-}
-
static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
{
struct net_device_context *net_device_ctx;
@@ -2036,26 +2022,48 @@ static void netvsc_vf_setup(struct work_struct *w)
rtnl_unlock();
}
+/* Find netvsc by VMBus serial number.
+ * The PCI hyperv controller records the serial number as the slot.
+ */
+static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
+{
+ struct device *parent = vf_netdev->dev.parent;
+ struct net_device_context *ndev_ctx;
+ struct pci_dev *pdev;
+
+ if (!parent || !dev_is_pci(parent))
+ return NULL; /* not a PCI device */
+
+ pdev = to_pci_dev(parent);
+ if (!pdev->slot) {
+ netdev_notice(vf_netdev, "no PCI slot information\n");
+ return NULL;
+ }
+
+ list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
+ if (!ndev_ctx->vf_alloc)
+ continue;
+
+ if (ndev_ctx->vf_serial == pdev->slot->number)
+ return hv_get_drvdata(ndev_ctx->device_ctx);
+ }
+
+ netdev_notice(vf_netdev,
+ "no netdev found for slot %u\n", pdev->slot->number);
+ return NULL;
+}
+
static int netvsc_register_vf(struct net_device *vf_netdev)
{
- struct net_device *ndev;
struct net_device_context *net_device_ctx;
- struct device *pdev = vf_netdev->dev.parent;
struct netvsc_device *netvsc_dev;
+ struct net_device *ndev;
int ret;
if (vf_netdev->addr_len != ETH_ALEN)
return NOTIFY_DONE;
- if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev))
- return NOTIFY_DONE;
-
- /*
- * We will use the MAC address to locate the synthetic interface to
- * associate with the VF interface. If we don't find a matching
- * synthetic interface, move on.
- */
- ndev = get_netvsc_bymac(vf_netdev->perm_addr);
+ ndev = get_netvsc_byslot(vf_netdev);
if (!ndev)
return NOTIFY_DONE;
@@ -2272,17 +2280,15 @@ static int netvsc_remove(struct hv_device *dev)
cancel_delayed_work_sync(&ndev_ctx->dwork);
- rcu_read_lock();
- nvdev = rcu_dereference(ndev_ctx->nvdev);
-
- if (nvdev)
+ rtnl_lock();
+ nvdev = rtnl_dereference(ndev_ctx->nvdev);
+ if (nvdev)
cancel_work_sync(&nvdev->subchan_work);
/*
* Call to the vsc driver to let it know that the device is being
* removed. Also blocks mtu and channel changes.
*/
- rtnl_lock();
vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
if (vf_netdev)
netvsc_unregister_vf(vf_netdev);
@@ -2294,7 +2300,6 @@ static int netvsc_remove(struct hv_device *dev)
list_del(&ndev_ctx->list);
rtnl_unlock();
- rcu_read_unlock();
hv_set_drvdata(dev, NULL);
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 23a52b9293f3..cd1d8faccca5 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1308,8 +1308,7 @@ static int adf7242_remove(struct spi_device *spi)
{
struct adf7242_local *lp = spi_get_drvdata(spi);
- if (!IS_ERR_OR_NULL(lp->debugfs_root))
- debugfs_remove_recursive(lp->debugfs_root);
+ debugfs_remove_recursive(lp->debugfs_root);
cancel_delayed_work_sync(&lp->work);
destroy_workqueue(lp->wqueue);
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 58299fb666ed..0ff5a403a8dc 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -634,10 +634,9 @@ static int ca8210_test_int_driver_write(
for (i = 0; i < len; i++)
dev_dbg(&priv->spi->dev, "%#03x\n", buf[i]);
- fifo_buffer = kmalloc(len, GFP_KERNEL);
+ fifo_buffer = kmemdup(buf, len, GFP_KERNEL);
if (!fifo_buffer)
return -ENOMEM;
- memcpy(fifo_buffer, buf, len);
kfifo_in(&test->up_fifo, &fifo_buffer, 4);
wake_up_interruptible(&priv->test.readq);
@@ -3044,8 +3043,7 @@ static void ca8210_test_interface_clear(struct ca8210_priv *priv)
{
struct ca8210_test *test = &priv->test;
- if (!IS_ERR(test->ca8210_dfs_spi_int))
- debugfs_remove(test->ca8210_dfs_spi_int);
+ debugfs_remove(test->ca8210_dfs_spi_int);
kfifo_free(&test->up_fifo);
dev_info(&priv->spi->dev, "Test interface removed\n");
}
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index e428277781ac..04891429a554 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -903,19 +903,19 @@ mcr20a_irq_clean_complete(void *context)
switch (seq_state) {
/* TX IRQ, RX IRQ and SEQ IRQ */
- case (0x03):
+ case (DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
if (lp->is_tx) {
lp->is_tx = 0;
dev_dbg(printdev(lp), "TX is done. No ACK\n");
mcr20a_handle_tx_complete(lp);
}
break;
- case (0x05):
+ case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ):
/* rx is starting */
dev_dbg(printdev(lp), "RX is starting\n");
mcr20a_handle_rx(lp);
break;
- case (0x07):
+ case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
if (lp->is_tx) {
/* tx is done */
lp->is_tx = 0;
@@ -927,7 +927,7 @@ mcr20a_irq_clean_complete(void *context)
mcr20a_handle_rx(lp);
}
break;
- case (0x01):
+ case (DAR_IRQSTS1_SEQIRQ):
if (lp->is_tx) {
dev_dbg(printdev(lp), "TX is starting\n");
mcr20a_handle_tx(lp);
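
Editor's note: the named DAR_IRQSTS1_* flags decode exactly to the magic
numbers they replace, so the bit layout can be read off the old cases
(assumed assignments; the authoritative definitions are in mcr20a.h):

    DAR_IRQSTS1_SEQIRQ = BIT(0)   /* 0x01: sequence complete        */
    DAR_IRQSTS1_TXIRQ  = BIT(1)   /* 0x03 = TXIRQ | SEQIRQ          */
    DAR_IRQSTS1_RXIRQ  = BIT(2)   /* 0x05 = RXIRQ | SEQIRQ          */
                                  /* 0x07 = RXIRQ | TXIRQ | SEQIRQ  */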
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index db1172db1e7c..19ab8a7d1e48 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -93,7 +93,12 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
if (!netdev)
return !phydev->suspended;
- /* Don't suspend PHY if the attached netdev parent may wakeup.
+ if (netdev->wol_enabled)
+ return false;
+
+ /* As long as not all affected network drivers support the
+ * wol_enabled flag, let's check for hints that WoL is enabled.
+ * Don't suspend PHY if the attached netdev parent may wake up.
* The parent may point to a PCI device, as in tg3 driver.
*/
if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent))
@@ -1132,9 +1137,9 @@ void phy_detach(struct phy_device *phydev)
sysfs_remove_link(&dev->dev.kobj, "phydev");
sysfs_remove_link(&phydev->mdio.dev.kobj, "attached_dev");
}
+ phy_suspend(phydev);
phydev->attached_dev->phydev = NULL;
phydev->attached_dev = NULL;
- phy_suspend(phydev);
phydev->phylink = NULL;
phy_led_triggers_unregister(phydev);
@@ -1168,12 +1173,13 @@ EXPORT_SYMBOL(phy_detach);
int phy_suspend(struct phy_device *phydev)
{
struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
+ struct net_device *netdev = phydev->attached_dev;
struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
int ret = 0;
/* If the device has WOL enabled, we cannot suspend the PHY */
phy_ethtool_get_wol(phydev, &wol);
- if (wol.wolopts)
+ if (wol.wolopts || (netdev && netdev->wol_enabled))
return -EBUSY;
if (phydev->drv && phydrv->suspend)
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 3ba5cf2a8a5f..7abca86c3aa9 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -717,6 +717,30 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
return 0;
}
+static int __phylink_connect_phy(struct phylink *pl, struct phy_device *phy,
+ phy_interface_t interface)
+{
+ int ret;
+
+ if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
+ (pl->link_an_mode == MLO_AN_INBAND &&
+ phy_interface_mode_is_8023z(interface))))
+ return -EINVAL;
+
+ if (pl->phydev)
+ return -EBUSY;
+
+ ret = phy_attach_direct(pl->netdev, phy, 0, interface);
+ if (ret)
+ return ret;
+
+ ret = phylink_bringup_phy(pl, phy);
+ if (ret)
+ phy_detach(phy);
+
+ return ret;
+}
+
/**
* phylink_connect_phy() - connect a PHY to the phylink instance
* @pl: a pointer to a &struct phylink returned from phylink_create()
@@ -734,31 +758,13 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
*/
int phylink_connect_phy(struct phylink *pl, struct phy_device *phy)
{
- int ret;
-
- if (WARN_ON(pl->link_an_mode == MLO_AN_FIXED ||
- (pl->link_an_mode == MLO_AN_INBAND &&
- phy_interface_mode_is_8023z(pl->link_interface))))
- return -EINVAL;
-
- if (pl->phydev)
- return -EBUSY;
-
/* Use PHY device/driver interface */
if (pl->link_interface == PHY_INTERFACE_MODE_NA) {
pl->link_interface = phy->interface;
pl->link_config.interface = pl->link_interface;
}
- ret = phy_attach_direct(pl->netdev, phy, 0, pl->link_interface);
- if (ret)
- return ret;
-
- ret = phylink_bringup_phy(pl, phy);
- if (ret)
- phy_detach(phy);
-
- return ret;
+ return __phylink_connect_phy(pl, phy, pl->link_interface);
}
EXPORT_SYMBOL_GPL(phylink_connect_phy);
@@ -1672,7 +1678,9 @@ static void phylink_sfp_link_up(void *upstream)
static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy)
{
- return phylink_connect_phy(upstream, phy);
+ struct phylink *pl = upstream;
+
+ return __phylink_connect_phy(upstream, phy, pl->link_config.interface);
}
static void phylink_sfp_disconnect_phy(void *upstream)
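
Editor's note: the refactor above extracts the attach sequence into
__phylink_connect_phy() so the SFP path can supply the interface mode
negotiated from the module rather than the fixed mode recorded at create
time. In sketch form, the two entry points now differ only in the argument
they forward:

    /* call-flow sketch of the split introduced by this hunk */
    phylink_connect_phy(pl, phy)
            -> __phylink_connect_phy(pl, phy, pl->link_interface);
    phylink_sfp_connect_phy(pl, phy)
            -> __phylink_connect_phy(pl, phy, pl->link_config.interface);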
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 740655261e5b..83060fb349f4 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -349,6 +349,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
}
if (bus->started)
bus->socket_ops->start(bus->sfp);
+ bus->netdev->sfp_bus = bus;
bus->registered = true;
return 0;
}
@@ -357,6 +358,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
{
const struct sfp_upstream_ops *ops = bus->upstream_ops;
+ bus->netdev->sfp_bus = NULL;
if (bus->registered) {
if (bus->started)
bus->socket_ops->stop(bus->sfp);
@@ -438,7 +440,6 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
{
bus->upstream_ops = NULL;
bus->upstream = NULL;
- bus->netdev->sfp_bus = NULL;
bus->netdev = NULL;
}
@@ -467,7 +468,6 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
bus->upstream_ops = ops;
bus->upstream = upstream;
bus->netdev = ndev;
- ndev->sfp_bus = bus;
if (bus->sfp) {
ret = sfp_register_bus(bus);
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 52fffb98fde9..fd8bb998ae52 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -163,8 +163,6 @@ static const enum gpiod_flags gpio_flags[] = {
/* Give this long for the PHY to reset. */
#define T_PHY_RESET_MS 50
-static DEFINE_MUTEX(sfp_mutex);
-
struct sff_data {
unsigned int gpios;
bool (*module_supported)(const struct sfp_eeprom_id *id);
@@ -1098,8 +1096,11 @@ static int sfp_hwmon_insert(struct sfp *sfp)
static void sfp_hwmon_remove(struct sfp *sfp)
{
- hwmon_device_unregister(sfp->hwmon_dev);
- kfree(sfp->hwmon_name);
+ if (!IS_ERR_OR_NULL(sfp->hwmon_dev)) {
+ hwmon_device_unregister(sfp->hwmon_dev);
+ sfp->hwmon_dev = NULL;
+ kfree(sfp->hwmon_name);
+ }
}
#else
static int sfp_hwmon_insert(struct sfp *sfp)
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index ce61231e96ea..62dc564b251d 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -429,6 +429,9 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
if (!skb)
goto out;
+ if (skb_mac_header_len(skb) < ETH_HLEN)
+ goto drop;
+
if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
goto drop;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 6a047d30e8c6..d887016e54b6 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1167,6 +1167,12 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
return -EBUSY;
}
+ if (dev == port_dev) {
+ NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself");
+ netdev_err(dev, "Cannot enslave team device to itself\n");
+ return -EINVAL;
+ }
+
if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
vlan_uses_dev(dev)) {
NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index ebd07ad82431..50e9cc19023a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -181,6 +181,7 @@ struct tun_file {
};
struct napi_struct napi;
bool napi_enabled;
+ bool napi_frags_enabled;
struct mutex napi_mutex; /* Protects access to the above napi */
struct list_head next;
struct tun_struct *detached;
@@ -313,32 +314,32 @@ static int tun_napi_poll(struct napi_struct *napi, int budget)
}
static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
- bool napi_en)
+ bool napi_en, bool napi_frags)
{
tfile->napi_enabled = napi_en;
+ tfile->napi_frags_enabled = napi_en && napi_frags;
if (napi_en) {
netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
NAPI_POLL_WEIGHT);
napi_enable(&tfile->napi);
- mutex_init(&tfile->napi_mutex);
}
}
-static void tun_napi_disable(struct tun_struct *tun, struct tun_file *tfile)
+static void tun_napi_disable(struct tun_file *tfile)
{
if (tfile->napi_enabled)
napi_disable(&tfile->napi);
}
-static void tun_napi_del(struct tun_struct *tun, struct tun_file *tfile)
+static void tun_napi_del(struct tun_file *tfile)
{
if (tfile->napi_enabled)
netif_napi_del(&tfile->napi);
}
-static bool tun_napi_frags_enabled(const struct tun_struct *tun)
+static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
- return READ_ONCE(tun->flags) & IFF_NAPI_FRAGS;
+ return tfile->napi_frags_enabled;
}
#ifdef CONFIG_TUN_VNET_CROSS_LE
@@ -690,8 +691,8 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
tun = rtnl_dereference(tfile->tun);
if (tun && clean) {
- tun_napi_disable(tun, tfile);
- tun_napi_del(tun, tfile);
+ tun_napi_disable(tfile);
+ tun_napi_del(tfile);
}
if (tun && !tfile->detached) {
@@ -758,7 +759,7 @@ static void tun_detach_all(struct net_device *dev)
for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
BUG_ON(!tfile);
- tun_napi_disable(tun, tfile);
+ tun_napi_disable(tfile);
tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
tfile->socket.sk->sk_data_ready(tfile->socket.sk);
RCU_INIT_POINTER(tfile->tun, NULL);
@@ -774,7 +775,7 @@ static void tun_detach_all(struct net_device *dev)
synchronize_net();
for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
- tun_napi_del(tun, tfile);
+ tun_napi_del(tfile);
/* Drop read queue */
tun_queue_purge(tfile);
xdp_rxq_info_unreg(&tfile->xdp_rxq);
@@ -793,7 +794,7 @@ static void tun_detach_all(struct net_device *dev)
}
static int tun_attach(struct tun_struct *tun, struct file *file,
- bool skip_filter, bool napi)
+ bool skip_filter, bool napi, bool napi_frags)
{
struct tun_file *tfile = file->private_data;
struct net_device *dev = tun->dev;
@@ -866,7 +867,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
tun_enable_queue(tfile);
} else {
sock_hold(&tfile->sk);
- tun_napi_init(tun, tfile, napi);
+ tun_napi_init(tun, tfile, napi, napi_frags);
}
tun_set_real_num_queues(tun);
@@ -1153,43 +1154,6 @@ static netdev_features_t tun_net_fix_features(struct net_device *dev,
return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void tun_poll_controller(struct net_device *dev)
-{
- /*
- * Tun only receives frames when:
- * 1) the char device endpoint gets data from user space
- * 2) the tun socket gets a sendmsg call from user space
- * If NAPI is not enabled, since both of those are synchronous
- * operations, we are guaranteed never to have pending data when we poll
- * for it so there is nothing to do here but return.
- * We need this though so netpoll recognizes us as an interface that
- * supports polling, which enables bridge devices in virt setups to
- * still use netconsole
- * If NAPI is enabled, however, we need to schedule polling for all
- * queues unless we are using napi_gro_frags(), which we call in
- * process context and not in NAPI context.
- */
- struct tun_struct *tun = netdev_priv(dev);
-
- if (tun->flags & IFF_NAPI) {
- struct tun_file *tfile;
- int i;
-
- if (tun_napi_frags_enabled(tun))
- return;
-
- rcu_read_lock();
- for (i = 0; i < tun->numqueues; i++) {
- tfile = rcu_dereference(tun->tfiles[i]);
- if (tfile->napi_enabled)
- napi_schedule(&tfile->napi);
- }
- rcu_read_unlock();
- }
- return;
-}
-#endif
static void tun_set_headroom(struct net_device *dev, int new_hr)
{
@@ -1283,9 +1247,6 @@ static const struct net_device_ops tun_netdev_ops = {
.ndo_start_xmit = tun_net_xmit,
.ndo_fix_features = tun_net_fix_features,
.ndo_select_queue = tun_select_queue,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = tun_poll_controller,
-#endif
.ndo_set_rx_headroom = tun_set_headroom,
.ndo_get_stats64 = tun_net_get_stats64,
};
@@ -1365,9 +1326,6 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_select_queue = tun_select_queue,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = tun_poll_controller,
-#endif
.ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = tun_set_headroom,
.ndo_get_stats64 = tun_net_get_stats64,
@@ -1752,7 +1710,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
int err;
u32 rxhash = 0;
int skb_xdp = 1;
- bool frags = tun_napi_frags_enabled(tun);
+ bool frags = tun_napi_frags_enabled(tfile);
if (!(tun->dev->flags & IFF_UP))
return -EIO;
@@ -2577,7 +2535,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
return err;
err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
- ifr->ifr_flags & IFF_NAPI);
+ ifr->ifr_flags & IFF_NAPI,
+ ifr->ifr_flags & IFF_NAPI_FRAGS);
if (err < 0)
return err;
@@ -2675,7 +2634,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
(ifr->ifr_flags & TUN_FEATURES);
INIT_LIST_HEAD(&tun->disabled);
- err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI);
+ err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
+ ifr->ifr_flags & IFF_NAPI_FRAGS);
if (err < 0)
goto err_free_flow;
@@ -2824,7 +2784,8 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
ret = security_tun_dev_attach_queue(tun->security);
if (ret < 0)
goto unlock;
- ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI);
+ ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
+ tun->flags & IFF_NAPI_FRAGS);
} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
tun = rtnl_dereference(tfile->tun);
if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
@@ -3242,6 +3203,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
return -ENOMEM;
}
+ mutex_init(&tfile->napi_mutex);
RCU_INIT_POINTER(tfile->tun, NULL);
tfile->flags = 0;
tfile->ifindex = 0;
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index e95dd12edec4..023b8d0bf175 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -607,6 +607,9 @@ int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
struct usbnet *dev = netdev_priv(net);
u8 opt = 0;
+ if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ return -EINVAL;
+
if (wolinfo->wolopts & WAKE_PHY)
opt |= AX_MONITOR_LINK;
if (wolinfo->wolopts & WAKE_MAGIC)
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 9e8ad372f419..2207f7a7d1ff 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -566,6 +566,9 @@ ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
struct usbnet *dev = netdev_priv(net);
u8 opt = 0;
+ if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ return -EINVAL;
+
if (wolinfo->wolopts & WAKE_PHY)
opt |= AX_MONITOR_MODE_RWLC;
if (wolinfo->wolopts & WAKE_MAGIC)
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index a9991c5f4736..c3c9ba44e2a1 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1401,19 +1401,10 @@ static int lan78xx_set_wol(struct net_device *netdev,
if (ret < 0)
return ret;
- pdata->wol = 0;
- if (wol->wolopts & WAKE_UCAST)
- pdata->wol |= WAKE_UCAST;
- if (wol->wolopts & WAKE_MCAST)
- pdata->wol |= WAKE_MCAST;
- if (wol->wolopts & WAKE_BCAST)
- pdata->wol |= WAKE_BCAST;
- if (wol->wolopts & WAKE_MAGIC)
- pdata->wol |= WAKE_MAGIC;
- if (wol->wolopts & WAKE_PHY)
- pdata->wol |= WAKE_PHY;
- if (wol->wolopts & WAKE_ARP)
- pdata->wol |= WAKE_ARP;
+ if (wol->wolopts & ~WAKE_ALL)
+ return -EINVAL;
+
+ pdata->wol = wol->wolopts;
device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
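
Editor's note: this hunk and several sibling drivers in the series (asix,
ax88179, r8152, smsc75xx, smsc95xx, sr9800) add the same guard: reject any
wake option the hardware cannot honour instead of silently dropping it. The
shared shape, with example_set_wol a hypothetical name and SUPPORTED_WAKE
standing in for each driver's own mask:

    static int example_set_wol(struct net_device *net,
                               struct ethtool_wolinfo *wolinfo)
    {
            /* SUPPORTED_WAKE: whatever subset this hardware implements */
            if (wolinfo->wolopts & ~SUPPORTED_WAKE)
                    return -EINVAL;

            /* ...program wolinfo->wolopts into the device... */
            return 0;
    }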
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index cb0cc30c3d6a..72a55b6b4211 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -967,6 +967,13 @@ static const struct usb_device_id products[] = {
USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* Quectel EP06/EG06/EM06 */
+ USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306,
+ USB_CLASS_VENDOR_SPEC,
+ USB_SUBCLASS_VENDOR_SPEC,
+ 0xff),
+ .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
+ },
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
@@ -1206,13 +1213,13 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
{QMI_FIXED_INTF(0x1199, 0x9063, 8)}, /* Sierra Wireless EM7305 */
{QMI_FIXED_INTF(0x1199, 0x9063, 10)}, /* Sierra Wireless EM7305 */
- {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
- {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
- {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
- {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
- {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
- {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */
- {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
+ {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
+ {QMI_QUIRK_SET_DTR(0x1199, 0x9071, 10)},/* Sierra Wireless MC74xx */
+ {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
+ {QMI_QUIRK_SET_DTR(0x1199, 0x9079, 10)},/* Sierra Wireless EM74xx */
+ {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
+ {QMI_QUIRK_SET_DTR(0x1199, 0x907b, 10)},/* Sierra Wireless EM74xx */
+ {QMI_QUIRK_SET_DTR(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
@@ -1234,6 +1241,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
+ {QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
@@ -1255,7 +1263,6 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
- {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
@@ -1331,6 +1338,19 @@ static bool quectel_ec20_detected(struct usb_interface *intf)
return false;
}
+static bool quectel_ep06_diag_detected(struct usb_interface *intf)
+{
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
+
+ if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c &&
+ le16_to_cpu(dev->descriptor.idProduct) == 0x0306 &&
+ intf_desc.bNumEndpoints == 2)
+ return true;
+
+ return false;
+}
+
static int qmi_wwan_probe(struct usb_interface *intf,
const struct usb_device_id *prod)
{
@@ -1365,6 +1385,15 @@ static int qmi_wwan_probe(struct usb_interface *intf,
return -ENODEV;
}
+ /* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so
+ * we need to match on class/subclass/protocol. These values are
+ * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
+ * different. Ignore the current interface if the number of endpoints
+ * equals the number for the diag interface (two).
+ */
+ if (quectel_ep06_diag_detected(intf))
+ return -ENODEV;
+
return usbnet_probe(intf, id);
}
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 2cd71bdb6484..f1b5201cc320 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -4506,6 +4506,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (!rtl_can_wakeup(tp))
return -EOPNOTSUPP;
+ if (wol->wolopts & ~WAKE_ANY)
+ return -EINVAL;
+
ret = usb_autopm_get_interface(tp->intf);
if (ret < 0)
goto out_set_wol;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 05553d252446..ec287c9741e8 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -731,6 +731,9 @@ static int smsc75xx_ethtool_set_wol(struct net_device *net,
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
int ret;
+ if (wolinfo->wolopts & ~SUPPORTED_WAKE)
+ return -EINVAL;
+
pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
@@ -1517,6 +1520,7 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
if (pdata) {
+ cancel_work_sync(&pdata->set_multicast);
netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
pdata = NULL;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 06b4d290784d..262e7a3c23cb 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -774,6 +774,9 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
int ret;
+ if (wolinfo->wolopts & ~SUPPORTED_WAKE)
+ return -EINVAL;
+
pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 9277a0f228df..35f39f23d881 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -421,6 +421,9 @@ sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
struct usbnet *dev = netdev_priv(net);
u8 opt = 0;
+ if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ return -EINVAL;
+
if (wolinfo->wolopts & WAKE_PHY)
opt |= SR_MONITOR_LINK;
if (wolinfo->wolopts & WAKE_MAGIC)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 8d679c8b7f25..41a00cd76955 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -463,6 +463,8 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
int mac_len, delta, off;
struct xdp_buff xdp;
+ skb_orphan(skb);
+
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (unlikely(!xdp_prog)) {
@@ -508,8 +510,6 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
skb_copy_header(nskb, skb);
head_off = skb_headroom(nskb) - skb_headroom(skb);
skb_headers_offset_update(nskb, head_off);
- if (skb->sk)
- skb_set_owner_w(nskb, skb->sk);
consume_skb(skb);
skb = nskb;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 765920905226..ddfa3f24204c 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1699,17 +1699,6 @@ static void virtnet_stats(struct net_device *dev,
tot->rx_frame_errors = dev->stats.rx_frame_errors;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void virtnet_netpoll(struct net_device *dev)
-{
- struct virtnet_info *vi = netdev_priv(dev);
- int i;
-
- for (i = 0; i < vi->curr_queue_pairs; i++)
- napi_schedule(&vi->rq[i].napi);
-}
-#endif
-
static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
rtnl_lock();
@@ -2229,8 +2218,9 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
/* Make sure no work handler is accessing the device */
flush_work(&vi->config_work);
+ netif_tx_lock_bh(vi->dev);
netif_device_detach(vi->dev);
- netif_tx_disable(vi->dev);
+ netif_tx_unlock_bh(vi->dev);
cancel_delayed_work_sync(&vi->refill);
if (netif_running(vi->dev)) {
@@ -2266,7 +2256,9 @@ static int virtnet_restore_up(struct virtio_device *vdev)
}
}
+ netif_tx_lock_bh(vi->dev);
netif_device_attach(vi->dev);
+ netif_tx_unlock_bh(vi->dev);
return err;
}
@@ -2447,9 +2439,6 @@ static const struct net_device_ops virtnet_netdev = {
.ndo_get_stats64 = virtnet_stats,
.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = virtnet_netpoll,
-#endif
.ndo_bpf = virtnet_xdp,
.ndo_xdp_xmit = virtnet_xdp_xmit,
.ndo_features_check = passthru_features_check,
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ababba37d735..27bd586b94b0 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2194,11 +2194,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
ndst = &rt->dst;
- if (skb_dst(skb)) {
- int mtu = dst_mtu(ndst) - VXLAN_HEADROOM;
-
- skb_dst_update_pmtu(skb, mtu);
- }
+ skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM);
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
@@ -2235,11 +2231,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto out_unlock;
}
- if (skb_dst(skb)) {
- int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM;
-
- skb_dst_update_pmtu(skb, mtu);
- }
+ skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM);
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip6_dst_hoplimit(ndst);
@@ -3539,6 +3531,7 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL_INHERIT */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
@@ -3603,6 +3596,8 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
}
if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) ||
+ nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT,
+ !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) ||
nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
nla_put_u8(skb, IFLA_VXLAN_LEARNING,
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 094cea775d0c..ef298d8525c5 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -257,7 +257,7 @@ static const struct
[I2400M_MS_ACCESSIBILITY_ERROR] = { "accesibility error", -EIO },
[I2400M_MS_BUSY] = { "busy", -EBUSY },
[I2400M_MS_CORRUPTED_TLV] = { "corrupted TLV", -EILSEQ },
- [I2400M_MS_UNINITIALIZED] = { "not unitialized", -EILSEQ },
+ [I2400M_MS_UNINITIALIZED] = { "uninitialized", -EILSEQ },
[I2400M_MS_UNKNOWN_ERROR] = { "unknown error", -EIO },
[I2400M_MS_PRODUCTION_ERROR] = { "production error", -EIO },
[I2400M_MS_NO_RF] = { "no RF", -EIO },
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index 6b0e1ec346cb..d46d57b989ae 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -1518,13 +1518,15 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
}
} else {
/* More than a single header/data pair were missed.
- * Report this error, and reset the controller to
+ * Report this error. If running with open-source
+ * firmware, then reset the controller to
* revive operation.
*/
b43dbg(dev->wl,
"Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
ring->index, firstused, slot);
- b43_controller_restart(dev, "Out of order TX");
+ if (dev->fw.opensource)
+ b43_controller_restart(dev, "Out of order TX");
return;
}
}
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
index 591687984962..497fd766d87c 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
@@ -51,6 +51,7 @@
static const struct iwl_base_params iwl1000_base_params = {
.num_of_queues = IWLAGN_NUM_QUEUES,
+ .max_tfd_queue_size = 256,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.pll_cfg = true,
.max_ll_items = OTP_MAX_LL_ITEMS_1000,
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 1068757ec42e..07442ada6dd0 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -520,7 +520,6 @@ struct mac80211_hwsim_data {
int channels, idx;
bool use_chanctx;
bool destroy_on_close;
- struct work_struct destroy_work;
u32 portid;
char alpha2[2];
const struct ieee80211_regdomain *regd;
@@ -2935,8 +2934,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
hwsim_radios_generation++;
spin_unlock_bh(&hwsim_radio_lock);
- if (idx > 0)
- hwsim_mcast_new_radio(idx, info, param);
+ hwsim_mcast_new_radio(idx, info, param);
return idx;
@@ -3565,30 +3563,27 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
};
-static void destroy_radio(struct work_struct *work)
-{
- struct mac80211_hwsim_data *data =
- container_of(work, struct mac80211_hwsim_data, destroy_work);
-
- hwsim_radios_generation++;
- mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL);
-}
-
static void remove_user_radios(u32 portid)
{
struct mac80211_hwsim_data *entry, *tmp;
+ LIST_HEAD(list);
spin_lock_bh(&hwsim_radio_lock);
list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) {
if (entry->destroy_on_close && entry->portid == portid) {
- list_del(&entry->list);
+ list_move(&entry->list, &list);
rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht,
hwsim_rht_params);
- INIT_WORK(&entry->destroy_work, destroy_radio);
- queue_work(hwsim_wq, &entry->destroy_work);
+ hwsim_radios_generation++;
}
}
spin_unlock_bh(&hwsim_radio_lock);
+
+ list_for_each_entry_safe(entry, tmp, &list, list) {
+ list_del(&entry->list);
+ mac80211_hwsim_del_radio(entry, wiphy_name(entry->hw->wiphy),
+ NULL);
+ }
}
static int mac80211_hwsim_netlink_notify(struct notifier_block *nb,
@@ -3646,6 +3641,7 @@ static __net_init int hwsim_init_net(struct net *net)
static void __net_exit hwsim_exit_net(struct net *net)
{
struct mac80211_hwsim_data *data, *tmp;
+ LIST_HEAD(list);
spin_lock_bh(&hwsim_radio_lock);
list_for_each_entry_safe(data, tmp, &hwsim_radios, list) {
@@ -3656,17 +3652,19 @@ static void __net_exit hwsim_exit_net(struct net *net)
if (data->netgroup == hwsim_net_get_netgroup(&init_net))
continue;
- list_del(&data->list);
+ list_move(&data->list, &list);
rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
hwsim_rht_params);
hwsim_radios_generation++;
- spin_unlock_bh(&hwsim_radio_lock);
+ }
+ spin_unlock_bh(&hwsim_radio_lock);
+
+ list_for_each_entry_safe(data, tmp, &list, list) {
+ list_del(&data->list);
mac80211_hwsim_del_radio(data,
wiphy_name(data->hw->wiphy),
NULL);
- spin_lock_bh(&hwsim_radio_lock);
}
- spin_unlock_bh(&hwsim_radio_lock);
ida_simple_remove(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net));
}
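
Editor's note: both hwsim hunks apply the same locking pattern: unhook
entries onto a private list while holding the spinlock, then run the
teardown (which may sleep) outside it. A generic sketch of the idiom, with
hypothetical names:

    LIST_HEAD(doomed);

    spin_lock_bh(&lock);
    list_for_each_entry_safe(entry, tmp, &live, list)
            if (should_remove(entry))
                    list_move(&entry->list, &doomed);
    spin_unlock_bh(&lock);

    /* safe to sleep here; nothing else can reach the moved entries */
    list_for_each_entry_safe(entry, tmp, &doomed, list) {
            list_del(&entry->list);
            destroy(entry);
    }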
diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
index 43743c26c071..39bf85d0ade0 100644
--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
+++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
@@ -1317,6 +1317,10 @@ static int if_sdio_suspend(struct device *dev)
if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
dev_info(dev, "Suspend without wake params -- powering down card\n");
if (priv->fw_ready) {
+ ret = lbs_suspend(priv);
+ if (ret)
+ return ret;
+
priv->power_up_on_resume = true;
if_sdio_power_off(card);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
index cf6ffb1ba4a2..22bc9d368728 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c
@@ -77,9 +77,8 @@ static void mt76x0_remove_interface(struct ieee80211_hw *hw,
{
struct mt76x0_dev *dev = hw->priv;
struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv;
- unsigned int wcid = mvif->group_wcid.idx;
- dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG);
+ dev->vif_mask &= ~BIT(mvif->idx);
}
static int mt76x0_config(struct ieee80211_hw *hw, u32 changed)
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 7780b07543bb..79e59f2379a2 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -258,7 +258,7 @@ int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
if (!buf->urb)
return -ENOMEM;
- buf->urb->sg = devm_kzalloc(dev->dev, nsgs * sizeof(*buf->urb->sg),
+ buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg),
gfp);
if (!buf->urb->sg)
return -ENOMEM;
@@ -464,8 +464,8 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
int i, err, nsgs;
spin_lock_init(&q->lock);
- q->entry = devm_kzalloc(dev->dev,
- MT_NUM_RX_ENTRIES * sizeof(*q->entry),
+ q->entry = devm_kcalloc(dev->dev,
+ MT_NUM_RX_ENTRIES, sizeof(*q->entry),
GFP_KERNEL);
if (!q->entry)
return -ENOMEM;
@@ -717,8 +717,8 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
INIT_LIST_HEAD(&q->swq);
q->hw_idx = q2hwq(i);
- q->entry = devm_kzalloc(dev->dev,
- MT_NUM_TX_ENTRIES * sizeof(*q->entry),
+ q->entry = devm_kcalloc(dev->dev,
+ MT_NUM_TX_ENTRIES, sizeof(*q->entry),
GFP_KERNEL);
if (!q->entry)
return -ENOMEM;
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index a46a1e94505d..936c0b3e0ba2 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -241,8 +241,9 @@ struct xenvif_hash_cache {
struct xenvif_hash {
unsigned int alg;
u32 flags;
+ bool mapping_sel;
u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
- u32 mapping[XEN_NETBK_MAX_HASH_MAPPING_SIZE];
+ u32 mapping[2][XEN_NETBK_MAX_HASH_MAPPING_SIZE];
unsigned int size;
struct xenvif_hash_cache cache;
};
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index 3c4c58b9fe76..0ccb021f1e78 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -324,7 +324,8 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
vif->hash.size = size;
- memset(vif->hash.mapping, 0, sizeof(u32) * size);
+ memset(vif->hash.mapping[vif->hash.mapping_sel], 0,
+ sizeof(u32) * size);
return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
@@ -332,31 +333,49 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
u32 off)
{
- u32 *mapping = &vif->hash.mapping[off];
- struct gnttab_copy copy_op = {
+ u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel];
+ unsigned int nr = 1;
+ struct gnttab_copy copy_op[2] = {{
.source.u.ref = gref,
.source.domid = vif->domid,
- .dest.u.gmfn = virt_to_gfn(mapping),
.dest.domid = DOMID_SELF,
- .dest.offset = xen_offset_in_page(mapping),
- .len = len * sizeof(u32),
+ .len = len * sizeof(*mapping),
.flags = GNTCOPY_source_gref
- };
+ }};
- if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
+ if ((off + len < off) || (off + len > vif->hash.size) ||
+ len > XEN_PAGE_SIZE / sizeof(*mapping))
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
- while (len-- != 0)
- if (mapping[off++] >= vif->num_queues)
- return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+ copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off);
+ copy_op[0].dest.offset = xen_offset_in_page(mapping + off);
+ if (copy_op[0].dest.offset + copy_op[0].len > XEN_PAGE_SIZE) {
+ copy_op[1] = copy_op[0];
+ copy_op[1].source.offset = XEN_PAGE_SIZE - copy_op[0].dest.offset;
+ copy_op[1].dest.u.gmfn = virt_to_gfn(mapping + off + len);
+ copy_op[1].dest.offset = 0;
+ copy_op[1].len = copy_op[0].len - copy_op[1].source.offset;
+ copy_op[0].len = copy_op[1].source.offset;
+ nr = 2;
+ }
- if (copy_op.len != 0) {
- gnttab_batch_copy(&copy_op, 1);
+ memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel],
+ vif->hash.size * sizeof(*mapping));
- if (copy_op.status != GNTST_okay)
+ if (copy_op[0].len != 0) {
+ gnttab_batch_copy(copy_op, nr);
+
+ if (copy_op[0].status != GNTST_okay ||
+ copy_op[nr - 1].status != GNTST_okay)
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
}
+ while (len-- != 0)
+ if (mapping[off++] >= vif->num_queues)
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+ vif->hash.mapping_sel = !vif->hash.mapping_sel;
+
return XEN_NETIF_CTRL_STATUS_SUCCESS;
}
@@ -408,6 +427,8 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
}
if (vif->hash.size != 0) {
+ const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel];
+
seq_puts(m, "\nHash Mapping:\n");
for (i = 0; i < vif->hash.size; ) {
@@ -420,7 +441,7 @@ void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);
for (j = 0; j < n; j++, i++)
- seq_printf(m, "%4u ", vif->hash.mapping[i]);
+ seq_printf(m, "%4u ", mapping[i]);
seq_puts(m, "\n");
}
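
Editor's note: the hash.c hunks above replace in-place updates of the live mapping table with a double-buffered scheme: new data is grant-copied into the inactive copy, the result is validated, and only then is mapping_sel flipped, so readers never observe a half-written or invalid table. A minimal userspace sketch of the same publish-by-flip pattern (names are illustrative, and real kernel code would need barriers or RCU for lockless readers):

#include <stdbool.h>
#include <string.h>

#define TABLE_SIZE 128

static unsigned int table[2][TABLE_SIZE];
static bool sel;        /* which copy readers currently use */

/* caller guarantees off + len <= TABLE_SIZE */
static int update_table(const unsigned int *src, unsigned int off,
                        unsigned int len, unsigned int limit)
{
        unsigned int *shadow = table[!sel];
        unsigned int i;

        /* seed the shadow from the live copy so untouched entries stay valid */
        memcpy(shadow, table[sel], sizeof(table[0]));
        memcpy(shadow + off, src, len * sizeof(*shadow));

        /* validate before publishing; the live copy is never dirtied */
        for (i = off; i < off + len; i++)
                if (shadow[i] >= limit)
                        return -1;

        sel = !sel;     /* flip: readers now see the validated copy */
        return 0;
}
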
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 92274c237200..f6ae23fc3f6b 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -162,7 +162,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
if (size == 0)
return skb_get_hash_raw(skb) % dev->real_num_tx_queues;
- return vif->hash.mapping[skb_get_hash_raw(skb) % size];
+ return vif->hash.mapping[vif->hash.mapping_sel]
+ [skb_get_hash_raw(skb) % size];
}
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 73f596a90c69..f17f602e6171 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -87,8 +87,7 @@ struct netfront_cb {
/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
-static DECLARE_WAIT_QUEUE_HEAD(module_load_q);
-static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
+static DECLARE_WAIT_QUEUE_HEAD(module_wq);
struct netfront_stats {
u64 packets;
@@ -909,7 +908,11 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
BUG_ON(pull_to <= skb_headlen(skb));
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
}
- BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
+ if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
+ queue->rx.rsp_cons = ++cons;
+ kfree_skb(nskb);
+ return ~0U;
+ }
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb_frag_page(nfrag),
@@ -1046,6 +1049,8 @@ err:
skb->len += rx->status;
i = xennet_fill_frags(queue, skb, &tmpq);
+ if (unlikely(i == ~0U))
+ goto err;
if (rx->flags & XEN_NETRXF_csum_blank)
skb->ip_summed = CHECKSUM_PARTIAL;
@@ -1332,11 +1337,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netif_carrier_off(netdev);
xenbus_switch_state(dev, XenbusStateInitialising);
- wait_event(module_load_q,
- xenbus_read_driver_state(dev->otherend) !=
- XenbusStateClosed &&
- xenbus_read_driver_state(dev->otherend) !=
- XenbusStateUnknown);
+ wait_event(module_wq,
+ xenbus_read_driver_state(dev->otherend) !=
+ XenbusStateClosed &&
+ xenbus_read_driver_state(dev->otherend) !=
+ XenbusStateUnknown);
return netdev;
exit:
@@ -2010,15 +2015,14 @@ static void netback_changed(struct xenbus_device *dev,
dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
+ wake_up_all(&module_wq);
+
switch (backend_state) {
case XenbusStateInitialising:
case XenbusStateInitialised:
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
- break;
-
case XenbusStateUnknown:
- wake_up_all(&module_unload_q);
break;
case XenbusStateInitWait:
@@ -2034,12 +2038,10 @@ static void netback_changed(struct xenbus_device *dev,
break;
case XenbusStateClosed:
- wake_up_all(&module_unload_q);
if (dev->state == XenbusStateClosed)
break;
/* Missed the backend's CLOSING state -- fallthrough */
case XenbusStateClosing:
- wake_up_all(&module_unload_q);
xenbus_frontend_closed(dev);
break;
}
@@ -2147,14 +2149,14 @@ static int xennet_remove(struct xenbus_device *dev)
if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
xenbus_switch_state(dev, XenbusStateClosing);
- wait_event(module_unload_q,
+ wait_event(module_wq,
xenbus_read_driver_state(dev->otherend) ==
XenbusStateClosing ||
xenbus_read_driver_state(dev->otherend) ==
XenbusStateUnknown);
xenbus_switch_state(dev, XenbusStateClosed);
- wait_event(module_unload_q,
+ wait_event(module_wq,
xenbus_read_driver_state(dev->otherend) ==
XenbusStateClosed ||
xenbus_read_driver_state(dev->otherend) ==
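
Editor's note: the netfront hunks above collapse the separate load/unload wait queues into a single module_wq. netback_changed() now issues one unconditional wake_up_all() on every backend state transition, and each wait_event() caller filters with its own predicate. A kernel-context sketch of the idiom, with 'state' as an illustrative stand-in for the xenbus state reads:

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static int state;

static void state_changed(int new_state)
{
        state = new_state;
        wake_up_all(&example_wq);       /* every waiter re-checks its predicate */
}

static void wait_until_ready(void)
{
        /* spurious wakeups are harmless: the condition is re-evaluated */
        wait_event(example_wq, state == 1 || state == 2);
}
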
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 62e9cb167aad..db45c6bbb7bb 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -290,7 +290,7 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
}
set_capacity(disk, available_disk_size >> SECTOR_SHIFT);
- device_add_disk(dev, disk);
+ device_add_disk(dev, disk, NULL);
revalidate_disk(disk);
return 0;
}
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 0360c015f658..b123b0dcf274 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1556,7 +1556,7 @@ static int btt_blk_init(struct btt *btt)
}
}
set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
- device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
+ device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
revalidate_disk(btt->btt_disk);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 6071e2942053..a75d10c23d80 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -474,7 +474,7 @@ static int pmem_attach_disk(struct device *dev,
gendev = disk_to_dev(disk);
gendev->groups = pmem_attribute_groups;
- device_add_disk(dev, disk);
+ device_add_disk(dev, disk, NULL);
if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
return -ENOMEM;
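
Editor's note: these nvdimm hunks (and the nvme ones below) track a device_add_disk() signature change that takes an attribute-group array as a third argument; callers with no extra sysfs attributes pass NULL. Registering the groups together with the disk closes the window in which udev could see the device before sysfs_create_group() ran. A hedged sketch of the new-style registration (my_attr_group and attach_disk are illustrative, not from this patch):

#include <linux/genhd.h>

static const struct attribute_group *my_disk_groups[] = {
        &my_attr_group,         /* created before the KOBJ_ADD uevent fires */
        NULL,
};

static void attach_disk(struct device *parent, struct gendisk *disk)
{
        /* new style: groups registered atomically with the disk */
        device_add_disk(parent, disk, my_disk_groups);

        /*
         * old style, removed by the nvme hunks below:
         *   device_add_disk(parent, disk);
         *   sysfs_create_group(&disk_to_dev(disk)->kobj, &my_attr_group);
         */
}
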
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index dd8ec1dd9219..9e4a30b05bd2 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -971,7 +971,7 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
uuid_copy(&ids->uuid, data + pos + sizeof(*cur));
break;
default:
- /* Skip unnkown types */
+ /* Skip unknown types */
len = cur->nidl;
break;
}
@@ -1132,7 +1132,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
return nvme_submit_user_cmd(ns->queue, &c,
(void __user *)(uintptr_t)io.addr, length,
- metadata, meta_len, io.slba, NULL, 0);
+ metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
}
static u32 nvme_known_admin_effects(u8 opcode)
@@ -2076,7 +2076,7 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
nqnlen = strnlen(id->subnqn, NVMF_NQN_SIZE);
if (nqnlen > 0 && nqnlen < NVMF_NQN_SIZE) {
- strncpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
+ strlcpy(subsys->subnqn, id->subnqn, NVMF_NQN_SIZE);
return;
}
@@ -2729,11 +2729,19 @@ static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj,
return a->mode;
}
-const struct attribute_group nvme_ns_id_attr_group = {
+static const struct attribute_group nvme_ns_id_attr_group = {
.attrs = nvme_ns_id_attrs,
.is_visible = nvme_ns_id_attrs_are_visible,
};
+const struct attribute_group *nvme_ns_id_attr_groups[] = {
+ &nvme_ns_id_attr_group,
+#ifdef CONFIG_NVM
+ &nvme_nvm_attr_group,
+#endif
+ NULL,
+};
+
#define nvme_show_str_function(field) \
static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -2900,9 +2908,14 @@ static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
unsigned nsid, struct nvme_id_ns *id)
{
struct nvme_ns_head *head;
+ size_t size = sizeof(*head);
int ret = -ENOMEM;
- head = kzalloc(sizeof(*head), GFP_KERNEL);
+#ifdef CONFIG_NVME_MULTIPATH
+ size += num_possible_nodes() * sizeof(struct nvme_ns *);
+#endif
+
+ head = kzalloc(size, GFP_KERNEL);
if (!head)
goto out;
ret = ida_simple_get(&ctrl->subsys->ns_ida, 1, 0, GFP_KERNEL);
@@ -3099,14 +3112,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
nvme_get_ctrl(ctrl);
- device_add_disk(ctrl->device, ns->disk);
- if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
- &nvme_ns_id_attr_group))
- pr_warn("%s: failed to create sysfs group for identification\n",
- ns->disk->disk_name);
- if (ns->ndev && nvme_nvm_register_sysfs(ns))
- pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
- ns->disk->disk_name);
+ device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
nvme_mpath_add_disk(ns, id);
nvme_fault_inject_init(ns);
@@ -3132,10 +3138,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
nvme_fault_inject_fini(ns);
if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
- sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
- &nvme_ns_id_attr_group);
- if (ns->ndev)
- nvme_nvm_unregister_sysfs(ns);
del_gendisk(ns->disk);
blk_cleanup_queue(ns->queue);
if (blk_get_integrity(ns->disk))
@@ -3143,8 +3145,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
}
mutex_lock(&ns->ctrl->subsys->lock);
- nvme_mpath_clear_current_path(ns);
list_del_rcu(&ns->siblings);
+ nvme_mpath_clear_current_path(ns);
mutex_unlock(&ns->ctrl->subsys->lock);
down_write(&ns->ctrl->namespaces_rwsem);
@@ -3411,16 +3413,21 @@ static void nvme_fw_act_work(struct work_struct *work)
static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
- switch ((result & 0xff00) >> 8) {
+ u32 aer_notice_type = (result & 0xff00) >> 8;
+
+ switch (aer_notice_type) {
case NVME_AER_NOTICE_NS_CHANGED:
+ trace_nvme_async_event(ctrl, aer_notice_type);
set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
nvme_queue_scan(ctrl);
break;
case NVME_AER_NOTICE_FW_ACT_STARTING:
+ trace_nvme_async_event(ctrl, aer_notice_type);
queue_work(nvme_wq, &ctrl->fw_act_work);
break;
#ifdef CONFIG_NVME_MULTIPATH
case NVME_AER_NOTICE_ANA:
+ trace_nvme_async_event(ctrl, aer_notice_type);
if (!ctrl->ana_log_buf)
break;
queue_work(nvme_wq, &ctrl->ana_work);
@@ -3435,11 +3442,12 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
volatile union nvme_result *res)
{
u32 result = le32_to_cpu(res->u32);
+ u32 aer_type = result & 0x07;
if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
return;
- switch (result & 0x7) {
+ switch (aer_type) {
case NVME_AER_NOTICE:
nvme_handle_aen_notice(ctrl, result);
break;
@@ -3447,6 +3455,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
case NVME_AER_SMART:
case NVME_AER_CSS:
case NVME_AER_VS:
+ trace_nvme_async_event(ctrl, aer_type);
ctrl->aen_result = result;
break;
default:
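
Editor's note: the two hunks above name the fields being unpacked from the AER completion result: the asynchronous event type lives in bits 2:0 and the notice subtype in bits 15:8, the layout the NVMe specification defines. A small userspace illustration of the decoding:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t result = 0x00000102;           /* example AER completion result */
        uint32_t aer_type = result & 0x07;      /* bits 2:0: event type */
        uint32_t aer_notice_type = (result & 0xff00) >> 8; /* bits 15:8 */

        /* for this value: type 2 (a Notice event), notice subtype 1 */
        printf("type=%u notice=%u\n", aer_type, aer_notice_type);
        return 0;
}
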
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 206d63cb1afc..bd0969db6225 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -552,8 +552,11 @@ blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
ctrl->state != NVME_CTRL_DEAD &&
!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
return BLK_STS_RESOURCE;
- nvme_req(rq)->status = NVME_SC_ABORT_REQ;
- return BLK_STS_IOERR;
+
+ nvme_req(rq)->status = NVME_SC_HOST_PATH_ERROR;
+ blk_mq_start_request(rq);
+ nvme_complete_rq(rq);
+ return BLK_STS_OK;
}
EXPORT_SYMBOL_GPL(nvmf_fail_nonready_command);
@@ -865,6 +868,36 @@ static int nvmf_check_required_opts(struct nvmf_ctrl_options *opts,
return 0;
}
+bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
+ struct nvmf_ctrl_options *opts)
+{
+ if (!nvmf_ctlr_matches_baseopts(ctrl, opts) ||
+ strcmp(opts->traddr, ctrl->opts->traddr) ||
+ strcmp(opts->trsvcid, ctrl->opts->trsvcid))
+ return false;
+
+ /*
+ * Checking the local address is rough. In most cases, none is specified
+ * and the host port is selected by the stack.
+ *
+ * Assume no match if:
+ * - local address is specified and address is not the same
+ * - local address is not specified but remote is, or vice versa
+ * (admin using specific host_traddr when it matters).
+ */
+ if ((opts->mask & NVMF_OPT_HOST_TRADDR) &&
+ (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
+ if (strcmp(opts->host_traddr, ctrl->opts->host_traddr))
+ return false;
+ } else if ((opts->mask & NVMF_OPT_HOST_TRADDR) ||
+ (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR)) {
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(nvmf_ip_options_match);
+
static int nvmf_check_allowed_opts(struct nvmf_ctrl_options *opts,
unsigned int allowed_opts)
{
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index aa2fdb2a2e8f..6ea6275f332a 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -166,6 +166,8 @@ blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
struct request *rq);
bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live);
+bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
+ struct nvmf_ctrl_options *opts);
static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live)
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 611e70cae754..e52b9d3c0bd6 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -20,6 +20,7 @@
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>
+#include <linux/overflow.h>
#include "nvme.h"
#include "fabrics.h"
@@ -104,6 +105,12 @@ struct nvme_fc_fcp_op {
struct nvme_fc_ersp_iu rsp_iu;
};
+struct nvme_fcp_op_w_sgl {
+ struct nvme_fc_fcp_op op;
+ struct scatterlist sgl[SG_CHUNK_SIZE];
+ uint8_t priv[0];
+};
+
struct nvme_fc_lport {
struct nvme_fc_local_port localport;
@@ -122,6 +129,7 @@ struct nvme_fc_rport {
struct list_head endp_list; /* for lport->endp_list */
struct list_head ctrl_list;
struct list_head ls_req_list;
+ struct list_head disc_list;
struct device *dev; /* physical device for dma */
struct nvme_fc_lport *lport;
spinlock_t lock;
@@ -210,7 +218,6 @@ static DEFINE_IDA(nvme_fc_ctrl_cnt);
* These items are short-term. They will eventually be moved into
* a generic FC class. See comments in module init.
*/
-static struct class *fc_class;
static struct device *fc_udev_device;
@@ -317,7 +324,7 @@ out_done:
* @template: LLDD entrypoints and operational parameters for the port
* @dev: physical hardware device node port corresponds to. Will be
* used for DMA mappings
- * @lport_p: pointer to a local port pointer. Upon success, the routine
+ * @portptr: pointer to a local port pointer. Upon success, the routine
* will allocate a nvme_fc_local_port structure and place its
* address in the local port pointer. Upon failure, local port
* pointer will be set to 0.
@@ -425,8 +432,7 @@ EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
* nvme_fc_unregister_localport - transport entry point called by an
* LLDD to deregister/remove a previously
* registered NVME host FC port.
- * @localport: pointer to the (registered) local port that is to be
- * deregistered.
+ * @portptr: pointer to the (registered) local port that is to be deregistered.
*
* Returns:
* a completion status. Must be 0 upon success; a negative errno
@@ -507,6 +513,7 @@ nvme_fc_free_rport(struct kref *ref)
list_del(&rport->endp_list);
spin_unlock_irqrestore(&nvme_fc_lock, flags);
+ WARN_ON(!list_empty(&rport->disc_list));
ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);
kfree(rport);
@@ -631,7 +638,7 @@ __nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
* @localport: pointer to the (registered) local port that the remote
* subsystem port is connected to.
* @pinfo: pointer to information about the port to be registered
- * @rport_p: pointer to a remote port pointer. Upon success, the routine
+ * @portptr: pointer to a remote port pointer. Upon success, the routine
* will allocate a nvme_fc_remote_port structure and place its
* address in the remote port pointer. Upon failure, remote port
* pointer will be set to 0.
@@ -694,6 +701,7 @@ nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
INIT_LIST_HEAD(&newrec->endp_list);
INIT_LIST_HEAD(&newrec->ctrl_list);
INIT_LIST_HEAD(&newrec->ls_req_list);
+ INIT_LIST_HEAD(&newrec->disc_list);
kref_init(&newrec->ref);
atomic_set(&newrec->act_ctrl_cnt, 0);
spin_lock_init(&newrec->lock);
@@ -807,8 +815,8 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
* nvme_fc_unregister_remoteport - transport entry point called by an
* LLDD to deregister/remove a previously
* registered NVME subsystem FC port.
- * @remoteport: pointer to the (registered) remote port that is to be
- * deregistered.
+ * @portptr: pointer to the (registered) remote port that is to be
+ * deregistered.
*
* Returns:
* a completion status. Must be 0 upon success; a negative errno
@@ -1385,7 +1393,7 @@ nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
__nvme_fc_finish_ls_req(lsop);
- /* fc-nvme iniator doesn't care about success or failure of cmd */
+ /* fc-nvme initiator doesn't care about success or failure of cmd */
kfree(lsop);
}
@@ -1685,6 +1693,8 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
struct request *rq, u32 rqno)
{
+ struct nvme_fcp_op_w_sgl *op_w_sgl =
+ container_of(op, typeof(*op_w_sgl), op);
struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
int ret = 0;
@@ -1694,7 +1704,6 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
op->fcp_req.rspaddr = &op->rsp_iu;
op->fcp_req.rsplen = sizeof(op->rsp_iu);
op->fcp_req.done = nvme_fc_fcpio_done;
- op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
op->ctrl = ctrl;
op->queue = queue;
@@ -1733,12 +1742,17 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
unsigned int hctx_idx, unsigned int numa_node)
{
struct nvme_fc_ctrl *ctrl = set->driver_data;
- struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
+ struct nvme_fcp_op_w_sgl *op = blk_mq_rq_to_pdu(rq);
int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
+ int res;
nvme_req(rq)->ctrl = &ctrl->ctrl;
- return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
+ res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
+ if (res)
+ return res;
+ op->op.fcp_req.first_sgl = &op->sgl[0];
+ return res;
}
static int
@@ -1768,7 +1782,6 @@ nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
}
aen_op->flags = FCOP_FLAGS_AEN;
- aen_op->fcp_req.first_sgl = NULL; /* no sg list */
aen_op->fcp_req.private = private;
memset(sqe, 0, sizeof(*sqe));
@@ -2422,10 +2435,9 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
ctrl->tag_set.reserved_tags = 1; /* fabric connect */
ctrl->tag_set.numa_node = NUMA_NO_NODE;
ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
- (SG_CHUNK_SIZE *
- sizeof(struct scatterlist)) +
- ctrl->lport->ops->fcprqst_priv_sz;
+ ctrl->tag_set.cmd_size =
+ struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+ ctrl->lport->ops->fcprqst_priv_sz);
ctrl->tag_set.driver_data = ctrl;
ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
@@ -3027,10 +3039,9 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
- ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
- (SG_CHUNK_SIZE *
- sizeof(struct scatterlist)) +
- ctrl->lport->ops->fcprqst_priv_sz;
+ ctrl->admin_tag_set.cmd_size =
+ struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
+ ctrl->lport->ops->fcprqst_priv_sz);
ctrl->admin_tag_set.driver_data = ctrl;
ctrl->admin_tag_set.nr_hw_queues = 1;
ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
@@ -3159,7 +3170,7 @@ nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
substring_t wwn = { name, &name[sizeof(name)-1] };
int nnoffset, pnoffset;
- /* validate it string one of the 2 allowed formats */
+ /* validate if string is one of the 2 allowed formats */
if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
@@ -3254,6 +3265,90 @@ static struct nvmf_transport_ops nvme_fc_transport = {
.create_ctrl = nvme_fc_create_ctrl,
};
+/* Arbitrary max on successive reference failures; with lots of subsystems this could be high */
+#define DISCOVERY_MAX_FAIL 20
+
+static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ unsigned long flags;
+ LIST_HEAD(local_disc_list);
+ struct nvme_fc_lport *lport;
+ struct nvme_fc_rport *rport;
+ int failcnt = 0;
+
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+restart:
+ list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
+ list_for_each_entry(rport, &lport->endp_list, endp_list) {
+ if (!nvme_fc_lport_get(lport))
+ continue;
+ if (!nvme_fc_rport_get(rport)) {
+ /*
+ * This is a temporary condition. Upon restart
+ * this rport will be gone from the list.
+ *
+ * Revert the earlier lport get and retry. Anything
+ * added to the list already will be skipped (as
+ * they are no longer list_empty). Loops should
+ * resume at rports that were not yet seen.
+ */
+ nvme_fc_lport_put(lport);
+
+ if (failcnt++ < DISCOVERY_MAX_FAIL)
+ goto restart;
+
+ pr_err("nvme_discovery: too many reference "
+ "failures\n");
+ goto process_local_list;
+ }
+ if (list_empty(&rport->disc_list))
+ list_add_tail(&rport->disc_list,
+ &local_disc_list);
+ }
+ }
+
+process_local_list:
+ while (!list_empty(&local_disc_list)) {
+ rport = list_first_entry(&local_disc_list,
+ struct nvme_fc_rport, disc_list);
+ list_del_init(&rport->disc_list);
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ lport = rport->lport;
+ /* signal discovery. Won't hurt if it repeats */
+ nvme_fc_signal_discovery_scan(lport, rport);
+ nvme_fc_rport_put(rport);
+ nvme_fc_lport_put(lport);
+
+ spin_lock_irqsave(&nvme_fc_lock, flags);
+ }
+ spin_unlock_irqrestore(&nvme_fc_lock, flags);
+
+ return count;
+}
+static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
+
+static struct attribute *nvme_fc_attrs[] = {
+ &dev_attr_nvme_discovery.attr,
+ NULL
+};
+
+static struct attribute_group nvme_fc_attr_group = {
+ .attrs = nvme_fc_attrs,
+};
+
+static const struct attribute_group *nvme_fc_attr_groups[] = {
+ &nvme_fc_attr_group,
+ NULL
+};
+
+static struct class fc_class = {
+ .name = "fc",
+ .dev_groups = nvme_fc_attr_groups,
+ .owner = THIS_MODULE,
+};
+
static int __init nvme_fc_init_module(void)
{
int ret;
@@ -3272,16 +3367,16 @@ static int __init nvme_fc_init_module(void)
* put in place, this code will move to a more generic
* location for the class.
*/
- fc_class = class_create(THIS_MODULE, "fc");
- if (IS_ERR(fc_class)) {
+ ret = class_register(&fc_class);
+ if (ret) {
pr_err("couldn't register class fc\n");
- return PTR_ERR(fc_class);
+ return ret;
}
/*
* Create a device for the FC-centric udev events
*/
- fc_udev_device = device_create(fc_class, NULL, MKDEV(0, 0), NULL,
+ fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
"fc_udev_device");
if (IS_ERR(fc_udev_device)) {
pr_err("couldn't create fc_udev device!\n");
@@ -3296,9 +3391,9 @@ static int __init nvme_fc_init_module(void)
return 0;
out_destroy_device:
- device_destroy(fc_class, MKDEV(0, 0));
+ device_destroy(&fc_class, MKDEV(0, 0));
out_destroy_class:
- class_destroy(fc_class);
+ class_unregister(&fc_class);
return ret;
}
@@ -3313,8 +3408,8 @@ static void __exit nvme_fc_exit_module(void)
ida_destroy(&nvme_fc_local_port_cnt);
ida_destroy(&nvme_fc_ctrl_cnt);
- device_destroy(fc_class, MKDEV(0, 0));
- class_destroy(fc_class);
+ device_destroy(&fc_class, MKDEV(0, 0));
+ class_unregister(&fc_class);
}
module_init(nvme_fc_init_module);
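
Editor's note: the cmd_size hunks above replace open-coded sizeof arithmetic with struct_size(), which computes the size of a structure plus a count of trailing array elements and, in the kernel, saturates on overflow instead of wrapping. A compilable illustration of the equivalent open-coded form (op_w_sgl is a simplified stand-in for nvme_fcp_op_w_sgl):

#include <stddef.h>
#include <stdio.h>

struct op_w_sgl {
        int op;                 /* stand-in for the fixed members */
        unsigned char priv[];   /* per-LLDD private area, sized at runtime */
};

int main(void)
{
        size_t priv_sz = 64;    /* stand-in for ops->fcprqst_priv_sz */
        size_t sz = sizeof(struct op_w_sgl) + priv_sz * sizeof(unsigned char);

        printf("%zu\n", sz);    /* the value struct_size() would yield */
        return 0;
}
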
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 6fe5923c95d4..a4f3b263cd6c 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -567,13 +567,13 @@ static int nvme_nvm_set_bb_tbl(struct nvm_dev *nvmdev, struct ppa_addr *ppas,
* Expect the lba in device format
*/
static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
- struct nvm_chk_meta *meta,
- sector_t slba, int nchks)
+ sector_t slba, int nchks,
+ struct nvm_chk_meta *meta)
{
struct nvm_geo *geo = &ndev->geo;
struct nvme_ns *ns = ndev->q->queuedata;
struct nvme_ctrl *ctrl = ns->ctrl;
- struct nvme_nvm_chk_meta *dev_meta = (struct nvme_nvm_chk_meta *)meta;
+ struct nvme_nvm_chk_meta *dev_meta, *dev_meta_off;
struct ppa_addr ppa;
size_t left = nchks * sizeof(struct nvme_nvm_chk_meta);
size_t log_pos, offset, len;
@@ -585,6 +585,10 @@ static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
*/
max_len = min_t(unsigned int, ctrl->max_hw_sectors << 9, 256 * 1024);
+ dev_meta = kmalloc(max_len, GFP_KERNEL);
+ if (!dev_meta)
+ return -ENOMEM;
+
/* Normalize lba address space to obtain log offset */
ppa.ppa = slba;
ppa = dev_to_generic_addr(ndev, ppa);
@@ -598,6 +602,9 @@ static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
while (left) {
len = min_t(unsigned int, left, max_len);
+ memset(dev_meta, 0, max_len);
+ dev_meta_off = dev_meta;
+
ret = nvme_get_log(ctrl, ns->head->ns_id,
NVME_NVM_LOG_REPORT_CHUNK, 0, dev_meta, len,
offset);
@@ -607,21 +614,23 @@ static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev,
}
for (i = 0; i < len; i += sizeof(struct nvme_nvm_chk_meta)) {
- meta->state = dev_meta->state;
- meta->type = dev_meta->type;
- meta->wi = dev_meta->wi;
- meta->slba = le64_to_cpu(dev_meta->slba);
- meta->cnlb = le64_to_cpu(dev_meta->cnlb);
- meta->wp = le64_to_cpu(dev_meta->wp);
+ meta->state = dev_meta_off->state;
+ meta->type = dev_meta_off->type;
+ meta->wi = dev_meta_off->wi;
+ meta->slba = le64_to_cpu(dev_meta_off->slba);
+ meta->cnlb = le64_to_cpu(dev_meta_off->cnlb);
+ meta->wp = le64_to_cpu(dev_meta_off->wp);
meta++;
- dev_meta++;
+ dev_meta_off++;
}
offset += len;
left -= len;
}
+ kfree(dev_meta);
+
return ret;
}
@@ -968,6 +977,9 @@ void nvme_nvm_update_nvm_info(struct nvme_ns *ns)
struct nvm_dev *ndev = ns->ndev;
struct nvm_geo *geo = &ndev->geo;
+ if (geo->version == NVM_OCSSD_SPEC_12)
+ return;
+
geo->csecs = 1 << ns->lba_shift;
geo->sos = ns->ms;
}
@@ -1190,10 +1202,29 @@ static NVM_DEV_ATTR_12_RO(multiplane_modes);
static NVM_DEV_ATTR_12_RO(media_capabilities);
static NVM_DEV_ATTR_12_RO(max_phys_secs);
-static struct attribute *nvm_dev_attrs_12[] = {
+/* 2.0 values */
+static NVM_DEV_ATTR_20_RO(groups);
+static NVM_DEV_ATTR_20_RO(punits);
+static NVM_DEV_ATTR_20_RO(chunks);
+static NVM_DEV_ATTR_20_RO(clba);
+static NVM_DEV_ATTR_20_RO(ws_min);
+static NVM_DEV_ATTR_20_RO(ws_opt);
+static NVM_DEV_ATTR_20_RO(maxoc);
+static NVM_DEV_ATTR_20_RO(maxocpu);
+static NVM_DEV_ATTR_20_RO(mw_cunits);
+static NVM_DEV_ATTR_20_RO(write_typ);
+static NVM_DEV_ATTR_20_RO(write_max);
+static NVM_DEV_ATTR_20_RO(reset_typ);
+static NVM_DEV_ATTR_20_RO(reset_max);
+
+static struct attribute *nvm_dev_attrs[] = {
+ /* version agnostic attrs */
&dev_attr_version.attr,
&dev_attr_capabilities.attr,
+ &dev_attr_read_typ.attr,
+ &dev_attr_read_max.attr,
+ /* 1.2 attrs */
&dev_attr_vendor_opcode.attr,
&dev_attr_device_mode.attr,
&dev_attr_media_manager.attr,
@@ -1208,8 +1239,6 @@ static struct attribute *nvm_dev_attrs_12[] = {
&dev_attr_page_size.attr,
&dev_attr_hw_sector_size.attr,
&dev_attr_oob_sector_size.attr,
- &dev_attr_read_typ.attr,
- &dev_attr_read_max.attr,
&dev_attr_prog_typ.attr,
&dev_attr_prog_max.attr,
&dev_attr_erase_typ.attr,
@@ -1218,33 +1247,7 @@ static struct attribute *nvm_dev_attrs_12[] = {
&dev_attr_media_capabilities.attr,
&dev_attr_max_phys_secs.attr,
- NULL,
-};
-
-static const struct attribute_group nvm_dev_attr_group_12 = {
- .name = "lightnvm",
- .attrs = nvm_dev_attrs_12,
-};
-
-/* 2.0 values */
-static NVM_DEV_ATTR_20_RO(groups);
-static NVM_DEV_ATTR_20_RO(punits);
-static NVM_DEV_ATTR_20_RO(chunks);
-static NVM_DEV_ATTR_20_RO(clba);
-static NVM_DEV_ATTR_20_RO(ws_min);
-static NVM_DEV_ATTR_20_RO(ws_opt);
-static NVM_DEV_ATTR_20_RO(maxoc);
-static NVM_DEV_ATTR_20_RO(maxocpu);
-static NVM_DEV_ATTR_20_RO(mw_cunits);
-static NVM_DEV_ATTR_20_RO(write_typ);
-static NVM_DEV_ATTR_20_RO(write_max);
-static NVM_DEV_ATTR_20_RO(reset_typ);
-static NVM_DEV_ATTR_20_RO(reset_max);
-
-static struct attribute *nvm_dev_attrs_20[] = {
- &dev_attr_version.attr,
- &dev_attr_capabilities.attr,
-
+ /* 2.0 attrs */
&dev_attr_groups.attr,
&dev_attr_punits.attr,
&dev_attr_chunks.attr,
@@ -1255,8 +1258,6 @@ static struct attribute *nvm_dev_attrs_20[] = {
&dev_attr_maxocpu.attr,
&dev_attr_mw_cunits.attr,
- &dev_attr_read_typ.attr,
- &dev_attr_read_max.attr,
&dev_attr_write_typ.attr,
&dev_attr_write_max.attr,
&dev_attr_reset_typ.attr,
@@ -1265,44 +1266,38 @@ static struct attribute *nvm_dev_attrs_20[] = {
NULL,
};
-static const struct attribute_group nvm_dev_attr_group_20 = {
- .name = "lightnvm",
- .attrs = nvm_dev_attrs_20,
-};
-
-int nvme_nvm_register_sysfs(struct nvme_ns *ns)
+static umode_t nvm_dev_attrs_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct gendisk *disk = dev_to_disk(dev);
+ struct nvme_ns *ns = disk->private_data;
struct nvm_dev *ndev = ns->ndev;
- struct nvm_geo *geo = &ndev->geo;
+ struct device_attribute *dev_attr =
+ container_of(attr, typeof(*dev_attr), attr);
if (!ndev)
- return -EINVAL;
-
- switch (geo->major_ver_id) {
- case 1:
- return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
- &nvm_dev_attr_group_12);
- case 2:
- return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
- &nvm_dev_attr_group_20);
- }
+ return 0;
- return -EINVAL;
-}
+ if (dev_attr->show == nvm_dev_attr_show)
+ return attr->mode;
-void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
-{
- struct nvm_dev *ndev = ns->ndev;
- struct nvm_geo *geo = &ndev->geo;
-
- switch (geo->major_ver_id) {
+ switch (ndev->geo.major_ver_id) {
case 1:
- sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
- &nvm_dev_attr_group_12);
+ if (dev_attr->show == nvm_dev_attr_show_12)
+ return attr->mode;
break;
case 2:
- sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
- &nvm_dev_attr_group_20);
+ if (dev_attr->show == nvm_dev_attr_show_20)
+ return attr->mode;
break;
}
+
+ return 0;
}
+
+const struct attribute_group nvme_nvm_attr_group = {
+ .name = "lightnvm",
+ .attrs = nvm_dev_attrs,
+ .is_visible = nvm_dev_attrs_visible,
+};
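
Editor's note: the lightnvm rework above folds the 1.2-only and 2.0-only attribute tables into one array and lets an .is_visible callback decide, per attribute and per device, whether each file appears: returning 0 hides it, returning attr->mode exposes it with its default permissions. A hedged sketch of the pattern (the example_* names and the is_v2() predicate are illustrative):

static umode_t example_attrs_visible(struct kobject *kobj,
                                     struct attribute *attr, int index)
{
        /* version-specific attribute: hide it on devices that lack it */
        if (attr == &dev_attr_v2_only.attr && !is_v2(kobj))
                return 0;

        return attr->mode;      /* everything else: visible as declared */
}

static const struct attribute_group example_group = {
        .attrs = example_attrs,
        .is_visible = example_attrs_visible,
};
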
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 5a9562881d4e..5e3cc8c59a39 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -77,6 +77,13 @@ void nvme_failover_req(struct request *req)
queue_work(nvme_wq, &ns->ctrl->ana_work);
}
break;
+ case NVME_SC_HOST_PATH_ERROR:
+ /*
+ * Temporary transport disruption in talking to the controller.
+ * Try to send on a new path.
+ */
+ nvme_mpath_clear_current_path(ns);
+ break;
default:
/*
* Reset the controller for any non-ANA error as we don't know
@@ -110,29 +117,55 @@ static const char *nvme_ana_state_names[] = {
[NVME_ANA_CHANGE] = "change",
};
-static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head)
+void nvme_mpath_clear_current_path(struct nvme_ns *ns)
+{
+ struct nvme_ns_head *head = ns->head;
+ int node;
+
+ if (!head)
+ return;
+
+ for_each_node(node) {
+ if (ns == rcu_access_pointer(head->current_path[node]))
+ rcu_assign_pointer(head->current_path[node], NULL);
+ }
+}
+
+static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
{
- struct nvme_ns *ns, *fallback = NULL;
+ int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
+ struct nvme_ns *found = NULL, *fallback = NULL, *ns;
list_for_each_entry_rcu(ns, &head->list, siblings) {
if (ns->ctrl->state != NVME_CTRL_LIVE ||
test_bit(NVME_NS_ANA_PENDING, &ns->flags))
continue;
+
+ distance = node_distance(node, dev_to_node(ns->ctrl->dev));
+
switch (ns->ana_state) {
case NVME_ANA_OPTIMIZED:
- rcu_assign_pointer(head->current_path, ns);
- return ns;
+ if (distance < found_distance) {
+ found_distance = distance;
+ found = ns;
+ }
+ break;
case NVME_ANA_NONOPTIMIZED:
- fallback = ns;
+ if (distance < fallback_distance) {
+ fallback_distance = distance;
+ fallback = ns;
+ }
break;
default:
break;
}
}
- if (fallback)
- rcu_assign_pointer(head->current_path, fallback);
- return fallback;
+ if (!found)
+ found = fallback;
+ if (found)
+ rcu_assign_pointer(head->current_path[node], found);
+ return found;
}
static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
@@ -143,10 +176,12 @@ static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
- struct nvme_ns *ns = srcu_dereference(head->current_path, &head->srcu);
+ int node = numa_node_id();
+ struct nvme_ns *ns;
+ ns = srcu_dereference(head->current_path[node], &head->srcu);
if (unlikely(!ns || !nvme_path_is_optimized(ns)))
- ns = __nvme_find_path(head);
+ ns = __nvme_find_path(head, node);
return ns;
}
@@ -193,7 +228,7 @@ static bool nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc)
int srcu_idx;
srcu_idx = srcu_read_lock(&head->srcu);
- ns = srcu_dereference(head->current_path, &head->srcu);
+ ns = srcu_dereference(head->current_path[numa_node_id()], &head->srcu);
if (likely(ns && nvme_path_is_optimized(ns)))
found = ns->queue->poll_fn(q, qc);
srcu_read_unlock(&head->srcu, srcu_idx);
@@ -282,12 +317,17 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
if (!head->disk)
return;
- if (!(head->disk->flags & GENHD_FL_UP)) {
- device_add_disk(&head->subsys->dev, head->disk);
- if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
- &nvme_ns_id_attr_group))
- dev_warn(&head->subsys->dev,
- "failed to create id group.\n");
+ if (!(head->disk->flags & GENHD_FL_UP))
+ device_add_disk(&head->subsys->dev, head->disk,
+ nvme_ns_id_attr_groups);
+
+ if (nvme_path_is_optimized(ns)) {
+ int node, srcu_idx;
+
+ srcu_idx = srcu_read_lock(&head->srcu);
+ for_each_node(node)
+ __nvme_find_path(head, node);
+ srcu_read_unlock(&head->srcu, srcu_idx);
}
kblockd_schedule_work(&ns->head->requeue_work);
@@ -494,11 +534,8 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
if (!head->disk)
return;
- if (head->disk->flags & GENHD_FL_UP) {
- sysfs_remove_group(&disk_to_dev(head->disk)->kobj,
- &nvme_ns_id_attr_group);
+ if (head->disk->flags & GENHD_FL_UP)
del_gendisk(head->disk);
- }
blk_set_queue_dying(head->disk->queue);
/* make sure all pending bios are cleaned up */
kblockd_schedule_work(&head->requeue_work);
@@ -537,8 +574,10 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
INIT_WORK(&ctrl->ana_work, nvme_ana_work);
ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
- if (!ctrl->ana_log_buf)
+ if (!ctrl->ana_log_buf) {
+ error = -ENOMEM;
goto out;
+ }
error = nvme_read_ana_log(ctrl, true);
if (error)
@@ -547,7 +586,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
out_free_ana_log_buf:
kfree(ctrl->ana_log_buf);
out:
- return -ENOMEM;
+ return error;
}
void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
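
Editor's note: the multipath hunks above turn the single cached path into a per-NUMA-node array and pick, for each node, the ANA-optimized path with the smallest node_distance(), falling back to the closest non-optimized one. A userspace distillation of the selection loop (distances and states are illustrative):

#include <limits.h>
#include <stdio.h>

struct path { int distance; int optimized; };

static int pick(const struct path *p, int n)
{
        int best = -1, fallback = -1;
        int best_d = INT_MAX, fallback_d = INT_MAX;

        for (int i = 0; i < n; i++) {
                if (p[i].optimized && p[i].distance < best_d) {
                        best_d = p[i].distance;
                        best = i;
                } else if (!p[i].optimized && p[i].distance < fallback_d) {
                        fallback_d = p[i].distance;
                        fallback = i;
                }
        }
        /* closest optimized path wins; otherwise closest fallback */
        return best >= 0 ? best : fallback;
}

int main(void)
{
        struct path paths[] = { { 20, 0 }, { 10, 1 }, { 30, 1 } };

        printf("%d\n", pick(paths, 3)); /* prints 1: nearest optimized path */
        return 0;
}
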
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index bb4a2003c097..9fefba039d1e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -277,14 +277,6 @@ struct nvme_ns_ids {
* only ever has a single entry for private namespaces.
*/
struct nvme_ns_head {
-#ifdef CONFIG_NVME_MULTIPATH
- struct gendisk *disk;
- struct nvme_ns __rcu *current_path;
- struct bio_list requeue_list;
- spinlock_t requeue_lock;
- struct work_struct requeue_work;
- struct mutex lock;
-#endif
struct list_head list;
struct srcu_struct srcu;
struct nvme_subsystem *subsys;
@@ -293,6 +285,14 @@ struct nvme_ns_head {
struct list_head entry;
struct kref ref;
int instance;
+#ifdef CONFIG_NVME_MULTIPATH
+ struct gendisk *disk;
+ struct bio_list requeue_list;
+ spinlock_t requeue_lock;
+ struct work_struct requeue_work;
+ struct mutex lock;
+ struct nvme_ns __rcu *current_path[];
+#endif
};
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
@@ -459,7 +459,7 @@ int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
void *log, size_t size, u64 offset);
-extern const struct attribute_group nvme_ns_id_attr_group;
+extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct block_device_operations nvme_ns_head_ops;
#ifdef CONFIG_NVME_MULTIPATH
@@ -474,14 +474,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
-
-static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
-{
- struct nvme_ns_head *head = ns->head;
-
- if (head && ns == rcu_access_pointer(head->current_path))
- rcu_assign_pointer(head->current_path, NULL);
-}
+void nvme_mpath_clear_current_path(struct nvme_ns *ns);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
@@ -551,8 +544,7 @@ static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
void nvme_nvm_update_nvm_info(struct nvme_ns *ns);
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
-int nvme_nvm_register_sysfs(struct nvme_ns *ns);
-void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
+extern const struct attribute_group nvme_nvm_attr_group;
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
static inline void nvme_nvm_update_nvm_info(struct nvme_ns *ns) {};
@@ -563,11 +555,6 @@ static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
}
static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
-static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns)
-{
- return 0;
-}
-static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {};
static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
unsigned long arg)
{
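
Editor's note: the nvme.h hunk moves the multipath members to the end of nvme_ns_head so current_path[] can become a C99 flexible array member, which must be the structure's last field; the core.c hunk earlier sizes the allocation as sizeof(*head) plus one pointer slot per possible node. A compilable sketch of that allocation pattern (NR_NODES stands in for num_possible_nodes()):

#include <stdlib.h>

#define NR_NODES 4      /* stand-in for num_possible_nodes() */

struct head {
        int refcount;                   /* fixed part of the structure */
        void *current_path[];           /* flexible array: one slot per node */
};

int main(void)
{
        struct head *h = calloc(1, sizeof(*h) +
                                NR_NODES * sizeof(h->current_path[0]));

        if (!h)
                return 1;
        free(h);
        return 0;
}
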
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d668682f91df..4e023cd007e1 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -772,10 +772,10 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir))
goto out_unmap;
- }
- if (blk_integrity_rq(req))
cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
+ }
+
return BLK_STS_OK;
out_unmap:
@@ -1249,7 +1249,7 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
/**
* nvme_suspend_queue - put queue into suspended state
- * @nvmeq - queue to suspend
+ * @nvmeq: queue to suspend
*/
static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
@@ -2564,13 +2564,12 @@ static void nvme_remove(struct pci_dev *pdev)
struct nvme_dev *dev = pci_get_drvdata(pdev);
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
-
- cancel_work_sync(&dev->ctrl.reset_work);
pci_set_drvdata(pdev, NULL);
if (!pci_device_is_present(pdev)) {
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
nvme_dev_disable(dev, true);
+ nvme_dev_remove_admin(dev);
}
flush_work(&dev->ctrl.reset_work);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index dc042017c293..d181cafedc58 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -233,8 +233,15 @@ static void nvme_rdma_qp_event(struct ib_event *event, void *context)
static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
{
- wait_for_completion_interruptible_timeout(&queue->cm_done,
+ int ret;
+
+ ret = wait_for_completion_interruptible_timeout(&queue->cm_done,
msecs_to_jiffies(NVME_RDMA_CONNECT_TIMEOUT_MS) + 1);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ return -ETIMEDOUT;
+ WARN_ON_ONCE(queue->cm_error > 0);
return queue->cm_error;
}
@@ -1849,54 +1856,6 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
.stop_ctrl = nvme_rdma_stop_ctrl,
};
-static inline bool
-__nvme_rdma_options_match(struct nvme_rdma_ctrl *ctrl,
- struct nvmf_ctrl_options *opts)
-{
- char *stdport = __stringify(NVME_RDMA_IP_PORT);
-
-
- if (!nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts) ||
- strcmp(opts->traddr, ctrl->ctrl.opts->traddr))
- return false;
-
- if (opts->mask & NVMF_OPT_TRSVCID &&
- ctrl->ctrl.opts->mask & NVMF_OPT_TRSVCID) {
- if (strcmp(opts->trsvcid, ctrl->ctrl.opts->trsvcid))
- return false;
- } else if (opts->mask & NVMF_OPT_TRSVCID) {
- if (strcmp(opts->trsvcid, stdport))
- return false;
- } else if (ctrl->ctrl.opts->mask & NVMF_OPT_TRSVCID) {
- if (strcmp(stdport, ctrl->ctrl.opts->trsvcid))
- return false;
- }
- /* else, it's a match as both have stdport. Fall to next checks */
-
- /*
- * checking the local address is rough. In most cases, one
- * is not specified and the host port is selected by the stack.
- *
- * Assume no match if:
- * local address is specified and address is not the same
- * local address is not specified but remote is, or vice versa
- * (admin using specific host_traddr when it matters).
- */
- if (opts->mask & NVMF_OPT_HOST_TRADDR &&
- ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) {
- if (strcmp(opts->host_traddr, ctrl->ctrl.opts->host_traddr))
- return false;
- } else if (opts->mask & NVMF_OPT_HOST_TRADDR ||
- ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
- return false;
- /*
- * if neither controller had an host port specified, assume it's
- * a match as everything else matched.
- */
-
- return true;
-}
-
/*
* Fails a connection request if it matches an existing controller
* (association) with the same tuple:
@@ -1917,7 +1876,7 @@ nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts)
mutex_lock(&nvme_rdma_ctrl_mutex);
list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
- found = __nvme_rdma_options_match(ctrl, opts);
+ found = nvmf_ip_options_match(&ctrl->ctrl, opts);
if (found)
break;
}
@@ -1932,7 +1891,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
struct nvme_rdma_ctrl *ctrl;
int ret;
bool changed;
- char *port;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
@@ -1940,15 +1898,21 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
ctrl->ctrl.opts = opts;
INIT_LIST_HEAD(&ctrl->list);
- if (opts->mask & NVMF_OPT_TRSVCID)
- port = opts->trsvcid;
- else
- port = __stringify(NVME_RDMA_IP_PORT);
+ if (!(opts->mask & NVMF_OPT_TRSVCID)) {
+ opts->trsvcid =
+ kstrdup(__stringify(NVME_RDMA_IP_PORT), GFP_KERNEL);
+ if (!opts->trsvcid) {
+ ret = -ENOMEM;
+ goto out_free_ctrl;
+ }
+ opts->mask |= NVMF_OPT_TRSVCID;
+ }
ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
- opts->traddr, port, &ctrl->addr);
+ opts->traddr, opts->trsvcid, &ctrl->addr);
if (ret) {
- pr_err("malformed address passed: %s:%s\n", opts->traddr, port);
+ pr_err("malformed address passed: %s:%s\n",
+ opts->traddr, opts->trsvcid);
goto out_free_ctrl;
}
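
Editor's note: the nvme_rdma_wait_for_cm() hunk above stops discarding the return value of wait_for_completion_interruptible_timeout(), which is negative when interrupted by a signal, zero on timeout, and the remaining jiffies otherwise; previously a timeout fell through to return queue->cm_error, which may never have been set. The shape of the check, as a kernel-context sketch:

#include <linux/completion.h>
#include <linux/errno.h>

static int wait_for_cm_done(struct completion *done, unsigned long timeout)
{
        long ret = wait_for_completion_interruptible_timeout(done, timeout);

        if (ret < 0)
                return ret;             /* interrupted by a signal */
        if (ret == 0)
                return -ETIMEDOUT;      /* timer expired before completion */
        return 0;                       /* completed; ret held remaining jiffies */
}
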
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index a490790d6691..196d5bd56718 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -156,6 +156,34 @@ TRACE_EVENT(nvme_complete_rq,
);
+#define aer_name(aer) { aer, #aer }
+
+TRACE_EVENT(nvme_async_event,
+ TP_PROTO(struct nvme_ctrl *ctrl, u32 result),
+ TP_ARGS(ctrl, result),
+ TP_STRUCT__entry(
+ __field(int, ctrl_id)
+ __field(u32, result)
+ ),
+ TP_fast_assign(
+ __entry->ctrl_id = ctrl->instance;
+ __entry->result = result;
+ ),
+ TP_printk("nvme%d: NVME_AEN=%#08x [%s]",
+ __entry->ctrl_id, __entry->result,
+ __print_symbolic(__entry->result,
+ aer_name(NVME_AER_NOTICE_NS_CHANGED),
+ aer_name(NVME_AER_NOTICE_ANA),
+ aer_name(NVME_AER_NOTICE_FW_ACT_STARTING),
+ aer_name(NVME_AER_ERROR),
+ aer_name(NVME_AER_SMART),
+ aer_name(NVME_AER_CSS),
+ aer_name(NVME_AER_VS))
+ )
+);
+
+#undef aer_name
+
#endif /* _TRACE_NVME_H */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index a21caea1e080..1179f6314323 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -58,7 +58,7 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
if (!ns) {
- pr_err("nvmet : Could not find namespace id : %d\n",
+ pr_err("Could not find namespace id : %d\n",
le32_to_cpu(req->cmd->get_log_page.nsid));
return NVME_SC_INVALID_NS;
}
@@ -245,6 +245,10 @@ static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
offset += len;
ngrps++;
}
+ for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
+ if (nvmet_ana_group_enabled[grpid])
+ ngrps++;
+ }
hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
hdr.ngrps = cpu_to_le16(ngrps);
@@ -349,7 +353,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
if (req->port->inline_data_size)
id->sgls |= cpu_to_le32(1 << 20);
- strcpy(id->subnqn, ctrl->subsys->subsysnqn);
+ strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
/* Max command capsule size is sqe + single page of in-capsule data */
id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
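
Editor's note: the strcpy-to-strlcpy changes here and in discovery.c below bound the copy into the fixed-size subnqn field, guaranteeing NUL-termination and no overflow if the configured subsystem NQN is longer than the destination. A userspace illustration (snprintf emulates the bounded, always-terminated copy):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char subnqn[8];         /* deliberately tiny to force truncation */
        const char *src = "nqn.2014-08.org.example:toolong";

        /* strcpy(subnqn, src) would overrun; the bounded copy truncates */
        snprintf(subnqn, sizeof(subnqn), "%s", src);
        printf("%s (len %zu)\n", subnqn, strlen(subnqn));
        return 0;
}
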
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b5ec96abd048..0acdff9e6842 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1105,8 +1105,7 @@ static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
if (!port)
return NULL;
- if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
- NVMF_NQN_SIZE)) {
+ if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
return NULL;
return nvmet_disc_subsys;
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index eae29f493a07..bc0aa0bf1543 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -174,7 +174,7 @@ static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
if (req->port->inline_data_size)
id->sgls |= cpu_to_le32(1 << 20);
- strcpy(id->subnqn, ctrl->subsys->subsysnqn);
+ strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
@@ -219,12 +219,10 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
default:
- pr_err("unsupported cmd %d\n", cmd->common.opcode);
+ pr_err("unhandled cmd %d\n", cmd->common.opcode);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
- pr_err("unhandled cmd %d\n", cmd->common.opcode);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
int __init nvmet_init_discovery(void)
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 29b4b236afd8..409081a03b24 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -110,11 +110,19 @@ struct nvmet_fc_tgtport {
struct list_head ls_busylist;
struct list_head assoc_list;
struct ida assoc_cnt;
- struct nvmet_port *port;
+ struct nvmet_fc_port_entry *pe;
struct kref ref;
u32 max_sg_cnt;
};
+struct nvmet_fc_port_entry {
+ struct nvmet_fc_tgtport *tgtport;
+ struct nvmet_port *port;
+ u64 node_name;
+ u64 port_name;
+ struct list_head pe_list;
+};
+
struct nvmet_fc_defer_fcp_req {
struct list_head req_list;
struct nvmefc_tgt_fcp_req *fcp_req;
@@ -132,7 +140,6 @@ struct nvmet_fc_tgt_queue {
atomic_t zrspcnt;
atomic_t rsn;
spinlock_t qlock;
- struct nvmet_port *port;
struct nvmet_cq nvme_cq;
struct nvmet_sq nvme_sq;
struct nvmet_fc_tgt_assoc *assoc;
@@ -221,6 +228,7 @@ static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);
+static LIST_HEAD(nvmet_fc_portentry_list);
static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
@@ -645,7 +653,6 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
queue->qid = qid;
queue->sqsize = sqsize;
queue->assoc = assoc;
- queue->port = assoc->tgtport->port;
queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
INIT_LIST_HEAD(&queue->fod_list);
INIT_LIST_HEAD(&queue->avail_defer_list);
@@ -957,6 +964,83 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
return ret;
}
+static void
+nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
+ struct nvmet_fc_port_entry *pe,
+ struct nvmet_port *port)
+{
+ lockdep_assert_held(&nvmet_fc_tgtlock);
+
+ pe->tgtport = tgtport;
+ tgtport->pe = pe;
+
+ pe->port = port;
+ port->priv = pe;
+
+ pe->node_name = tgtport->fc_target_port.node_name;
+ pe->port_name = tgtport->fc_target_port.port_name;
+ INIT_LIST_HEAD(&pe->pe_list);
+
+ list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
+}
+
+static void
+nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ if (pe->tgtport)
+ pe->tgtport->pe = NULL;
+ list_del(&pe->pe_list);
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
+
+/*
+ * called when a targetport deregisters. Breaks the relationship
+ * with the nvmet port, but leaves the port_entry in place so that
+ * re-registration can resume operation.
+ */
+static void
+nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
+{
+ struct nvmet_fc_port_entry *pe;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ pe = tgtport->pe;
+ if (pe)
+ pe->tgtport = NULL;
+ tgtport->pe = NULL;
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
+
+/*
+ * called when a new targetport is registered. Looks in the
+ * existing nvmet port_entries to see if the nvmet layer is
+ * configured for the targetport's wwn's. (the targetport existed,
+ * nvmet configured, the lldd unregistered the tgtport, and is now
+ * reregistering the same targetport). If so, set the nvmet
+ * port entry on the targetport.
+ */
+static void
+nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
+{
+ struct nvmet_fc_port_entry *pe;
+ unsigned long flags;
+
+ spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
+ list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
+ if (tgtport->fc_target_port.node_name == pe->node_name &&
+ tgtport->fc_target_port.port_name == pe->port_name) {
+ WARN_ON(pe->tgtport);
+ tgtport->pe = pe;
+ pe->tgtport = tgtport;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+}
/**
* nvme_fc_register_targetport - transport entry point called by an
@@ -1034,6 +1118,8 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
goto out_free_newrec;
}
+ nvmet_fc_portentry_rebind_tgt(newrec);
+
spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
@@ -1159,8 +1245,8 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
* nvme_fc_unregister_targetport - transport entry point called by an
* LLDD to deregister/remove a previously
* registered local NVME subsystem FC port.
- * @tgtport: pointer to the (registered) target port that is to be
- * deregistered.
+ * @target_port: pointer to the (registered) target port that is to be
+ * deregistered.
*
* Returns:
* a completion status. Must be 0 upon success; a negative errno
@@ -1171,6 +1257,8 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{
struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
+ nvmet_fc_portentry_unbind_tgt(tgtport);
+
/* terminate any outstanding associations */
__nvmet_fc_free_assocs(tgtport);
@@ -1661,7 +1749,7 @@ nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
*
* If this routine returns error, the LLDD should abort the exchange.
*
- * @tgtport: pointer to the (registered) target port the LS was
+ * @target_port: pointer to the (registered) target port the LS was
* received on.
* @lsreq: pointer to a lsreq request structure to be used to reference
* the exchange corresponding to the LS.
@@ -2147,7 +2235,7 @@ nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
/*
- * Actual processing routine for received FC-NVME LS Requests from the LLD
+ * Actual processing routine for received FC-NVME I/O Requests from the LLD
*/
static void
nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
@@ -2158,6 +2246,13 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
int ret;
/*
+ * If there is no nvmet mapping to the targetport, there
+ * shouldn't be requests; just terminate them.
+ */
+ if (!tgtport->pe)
+ goto transport_error;
+
+ /*
* Fused commands are currently not supported in the linux
* implementation.
*
@@ -2184,7 +2279,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
fod->req.cmd = &fod->cmdiubuf.sqe;
fod->req.rsp = &fod->rspiubuf.cqe;
- fod->req.port = fod->queue->port;
+ fod->req.port = tgtport->pe->port;
/* clear any response payload */
memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
@@ -2468,7 +2563,7 @@ nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
substring_t wwn = { name, &name[sizeof(name)-1] };
int nnoffset, pnoffset;
- /* validate it string one of the 2 allowed formats */
+ /* validate if string is one of the 2 allowed formats */
if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
@@ -2508,6 +2603,7 @@ static int
nvmet_fc_add_port(struct nvmet_port *port)
{
struct nvmet_fc_tgtport *tgtport;
+ struct nvmet_fc_port_entry *pe;
struct nvmet_fc_traddr traddr = { 0L, 0L };
unsigned long flags;
int ret;
@@ -2524,24 +2620,40 @@ nvmet_fc_add_port(struct nvmet_port *port)
if (ret)
return ret;
+ pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+ if (!pe)
+ return -ENOMEM;
+
ret = -ENXIO;
spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
if ((tgtport->fc_target_port.node_name == traddr.nn) &&
(tgtport->fc_target_port.port_name == traddr.pn)) {
- tgtport->port = port;
- ret = 0;
+ /* an FC port can be bound to only one nvmet port id */
+ if (!tgtport->pe) {
+ nvmet_fc_portentry_bind(tgtport, pe, port);
+ ret = 0;
+ } else
+ ret = -EALREADY;
break;
}
}
spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+
+ if (ret)
+ kfree(pe);
+
return ret;
}
static void
nvmet_fc_remove_port(struct nvmet_port *port)
{
- /* nothing to do */
+ struct nvmet_fc_port_entry *pe = port->priv;
+
+ nvmet_fc_portentry_unbind(pe);
+
+ kfree(pe);
}
static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 5251689a1d9a..291f4121f516 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -648,6 +648,7 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
break;
/* Fall-Thru to RSP handling */
+ /* FALLTHRU */
case NVMET_FCOP_RSP:
if (fcpreq) {
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 7bc9f6240432..f93fb5711142 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -58,7 +58,7 @@ static void nvmet_bio_done(struct bio *bio)
static void nvmet_bdev_execute_rw(struct nvmet_req *req)
{
int sg_cnt = req->sg_cnt;
- struct bio *bio = &req->b.inline_bio;
+ struct bio *bio;
struct scatterlist *sg;
sector_t sector;
blk_qc_t cookie;
@@ -81,7 +81,12 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
sector = le64_to_cpu(req->cmd->rw.slba);
sector <<= (req->ns->blksize_shift - 9);
- bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ if (req->data_len <= NVMET_MAX_INLINE_DATA_LEN) {
+ bio = &req->b.inline_bio;
+ bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
+ } else {
+ bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
+ }
bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector;
bio->bi_private = req;
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 81a9dc5290a8..39d972e2595f 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -246,7 +246,8 @@ static void nvmet_file_execute_discard(struct nvmet_req *req)
break;
offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
- len = le32_to_cpu(range.nlb) << req->ns->blksize_shift;
+ len = le32_to_cpu(range.nlb);
+ len <<= req->ns->blksize_shift;
if (offset + len > req->ns->size) {
ret = NVME_SC_LBA_RANGE | NVME_SC_DNR;
break;
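
Editor's note: the discard hunk above splits the length computation so the 32-bit nlb is first assigned to the wider len and only then shifted; shifting the u32 expression before widening can silently drop high bits for large ranges. A userspace demonstration of the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t nlb = 0x01000000;      /* 16M blocks */
        unsigned int shift = 12;        /* 4K block size */

        uint64_t bad  = (uint64_t)(nlb << shift);  /* shifted as 32-bit: wraps to 0 */
        uint64_t good = (uint64_t)nlb << shift;    /* widened first: correct */

        printf("bad=%#llx good=%#llx\n",
               (unsigned long long)bad, (unsigned long long)good);
        return 0;
}
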
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index ec9af4ee03b6..08f7b57a1203 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -264,6 +264,7 @@ struct nvmet_fabrics_ops {
};
#define NVMET_MAX_INLINE_BIOVEC 8
+#define NVMET_MAX_INLINE_DATA_LEN (NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)
struct nvmet_req {
struct nvme_command *cmd;
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 3533e918ea37..bd265aceb90c 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -66,6 +66,7 @@ struct nvmet_rdma_rsp {
struct nvmet_req req;
+ bool allocated;
u8 n_rdma;
u32 flags;
u32 invalidate_rkey;
@@ -121,6 +122,7 @@ struct nvmet_rdma_device {
int inline_page_count;
};
+static struct workqueue_struct *nvmet_rdma_delete_wq;
static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
@@ -174,11 +176,19 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
unsigned long flags;
spin_lock_irqsave(&queue->rsps_lock, flags);
- rsp = list_first_entry(&queue->free_rsps,
+ rsp = list_first_entry_or_null(&queue->free_rsps,
struct nvmet_rdma_rsp, free_list);
- list_del(&rsp->free_list);
+ if (likely(rsp))
+ list_del(&rsp->free_list);
spin_unlock_irqrestore(&queue->rsps_lock, flags);
+ if (unlikely(!rsp)) {
+ rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+ if (unlikely(!rsp))
+ return NULL;
+ rsp->allocated = true;
+ }
+
return rsp;
}
@@ -187,6 +197,11 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
unsigned long flags;
+ if (rsp->allocated) {
+ kfree(rsp);
+ return;
+ }
+
spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
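The two hunks above form a preallocated-pool-with-fallback pattern: take a response from the free list when one is available, fall back to kmalloc() under load, and let the new `allocated` flag route the object back to kfree() rather than onto the free list. A condensed sketch (pop_free_list() is a hypothetical helper; the sketch also initializes the flag on the pool path for clarity):

    static struct nvmet_rdma_rsp *get_rsp(struct nvmet_rdma_queue *queue)
    {
            struct nvmet_rdma_rsp *rsp = pop_free_list(queue);

            if (rsp) {
                    rsp->allocated = false;      /* came from the pool */
                    return rsp;
            }
            rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
            if (!rsp)
                    return NULL;                 /* caller must cope with failure */
            rsp->allocated = true;               /* free with kfree() on put */
            return rsp;
    }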
@@ -776,6 +791,15 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
cmd->queue = queue;
rsp = nvmet_rdma_get_rsp(queue);
+ if (unlikely(!rsp)) {
+ /*
+ * We get here only under memory pressure:
+ * silently drop and let the host retry,
+ * as we cannot even fail the request.
+ */
+ nvmet_rdma_post_recv(queue->dev, cmd);
+ return;
+ }
rsp->queue = queue;
rsp->cmd = cmd;
rsp->flags = 0;
@@ -1244,12 +1268,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
if (queue->host_qid == 0) {
/* Let inflight controller teardown complete */
- flush_scheduled_work();
+ flush_workqueue(nvmet_rdma_delete_wq);
}
ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
if (ret) {
- schedule_work(&queue->release_work);
+ queue_work(nvmet_rdma_delete_wq, &queue->release_work);
/* Destroying rdma_cm id is not needed here */
return 0;
}
@@ -1314,7 +1338,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
if (disconnect) {
rdma_disconnect(queue->cm_id);
- schedule_work(&queue->release_work);
+ queue_work(nvmet_rdma_delete_wq, &queue->release_work);
}
}
@@ -1344,7 +1368,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
mutex_unlock(&nvmet_rdma_queue_mutex);
pr_err("failed to connect queue %d\n", queue->idx);
- schedule_work(&queue->release_work);
+ queue_work(nvmet_rdma_delete_wq, &queue->release_work);
}
/**
@@ -1626,8 +1650,17 @@ static int __init nvmet_rdma_init(void)
if (ret)
goto err_ib_client;
+ nvmet_rdma_delete_wq = alloc_workqueue("nvmet-rdma-delete-wq",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ if (!nvmet_rdma_delete_wq) {
+ ret = -ENOMEM;
+ goto err_unreg_transport;
+ }
+
return 0;
+err_unreg_transport:
+ nvmet_unregister_transport(&nvmet_rdma_ops);
err_ib_client:
ib_unregister_client(&nvmet_rdma_ib_client);
return ret;
@@ -1635,6 +1668,7 @@ err_ib_client:
static void __exit nvmet_rdma_exit(void)
{
+ destroy_workqueue(nvmet_rdma_delete_wq);
nvmet_unregister_transport(&nvmet_rdma_ops);
ib_unregister_client(&nvmet_rdma_ib_client);
WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
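Replacing schedule_work()/flush_scheduled_work() with a driver-private workqueue means the connect path only waits for this driver's release work, rather than flushing every user of the system workqueue (a known deadlock source). The lifecycle, as a minimal sketch with hypothetical names:

    static struct workqueue_struct *delete_wq;

    static int __init example_init(void)
    {
            delete_wq = alloc_workqueue("example-delete-wq",
                            WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
            if (!delete_wq)
                    return -ENOMEM;
            return 0;
    }

    static void __exit example_exit(void)
    {
            destroy_workqueue(delete_wq);   /* drains pending work first */
    }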
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 9095b8290150..74eaedd5b860 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -140,6 +140,9 @@ void of_populate_phandle_cache(void)
if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
phandles++;
+ if (!phandles)
+ goto out;
+
cache_entries = roundup_pow_of_two(phandles);
phandle_cache_mask = cache_entries - 1;
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 722537e14848..41b49716ac75 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -771,6 +771,9 @@ static void __init of_unittest_parse_interrupts(void)
struct of_phandle_args args;
int i, rc;
+ if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+ return;
+
np = of_find_node_by_path("/testcase-data/interrupts/interrupts0");
if (!np) {
pr_err("missing testcase data\n");
@@ -845,6 +848,9 @@ static void __init of_unittest_parse_interrupts_extended(void)
struct of_phandle_args args;
int i, rc;
+ if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+ return;
+
np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
if (!np) {
pr_err("missing testcase data\n");
@@ -1001,15 +1007,19 @@ static void __init of_unittest_platform_populate(void)
pdev = of_find_device_by_node(np);
unittest(pdev, "device 1 creation failed\n");
- irq = platform_get_irq(pdev, 0);
- unittest(irq == -EPROBE_DEFER, "device deferred probe failed - %d\n", irq);
+ if (!(of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)) {
+ irq = platform_get_irq(pdev, 0);
+ unittest(irq == -EPROBE_DEFER,
+ "device deferred probe failed - %d\n", irq);
- /* Test that a parsing failure does not return -EPROBE_DEFER */
- np = of_find_node_by_path("/testcase-data/testcase-device2");
- pdev = of_find_device_by_node(np);
- unittest(pdev, "device 2 creation failed\n");
- irq = platform_get_irq(pdev, 0);
- unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq);
+ /* Test that a parsing failure does not return -EPROBE_DEFER */
+ np = of_find_node_by_path("/testcase-data/testcase-device2");
+ pdev = of_find_device_by_node(np);
+ unittest(pdev, "device 2 creation failed\n");
+ irq = platform_get_irq(pdev, 0);
+ unittest(irq < 0 && irq != -EPROBE_DEFER,
+ "device parsing error failed - %d\n", irq);
+ }
np = of_find_node_by_path("/testcase-data/platform-tests");
unittest(np, "No testcase data in device tree\n");
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 778c4f76a884..2153956a0b20 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -135,7 +135,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
if (val & PCIE_ATU_ENABLE)
return;
- usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+ mdelay(LINK_WAIT_IATU);
}
dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
@@ -178,7 +178,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
if (val & PCIE_ATU_ENABLE)
return;
- usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+ mdelay(LINK_WAIT_IATU);
}
dev_err(pci->dev, "Outbound iATU is not being enabled\n");
}
@@ -236,7 +236,7 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
if (val & PCIE_ATU_ENABLE)
return 0;
- usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+ mdelay(LINK_WAIT_IATU);
}
dev_err(pci->dev, "Inbound iATU is not being enabled\n");
@@ -282,7 +282,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
if (val & PCIE_ATU_ENABLE)
return 0;
- usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+ mdelay(LINK_WAIT_IATU);
}
dev_err(pci->dev, "Inbound iATU is not being enabled\n");
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 96126fd8403c..9f1a5e399b70 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -26,8 +26,7 @@
/* Parameters for the waiting for iATU enabled routine */
#define LINK_WAIT_MAX_IATU_RETRIES 5
-#define LINK_WAIT_IATU_MIN 9000
-#define LINK_WAIT_IATU_MAX 10000
+#define LINK_WAIT_IATU 9
/* Synopsys-specific PCIe configuration registers */
#define PCIE_PORT_LINK_CONTROL 0x710
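The usleep_range() → mdelay() change matters because usleep_range() may sleep, which is illegal if these iATU polls run in atomic context (the apparent motivation here); mdelay() busy-waits instead, at 9 ms per retry. The resulting loop shape, with an illustrative register accessor:

    int retries;

    for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
            u32 val = read_atu_reg(pci, index);   /* hypothetical accessor */

            if (val & PCIE_ATU_ENABLE)
                    return;
            mdelay(LINK_WAIT_IATU);               /* busy-wait, atomic-safe */
    }
    dev_err(pci->dev, "iATU is not being enabled\n");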
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index c00f82cc54aa..9ba4d12c179c 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -89,6 +89,9 @@ static enum pci_protocol_version_t pci_protocol_version;
#define STATUS_REVISION_MISMATCH 0xC0000059
+/* space for a 32-bit serial number as string */
+#define SLOT_NAME_SIZE 11
+
/*
* Message Types
*/
@@ -494,6 +497,7 @@ struct hv_pci_dev {
struct list_head list_entry;
refcount_t refs;
enum hv_pcichild_state state;
+ struct pci_slot *pci_slot;
struct pci_function_description desc;
bool reported_missing;
struct hv_pcibus_device *hbus;
@@ -1457,6 +1461,36 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
spin_unlock_irqrestore(&hbus->device_list_lock, flags);
}
+/*
+ * Assign entries in sysfs pci slot directory.
+ *
+ * Note that this function does not need to lock the children list
+ * because it is called from pci_devices_present_work, which is
+ * serialized with hv_eject_device_work since both run on the same
+ * ordered workqueue. Therefore the hbus->children list will not
+ * change even when pci_create_slot() sleeps.
+ */
+static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
+{
+ struct hv_pci_dev *hpdev;
+ char name[SLOT_NAME_SIZE];
+ int slot_nr;
+
+ list_for_each_entry(hpdev, &hbus->children, list_entry) {
+ if (hpdev->pci_slot)
+ continue;
+
+ slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
+ snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
+ hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr,
+ name, NULL);
+ if (IS_ERR(hpdev->pci_slot)) {
+ pr_warn("pci_create_slot %s failed\n", name);
+ hpdev->pci_slot = NULL;
+ }
+ }
+}
+
/**
* create_root_hv_pci_bus() - Expose a new root PCI bus
* @hbus: Root PCI bus, as understood by this driver
@@ -1480,6 +1514,7 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
pci_lock_rescan_remove();
pci_scan_child_bus(hbus->pci_bus);
pci_bus_assign_resources(hbus->pci_bus);
+ hv_pci_assign_slots(hbus);
pci_bus_add_devices(hbus->pci_bus);
pci_unlock_rescan_remove();
hbus->state = hv_pcibus_installed;
@@ -1742,6 +1777,7 @@ static void pci_devices_present_work(struct work_struct *work)
*/
pci_lock_rescan_remove();
pci_scan_child_bus(hbus->pci_bus);
+ hv_pci_assign_slots(hbus);
pci_unlock_rescan_remove();
break;
@@ -1858,6 +1894,9 @@ static void hv_eject_device_work(struct work_struct *work)
list_del(&hpdev->list_entry);
spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
+ if (hpdev->pci_slot)
+ pci_destroy_slot(hpdev->pci_slot);
+
memset(&ctxt, 0, sizeof(ctxt));
ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index 50eb0729385b..a41d79b8d46a 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -1145,7 +1145,6 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
{
struct device *dev = &pcie->pdev->dev;
struct device_node *np = dev->of_node;
- unsigned int i;
int ret;
INIT_LIST_HEAD(&pcie->resources);
@@ -1179,13 +1178,58 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
resource_size(&pcie->io) - 1);
pcie->realio.name = "PCI I/O";
+ pci_add_resource(&pcie->resources, &pcie->realio);
+ }
+
+ return devm_request_pci_bus_resources(dev, &pcie->resources);
+}
+
+/*
+ * This is a copy of pci_host_probe(), except that it does the I/O
+ * remap as the last step, once we are sure we won't fail.
+ *
+ * It should be removed once the I/O remap error handling issue has
+ * been sorted out.
+ */
+static int mvebu_pci_host_probe(struct pci_host_bridge *bridge)
+{
+ struct mvebu_pcie *pcie;
+ struct pci_bus *bus, *child;
+ int ret;
+
+ ret = pci_scan_root_bus_bridge(bridge);
+ if (ret < 0) {
+ dev_err(bridge->dev.parent, "Scanning root bridge failed\n");
+ return ret;
+ }
+
+ pcie = pci_host_bridge_priv(bridge);
+ if (resource_size(&pcie->io) != 0) {
+ unsigned int i;
+
for (i = 0; i < resource_size(&pcie->realio); i += SZ_64K)
pci_ioremap_io(i, pcie->io.start + i);
+ }
- pci_add_resource(&pcie->resources, &pcie->realio);
+ bus = bridge->bus;
+
+ /*
+ * We insert PCI resources into the iomem_resource and
+ * ioport_resource trees in either pci_bus_claim_resources()
+ * or pci_bus_assign_resources().
+ */
+ if (pci_has_flag(PCI_PROBE_ONLY)) {
+ pci_bus_claim_resources(bus);
+ } else {
+ pci_bus_size_bridges(bus);
+ pci_bus_assign_resources(bus);
+
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
}
- return devm_request_pci_bus_resources(dev, &pcie->resources);
+ pci_bus_add_devices(bus);
+ return 0;
}
static int mvebu_pcie_probe(struct platform_device *pdev)
@@ -1268,7 +1312,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
bridge->align_resource = mvebu_pcie_align_resource;
bridge->msi = pcie->msi;
- return pci_host_probe(bridge);
+ return mvebu_pci_host_probe(bridge);
}
static const struct of_device_id mvebu_pcie_of_match_table[] = {
diff --git a/drivers/pci/controller/pcie-cadence.c b/drivers/pci/controller/pcie-cadence.c
index 86f1b002c846..975bcdd6b5c0 100644
--- a/drivers/pci/controller/pcie-cadence.c
+++ b/drivers/pci/controller/pcie-cadence.c
@@ -180,11 +180,11 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
return 0;
}
- phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL);
+ phy = devm_kcalloc(dev, phy_count, sizeof(*phy), GFP_KERNEL);
if (!phy)
return -ENOMEM;
- link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL);
+ link = devm_kcalloc(dev, phy_count, sizeof(*link), GFP_KERNEL);
if (!link)
return -ENOMEM;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index ef0b1b6ba86f..12afa7fdf77e 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -457,17 +457,18 @@ static void acpiphp_native_scan_bridge(struct pci_dev *bridge)
/**
* enable_slot - enable, configure a slot
* @slot: slot to be enabled
+ * @bridge: true if enable is for the whole bridge (not a single slot)
*
* This function should be called per *physical slot*,
* not per each slot object in ACPI namespace.
*/
-static void enable_slot(struct acpiphp_slot *slot)
+static void enable_slot(struct acpiphp_slot *slot, bool bridge)
{
struct pci_dev *dev;
struct pci_bus *bus = slot->bus;
struct acpiphp_func *func;
- if (bus->self && hotplug_is_native(bus->self)) {
+ if (bridge && bus->self && hotplug_is_native(bus->self)) {
/*
* If native hotplug is used, it will take care of hotplug
* slot management and resource allocation for hotplug
@@ -701,7 +702,7 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
trim_stale_devices(dev);
/* configure all functions */
- enable_slot(slot);
+ enable_slot(slot, true);
} else {
disable_slot(slot);
}
@@ -785,7 +786,7 @@ static void hotplug_event(u32 type, struct acpiphp_context *context)
if (bridge)
acpiphp_check_bridge(bridge);
else if (!(slot->flags & SLOT_IS_GOING_AWAY))
- enable_slot(slot);
+ enable_slot(slot, false);
break;
@@ -973,7 +974,7 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
/* configure all functions */
if (!(slot->flags & SLOT_ENABLED))
- enable_slot(slot);
+ enable_slot(slot, false);
pci_unlock_rescan_remove();
return 0;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 7136e3430925..a938abdb41ce 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -496,7 +496,7 @@ int pciehp_power_on_slot(struct slot *slot)
u16 slot_status;
int retval;
- /* Clear sticky power-fault bit from previous power failures */
+ /* Clear power-fault bit from previous power failures */
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
if (slot_status & PCI_EXP_SLTSTA_PFD)
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
@@ -646,6 +646,14 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
pciehp_handle_button_press(slot);
}
+ /* Check Power Fault Detected */
+ if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
+ ctrl->power_fault_detected = 1;
+ ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
+ pciehp_set_attention_status(slot, 1);
+ pciehp_green_led_off(slot);
+ }
+
/*
* Disable requests have higher priority than Presence Detect Changed
* or Data Link Layer State Changed events.
@@ -657,14 +665,6 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
pciehp_handle_presence_or_link_change(slot, events);
up_read(&ctrl->reset_lock);
- /* Check Power Fault Detected */
- if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
- ctrl->power_fault_detected = 1;
- ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
- pciehp_set_attention_status(slot, 1);
- pciehp_green_led_off(slot);
- }
-
pci_config_pm_runtime_put(pdev);
wake_up(&ctrl->requester);
return IRQ_HANDLED;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 29ff9619b5fa..51b6c81671c1 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1289,12 +1289,12 @@ int pci_save_state(struct pci_dev *dev)
EXPORT_SYMBOL(pci_save_state);
static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
- u32 saved_val, int retry)
+ u32 saved_val, int retry, bool force)
{
u32 val;
pci_read_config_dword(pdev, offset, &val);
- if (val == saved_val)
+ if (!force && val == saved_val)
return;
for (;;) {
@@ -1313,25 +1313,36 @@ static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
}
static void pci_restore_config_space_range(struct pci_dev *pdev,
- int start, int end, int retry)
+ int start, int end, int retry,
+ bool force)
{
int index;
for (index = end; index >= start; index--)
pci_restore_config_dword(pdev, 4 * index,
pdev->saved_config_space[index],
- retry);
+ retry, force);
}
static void pci_restore_config_space(struct pci_dev *pdev)
{
if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
- pci_restore_config_space_range(pdev, 10, 15, 0);
+ pci_restore_config_space_range(pdev, 10, 15, 0, false);
/* Restore BARs before the command register. */
- pci_restore_config_space_range(pdev, 4, 9, 10);
- pci_restore_config_space_range(pdev, 0, 3, 0);
+ pci_restore_config_space_range(pdev, 4, 9, 10, false);
+ pci_restore_config_space_range(pdev, 0, 3, 0, false);
+ } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ pci_restore_config_space_range(pdev, 12, 15, 0, false);
+
+ /*
+ * Force rewriting of prefetch registers to avoid S3 resume
+ * issues on Intel PCI bridges that occur when these
+ * registers are not explicitly written.
+ */
+ pci_restore_config_space_range(pdev, 9, 11, 0, true);
+ pci_restore_config_space_range(pdev, 0, 8, 0, false);
} else {
- pci_restore_config_space_range(pdev, 0, 15, 0);
+ pci_restore_config_space_range(pdev, 0, 15, 0, false);
}
}
@@ -4547,6 +4558,7 @@ int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
}
+EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
@@ -5200,7 +5212,7 @@ static int __pci_reset_bus(struct pci_bus *bus)
*/
int pci_reset_bus(struct pci_dev *pdev)
{
- return pci_probe_reset_slot(pdev->slot) ?
+ return (!pci_probe_reset_slot(pdev->slot)) ?
__pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
}
EXPORT_SYMBOL_GPL(pci_reset_bus);
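The pci_reset_bus() fix follows the usual probe convention: pci_probe_reset_slot() returns 0 when a slot reset is supported and a negative errno otherwise, so the slot path must be taken on zero. An equivalent, more explicit form:

    if (pci_probe_reset_slot(pdev->slot) == 0)
            return __pci_reset_slot(pdev->slot);   /* slot reset supported */
    return __pci_reset_bus(pdev->bus);             /* fall back to bus reset */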
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ec784009a36b..201f9e5ff55c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2074,6 +2074,7 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_PASID
struct pci_dev *bridge;
+ int pcie_type;
u32 cap;
if (!pci_is_pcie(dev))
@@ -2083,7 +2084,9 @@ static void pci_configure_eetlp_prefix(struct pci_dev *dev)
if (!(cap & PCI_EXP_DEVCAP2_EE_PREFIX))
return;
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
+ pcie_type = pci_pcie_type(dev);
+ if (pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
+ pcie_type == PCI_EXP_TYPE_RC_END)
dev->eetlp_prefix_path = 1;
else {
bridge = pci_upstream_bridge(dev);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index ef7143a274e0..6bc27b7fd452 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4355,11 +4355,6 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
*
* 0x9d10-0x9d1b PCI Express Root port #{1-12}
*
- * The 300 series chipset suffers from the same bug so include those root
- * ports here as well.
- *
- * 0xa32c-0xa343 PCI Express Root port #{0-24}
- *
* [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
* [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
* [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
@@ -4377,7 +4372,6 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */
- case 0xa32c ... 0xa343: /* 300 series */
return true;
}
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 9940cc70f38b..54a8b30dda38 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -14,6 +14,8 @@
#include <linux/poll.h>
#include <linux/wait.h>
+#include <linux/nospec.h>
+
MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
@@ -909,6 +911,8 @@ static int ioctl_port_to_pff(struct switchtec_dev *stdev,
default:
if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
return -EINVAL;
+ p.port = array_index_nospec(p.port,
+ ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
break;
}
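array_index_nospec(index, size) from <linux/nospec.h> clamps index to [0, size) even under speculative execution, closing the Spectre-v1 gap between a bounds check and the dependent array load. Since p.port is 1-based here, the clamp bound is ARRAY_SIZE() + 1 and the access subtracts one. The generic shape, with hypothetical names:

    #include <linux/nospec.h>

    static const u32 table[16];

    static u32 read_entry(unsigned int idx)
    {
            if (idx >= ARRAY_SIZE(table))
                    return 0;
            /* clamp before use so a mispredicted bounds check cannot
             * feed an out-of-bounds speculative load */
            idx = array_index_nospec(idx, ARRAY_SIZE(table));
            return table[idx];
    }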
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 7f01f6f60b87..d0b7dd8fb184 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -485,7 +485,13 @@ static int armpmu_filter_match(struct perf_event *event)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
unsigned int cpu = smp_processor_id();
- return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
+ int ret;
+
+ ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus);
+ if (ret && armpmu->filter_match)
+ return armpmu->filter_match(event);
+
+ return ret;
}
static ssize_t armpmu_cpumask_show(struct device *dev,
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 96075cecb0ae..933bd8410fc2 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -77,14 +77,14 @@ static int pmu_parse_irq_affinity(struct device_node *node, int i)
dn = of_parse_phandle(node, "interrupt-affinity", i);
if (!dn) {
- pr_warn("failed to parse interrupt-affinity[%d] for %s\n",
- i, node->name);
+ pr_warn("failed to parse interrupt-affinity[%d] for %pOFn\n",
+ i, node);
return -EINVAL;
}
cpu = of_cpu_node_to_id(dn);
if (cpu < 0) {
- pr_warn("failed to find logical CPU for %s\n", dn->name);
+ pr_warn("failed to find logical CPU for %pOFn\n", dn);
cpu = nr_cpu_ids;
}
diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
index ece41fb2848f..c4f4d904e4a6 100644
--- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c
+++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
@@ -1040,7 +1040,7 @@ static int madera_pin_probe(struct platform_device *pdev)
}
/* if the configuration is provided through pdata, apply it */
- if (pdata) {
+ if (pdata && pdata->gpio_configs) {
ret = pinctrl_register_mappings(pdata->gpio_configs,
pdata->n_gpio_configs);
if (ret) {
diff --git a/drivers/pinctrl/intel/pinctrl-cannonlake.c b/drivers/pinctrl/intel/pinctrl-cannonlake.c
index fb1afe55bf53..e7f45d96b0cb 100644
--- a/drivers/pinctrl/intel/pinctrl-cannonlake.c
+++ b/drivers/pinctrl/intel/pinctrl-cannonlake.c
@@ -15,10 +15,11 @@
#include "pinctrl-intel.h"
-#define CNL_PAD_OWN 0x020
-#define CNL_PADCFGLOCK 0x080
-#define CNL_HOSTSW_OWN 0x0b0
-#define CNL_GPI_IE 0x120
+#define CNL_PAD_OWN 0x020
+#define CNL_PADCFGLOCK 0x080
+#define CNL_LP_HOSTSW_OWN 0x0b0
+#define CNL_H_HOSTSW_OWN 0x0c0
+#define CNL_GPI_IE 0x120
#define CNL_GPP(r, s, e, g) \
{ \
@@ -30,12 +31,12 @@
#define CNL_NO_GPIO -1
-#define CNL_COMMUNITY(b, s, e, g) \
+#define CNL_COMMUNITY(b, s, e, o, g) \
{ \
.barno = (b), \
.padown_offset = CNL_PAD_OWN, \
.padcfglock_offset = CNL_PADCFGLOCK, \
- .hostown_offset = CNL_HOSTSW_OWN, \
+ .hostown_offset = (o), \
.ie_offset = CNL_GPI_IE, \
.pin_base = (s), \
.npins = ((e) - (s) + 1), \
@@ -43,6 +44,12 @@
.ngpps = ARRAY_SIZE(g), \
}
+#define CNLLP_COMMUNITY(b, s, e, g) \
+ CNL_COMMUNITY(b, s, e, CNL_LP_HOSTSW_OWN, g)
+
+#define CNLH_COMMUNITY(b, s, e, g) \
+ CNL_COMMUNITY(b, s, e, CNL_H_HOSTSW_OWN, g)
+
/* Cannon Lake-H */
static const struct pinctrl_pin_desc cnlh_pins[] = {
/* GPP_A */
@@ -379,7 +386,7 @@ static const struct intel_padgroup cnlh_community1_gpps[] = {
static const struct intel_padgroup cnlh_community3_gpps[] = {
CNL_GPP(0, 155, 178, 192), /* GPP_K */
CNL_GPP(1, 179, 202, 224), /* GPP_H */
- CNL_GPP(2, 203, 215, 258), /* GPP_E */
+ CNL_GPP(2, 203, 215, 256), /* GPP_E */
CNL_GPP(3, 216, 239, 288), /* GPP_F */
CNL_GPP(4, 240, 248, CNL_NO_GPIO), /* SPI */
};
@@ -442,10 +449,10 @@ static const struct intel_function cnlh_functions[] = {
};
static const struct intel_community cnlh_communities[] = {
- CNL_COMMUNITY(0, 0, 50, cnlh_community0_gpps),
- CNL_COMMUNITY(1, 51, 154, cnlh_community1_gpps),
- CNL_COMMUNITY(2, 155, 248, cnlh_community3_gpps),
- CNL_COMMUNITY(3, 249, 298, cnlh_community4_gpps),
+ CNLH_COMMUNITY(0, 0, 50, cnlh_community0_gpps),
+ CNLH_COMMUNITY(1, 51, 154, cnlh_community1_gpps),
+ CNLH_COMMUNITY(2, 155, 248, cnlh_community3_gpps),
+ CNLH_COMMUNITY(3, 249, 298, cnlh_community4_gpps),
};
static const struct intel_pinctrl_soc_data cnlh_soc_data = {
@@ -803,9 +810,9 @@ static const struct intel_padgroup cnllp_community4_gpps[] = {
};
static const struct intel_community cnllp_communities[] = {
- CNL_COMMUNITY(0, 0, 67, cnllp_community0_gpps),
- CNL_COMMUNITY(1, 68, 180, cnllp_community1_gpps),
- CNL_COMMUNITY(2, 181, 243, cnllp_community4_gpps),
+ CNLLP_COMMUNITY(0, 0, 67, cnllp_community0_gpps),
+ CNLLP_COMMUNITY(1, 68, 180, cnllp_community1_gpps),
+ CNLLP_COMMUNITY(2, 181, 243, cnllp_community4_gpps),
};
static const struct intel_pinctrl_soc_data cnllp_soc_data = {
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 62b009b27eda..1ea3438ea67e 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -747,13 +747,63 @@ static const struct pinctrl_desc intel_pinctrl_desc = {
.owner = THIS_MODULE,
};
+/**
+ * intel_gpio_to_pin() - Translate from GPIO offset to pin number
+ * @pctrl: Pinctrl structure
+ * @offset: GPIO offset from gpiolib
+ * @community: Community is filled here if not %NULL
+ * @padgrp: Pad group is filled here if not %NULL
+ *
+ * When coming through gpiolib irqchip, the GPIO offset is not
+ * automatically translated to pinctrl pin number. This function can be
+ * used to find out the corresponding pinctrl pin.
+ */
+static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned offset,
+ const struct intel_community **community,
+ const struct intel_padgroup **padgrp)
+{
+ int i;
+
+ for (i = 0; i < pctrl->ncommunities; i++) {
+ const struct intel_community *comm = &pctrl->communities[i];
+ int j;
+
+ for (j = 0; j < comm->ngpps; j++) {
+ const struct intel_padgroup *pgrp = &comm->gpps[j];
+
+ if (pgrp->gpio_base < 0)
+ continue;
+
+ if (offset >= pgrp->gpio_base &&
+ offset < pgrp->gpio_base + pgrp->size) {
+ int pin;
+
+ pin = pgrp->base + offset - pgrp->gpio_base;
+ if (community)
+ *community = comm;
+ if (padgrp)
+ *padgrp = pgrp;
+
+ return pin;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
static int intel_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
void __iomem *reg;
u32 padcfg0;
+ int pin;
- reg = intel_get_padcfg(pctrl, offset, PADCFG0);
+ pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL);
+ if (pin < 0)
+ return -EINVAL;
+
+ reg = intel_get_padcfg(pctrl, pin, PADCFG0);
if (!reg)
return -EINVAL;
@@ -770,8 +820,13 @@ static void intel_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
unsigned long flags;
void __iomem *reg;
u32 padcfg0;
+ int pin;
- reg = intel_get_padcfg(pctrl, offset, PADCFG0);
+ pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL);
+ if (pin < 0)
+ return;
+
+ reg = intel_get_padcfg(pctrl, pin, PADCFG0);
if (!reg)
return;
@@ -790,8 +845,13 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
void __iomem *reg;
u32 padcfg0;
+ int pin;
- reg = intel_get_padcfg(pctrl, offset, PADCFG0);
+ pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL);
+ if (pin < 0)
+ return -EINVAL;
+
+ reg = intel_get_padcfg(pctrl, pin, PADCFG0);
if (!reg)
return -EINVAL;
@@ -827,81 +887,6 @@ static const struct gpio_chip intel_gpio_chip = {
.set_config = gpiochip_generic_config,
};
-/**
- * intel_gpio_to_pin() - Translate from GPIO offset to pin number
- * @pctrl: Pinctrl structure
- * @offset: GPIO offset from gpiolib
- * @commmunity: Community is filled here if not %NULL
- * @padgrp: Pad group is filled here if not %NULL
- *
- * When coming through gpiolib irqchip, the GPIO offset is not
- * automatically translated to pinctrl pin number. This function can be
- * used to find out the corresponding pinctrl pin.
- */
-static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned offset,
- const struct intel_community **community,
- const struct intel_padgroup **padgrp)
-{
- int i;
-
- for (i = 0; i < pctrl->ncommunities; i++) {
- const struct intel_community *comm = &pctrl->communities[i];
- int j;
-
- for (j = 0; j < comm->ngpps; j++) {
- const struct intel_padgroup *pgrp = &comm->gpps[j];
-
- if (pgrp->gpio_base < 0)
- continue;
-
- if (offset >= pgrp->gpio_base &&
- offset < pgrp->gpio_base + pgrp->size) {
- int pin;
-
- pin = pgrp->base + offset - pgrp->gpio_base;
- if (community)
- *community = comm;
- if (padgrp)
- *padgrp = pgrp;
-
- return pin;
- }
- }
- }
-
- return -EINVAL;
-}
-
-static int intel_gpio_irq_reqres(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
- int pin;
- int ret;
-
- pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
- if (pin >= 0) {
- ret = gpiochip_lock_as_irq(gc, pin);
- if (ret) {
- dev_err(pctrl->dev, "unable to lock HW IRQ %d for IRQ\n",
- pin);
- return ret;
- }
- }
- return 0;
-}
-
-static void intel_gpio_irq_relres(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
- int pin;
-
- pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
- if (pin >= 0)
- gpiochip_unlock_as_irq(gc, pin);
-}
-
static void intel_gpio_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1117,8 +1102,6 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
static struct irq_chip intel_gpio_irqchip = {
.name = "intel-gpio",
- .irq_request_resources = intel_gpio_irq_reqres,
- .irq_release_resources = intel_gpio_irq_relres,
.irq_enable = intel_gpio_irq_enable,
.irq_ack = intel_gpio_irq_ack,
.irq_mask = intel_gpio_irq_mask,
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 41ccc759b8b8..1425c2874d40 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -348,21 +348,12 @@ static void amd_gpio_irq_enable(struct irq_data *d)
unsigned long flags;
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
- u32 mask = BIT(INTERRUPT_ENABLE_OFF) | BIT(INTERRUPT_MASK_OFF);
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
pin_reg |= BIT(INTERRUPT_ENABLE_OFF);
pin_reg |= BIT(INTERRUPT_MASK_OFF);
writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
- /*
- * When debounce logic is enabled it takes ~900 us before interrupts
- * can be enabled. During this "debounce warm up" period the
- * "INTERRUPT_ENABLE" bit will read as 0. Poll the bit here until it
- * reads back as 1, signaling that interrupts are now enabled.
- */
- while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask)
- continue;
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
}
@@ -426,7 +417,7 @@ static void amd_gpio_irq_eoi(struct irq_data *d)
static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
int ret = 0;
- u32 pin_reg;
+ u32 pin_reg, pin_reg_irq_en, mask;
unsigned long flags, irq_flags;
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
@@ -495,6 +486,28 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
}
pin_reg |= CLR_INTR_STAT << INTERRUPT_STS_OFF;
+ /*
+ * If WAKE_INT_MASTER_REG.MaskStsEn is set, a software write to the
+ * debounce registers of any GPIO will block wake/interrupt status
+ * generation for *all* GPIOs for a length of time that depends on
+ * WAKE_INT_MASTER_REG.MaskStsLength[11:0]. During this period the
+ * INTERRUPT_ENABLE bit will read as 0.
+ *
+ * We temporarily enable the irq for the GPIO whose configuration is
+ * changing, wait for the enable bit to read back as 1 (meaning the
+ * debounce has settled), and then disable the irq again.
+ * We do this polling with the spinlock held to ensure other GPIO
+ * access routines do not read an incorrect value for the irq enable
+ * bit of other GPIOs, and we keep the GPIO masked while polling to
+ * avoid spurious irqs.
+ */
+ mask = BIT(INTERRUPT_ENABLE_OFF);
+ pin_reg_irq_en = pin_reg;
+ pin_reg_irq_en |= mask;
+ pin_reg_irq_en &= ~BIT(INTERRUPT_MASK_OFF);
+ writel(pin_reg_irq_en, gpio_dev->base + (d->hwirq)*4);
+ while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask)
+ continue;
writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index 6a1b6058b991..628817c40e3b 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -793,7 +793,7 @@ static int ingenic_pinctrl_probe(struct platform_device *pdev)
err = pinctrl_generic_add_group(jzpc->pctl, group->name,
group->pins, group->num_pins, group->data);
- if (err) {
+ if (err < 0) {
dev_err(dev, "Failed to register group %s\n",
group->name);
return err;
@@ -806,7 +806,7 @@ static int ingenic_pinctrl_probe(struct platform_device *pdev)
err = pinmux_generic_add_function(jzpc->pctl, func->name,
func->group_names, func->num_group_names,
func->data);
- if (err) {
+ if (err < 0) {
dev_err(dev, "Failed to register function %s\n",
func->name);
return err;
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index 4a8a8efadefa..cf73a403d22d 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -636,6 +636,14 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp)
return err;
}
+ return 0;
+}
+
+static int mcp23s08_irqchip_setup(struct mcp23s08 *mcp)
+{
+ struct gpio_chip *chip = &mcp->chip;
+ int err;
+
err = gpiochip_irqchip_add_nested(chip,
&mcp23s08_irq_chip,
0,
@@ -912,7 +920,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
}
if (mcp->irq && mcp->irq_controller) {
- ret = mcp23s08_irq_setup(mcp);
+ ret = mcp23s08_irqchip_setup(mcp);
if (ret)
goto fail;
}
@@ -944,6 +952,9 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
goto fail;
}
+ if (mcp->irq)
+ ret = mcp23s08_irq_setup(mcp);
+
fail:
if (ret < 0)
dev_dbg(dev, "can't setup chip %d, --> %d\n", addr, ret);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 2155a30c282b..5d72ffad32c2 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -634,6 +634,29 @@ static void msm_gpio_irq_mask(struct irq_data *d)
raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->intr_cfg_reg);
+ /*
+ * There are two bits that control interrupt forwarding to the CPU. The
+ * RAW_STATUS_EN bit causes the level or edge sensed on the line to be
+ * latched into the interrupt status register when the hardware detects
+ * an irq that it's configured for (either edge for edge type or level
+ * for level type irq). The 'non-raw' status enable bit causes the
+ * hardware to assert the summary interrupt to the CPU if the latched
+ * status bit is set. There is a bug, though: toggling the
+ * RAW_STATUS_EN bit appears to make the edge detection logic latch
+ * the status bit spuriously even when no edge occurred, so we must
+ * not touch that bit for edge type irqs and instead keep it set,
+ * which also ensures edges are latched while the line is masked.
+ *
+ * To make matters more complicated, leaving the RAW_STATUS_EN bit
+ * enabled all the time causes level interrupts to re-latch into the
+ * status register because the level is still present on the line after
+ * we ack it. We clear the raw status enable bit during mask here and
+ * set the bit on unmask so the interrupt can't latch into the hardware
+ * while it's masked.
+ */
+ if (irqd_get_trigger_type(d) & IRQ_TYPE_LEVEL_MASK)
+ val &= ~BIT(g->intr_raw_status_bit);
+
val &= ~BIT(g->intr_enable_bit);
writel(val, pctrl->regs + g->intr_cfg_reg);
@@ -655,6 +678,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
raw_spin_lock_irqsave(&pctrl->lock, flags);
val = readl(pctrl->regs + g->intr_cfg_reg);
+ val |= BIT(g->intr_raw_status_bit);
val |= BIT(g->intr_enable_bit);
writel(val, pctrl->regs + g->intr_cfg_reg);
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 398393ab5df8..b6fd4838f60f 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -520,7 +520,7 @@ static int get_next_event_xfer(struct cros_ec_device *ec_dev,
ret = cros_ec_cmd_xfer(ec_dev, msg);
if (ret > 0) {
ec_dev->event_size = ret - 1;
- memcpy(&ec_dev->event_data, msg->data, ec_dev->event_size);
+ memcpy(&ec_dev->event_data, msg->data, ret);
}
return ret;
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
index d975462a4c57..f10af5c383c5 100644
--- a/drivers/platform/x86/alienware-wmi.c
+++ b/drivers/platform/x86/alienware-wmi.c
@@ -536,6 +536,7 @@ static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args,
if (obj && obj->type == ACPI_TYPE_INTEGER)
*out_data = (u32) obj->integer.value;
}
+ kfree(output.pointer);
return status;
}
diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c
index 88afe5651d24..cf2229ece9ff 100644
--- a/drivers/platform/x86/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell-smbios-wmi.c
@@ -78,6 +78,7 @@ static int run_smbios_call(struct wmi_device *wdev)
dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n",
priv->buf->std.output[0], priv->buf->std.output[1],
priv->buf->std.output[2], priv->buf->std.output[3]);
+ kfree(output.pointer);
return 0;
}
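Both WMI fixes above plug the same leak: calls that return data through a struct acpi_buffer initialized with ACPI_ALLOCATE_BUFFER hand ownership of the allocation to the caller, so output.pointer must be freed on every exit path, success included. Hedged sketch (GUID and method arguments are placeholders):

    struct acpi_buffer in = { 0, NULL };                 /* placeholder input */
    struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL };
    acpi_status status;

    status = wmi_evaluate_method(EXAMPLE_GUID, 0, 1, &in, &out);
    if (ACPI_SUCCESS(status)) {
            union acpi_object *obj = out.pointer;
            /* ... consume obj ... */
    }
    kfree(out.pointer);   /* required even on success */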
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 01b0e2bb3319..2012551d93e0 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -24,6 +24,8 @@
#include <linux/slab.h>
#include <linux/timekeeping.h>
+#include <linux/nospec.h>
+
#include "ptp_private.h"
static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
@@ -248,6 +250,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
err = -EINVAL;
break;
}
+ pin_index = array_index_nospec(pin_index, ops->n_pins);
if (mutex_lock_interruptible(&ptp->pincfg_mux))
return -ERESTARTSYS;
pd = ops->pin_config[pin_index];
@@ -266,6 +269,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
err = -EINVAL;
break;
}
+ pin_index = array_index_nospec(pin_index, ops->n_pins);
if (mutex_lock_interruptible(&ptp->pincfg_mux))
return -ERESTARTSYS;
err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
diff --git a/drivers/regulator/bd71837-regulator.c b/drivers/regulator/bd71837-regulator.c
index 0f8ac8dec3e1..a1bd8aaf4d98 100644
--- a/drivers/regulator/bd71837-regulator.c
+++ b/drivers/regulator/bd71837-regulator.c
@@ -569,6 +569,25 @@ static int bd71837_probe(struct platform_device *pdev)
BD71837_REG_REGLOCK);
}
+ /*
+ * There is a HW quirk in BD71837: the shutdown sequence timings for
+ * bucks/LDOs controlled via the register interface are changed. At
+ * PMIC poweroff the voltage for BUCK6/7 is cut immediately at the
+ * beginning of the shutdown sequence. As bucks 6 and 7 are parent
+ * supplies for LDO5 and LDO6, this causes the LDO5/6 voltage
+ * monitoring to erroneously detect under-voltage and force the PMIC
+ * into the emergency state instead of poweroff. To avoid this, we
+ * disable voltage monitoring for LDO5 and LDO6.
+ */
+ err = regmap_update_bits(pmic->mfd->regmap, BD718XX_REG_MVRFLTMASK2,
+ BD718XX_LDO5_VRMON80 | BD718XX_LDO6_VRMON80,
+ BD718XX_LDO5_VRMON80 | BD718XX_LDO6_VRMON80);
+ if (err) {
+ dev_err(&pmic->pdev->dev,
+ "Failed to disable voltage monitoring\n");
+ goto err;
+ }
+
for (i = 0; i < ARRAY_SIZE(pmic_regulator_inits); i++) {
struct regulator_desc *desc;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index bb1324f93143..9577d8941846 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3161,7 +3161,7 @@ static inline int regulator_suspend_toggle(struct regulator_dev *rdev,
if (!rstate->changeable)
return -EPERM;
- rstate->enabled = en;
+ rstate->enabled = (en) ? ENABLE_IN_SUSPEND : DISABLE_IN_SUSPEND;
return 0;
}
@@ -4395,13 +4395,13 @@ regulator_register(const struct regulator_desc *regulator_desc,
!rdev->desc->fixed_uV)
rdev->is_switch = true;
+ dev_set_drvdata(&rdev->dev, rdev);
ret = device_register(&rdev->dev);
if (ret != 0) {
put_device(&rdev->dev);
goto unset_supplies;
}
- dev_set_drvdata(&rdev->dev, rdev);
rdev_init_debugfs(rdev);
/* try to resolve regulators supply since a new one was registered */
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 638f17d4c848..210fc20f7de7 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -213,8 +213,6 @@ static void of_get_regulation_constraints(struct device_node *np,
else if (of_property_read_bool(suspend_np,
"regulator-off-in-suspend"))
suspend_state->enabled = DISABLE_IN_SUSPEND;
- else
- suspend_state->enabled = DO_NOTHING_IN_SUSPEND;
if (!of_property_read_u32(np, "regulator-suspend-min-microvolt",
&pval))
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 7036a6c6f86f..5542d9eadfe0 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -76,7 +76,7 @@ int dasd_gendisk_alloc(struct dasd_block *block)
gdp->queue = block->request_queue;
block->gdp = gdp;
set_capacity(block->gdp, 0);
- device_add_disk(&base->cdev->dev, block->gdp);
+ device_add_disk(&base->cdev->dev, block->gdp, NULL);
return 0;
}
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 23e526cda5c1..4e8aedd50cb0 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -685,7 +685,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
}
get_device(&dev_info->dev);
- device_add_disk(&dev_info->dev, dev_info->gd);
+ device_add_disk(&dev_info->dev, dev_info->gd, NULL);
switch (dev_info->segment_type) {
case SEG_TYPE_SR:
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 98f66b7b6794..e01889394c84 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -500,7 +500,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
/* 512 byte sectors */
set_capacity(bdev->gendisk, scmdev->size >> 9);
- device_add_disk(&scmdev->dev, bdev->gendisk);
+ device_add_disk(&scmdev->dev, bdev->gendisk, NULL);
return 0;
out_queue:
diff --git a/drivers/s390/char/sclp_early_core.c b/drivers/s390/char/sclp_early_core.c
index eceba3858cef..2f61f5579aa5 100644
--- a/drivers/s390/char/sclp_early_core.c
+++ b/drivers/s390/char/sclp_early_core.c
@@ -210,11 +210,11 @@ static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
* Output one or more lines of text on the SCLP console (VT220 and /
* or line-mode).
*/
-void __sclp_early_printk(const char *str, unsigned int len)
+void __sclp_early_printk(const char *str, unsigned int len, unsigned int force)
{
int have_linemode, have_vt220;
- if (sclp_init_state != sclp_init_state_uninitialized)
+ if (!force && sclp_init_state != sclp_init_state_uninitialized)
return;
if (sclp_early_setup(0, &have_linemode, &have_vt220) != 0)
return;
@@ -227,5 +227,10 @@ void __sclp_early_printk(const char *str, unsigned int len)
void sclp_early_printk(const char *str)
{
- __sclp_early_printk(str, strlen(str));
+ __sclp_early_printk(str, strlen(str), 0);
+}
+
+void sclp_early_printk_force(const char *str)
+{
+ __sclp_early_printk(str, strlen(str), 1);
}
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index dbe7c7ac9ac8..fd77e46eb3b2 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -163,7 +163,7 @@ static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat,
for (i = 0; i < pat->pat_nr; i++, pa++)
for (j = 0; j < pa->pa_nr; j++)
- if (pa->pa_iova_pfn[i] == iova_pfn)
+ if (pa->pa_iova_pfn[j] == iova_pfn)
return true;
return false;
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 770fa9cfc310..f47d16b5810b 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -22,6 +22,7 @@
#include "vfio_ccw_private.h"
struct workqueue_struct *vfio_ccw_work_q;
+struct kmem_cache *vfio_ccw_io_region;
/*
* Helpers
@@ -79,7 +80,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
cp_update_scsw(&private->cp, &irb->scsw);
cp_free(&private->cp);
}
- memcpy(private->io_region.irb_area, irb, sizeof(*irb));
+ memcpy(private->io_region->irb_area, irb, sizeof(*irb));
if (private->io_trigger)
eventfd_signal(private->io_trigger, 1);
@@ -114,6 +115,14 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
if (!private)
return -ENOMEM;
+
+ private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
+ GFP_KERNEL | GFP_DMA);
+ if (!private->io_region) {
+ kfree(private);
+ return -ENOMEM;
+ }
+
private->sch = sch;
dev_set_drvdata(&sch->dev, private);
@@ -139,6 +148,7 @@ out_disable:
cio_disable_subchannel(sch);
out_free:
dev_set_drvdata(&sch->dev, NULL);
+ kmem_cache_free(vfio_ccw_io_region, private->io_region);
kfree(private);
return ret;
}
@@ -153,6 +163,7 @@ static int vfio_ccw_sch_remove(struct subchannel *sch)
dev_set_drvdata(&sch->dev, NULL);
+ kmem_cache_free(vfio_ccw_io_region, private->io_region);
kfree(private);
return 0;
@@ -232,10 +243,20 @@ static int __init vfio_ccw_sch_init(void)
if (!vfio_ccw_work_q)
return -ENOMEM;
+ vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
+ sizeof(struct ccw_io_region), 0,
+ SLAB_ACCOUNT, 0,
+ sizeof(struct ccw_io_region), NULL);
+ if (!vfio_ccw_io_region) {
+ destroy_workqueue(vfio_ccw_work_q);
+ return -ENOMEM;
+ }
+
isc_register(VFIO_CCW_ISC);
ret = css_driver_register(&vfio_ccw_sch_driver);
if (ret) {
isc_unregister(VFIO_CCW_ISC);
+ kmem_cache_destroy(vfio_ccw_io_region);
destroy_workqueue(vfio_ccw_work_q);
}
@@ -246,6 +267,7 @@ static void __exit vfio_ccw_sch_exit(void)
{
css_driver_unregister(&vfio_ccw_sch_driver);
isc_unregister(VFIO_CCW_ISC);
+ kmem_cache_destroy(vfio_ccw_io_region);
destroy_workqueue(vfio_ccw_work_q);
}
module_init(vfio_ccw_sch_init);
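Moving the io_region out of the private structure into its own slab cache is what lets kmem_cache_create_usercopy() whitelist exactly that object for copy_to_user()/copy_from_user() under hardened usercopy. The argument order is (name, size, align, flags, useroffset, usersize, ctor); here the whole region is whitelisted. Sketch with hypothetical names:

    static struct kmem_cache *region_cache;

    static int __init example_init(void)
    {
            region_cache = kmem_cache_create_usercopy("example_io_region",
                            sizeof(struct ccw_io_region), 0, SLAB_ACCOUNT,
                            0, sizeof(struct ccw_io_region), NULL);
            return region_cache ? 0 : -ENOMEM;
    }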
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 797a82731159..f94aa01f9c36 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -93,13 +93,13 @@ static void fsm_io_error(struct vfio_ccw_private *private,
enum vfio_ccw_event event)
{
pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state);
- private->io_region.ret_code = -EIO;
+ private->io_region->ret_code = -EIO;
}
static void fsm_io_busy(struct vfio_ccw_private *private,
enum vfio_ccw_event event)
{
- private->io_region.ret_code = -EBUSY;
+ private->io_region->ret_code = -EBUSY;
}
static void fsm_disabled_irq(struct vfio_ccw_private *private,
@@ -126,7 +126,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
{
union orb *orb;
union scsw *scsw = &private->scsw;
- struct ccw_io_region *io_region = &private->io_region;
+ struct ccw_io_region *io_region = private->io_region;
struct mdev_device *mdev = private->mdev;
char *errstr = "request";
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index 41eeb57d68a3..f673e106c041 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -174,7 +174,7 @@ static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
return -EINVAL;
private = dev_get_drvdata(mdev_parent_dev(mdev));
- region = &private->io_region;
+ region = private->io_region;
if (copy_to_user(buf, (void *)region + *ppos, count))
return -EFAULT;
@@ -196,7 +196,7 @@ static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
if (private->state != VFIO_CCW_STATE_IDLE)
return -EACCES;
- region = &private->io_region;
+ region = private->io_region;
if (copy_from_user((void *)region + *ppos, buf, count))
return -EFAULT;
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index 78a66d96756b..078e46f9623d 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -41,7 +41,7 @@ struct vfio_ccw_private {
atomic_t avail;
struct mdev_device *mdev;
struct notifier_block nb;
- struct ccw_io_region io_region;
+ struct ccw_io_region *io_region;
struct channel_program cp;
struct irb irb;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index ec891bc7d10a..f039266b275d 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -872,8 +872,6 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
if (bits & 0x07)
return -EINVAL;
- memset(bitmap, 0, bits / 8);
-
if (str[0] == '0' && str[1] == 'x')
str++;
if (*str == 'x')
@@ -895,25 +893,23 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
}
/*
- * str2clrsetmasks() - parse bitmask argument and set the clear and
- * the set bitmap mask. A concatenation (done with ',') of these terms
- * is recognized:
+ * modify_bitmap() - parse bitmask argument and modify an existing
+ * bit mask accordingly. A concatenation (done with ',') of these
+ * terms is recognized:
* +<bitnr>[-<bitnr>] or -<bitnr>[-<bitnr>]
* <bitnr> may be any valid number (hex, decimal or octal) in the range
* 0...bits-1; the leading + or - is required. Here are some examples:
* +0-15,+32,-128,-0xFF
* -0-255,+1-16,+0x128
* +1,+2,+3,+4,-5,-7-10
- * Returns a clear and a set bitmask. Every positive value in the string
- * results in a bit set in the set mask and every negative value in the
- * string results in a bit SET in the clear mask. As a bit may be touched
- * more than once, the last 'operation' wins: +0-255,-128 = all but bit
- * 128 set in the set mask, only bit 128 set in the clear mask.
+ * Returns the new bitmap after all changes have been applied. Every
+ * positive value in the string will set a bit and every negative value
+ * in the string will clear a bit. As a bit may be touched more than once,
+ * the last 'operation' wins:
+ * +0-255,-128 = first bits 0-255 will be set, then bit 128 will be
+ * cleared again. All other bits are unmodified.
*/
-static int str2clrsetmasks(const char *str,
- unsigned long *clrmap,
- unsigned long *setmap,
- int bits)
+static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
{
int a, i, z;
char *np, sign;
@@ -922,9 +918,6 @@ static int str2clrsetmasks(const char *str,
if (bits & 0x07)
return -EINVAL;
- memset(clrmap, 0, bits / 8);
- memset(setmap, 0, bits / 8);
-
while (*str) {
sign = *str++;
if (sign != '+' && sign != '-')
@@ -940,13 +933,10 @@ static int str2clrsetmasks(const char *str,
str = np;
}
for (i = a; i <= z; i++)
- if (sign == '+') {
- set_bit_inv(i, setmap);
- clear_bit_inv(i, clrmap);
- } else {
- clear_bit_inv(i, setmap);
- set_bit_inv(i, clrmap);
- }
+ if (sign == '+')
+ set_bit_inv(i, bitmap);
+ else
+ clear_bit_inv(i, bitmap);
while (*str == ',' || *str == '\n')
str++;
}
@@ -970,44 +960,34 @@ static int process_mask_arg(const char *str,
unsigned long *bitmap, int bits,
struct mutex *lock)
{
- int i;
+ unsigned long *newmap, size;
+ int rc;
/* bits needs to be a multiple of 8 */
if (bits & 0x07)
return -EINVAL;
+ size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
+ newmap = kmalloc(size, GFP_KERNEL);
+ if (!newmap)
+ return -ENOMEM;
+ if (mutex_lock_interruptible(lock)) {
+ kfree(newmap);
+ return -ERESTARTSYS;
+ }
+
if (*str == '+' || *str == '-') {
- DECLARE_BITMAP(clrm, bits);
- DECLARE_BITMAP(setm, bits);
-
- i = str2clrsetmasks(str, clrm, setm, bits);
- if (i)
- return i;
- if (mutex_lock_interruptible(lock))
- return -ERESTARTSYS;
- for (i = 0; i < bits; i++) {
- if (test_bit_inv(i, clrm))
- clear_bit_inv(i, bitmap);
- if (test_bit_inv(i, setm))
- set_bit_inv(i, bitmap);
- }
+ memcpy(newmap, bitmap, size);
+ rc = modify_bitmap(str, newmap, bits);
} else {
- DECLARE_BITMAP(setm, bits);
-
- i = hex2bitmap(str, setm, bits);
- if (i)
- return i;
- if (mutex_lock_interruptible(lock))
- return -ERESTARTSYS;
- for (i = 0; i < bits; i++)
- if (test_bit_inv(i, setm))
- set_bit_inv(i, bitmap);
- else
- clear_bit_inv(i, bitmap);
+ memset(newmap, 0, size);
+ rc = hex2bitmap(str, newmap, bits);
}
+ if (rc == 0)
+ memcpy(bitmap, newmap, size);
mutex_unlock(lock);
-
- return 0;
+ kfree(newmap);
+ return rc;
}
/*
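The rewritten process_mask_arg() stages all changes in a temporary bitmap and commits it only on success, so a parse error can no longer leave the live mask half-updated, and the variable-length on-stack DECLARE_BITMAP usage is gone. The relative syntax applies left to right with the last operation winning; a sketch of the semantics (modify_bitmap() is the static helper above):

    DECLARE_BITMAP(map, 64) = { 0 };

    /* set bits 0..15, set bit 32, then clear bit 8 again */
    modify_bitmap("+0-15,+32,-8", map, 64);
    /* result: bits 0..7, 9..15 and 32 set; all other bits untouched */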
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 49f64eb3eab0..ffce6f39828a 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -25,6 +25,7 @@
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
@@ -609,7 +610,7 @@ static void qeth_put_reply(struct qeth_reply *reply)
static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
struct qeth_card *card)
{
- char *ipa_name;
+ const char *ipa_name;
int com = cmd->hdr.command;
ipa_name = qeth_get_ipa_cmd_name(com);
if (rc)
@@ -4699,7 +4700,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
priv.buffer_len = oat_data.buffer_len;
priv.response_len = 0;
- priv.buffer = kzalloc(oat_data.buffer_len, GFP_KERNEL);
+ priv.buffer = vzalloc(oat_data.buffer_len);
if (!priv.buffer) {
rc = -ENOMEM;
goto out;
@@ -4740,7 +4741,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
rc = -EFAULT;
out_free:
- kfree(priv.buffer);
+ vfree(priv.buffer);
out:
return rc;
}
@@ -5706,6 +5707,8 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->hw_features |= NETIF_F_SG;
dev->vlan_features |= NETIF_F_SG;
+ if (IS_IQD(card))
+ dev->features |= NETIF_F_SG;
}
return dev;
@@ -5768,8 +5771,10 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
qeth_update_from_chp_desc(card);
card->dev = qeth_alloc_netdev(card);
- if (!card->dev)
+ if (!card->dev) {
+ rc = -ENOMEM;
goto err_card;
+ }
qeth_determine_capabilities(card);
enforced_disc = qeth_enforce_discipline(card);
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index 5bcb8dafc3ee..e891c0b52f4c 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -148,10 +148,10 @@ EXPORT_SYMBOL_GPL(IPA_PDU_HEADER);
struct ipa_rc_msg {
enum qeth_ipa_return_codes rc;
- char *msg;
+ const char *msg;
};
-static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
+static const struct ipa_rc_msg qeth_ipa_rc_msg[] = {
{IPA_RC_SUCCESS, "success"},
{IPA_RC_NOTSUPP, "Command not supported"},
{IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"},
@@ -219,23 +219,23 @@ static struct ipa_rc_msg qeth_ipa_rc_msg[] = {
-char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
+const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc)
{
- int x = 0;
- qeth_ipa_rc_msg[sizeof(qeth_ipa_rc_msg) /
- sizeof(struct ipa_rc_msg) - 1].rc = rc;
- while (qeth_ipa_rc_msg[x].rc != rc)
- x++;
+ int x;
+
+ for (x = 0; x < ARRAY_SIZE(qeth_ipa_rc_msg) - 1; x++)
+ if (qeth_ipa_rc_msg[x].rc == rc)
+ return qeth_ipa_rc_msg[x].msg;
return qeth_ipa_rc_msg[x].msg;
}
struct ipa_cmd_names {
enum qeth_ipa_cmds cmd;
- char *name;
+ const char *name;
};
-static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
+static const struct ipa_cmd_names qeth_ipa_cmd_names[] = {
{IPA_CMD_STARTLAN, "startlan"},
{IPA_CMD_STOPLAN, "stoplan"},
{IPA_CMD_SETVMAC, "setvmac"},
@@ -267,13 +267,12 @@ static struct ipa_cmd_names qeth_ipa_cmd_names[] = {
{IPA_CMD_UNKNOWN, "unknown"},
};
-char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
+const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd)
{
- int x = 0;
- qeth_ipa_cmd_names[
- sizeof(qeth_ipa_cmd_names) /
- sizeof(struct ipa_cmd_names)-1].cmd = cmd;
- while (qeth_ipa_cmd_names[x].cmd != cmd)
- x++;
+ int x;
+
+ for (x = 0; x < ARRAY_SIZE(qeth_ipa_cmd_names) - 1; x++)
+ if (qeth_ipa_cmd_names[x].cmd == cmd)
+ return qeth_ipa_cmd_names[x].name;
return qeth_ipa_cmd_names[x].name;
}
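Both lookup rewrites above drop a trick that wrote the searched key into the table's final entry to guarantee loop termination, which is why the tables could not be const. Bounding the loop lets the tables (and the returned strings) become const, with the final entry serving as a catch-all. Condensed form:

    static const struct ipa_cmd_names names[] = {
            { IPA_CMD_STARTLAN, "startlan" },
            /* ... */
            { IPA_CMD_UNKNOWN,  "unknown" },   /* catch-all, must stay last */
    };

    static const char *lookup(enum qeth_ipa_cmds cmd)
    {
            int x;

            for (x = 0; x < ARRAY_SIZE(names) - 1; x++)
                    if (names[x].cmd == cmd)
                            return names[x].name;
            return names[x].name;              /* x is the last index here */
    }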
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index aa8b9196b089..aa5de1fe01e1 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -797,8 +797,8 @@ enum qeth_ipa_arp_return_codes {
QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008,
};
-extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
-extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
+extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
+extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \
sizeof(struct qeth_ipacmd_setassparms_hdr))
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 710fa74892ae..b5e38531733f 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -423,7 +423,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
default:
dev_kfree_skb_any(skb);
QETH_CARD_TEXT(card, 3, "inbunkno");
- QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+ QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
continue;
}
work_done++;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 7175086677fb..ada258c01a08 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1390,7 +1390,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
default:
dev_kfree_skb_any(skb);
QETH_CARD_TEXT(card, 3, "inbunkno");
- QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+ QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
continue;
}
work_done++;
diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c
index 7b31f19ade83..050879a2ddef 100644
--- a/drivers/sbus/char/openprom.c
+++ b/drivers/sbus/char/openprom.c
@@ -715,22 +715,13 @@ static struct miscdevice openprom_dev = {
static int __init openprom_init(void)
{
- struct device_node *dp;
int err;
err = misc_register(&openprom_dev);
if (err)
return err;
- dp = of_find_node_by_path("/");
- dp = dp->child;
- while (dp) {
- if (!strcmp(dp->name, "options"))
- break;
- dp = dp->sibling;
- }
- options_node = dp;
-
+ options_node = of_get_child_by_name(of_find_node_by_path("/"), "options");
if (!options_node) {
misc_deregister(&openprom_dev);
return -EIO;
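
of_get_child_by_name() replaces the open-coded sibling walk and, unlike a raw pointer chase, returns the child with a reference held. A sketch with the reference handling spelled out; the patch itself chains the two calls and never puts the root's reference, which is harmless only because the root node is never freed:

    #include <linux/of.h>

    static struct device_node *find_options_node(void)
    {
    	struct device_node *root, *options;

    	root = of_find_node_by_path("/");
    	if (!root)
    		return NULL;
    	options = of_get_child_by_name(root, "options");
    	of_node_put(root);	/* done with the root reference */
    	return options;		/* caller drops it with of_node_put() */
    }
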
diff --git a/drivers/sbus/char/oradax.c b/drivers/sbus/char/oradax.c
index 524f9ea62e52..6516bc3cb58b 100644
--- a/drivers/sbus/char/oradax.c
+++ b/drivers/sbus/char/oradax.c
@@ -689,8 +689,7 @@ static int dax_open(struct inode *inode, struct file *f)
alloc_error:
kfree(ctx->ccb_buf);
done:
- if (ctx != NULL)
- kfree(ctx);
+ kfree(ctx);
return -ENOMEM;
}
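
kfree(), like userspace free(), is defined to ignore a NULL pointer, so the removed check was pure noise. Sketch:

    #include <linux/slab.h>

    struct foo;

    static void cleanup(struct foo *ctx)
    {
    	kfree(ctx);	/* no-op when ctx == NULL, no guard needed */
    }
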
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index fac377320158..f42a619198c4 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3474,11 +3474,10 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
vscsi->dds.window[LOCAL].liobn,
vscsi->dds.window[REMOTE].liobn);
- strcpy(vscsi->eye, "VSCSI ");
- strncat(vscsi->eye, vdev->name, MAX_EYE);
+ snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name);
vscsi->dds.unit_id = vdev->unit_address;
- strncpy(vscsi->dds.partition_name, partition_name,
+ strscpy(vscsi->dds.partition_name, partition_name,
sizeof(vscsi->dds.partition_name));
vscsi->dds.partition_num = partition_number;
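
Two string fixes in one hunk: the strcpy()+strncat() pair could overrun vscsi->eye, and strncpy() may leave its destination unterminated. snprintf() always terminates within the given size, and strscpy() both terminates and reports truncation. A sketch against a hypothetical structure:

    #include <linux/kernel.h>
    #include <linux/string.h>

    struct example {
    	char eye[64];
    	char partition_name[97];
    };

    static void set_names(struct example *e, const char *dev_name,
    		      const char *partition)
    {
    	/* Always NUL-terminated, never writes past the buffer. */
    	snprintf(e->eye, sizeof(e->eye), "VSCSI %s", dev_name);

    	/* strscpy() returns -E2BIG on truncation; strncpy() could
    	 * have left the field unterminated. */
    	if (strscpy(e->partition_name, partition,
    		    sizeof(e->partition_name)) < 0)
    		pr_warn("partition name truncated\n");
    }
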
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index f2ec80b0ffc0..271990bc065b 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3335,6 +3335,65 @@ static void ipr_release_dump(struct kref *kref)
LEAVE;
}
+static void ipr_add_remove_thread(struct work_struct *work)
+{
+ unsigned long lock_flags;
+ struct ipr_resource_entry *res;
+ struct scsi_device *sdev;
+ struct ipr_ioa_cfg *ioa_cfg =
+ container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
+ u8 bus, target, lun;
+ int did_work;
+
+ ENTER;
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+
+restart:
+ do {
+ did_work = 0;
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return;
+ }
+
+ list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+ if (res->del_from_ml && res->sdev) {
+ did_work = 1;
+ sdev = res->sdev;
+ if (!scsi_device_get(sdev)) {
+ if (!res->add_to_ml)
+ list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+ else
+ res->del_from_ml = 0;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ }
+ break;
+ }
+ }
+ } while (did_work);
+
+ list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+ if (res->add_to_ml) {
+ bus = res->bus;
+ target = res->target;
+ lun = res->lun;
+ res->add_to_ml = 0;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ scsi_add_device(ioa_cfg->host, bus, target, lun);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ goto restart;
+ }
+ }
+
+ ioa_cfg->scan_done = 1;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
+ LEAVE;
+}
+
/**
* ipr_worker_thread - Worker thread
* @work: ioa config struct
@@ -3349,13 +3408,9 @@ static void ipr_release_dump(struct kref *kref)
static void ipr_worker_thread(struct work_struct *work)
{
unsigned long lock_flags;
- struct ipr_resource_entry *res;
- struct scsi_device *sdev;
struct ipr_dump *dump;
struct ipr_ioa_cfg *ioa_cfg =
container_of(work, struct ipr_ioa_cfg, work_q);
- u8 bus, target, lun;
- int did_work;
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -3393,49 +3448,9 @@ static void ipr_worker_thread(struct work_struct *work)
return;
}
-restart:
- do {
- did_work = 0;
- if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- return;
- }
+ schedule_work(&ioa_cfg->scsi_add_work_q);
- list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
- if (res->del_from_ml && res->sdev) {
- did_work = 1;
- sdev = res->sdev;
- if (!scsi_device_get(sdev)) {
- if (!res->add_to_ml)
- list_move_tail(&res->queue, &ioa_cfg->free_res_q);
- else
- res->del_from_ml = 0;
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- scsi_remove_device(sdev);
- scsi_device_put(sdev);
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- }
- break;
- }
- }
- } while (did_work);
-
- list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
- if (res->add_to_ml) {
- bus = res->bus;
- target = res->target;
- lun = res->lun;
- res->add_to_ml = 0;
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- scsi_add_device(ioa_cfg->host, bus, target, lun);
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- goto restart;
- }
- }
-
- ioa_cfg->scan_done = 1;
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
LEAVE;
}
@@ -9933,6 +9948,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
INIT_LIST_HEAD(&ioa_cfg->free_res_q);
INIT_LIST_HEAD(&ioa_cfg->used_res_q);
INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
+ INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
init_waitqueue_head(&ioa_cfg->reset_wait_q);
init_waitqueue_head(&ioa_cfg->msi_wait_q);
init_waitqueue_head(&ioa_cfg->eeh_wait_q);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 68afbbde54d3..f6baa2351313 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1575,6 +1575,7 @@ struct ipr_ioa_cfg {
u8 saved_mode_page_len;
struct work_struct work_q;
+ struct work_struct scsi_add_work_q;
struct workqueue_struct *reset_work_q;
wait_queue_head_t reset_wait_q;
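
The ipr rework moves the slow scsi_remove_device()/scsi_add_device() churn out of the main worker into its own work item, so the rest of ipr_worker_thread() no longer waits behind device scanning. A minimal sketch of the split, hypothetical names throughout:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct my_ctx {
    	struct work_struct fast_work;
    	struct work_struct slow_work;	/* long-running part, split out */
    };

    static void my_slow_worker(struct work_struct *work)
    {
    	struct my_ctx *ctx = container_of(work, struct my_ctx, slow_work);

    	pr_info("ctx %p: slow add/remove scan\n", ctx);
    }

    static void my_fast_worker(struct work_struct *work)
    {
    	struct my_ctx *ctx = container_of(work, struct my_ctx, fast_work);

    	/* ...quick state handling..., then hand the slow part off: */
    	schedule_work(&ctx->slow_work);
    }

    static void my_ctx_init(struct my_ctx *ctx)
    {
    	INIT_WORK(&ctx->fast_work, my_fast_worker);
    	INIT_WORK(&ctx->slow_work, my_slow_worker);
    }
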
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 057a60abe664..1a6ed9b0a249 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -360,12 +360,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
goto buffer_done;
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ nrport = NULL;
+ spin_lock(&vport->phba->hbalock);
rport = lpfc_ndlp_get_nrport(ndlp);
- if (!rport)
- continue;
-
- /* local short-hand pointer. */
- nrport = rport->remoteport;
+ if (rport)
+ nrport = rport->remoteport;
+ spin_unlock(&vport->phba->hbalock);
if (!nrport)
continue;
@@ -3386,6 +3386,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
struct lpfc_nodelist *ndlp;
#if (IS_ENABLED(CONFIG_NVME_FC))
struct lpfc_nvme_rport *rport;
+ struct nvme_fc_remote_port *remoteport = NULL;
#endif
shost = lpfc_shost_from_vport(vport);
@@ -3396,8 +3397,12 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
if (ndlp->rport)
ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
#if (IS_ENABLED(CONFIG_NVME_FC))
+ spin_lock(&vport->phba->hbalock);
rport = lpfc_ndlp_get_nrport(ndlp);
if (rport)
+ remoteport = rport->remoteport;
+ spin_unlock(&vport->phba->hbalock);
+ if (remoteport)
nvme_fc_set_remoteport_devloss(rport->remoteport,
vport->cfg_devloss_tmo);
#endif
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 9df0c051349f..aec5b10a8c85 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -551,7 +551,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
unsigned char *statep;
struct nvme_fc_local_port *localport;
struct lpfc_nvmet_tgtport *tgtp;
- struct nvme_fc_remote_port *nrport;
+ struct nvme_fc_remote_port *nrport = NULL;
struct lpfc_nvme_rport *rport;
cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
@@ -696,11 +696,11 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
len += snprintf(buf + len, size - len, "\tRport List:\n");
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
/* local short-hand pointer. */
+ spin_lock(&phba->hbalock);
rport = lpfc_ndlp_get_nrport(ndlp);
- if (!rport)
- continue;
-
- nrport = rport->remoteport;
+ if (rport)
+ nrport = rport->remoteport;
+ spin_unlock(&phba->hbalock);
if (!nrport)
continue;
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 028462e5994d..918ae18ef8a8 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -2725,7 +2725,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
+ spin_lock_irq(&vport->phba->hbalock);
oldrport = lpfc_ndlp_get_nrport(ndlp);
+ spin_unlock_irq(&vport->phba->hbalock);
if (!oldrport)
lpfc_nlp_get(ndlp);
@@ -2840,7 +2842,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_rport *rport;
- struct nvme_fc_remote_port *remoteport;
+ struct nvme_fc_remote_port *remoteport = NULL;
localport = vport->localport;
@@ -2854,11 +2856,14 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
if (!lport)
goto input_err;
+ spin_lock_irq(&vport->phba->hbalock);
rport = lpfc_ndlp_get_nrport(ndlp);
- if (!rport)
+ if (rport)
+ remoteport = rport->remoteport;
+ spin_unlock_irq(&vport->phba->hbalock);
+ if (!remoteport)
goto input_err;
- remoteport = rport->remoteport;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6033 Unreg nvme remoteport %p, portname x%llx, "
"port_id x%06x, portstate x%x port type x%x\n",
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index fc3babc15fa3..a6f96b35e971 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -77,6 +77,11 @@ enum qedi_nvm_tgts {
QEDI_NVM_TGT_SEC,
};
+struct qedi_nvm_iscsi_image {
+ struct nvm_iscsi_cfg iscsi_cfg;
+ u32 crc;
+};
+
struct qedi_uio_ctrl {
/* meta data */
u32 uio_hsi_version;
@@ -294,7 +299,7 @@ struct qedi_ctx {
void *bdq_pbl_list;
dma_addr_t bdq_pbl_list_dma;
u8 bdq_pbl_list_num_entries;
- struct nvm_iscsi_cfg *iscsi_cfg;
+ struct qedi_nvm_iscsi_image *iscsi_image;
dma_addr_t nvm_buf_dma;
void __iomem *bdq_primary_prod;
void __iomem *bdq_secondary_prod;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index aa96bccb5a96..e5bd035ebad0 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1346,23 +1346,26 @@ exit_setup_int:
static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
{
- if (qedi->iscsi_cfg)
+ if (qedi->iscsi_image)
dma_free_coherent(&qedi->pdev->dev,
- sizeof(struct nvm_iscsi_cfg),
- qedi->iscsi_cfg, qedi->nvm_buf_dma);
+ sizeof(struct qedi_nvm_iscsi_image),
+ qedi->iscsi_image, qedi->nvm_buf_dma);
}
static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
{
- qedi->iscsi_cfg = dma_zalloc_coherent(&qedi->pdev->dev,
- sizeof(struct nvm_iscsi_cfg),
- &qedi->nvm_buf_dma, GFP_KERNEL);
- if (!qedi->iscsi_cfg) {
+ struct qedi_nvm_iscsi_image nvm_image;
+
+ qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev,
+ sizeof(nvm_image),
+ &qedi->nvm_buf_dma,
+ GFP_KERNEL);
+ if (!qedi->iscsi_image) {
QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
return -ENOMEM;
}
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
- "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_cfg,
+ "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_image,
qedi->nvm_buf_dma);
return 0;
@@ -1905,7 +1908,7 @@ qedi_get_nvram_block(struct qedi_ctx *qedi)
struct nvm_iscsi_block *block;
pf = qedi->dev_info.common.abs_pf_id;
- block = &qedi->iscsi_cfg->block[0];
+ block = &qedi->iscsi_image->iscsi_cfg.block[0];
for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) {
flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >>
NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET;
@@ -2194,15 +2197,14 @@ static void qedi_boot_release(void *data)
static int qedi_get_boot_info(struct qedi_ctx *qedi)
{
int ret = 1;
- u16 len;
-
- len = sizeof(struct nvm_iscsi_cfg);
+ struct qedi_nvm_iscsi_image nvm_image;
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"Get NVM iSCSI CFG image\n");
ret = qedi_ops->common->nvm_get_image(qedi->cdev,
QED_NVM_IMAGE_ISCSI_CFG,
- (char *)qedi->iscsi_cfg, len);
+ (char *)qedi->iscsi_image,
+ sizeof(nvm_image));
if (ret)
QEDI_ERR(&qedi->dbg_ctx,
"Could not get NVM image. ret = %d\n", ret);
@@ -2470,6 +2472,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
/* start qedi context */
spin_lock_init(&qedi->hba_lock);
spin_lock_init(&qedi->task_idx_lock);
+ mutex_init(&qedi->stats_lock);
}
qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi);
qedi_ops->ll2->start(qedi->cdev, &params);
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index fecf96f0225c..199d3ba1916d 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -374,8 +374,8 @@ struct atio_from_isp {
static inline int fcpcmd_is_corrupted(struct atio *atio)
{
if (atio->entry_type == ATIO_TYPE7 &&
- (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) <
- FCP_CMD_LENGTH_MIN))
+ ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) <
+ FCP_CMD_LENGTH_MIN))
return 1;
else
return 0;
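
The qla2xxx fix is pure operator placement: masking the raw little-endian field with a host-order constant and byte-swapping afterwards only happens to work on little-endian hosts. Convert the whole field first, then mask. Sketch with hypothetical mask/minimum values:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    #define LEN_MASK 0x0fff		/* hypothetical field layout */
    #define LEN_MIN  0x0038

    static bool len_too_short(__le16 raw)
    {
    	/*
    	 * Wrong: le16_to_cpu(raw & LEN_MASK) masks the LE bytes with a
    	 * host-order constant, so big-endian hosts test garbage bits.
    	 */
    	return (le16_to_cpu(raw) & LEN_MASK) < LEN_MIN;
    }
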
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index eb97d2dd3651..62348412ed1b 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -3046,11 +3046,14 @@ scsi_device_quiesce(struct scsi_device *sdev)
*/
WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);
- blk_set_preempt_only(q);
+ if (sdev->quiesced_by == current)
+ return 0;
+
+ blk_set_pm_only(q);
blk_mq_freeze_queue(q);
/*
- * Ensure that the effect of blk_set_preempt_only() will be visible
+ * Ensure that the effect of blk_set_pm_only() will be visible
* for percpu_ref_tryget() callers that occur after the queue
* unfreeze even if the queue was already frozen before this function
* was called. See also https://lwn.net/Articles/573497/.
@@ -3063,7 +3066,7 @@ scsi_device_quiesce(struct scsi_device *sdev)
if (err == 0)
sdev->quiesced_by = current;
else
- blk_clear_preempt_only(q);
+ blk_clear_pm_only(q);
mutex_unlock(&sdev->state_mutex);
return err;
@@ -3088,7 +3091,7 @@ void scsi_device_resume(struct scsi_device *sdev)
mutex_lock(&sdev->state_mutex);
WARN_ON_ONCE(!sdev->quiesced_by);
sdev->quiesced_by = NULL;
- blk_clear_preempt_only(sdev->request_queue);
+ blk_clear_pm_only(sdev->request_queue);
if (sdev->sdev_state == SDEV_QUIESCE)
scsi_device_set_state(sdev, SDEV_RUNNING);
mutex_unlock(&sdev->state_mutex);
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index b44c1bb687a2..a2b4179bfdf7 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -8,6 +8,7 @@
#include <linux/pm_runtime.h>
#include <linux/export.h>
#include <linux/async.h>
+#include <linux/blk-pm.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b79b366a94f7..b762d0fd773c 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -45,6 +45,7 @@
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
+#include <linux/blk-pm.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
@@ -1276,7 +1277,8 @@ static int sd_init_command(struct scsi_cmnd *cmd)
case REQ_OP_ZONE_RESET:
return sd_zbc_setup_reset_cmnd(cmd);
default:
- BUG();
+ WARN_ON_ONCE(1);
+ return BLKPREP_KILL;
}
}
@@ -2959,6 +2961,9 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
if (rot == 1) {
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
+ } else {
+ blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+ blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
}
if (sdkp->device->type == TYPE_ZBC) {
@@ -3271,7 +3276,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
}
blk_pm_runtime_init(sdp->request_queue, dev);
- device_add_disk(dev, gd);
+ device_add_disk(dev, gd, NULL);
if (sdkp->capacity)
sd_dif_config_host(sdkp);
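
The sd_init_command() hunk swaps BUG() for a one-time warning plus a failed request: an unknown opcode is a driver bug worth logging, not a reason to panic the machine. A generic sketch of the pattern:

    #include <linux/bug.h>
    #include <linux/errno.h>

    static int handle_op(unsigned int op)
    {
    	switch (op) {
    	case 0:
    		return 0;		/* the ops we understand */
    	default:
    		WARN_ON_ONCE(1);	/* log once, with a backtrace */
    		return -EIO;		/* fail this request, keep running */
    	}
    }
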
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index d0389b20574d..54dd70ae9731 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -43,6 +43,7 @@
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/blkdev.h>
+#include <linux/blk-pm.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
@@ -758,7 +759,7 @@ static int sr_probe(struct device *dev)
dev_set_drvdata(dev, cd);
disk->flags |= GENHD_FL_REMOVABLE;
- device_add_disk(&sdev->sdev_gendev, disk);
+ device_add_disk(&sdev->sdev_gendev, disk, NULL);
sdev_printk(KERN_DEBUG, sdev,
"Attached scsi CD-ROM %s\n", cd->cdi.name);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 9d5d2ca7fc4f..c55f38ec391c 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -7940,6 +7940,13 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = -ENOMEM;
goto out_error;
}
+
+ /*
+ * Do not use blk-mq at this time because blk-mq does not support
+ * runtime pm.
+ */
+ host->use_blk_mq = false;
+
hba = shost_priv(host);
hba->host = host;
hba->dev = dev;
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
index 05c42235dd41..7c3cc968053c 100644
--- a/drivers/soc/fsl/qbman/bman_ccsr.c
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -120,6 +120,7 @@ static void bm_set_memory(u64 ba, u32 size)
*/
static dma_addr_t fbpr_a;
static size_t fbpr_sz;
+static int __bman_probed;
static int bman_fbpr(struct reserved_mem *rmem)
{
@@ -166,6 +167,12 @@ static irqreturn_t bman_isr(int irq, void *ptr)
return IRQ_HANDLED;
}
+int bman_is_probed(void)
+{
+ return __bman_probed;
+}
+EXPORT_SYMBOL_GPL(bman_is_probed);
+
static int fsl_bman_probe(struct platform_device *pdev)
{
int ret, err_irq;
@@ -175,6 +182,8 @@ static int fsl_bman_probe(struct platform_device *pdev)
u16 id, bm_pool_cnt;
u8 major, minor;
+ __bman_probed = -1;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
@@ -255,6 +264,8 @@ static int fsl_bman_probe(struct platform_device *pdev)
return ret;
}
+ __bman_probed = 1;
+
return 0;
};
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index ecb22749df0b..8cc015183043 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -2729,6 +2729,9 @@ static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
{
unsigned long addr;
+ if (!p)
+ return -ENODEV;
+
addr = gen_pool_alloc(p, cnt);
if (!addr)
return -ENOMEM;
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 79cba58387a5..6fd5fef5f39b 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -273,6 +273,7 @@ static const struct qman_error_info_mdata error_mdata[] = {
static u32 __iomem *qm_ccsr_start;
/* A SDQCR mask comprising all the available/visible pool channels */
static u32 qm_pools_sdqcr;
+static int __qman_probed;
static inline u32 qm_ccsr_in(u32 offset)
{
@@ -686,6 +687,12 @@ static int qman_resource_init(struct device *dev)
return 0;
}
+int qman_is_probed(void)
+{
+ return __qman_probed;
+}
+EXPORT_SYMBOL_GPL(qman_is_probed);
+
static int fsl_qman_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -695,6 +702,8 @@ static int fsl_qman_probe(struct platform_device *pdev)
u16 id;
u8 major, minor;
+ __qman_probed = -1;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "Can't get %pOF property 'IORESOURCE_MEM'\n",
@@ -828,6 +837,8 @@ static int fsl_qman_probe(struct platform_device *pdev)
if (ret)
return ret;
+ __qman_probed = 1;
+
return 0;
}
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index a120002b630e..3e9391d117c5 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -227,6 +227,14 @@ static int qman_portal_probe(struct platform_device *pdev)
int irq, cpu, err;
u32 val;
+ err = qman_is_probed();
+ if (!err)
+ return -EPROBE_DEFER;
+ if (err < 0) {
+ dev_err(&pdev->dev, "failing probe due to qman probe error\n");
+ return -ENODEV;
+ }
+
pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
if (!pcfg)
return -ENOMEM;
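
bman_is_probed()/qman_is_probed() are deliberately tri-state: 0 means the controller has not probed yet (the consumer returns -EPROBE_DEFER and is retried), negative means the probe failed for good, positive means proceed. Sketch of the consumer side:

    #include <linux/device.h>
    #include <linux/errno.h>

    static int provider_state;	/* 0 = not yet, <0 = failed, >0 = ready */

    static int consumer_check_provider(struct device *dev)
    {
    	if (!provider_state)
    		return -EPROBE_DEFER;	/* driver core retries us later */
    	if (provider_state < 0) {
    		dev_err(dev, "provider probe failed\n");
    		return -ENODEV;
    	}
    	return 0;
    }
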
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
index c646d8713861..681f7d4b7724 100644
--- a/drivers/soc/fsl/qe/ucc.c
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -626,7 +626,7 @@ static u32 ucc_get_tdm_sync_shift(enum comm_dir mode, u32 tdm_num)
{
u32 shift;
- shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : RX_SYNC_SHIFT_BASE;
+ shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : TX_SYNC_SHIFT_BASE;
shift -= tdm_num * 2;
return shift;
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 4b5e250e8615..e5c7e1ef6318 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -899,9 +899,10 @@ static void sdw_release_master_stream(struct sdw_stream_runtime *stream)
struct sdw_master_runtime *m_rt = stream->m_rt;
struct sdw_slave_runtime *s_rt, *_s_rt;
- list_for_each_entry_safe(s_rt, _s_rt,
- &m_rt->slave_rt_list, m_rt_node)
- sdw_stream_remove_slave(s_rt->slave, stream);
+ list_for_each_entry_safe(s_rt, _s_rt, &m_rt->slave_rt_list, m_rt_node) {
+ sdw_slave_port_release(s_rt->slave->bus, s_rt->slave, stream);
+ sdw_release_slave_stream(s_rt->slave, stream);
+ }
list_del(&m_rt->bus_node);
}
@@ -1112,7 +1113,7 @@ int sdw_stream_add_master(struct sdw_bus *bus,
"Master runtime config failed for stream:%s",
stream->name);
ret = -ENOMEM;
- goto error;
+ goto unlock;
}
ret = sdw_config_stream(bus->dev, stream, stream_config, false);
@@ -1123,11 +1124,11 @@ int sdw_stream_add_master(struct sdw_bus *bus,
if (ret)
goto stream_error;
- stream->state = SDW_STREAM_CONFIGURED;
+ goto unlock;
stream_error:
sdw_release_master_stream(stream);
-error:
+unlock:
mutex_unlock(&bus->bus_lock);
return ret;
}
@@ -1141,6 +1142,10 @@ EXPORT_SYMBOL(sdw_stream_add_master);
* @stream: SoundWire stream
* @port_config: Port configuration for audio stream
* @num_ports: Number of ports
+ *
+ * It is expected that Slave is added before adding Master
+ * to the Stream.
+ *
*/
int sdw_stream_add_slave(struct sdw_slave *slave,
struct sdw_stream_config *stream_config,
@@ -1186,6 +1191,12 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
if (ret)
goto stream_error;
+ /*
+ * Change stream state to CONFIGURED on first Slave add.
+ * Bus is not aware of number of Slave(s) in a stream at this
+ * point so cannot depend on all Slave(s) to be added in order to
+ * change stream state to CONFIGURED.
+ */
stream->state = SDW_STREAM_CONFIGURED;
goto error;
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 7cb3ab0a35a0..3082e72e4f6c 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -30,7 +30,11 @@
#define DRIVER_NAME "fsl-dspi"
+#ifdef CONFIG_M5441x
+#define DSPI_FIFO_SIZE 16
+#else
#define DSPI_FIFO_SIZE 4
+#endif
#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
#define SPI_MCR 0x00
@@ -623,9 +627,11 @@ static void dspi_tcfq_read(struct fsl_dspi *dspi)
static void dspi_eoq_write(struct fsl_dspi *dspi)
{
int fifo_size = DSPI_FIFO_SIZE;
+ u16 xfer_cmd = dspi->tx_cmd;
/* Fill TX FIFO with as many transfers as possible */
while (dspi->len && fifo_size--) {
+ dspi->tx_cmd = xfer_cmd;
/* Request EOQF for last transfer in FIFO */
if (dspi->len == dspi->bytes_per_word || fifo_size == 0)
dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 0626e6e3ea0c..421bfc7dda67 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -300,8 +300,8 @@ static int spi_gpio_request(struct device *dev,
*mflags |= SPI_MASTER_NO_RX;
spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW);
- if (IS_ERR(spi_gpio->mosi))
- return PTR_ERR(spi_gpio->mosi);
+ if (IS_ERR(spi_gpio->sck))
+ return PTR_ERR(spi_gpio->sck);
for (i = 0; i < num_chipselects; i++) {
spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs",
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 95dc4d78618d..b37de1d991d6 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -598,11 +598,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
ret = wait_event_interruptible_timeout(rspi->wait,
rspi->dma_callbacked, HZ);
- if (ret > 0 && rspi->dma_callbacked)
+ if (ret > 0 && rspi->dma_callbacked) {
ret = 0;
- else if (!ret) {
- dev_err(&rspi->master->dev, "DMA timeout\n");
- ret = -ETIMEDOUT;
+ } else {
+ if (!ret) {
+ dev_err(&rspi->master->dev, "DMA timeout\n");
+ ret = -ETIMEDOUT;
+ }
if (tx)
dmaengine_terminate_all(rspi->master->dma_tx);
if (rx)
@@ -1350,12 +1352,36 @@ static const struct platform_device_id spi_driver_ids[] = {
MODULE_DEVICE_TABLE(platform, spi_driver_ids);
+#ifdef CONFIG_PM_SLEEP
+static int rspi_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+ return spi_master_suspend(rspi->master);
+}
+
+static int rspi_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+ return spi_master_resume(rspi->master);
+}
+
+static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
+#define DEV_PM_OPS &rspi_pm_ops
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
static struct platform_driver rspi_driver = {
.probe = rspi_probe,
.remove = rspi_remove,
.id_table = spi_driver_ids,
.driver = {
.name = "renesas_spi",
+ .pm = DEV_PM_OPS,
.of_match_table = of_match_ptr(rspi_of_match),
},
};
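
Both SPI suspend/resume additions use the classic #ifdef CONFIG_PM_SLEEP / DEV_PM_OPS dance to dodge "defined but not used" warnings. An equivalent, shorter idiom (a sketch, not what these patches do) marks the callbacks __maybe_unused and sets .pm unconditionally; SIMPLE_DEV_PM_OPS already degrades to empty ops when sleep support is off. dev_get_drvdata(dev) returns the same data as platform_get_drvdata(), so the to_platform_device() detour is also avoidable:

    #include <linux/device.h>
    #include <linux/pm.h>
    #include <linux/spi/spi.h>

    struct example_priv {
    	struct spi_master *master;
    };

    static int __maybe_unused example_suspend(struct device *dev)
    {
    	struct example_priv *p = dev_get_drvdata(dev);

    	return spi_master_suspend(p->master);
    }

    static int __maybe_unused example_resume(struct device *dev)
    {
    	struct example_priv *p = dev_get_drvdata(dev);

    	return spi_master_resume(p->master);
    }

    static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
    /* then: .driver = { .pm = &example_pm_ops, ... } with no #ifdef */
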
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 539d6d1a277a..101cd6aae2ea 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -397,7 +397,8 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
{
- sh_msiof_write(p, STR, sh_msiof_read(p, STR));
+ sh_msiof_write(p, STR,
+ sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
}
static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
@@ -1426,12 +1427,37 @@ static const struct platform_device_id spi_driver_ids[] = {
};
MODULE_DEVICE_TABLE(platform, spi_driver_ids);
+#ifdef CONFIG_PM_SLEEP
+static int sh_msiof_spi_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+ return spi_master_suspend(p->master);
+}
+
+static int sh_msiof_spi_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+ return spi_master_resume(p->master);
+}
+
+static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
+ sh_msiof_spi_resume);
+#define DEV_PM_OPS &sh_msiof_spi_pm_ops
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
static struct platform_driver sh_msiof_spi_drv = {
.probe = sh_msiof_spi_probe,
.remove = sh_msiof_spi_remove,
.id_table = spi_driver_ids,
.driver = {
.name = "spi_sh_msiof",
+ .pm = DEV_PM_OPS,
.of_match_table = of_match_ptr(sh_msiof_match),
},
};
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 6f7b946b5ced..1427f343b39a 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1063,6 +1063,24 @@ static int tegra_slink_probe(struct platform_device *pdev)
goto exit_free_master;
}
+ /* disabled clock may cause interrupt storm upon request */
+ tspi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tspi->clk)) {
+ ret = PTR_ERR(tspi->clk);
+ dev_err(&pdev->dev, "Can not get clock %d\n", ret);
+ goto exit_free_master;
+ }
+ ret = clk_prepare(tspi->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Clock prepare failed %d\n", ret);
+ goto exit_free_master;
+ }
+ ret = clk_enable(tspi->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
+ goto exit_free_master;
+ }
+
spi_irq = platform_get_irq(pdev, 0);
tspi->irq = spi_irq;
ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
@@ -1071,14 +1089,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
tspi->irq);
- goto exit_free_master;
- }
-
- tspi->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(tspi->clk)) {
- dev_err(&pdev->dev, "can not get clock\n");
- ret = PTR_ERR(tspi->clk);
- goto exit_free_irq;
+ goto exit_clk_disable;
}
tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
@@ -1138,6 +1149,8 @@ exit_rx_dma_free:
tegra_slink_deinit_dma_param(tspi, true);
exit_free_irq:
free_irq(spi_irq, tspi);
+exit_clk_disable:
+ clk_disable(tspi->clk);
exit_free_master:
spi_master_put(master);
return ret;
@@ -1150,6 +1163,8 @@ static int tegra_slink_remove(struct platform_device *pdev)
free_irq(tspi->irq, tspi);
+ clk_disable(tspi->clk);
+
if (tspi->tx_dma_chan)
tegra_slink_deinit_dma_param(tspi, false);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ec395a6baf9c..9da0bc5a036c 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2143,8 +2143,17 @@ int spi_register_controller(struct spi_controller *ctlr)
*/
if (ctlr->num_chipselect == 0)
return -EINVAL;
- /* allocate dynamic bus number using Linux idr */
- if ((ctlr->bus_num < 0) && ctlr->dev.of_node) {
+ if (ctlr->bus_num >= 0) {
+ /* devices with a fixed bus num must check-in with the num */
+ mutex_lock(&board_lock);
+ id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
+ ctlr->bus_num + 1, GFP_KERNEL);
+ mutex_unlock(&board_lock);
+ if (WARN(id < 0, "couldn't get idr"))
+ return id == -ENOSPC ? -EBUSY : id;
+ ctlr->bus_num = id;
+ } else if (ctlr->dev.of_node) {
+ /* allocate dynamic bus number using Linux idr */
id = of_alias_get_id(ctlr->dev.of_node, "spi");
if (id >= 0) {
ctlr->bus_num = id;
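
idr_alloc(idr, ptr, start, end, gfp) hands back the lowest free id in [start, end), so passing bus_num and bus_num + 1 claims exactly that id; -ENOSPC then means "already taken", which the patch reports as -EBUSY. A sketch:

    #include <linux/gfp.h>
    #include <linux/idr.h>

    static DEFINE_IDR(example_idr);

    /* Claim exactly id 'num' for ptr. */
    static int claim_fixed_id(void *ptr, int num)
    {
    	int id = idr_alloc(&example_idr, ptr, num, num + 1, GFP_KERNEL);

    	if (id == -ENOSPC)	/* the single id in range is in use */
    		return -EBUSY;
    	return id;		/* num on success, other -errno otherwise */
    }
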
diff --git a/drivers/staging/erofs/Kconfig b/drivers/staging/erofs/Kconfig
index 96f614934df1..663b755bf2fb 100644
--- a/drivers/staging/erofs/Kconfig
+++ b/drivers/staging/erofs/Kconfig
@@ -2,7 +2,7 @@
config EROFS_FS
tristate "EROFS filesystem support"
- depends on BROKEN
+ depends on BLOCK
help
EROFS (Enhanced Read-Only File System) is a lightweight
read-only file system with modern designs (e.g. page-sized
diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
index 1aec509c805f..2df9768edac9 100644
--- a/drivers/staging/erofs/super.c
+++ b/drivers/staging/erofs/super.c
@@ -340,7 +340,7 @@ static int erofs_read_super(struct super_block *sb,
goto err_sbread;
sb->s_magic = EROFS_SUPER_MAGIC;
- sb->s_flags |= MS_RDONLY | MS_NOATIME;
+ sb->s_flags |= SB_RDONLY | SB_NOATIME;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_time_gran = 1;
@@ -627,7 +627,7 @@ static int erofs_remount(struct super_block *sb, int *flags, char *data)
{
BUG_ON(!sb_rdonly(sb));
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
return 0;
}
diff --git a/drivers/staging/fbtft/TODO b/drivers/staging/fbtft/TODO
index 7e64c7e438f0..a9f4802bb6be 100644
--- a/drivers/staging/fbtft/TODO
+++ b/drivers/staging/fbtft/TODO
@@ -2,3 +2,7 @@
GPIO descriptor API in <linux/gpio/consumer.h> and look up GPIO
lines from device tree, ACPI or board files, board files should
use <linux/gpio/machine.h>
+
+* convert all these over to drm_simple_display_pipe and submit for inclusion
+ into the DRM subsystem under drivers/gpu/drm - fbdev doesn't take any new
+ drivers anymore.
diff --git a/drivers/staging/gasket/TODO b/drivers/staging/gasket/TODO
index 6ff8e01b04cc..5b1865f8af2d 100644
--- a/drivers/staging/gasket/TODO
+++ b/drivers/staging/gasket/TODO
@@ -1,9 +1,22 @@
This is a list of things that need to be done to get this driver out of the
staging directory.
+
+- Implement the gasket framework's functionality through UIO instead of
+ introducing a new user-space drivers framework that is quite similar.
+
+ UIO provides the necessary bits to implement user-space drivers. Meanwhile
+ the gasket APIs add some extra conveniences like PCI BAR mapping and
+ MSI interrupts. Add these features to the UIO subsystem, then re-implement
+ the Apex driver as a basic UIO driver instead (include/linux/uio_driver.h).
+
- Document sysfs files with Documentation/ABI/ entries.
+
- Use misc interface instead of major number for driver version description.
+
- Add descriptions of module_param's
+
- apex_get_status() should actually check status.
+
- "drivers" should never be dealing with "raw" sysfs calls or mess around with
kobjects at all. The driver core should handle all of this for you
automatically. There should not be a need for raw attribute macros.
diff --git a/drivers/staging/media/mt9t031/Kconfig b/drivers/staging/media/mt9t031/Kconfig
index f48e06a03cdb..9a58aaf72edd 100644
--- a/drivers/staging/media/mt9t031/Kconfig
+++ b/drivers/staging/media/mt9t031/Kconfig
@@ -1,9 +1,3 @@
-config SOC_CAMERA_IMX074
- tristate "imx074 support (DEPRECATED)"
- depends on SOC_CAMERA && I2C
- help
- This driver supports IMX074 cameras from Sony
-
config SOC_CAMERA_MT9T031
tristate "mt9t031 support (DEPRECATED)"
depends on SOC_CAMERA && I2C
diff --git a/drivers/staging/vboxvideo/vbox_drv.c b/drivers/staging/vboxvideo/vbox_drv.c
index da92c493f157..69cc508af1bc 100644
--- a/drivers/staging/vboxvideo/vbox_drv.c
+++ b/drivers/staging/vboxvideo/vbox_drv.c
@@ -59,6 +59,11 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = PTR_ERR(dev);
goto err_drv_alloc;
}
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto err_pci_enable;
+
dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
@@ -75,6 +80,8 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_drv_dev_register:
vbox_driver_unload(dev);
err_vbox_driver_load:
+ pci_disable_device(pdev);
+ err_pci_enable:
drm_dev_put(dev);
err_drv_alloc:
return ret;
diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c
index a83eac8668d0..79836c8fb909 100644
--- a/drivers/staging/vboxvideo/vbox_mode.c
+++ b/drivers/staging/vboxvideo/vbox_mode.c
@@ -323,6 +323,11 @@ static int vbox_crtc_page_flip(struct drm_crtc *crtc,
if (rc)
return rc;
+ mutex_lock(&vbox->hw_mutex);
+ vbox_set_view(crtc);
+ vbox_do_modeset(crtc, &crtc->mode);
+ mutex_unlock(&vbox->hw_mutex);
+
spin_lock_irqsave(&drm->event_lock, flags);
if (event)
diff --git a/drivers/staging/wilc1000/Makefile b/drivers/staging/wilc1000/Makefile
index f7b07c0b5ce2..ee7e26b886a5 100644
--- a/drivers/staging/wilc1000/Makefile
+++ b/drivers/staging/wilc1000/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_WILC1000) += wilc1000.o
ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \
-DFIRMWARE_1003=\"atmel/wilc1003_firmware.bin\"
@@ -11,9 +12,7 @@ wilc1000-objs := wilc_wfi_cfgoperations.o linux_wlan.o linux_mon.o \
wilc_wlan.o
obj-$(CONFIG_WILC1000_SDIO) += wilc1000-sdio.o
-wilc1000-sdio-objs += $(wilc1000-objs)
wilc1000-sdio-objs += wilc_sdio.o
obj-$(CONFIG_WILC1000_SPI) += wilc1000-spi.o
-wilc1000-spi-objs += $(wilc1000-objs)
wilc1000-spi-objs += wilc_spi.o
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 01cf4bd2e192..3b8d237decbf 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -1038,8 +1038,8 @@ void wilc_netdev_cleanup(struct wilc *wilc)
}
kfree(wilc);
- wilc_debugfs_remove();
}
+EXPORT_SYMBOL_GPL(wilc_netdev_cleanup);
static const struct net_device_ops wilc_netdev_ops = {
.ndo_init = mac_init_fn,
@@ -1062,7 +1062,6 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
if (!wl)
return -ENOMEM;
- wilc_debugfs_init();
*wilc = wl;
wl->io_type = io_type;
wl->hif_func = ops;
@@ -1124,3 +1123,6 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
return 0;
}
+EXPORT_SYMBOL_GPL(wilc_netdev_init);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/wilc1000/wilc_debugfs.c b/drivers/staging/wilc1000/wilc_debugfs.c
index edc72876458d..8001df66b8c2 100644
--- a/drivers/staging/wilc1000/wilc_debugfs.c
+++ b/drivers/staging/wilc1000/wilc_debugfs.c
@@ -19,6 +19,7 @@ static struct dentry *wilc_dir;
#define DBG_LEVEL_ALL (DEBUG | INFO | WRN | ERR)
-static atomic_t WILC_DEBUG_LEVEL = ATOMIC_INIT(ERR);
+atomic_t WILC_DEBUG_LEVEL = ATOMIC_INIT(ERR);
+EXPORT_SYMBOL_GPL(WILC_DEBUG_LEVEL);
static ssize_t wilc_debug_level_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
@@ -87,7 +88,7 @@ static struct wilc_debugfs_info_t debugfs_info[] = {
},
};
-int wilc_debugfs_init(void)
+static int __init wilc_debugfs_init(void)
{
int i;
struct wilc_debugfs_info_t *info;
@@ -103,10 +104,12 @@ int wilc_debugfs_init(void)
}
return 0;
}
+module_init(wilc_debugfs_init);
-void wilc_debugfs_remove(void)
+static void __exit wilc_debugfs_remove(void)
{
debugfs_remove_recursive(wilc_dir);
}
+module_exit(wilc_debugfs_remove);
#endif
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index 6787b6e9f124..8b184aa30d25 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -417,6 +417,7 @@ void chip_allow_sleep(struct wilc *wilc)
wilc->hif_func->hif_write_reg(wilc, 0xf0, reg & ~BIT(0));
wilc->hif_func->hif_write_reg(wilc, 0xfa, 0);
}
+EXPORT_SYMBOL_GPL(chip_allow_sleep);
void chip_wakeup(struct wilc *wilc)
{
@@ -471,6 +472,7 @@ void chip_wakeup(struct wilc *wilc)
}
chip_ps_state = CHIP_WAKEDUP;
}
+EXPORT_SYMBOL_GPL(chip_wakeup);
void wilc_chip_sleep_manually(struct wilc *wilc)
{
@@ -484,6 +486,7 @@ void wilc_chip_sleep_manually(struct wilc *wilc)
chip_ps_state = CHIP_SLEEPING_MANUAL;
release_bus(wilc, RELEASE_ONLY);
}
+EXPORT_SYMBOL_GPL(wilc_chip_sleep_manually);
void host_wakeup_notify(struct wilc *wilc)
{
@@ -491,6 +494,7 @@ void host_wakeup_notify(struct wilc *wilc)
wilc->hif_func->hif_write_reg(wilc, 0x10b0, 1);
release_bus(wilc, RELEASE_ONLY);
}
+EXPORT_SYMBOL_GPL(host_wakeup_notify);
void host_sleep_notify(struct wilc *wilc)
{
@@ -498,6 +502,7 @@ void host_sleep_notify(struct wilc *wilc)
wilc->hif_func->hif_write_reg(wilc, 0x10ac, 1);
release_bus(wilc, RELEASE_ONLY);
}
+EXPORT_SYMBOL_GPL(host_sleep_notify);
int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
{
@@ -871,6 +876,7 @@ void wilc_handle_isr(struct wilc *wilc)
release_bus(wilc, RELEASE_ALLOW_SLEEP);
}
+EXPORT_SYMBOL_GPL(wilc_handle_isr);
int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
u32 buffer_size)
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h
index 00d13b153f80..b81a73b9bd67 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wilc_wlan_if.h
@@ -831,6 +831,4 @@ struct wilc;
int wilc_wlan_init(struct net_device *dev);
u32 wilc_get_chipid(struct wilc *wilc, bool update);
-int wilc_debugfs_init(void);
-void wilc_debugfs_remove(void);
#endif
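
The wilc1000 rework stops linking the core objects into both the sdio and spi modules and makes the core a module of its own; every function the glue modules call across that boundary now needs EXPORT_SYMBOL_GPL(), and the core must declare MODULE_LICENSE() or its exports are unusable. The shape of the core side, with a hypothetical helper:

    #include <linux/module.h>

    int example_core_helper(void)	/* called from the bus glue modules */
    {
    	return 0;
    }
    EXPORT_SYMBOL_GPL(example_core_helper);

    MODULE_LICENSE("GPL");
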
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 94bad43c41ff..cc756a123fd8 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1416,7 +1416,8 @@ static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
sg_init_table(sg, ARRAY_SIZE(sg));
sg_set_buf(sg, buf, payload_length);
- sg_set_buf(sg + 1, pad_bytes, padding);
+ if (padding)
+ sg_set_buf(sg + 1, pad_bytes, padding);
ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
@@ -3910,10 +3911,14 @@ static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
{
int ret;
- u8 buffer[ISCSI_HDR_LEN], opcode;
+ u8 *buffer, opcode;
u32 checksum = 0, digest = 0;
struct kvec iov;
+ buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return;
+
while (!kthread_should_stop()) {
/*
* Ensure that both TX and RX per connection kthreads
@@ -3921,7 +3926,6 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
*/
iscsit_thread_check_cpumask(conn, current, 0);
- memset(buffer, 0, ISCSI_HDR_LEN);
memset(&iov, 0, sizeof(struct kvec));
iov.iov_base = buffer;
@@ -3930,7 +3934,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
if (ret != ISCSI_HDR_LEN) {
iscsit_rx_thread_wait_for_tcp(conn);
- return;
+ break;
}
if (conn->conn_ops->HeaderDigest) {
@@ -3940,7 +3944,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
if (ret != ISCSI_CRC_LEN) {
iscsit_rx_thread_wait_for_tcp(conn);
- return;
+ break;
}
iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
@@ -3964,7 +3968,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
}
if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
- return;
+ break;
opcode = buffer[0] & ISCSI_OPCODE_MASK;
@@ -3975,13 +3979,15 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
" while in Discovery Session, rejecting.\n", opcode);
iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
buffer);
- return;
+ break;
}
ret = iscsi_target_rx_opcode(conn, buffer);
if (ret < 0)
- return;
+ break;
}
+
+ kfree(buffer);
}
int iscsi_target_rx_thread(void *arg)
@@ -4208,22 +4214,15 @@ int iscsit_close_connection(
crypto_free_ahash(tfm);
}
- free_cpumask_var(conn->conn_cpumask);
-
- kfree(conn->conn_ops);
- conn->conn_ops = NULL;
-
if (conn->sock)
sock_release(conn->sock);
if (conn->conn_transport->iscsit_free_conn)
conn->conn_transport->iscsit_free_conn(conn);
- iscsit_put_transport(conn->conn_transport);
-
pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
conn->conn_state = TARG_CONN_STATE_FREE;
- kfree(conn);
+ iscsit_free_conn(conn);
spin_lock_bh(&sess->conn_lock);
atomic_dec(&sess->nconn);
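
The rx-buffer hunks exist because the PDU header is fed through sg_set_buf() into the ahash digest code, and scatterlists must point at page-backed memory; with CONFIG_VMAP_STACK an on-stack array is not. Hence the kcalloc() allocation, and the early returns become breaks so the buffer is freed on every exit path. Sketch:

    #include <linux/scatterlist.h>
    #include <linux/slab.h>

    #define HDR_LEN 48	/* ISCSI_HDR_LEN in the driver */

    static int hash_header_example(void)
    {
    	struct scatterlist sg;
    	u8 *buf;

    	/* A stack array here could not be handed to sg_init_one():
    	 * vmap'd stacks are not backed by struct page. */
    	buf = kzalloc(HDR_LEN, GFP_KERNEL);
    	if (!buf)
    		return -ENOMEM;

    	sg_init_one(&sg, buf, HDR_LEN);
    	/* ...fill buf, run the ahash over &sg... */

    	kfree(buf);
    	return 0;
    }
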
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 9518ffd8b8ba..4e680d753941 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -26,27 +26,6 @@
#include "iscsi_target_nego.h"
#include "iscsi_target_auth.h"
-static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
-{
- int j = DIV_ROUND_UP(len, 2), rc;
-
- rc = hex2bin(dst, src, j);
- if (rc < 0)
- pr_debug("CHAP string contains non hex digit symbols\n");
-
- dst[j] = '\0';
- return j;
-}
-
-static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
-{
- int i;
-
- for (i = 0; i < src_len; i++) {
- sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
- }
-}
-
static int chap_gen_challenge(
struct iscsi_conn *conn,
int caller,
@@ -62,7 +41,7 @@ static int chap_gen_challenge(
ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH);
if (unlikely(ret))
return ret;
- chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
+ bin2hex(challenge_asciihex, chap->challenge,
CHAP_CHALLENGE_LENGTH);
/*
* Set CHAP_C, and copy the generated challenge into c_str.
@@ -248,9 +227,16 @@ static int chap_server_compute_md5(
pr_err("Could not find CHAP_R.\n");
goto out;
}
+ if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) {
+ pr_err("Malformed CHAP_R\n");
+ goto out;
+ }
+ if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) {
+ pr_err("Malformed CHAP_R\n");
+ goto out;
+ }
pr_debug("[server] Got CHAP_R=%s\n", chap_r);
- chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
tfm = crypto_alloc_shash("md5", 0, 0);
if (IS_ERR(tfm)) {
@@ -294,7 +280,7 @@ static int chap_server_compute_md5(
goto out;
}
- chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
+ bin2hex(response, server_digest, MD5_SIGNATURE_SIZE);
pr_debug("[server] MD5 Server Digest: %s\n", response);
if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
@@ -349,9 +335,7 @@ static int chap_server_compute_md5(
pr_err("Could not find CHAP_C.\n");
goto out;
}
- pr_debug("[server] Got CHAP_C=%s\n", challenge);
- challenge_len = chap_string_to_hex(challenge_binhex, challenge,
- strlen(challenge));
+ challenge_len = DIV_ROUND_UP(strlen(challenge), 2);
if (!challenge_len) {
pr_err("Unable to convert incoming challenge\n");
goto out;
@@ -360,6 +344,11 @@ static int chap_server_compute_md5(
pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
goto out;
}
+ if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) {
+ pr_err("Malformed CHAP_C\n");
+ goto out;
+ }
+ pr_debug("[server] Got CHAP_C=%s\n", challenge);
/*
* During mutual authentication, the CHAP_C generated by the
* initiator must not match the original CHAP_C generated by
@@ -413,7 +402,7 @@ static int chap_server_compute_md5(
/*
* Convert response from binary hex to ascii hex.
*/
- chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
+ bin2hex(response, digest, MD5_SIGNATURE_SIZE);
*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
response);
*nr_out_len += 1;
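
hex2bin() rejects non-hex characters outright, where the removed chap_string_to_hex() logged and carried on with garbage, and bin2hex() replaces the hand-rolled sprintf loop; checking the expected length before converting also throws out short or overlong digests. Sketch with MD5-sized values:

    #include <linux/kernel.h>
    #include <linux/string.h>

    #define DIGEST_SIZE 16	/* MD5_SIGNATURE_SIZE */

    static int parse_digest(u8 *digest, const char *hex)
    {
    	if (strlen(hex) != DIGEST_SIZE * 2)
    		return -EINVAL;		/* wrong length */
    	if (hex2bin(digest, hex, DIGEST_SIZE) < 0)
    		return -EINVAL;		/* non-hex character */
    	return 0;
    }

    static void format_digest(char *out, const u8 *digest)
    {
    	/* out must hold 2 * DIGEST_SIZE + 1 bytes */
    	bin2hex(out, digest, DIGEST_SIZE);
    	out[DIGEST_SIZE * 2] = '\0';
    }
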
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 9e74f8bc2963..bb90c80ff388 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -67,45 +67,10 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
goto out_req_buf;
}
- conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
- if (!conn->conn_ops) {
- pr_err("Unable to allocate memory for"
- " struct iscsi_conn_ops.\n");
- goto out_rsp_buf;
- }
-
- init_waitqueue_head(&conn->queues_wq);
- INIT_LIST_HEAD(&conn->conn_list);
- INIT_LIST_HEAD(&conn->conn_cmd_list);
- INIT_LIST_HEAD(&conn->immed_queue_list);
- INIT_LIST_HEAD(&conn->response_queue_list);
- init_completion(&conn->conn_post_wait_comp);
- init_completion(&conn->conn_wait_comp);
- init_completion(&conn->conn_wait_rcfr_comp);
- init_completion(&conn->conn_waiting_on_uc_comp);
- init_completion(&conn->conn_logout_comp);
- init_completion(&conn->rx_half_close_comp);
- init_completion(&conn->tx_half_close_comp);
- init_completion(&conn->rx_login_comp);
- spin_lock_init(&conn->cmd_lock);
- spin_lock_init(&conn->conn_usage_lock);
- spin_lock_init(&conn->immed_queue_lock);
- spin_lock_init(&conn->nopin_timer_lock);
- spin_lock_init(&conn->response_queue_lock);
- spin_lock_init(&conn->state_lock);
-
- if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
- pr_err("Unable to allocate conn->conn_cpumask\n");
- goto out_conn_ops;
- }
conn->conn_login = login;
return login;
-out_conn_ops:
- kfree(conn->conn_ops);
-out_rsp_buf:
- kfree(login->rsp_buf);
out_req_buf:
kfree(login->req_buf);
out_login:
@@ -310,11 +275,9 @@ static int iscsi_login_zero_tsih_s1(
return -ENOMEM;
}
- ret = iscsi_login_set_conn_values(sess, conn, pdu->cid);
- if (unlikely(ret)) {
- kfree(sess);
- return ret;
- }
+ if (iscsi_login_set_conn_values(sess, conn, pdu->cid))
+ goto free_sess;
+
sess->init_task_tag = pdu->itt;
memcpy(&sess->isid, pdu->isid, 6);
sess->exp_cmd_sn = be32_to_cpu(pdu->cmdsn);
@@ -1149,6 +1112,75 @@ iscsit_conn_set_transport(struct iscsi_conn *conn, struct iscsit_transport *t)
return 0;
}
+static struct iscsi_conn *iscsit_alloc_conn(struct iscsi_np *np)
+{
+ struct iscsi_conn *conn;
+
+ conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
+ if (!conn) {
+ pr_err("Could not allocate memory for new connection\n");
+ return NULL;
+ }
+ pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
+ conn->conn_state = TARG_CONN_STATE_FREE;
+
+ init_waitqueue_head(&conn->queues_wq);
+ INIT_LIST_HEAD(&conn->conn_list);
+ INIT_LIST_HEAD(&conn->conn_cmd_list);
+ INIT_LIST_HEAD(&conn->immed_queue_list);
+ INIT_LIST_HEAD(&conn->response_queue_list);
+ init_completion(&conn->conn_post_wait_comp);
+ init_completion(&conn->conn_wait_comp);
+ init_completion(&conn->conn_wait_rcfr_comp);
+ init_completion(&conn->conn_waiting_on_uc_comp);
+ init_completion(&conn->conn_logout_comp);
+ init_completion(&conn->rx_half_close_comp);
+ init_completion(&conn->tx_half_close_comp);
+ init_completion(&conn->rx_login_comp);
+ spin_lock_init(&conn->cmd_lock);
+ spin_lock_init(&conn->conn_usage_lock);
+ spin_lock_init(&conn->immed_queue_lock);
+ spin_lock_init(&conn->nopin_timer_lock);
+ spin_lock_init(&conn->response_queue_lock);
+ spin_lock_init(&conn->state_lock);
+
+ timer_setup(&conn->nopin_response_timer,
+ iscsit_handle_nopin_response_timeout, 0);
+ timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0);
+
+ if (iscsit_conn_set_transport(conn, np->np_transport) < 0)
+ goto free_conn;
+
+ conn->conn_ops = kzalloc(sizeof(struct iscsi_conn_ops), GFP_KERNEL);
+ if (!conn->conn_ops) {
+ pr_err("Unable to allocate memory for struct iscsi_conn_ops.\n");
+ goto put_transport;
+ }
+
+ if (!zalloc_cpumask_var(&conn->conn_cpumask, GFP_KERNEL)) {
+ pr_err("Unable to allocate conn->conn_cpumask\n");
+ goto free_conn_ops;
+ }
+
+ return conn;
+
+free_conn_ops:
+ kfree(conn->conn_ops);
+put_transport:
+ iscsit_put_transport(conn->conn_transport);
+free_conn:
+ kfree(conn);
+ return NULL;
+}
+
+void iscsit_free_conn(struct iscsi_conn *conn)
+{
+ free_cpumask_var(conn->conn_cpumask);
+ kfree(conn->conn_ops);
+ iscsit_put_transport(conn->conn_transport);
+ kfree(conn);
+}
+
void iscsi_target_login_sess_out(struct iscsi_conn *conn,
struct iscsi_np *np, bool zero_tsih, bool new_sess)
{
@@ -1198,10 +1230,6 @@ old_sess_out:
crypto_free_ahash(tfm);
}
- free_cpumask_var(conn->conn_cpumask);
-
- kfree(conn->conn_ops);
-
if (conn->param_list) {
iscsi_release_param_list(conn->param_list);
conn->param_list = NULL;
@@ -1219,8 +1247,7 @@ old_sess_out:
if (conn->conn_transport->iscsit_free_conn)
conn->conn_transport->iscsit_free_conn(conn);
- iscsit_put_transport(conn->conn_transport);
- kfree(conn);
+ iscsit_free_conn(conn);
}
static int __iscsi_target_login_thread(struct iscsi_np *np)
@@ -1250,31 +1277,16 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
}
spin_unlock_bh(&np->np_thread_lock);
- conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
+ conn = iscsit_alloc_conn(np);
if (!conn) {
- pr_err("Could not allocate memory for"
- " new connection\n");
/* Get another socket */
return 1;
}
- pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
- conn->conn_state = TARG_CONN_STATE_FREE;
-
- timer_setup(&conn->nopin_response_timer,
- iscsit_handle_nopin_response_timeout, 0);
- timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0);
-
- if (iscsit_conn_set_transport(conn, np->np_transport) < 0) {
- kfree(conn);
- return 1;
- }
rc = np->np_transport->iscsit_accept_np(np, conn);
if (rc == -ENOSYS) {
complete(&np->np_restart_comp);
- iscsit_put_transport(conn->conn_transport);
- kfree(conn);
- conn = NULL;
+ iscsit_free_conn(conn);
goto exit;
} else if (rc < 0) {
spin_lock_bh(&np->np_thread_lock);
@@ -1282,17 +1294,13 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
spin_unlock_bh(&np->np_thread_lock);
complete(&np->np_restart_comp);
- iscsit_put_transport(conn->conn_transport);
- kfree(conn);
- conn = NULL;
+ iscsit_free_conn(conn);
/* Get another socket */
return 1;
}
spin_unlock_bh(&np->np_thread_lock);
- iscsit_put_transport(conn->conn_transport);
- kfree(conn);
- conn = NULL;
- goto out;
+ iscsit_free_conn(conn);
+ return 1;
}
/*
* Perform the remaining iSCSI connection initialization items..
@@ -1442,7 +1450,6 @@ old_sess_out:
tpg_np = NULL;
}
-out:
return 1;
exit:
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 74ac3abc44a0..3b8e3639ff5d 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -19,7 +19,7 @@ extern int iscsi_target_setup_login_socket(struct iscsi_np *,
extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
-extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
+extern void iscsit_free_conn(struct iscsi_conn *);
extern int iscsit_start_kthreads(struct iscsi_conn *);
extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index cb0461a10808..f459118bc11b 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -636,9 +636,9 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
/*
* The unmap_zeroes_data set means that the underlying device supports
- * REQ_DISCARD and has the discard_zeroes_data bit set. This satisfies
- * the SBC requirements for LBPRZ, meaning that a subsequent read
- * will return zeroes after an UNMAP or WRITE SAME (16) to an LBA
+ * REQ_OP_DISCARD and has the discard_zeroes_data bit set. This
+ * satisfies the SBC requirements for LBPRZ, meaning that a subsequent
+ * read will return zeroes after an UNMAP or WRITE SAME (16) to an LBA
* See sbc4r36 6.6.4.
*/
if (((dev->dev_attrib.emulate_tpu != 0) ||
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index e1e264a9a4c7..28fc4ce75edb 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -738,14 +738,6 @@ icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
u8 link, depth;
u64 route;
- /*
- * After NVM upgrade adding root switch device fails because we
- * initiated reset. During that time ICM might still send
- * XDomain connected message which we ignore here.
- */
- if (!tb->root_switch)
- return;
-
link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
ICM_LINK_INFO_DEPTH_SHIFT;
@@ -1037,14 +1029,6 @@ icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
if (pkg->hdr.packet_id)
return;
- /*
- * After NVM upgrade adding root switch device fails because we
- * initiated reset. During that time ICM might still send device
- * connected message which we ignore here.
- */
- if (!tb->root_switch)
- return;
-
route = get_route(pkg->route_hi, pkg->route_lo);
authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
@@ -1408,19 +1392,26 @@ static void icm_handle_notification(struct work_struct *work)
mutex_lock(&tb->lock);
- switch (n->pkg->code) {
- case ICM_EVENT_DEVICE_CONNECTED:
- icm->device_connected(tb, n->pkg);
- break;
- case ICM_EVENT_DEVICE_DISCONNECTED:
- icm->device_disconnected(tb, n->pkg);
- break;
- case ICM_EVENT_XDOMAIN_CONNECTED:
- icm->xdomain_connected(tb, n->pkg);
- break;
- case ICM_EVENT_XDOMAIN_DISCONNECTED:
- icm->xdomain_disconnected(tb, n->pkg);
- break;
+ /*
+ * When the domain is stopped we flush its workqueue but before
+ * that the root switch is removed. In that case we should treat
+ * the queued events as being canceled.
+ */
+ if (tb->root_switch) {
+ switch (n->pkg->code) {
+ case ICM_EVENT_DEVICE_CONNECTED:
+ icm->device_connected(tb, n->pkg);
+ break;
+ case ICM_EVENT_DEVICE_DISCONNECTED:
+ icm->device_disconnected(tb, n->pkg);
+ break;
+ case ICM_EVENT_XDOMAIN_CONNECTED:
+ icm->xdomain_connected(tb, n->pkg);
+ break;
+ case ICM_EVENT_XDOMAIN_DISCONNECTED:
+ icm->xdomain_disconnected(tb, n->pkg);
+ break;
+ }
}
mutex_unlock(&tb->lock);
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 88cff05a1808..5cd6bdfa068f 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -1191,5 +1191,5 @@ static void __exit nhi_unload(void)
tb_domain_exit();
}
-fs_initcall(nhi_init);
+rootfs_initcall(nhi_init);
module_exit(nhi_unload);
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 5414c4a87bea..27284a2dcd2b 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -522,6 +522,8 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
return -EIO;
while (count > 0) {
+ int ret = 0;
+
spin_lock_irqsave(&hp->lock, flags);
rsize = hp->outbuf_size - hp->n_outbuf;
@@ -537,10 +539,13 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
}
if (hp->n_outbuf > 0)
- hvc_push(hp);
+ ret = hvc_push(hp);
spin_unlock_irqrestore(&hp->lock, flags);
+ if (!ret)
+ break;
+
if (count) {
if (hp->n_outbuf > 0)
hvc_flush(hp);
@@ -623,6 +628,15 @@ static int hvc_chars_in_buffer(struct tty_struct *tty)
#define MAX_TIMEOUT (2000)
static u32 timeout = MIN_TIMEOUT;
+/*
+ * Maximum number of bytes to get from the console driver if hvc_poll is
+ * called from the driver (and can't sleep). Any more than this and we
+ * break out and start polling with khvcd. This value was derived from an
+ * OpenBMC console with the OPAL driver, which results in about 0.25ms of
+ * interrupts-off latency.
+ */
+#define HVC_ATOMIC_READ_MAX 128
+
#define HVC_POLL_READ 0x00000001
#define HVC_POLL_WRITE 0x00000002
@@ -669,8 +683,8 @@ static int __hvc_poll(struct hvc_struct *hp, bool may_sleep)
if (!hp->irq_requested)
poll_mask |= HVC_POLL_READ;
+ read_again:
/* Read data if any */
-
count = tty_buffer_request_room(&hp->port, N_INBUF);
/* If flip is full, just reschedule a later read */
@@ -717,9 +731,23 @@ static int __hvc_poll(struct hvc_struct *hp, bool may_sleep)
#endif /* CONFIG_MAGIC_SYSRQ */
tty_insert_flip_char(&hp->port, buf[i], 0);
}
- if (n == count)
- poll_mask |= HVC_POLL_READ;
- read_total = n;
+ read_total += n;
+
+ if (may_sleep) {
+ /* Keep going until the flip is full */
+ spin_unlock_irqrestore(&hp->lock, flags);
+ cond_resched();
+ spin_lock_irqsave(&hp->lock, flags);
+ goto read_again;
+ } else if (read_total < HVC_ATOMIC_READ_MAX) {
+ /* Break and defer if it's a large read in atomic */
+ goto read_again;
+ }
+
+ /*
+ * Latency break, schedule another poll immediately.
+ */
+ poll_mask |= HVC_POLL_READ;
out:
/* Wakeup write queue if necessary */
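A minimal sketch of the bounded-batch idea behind HVC_ATOMIC_READ_MAX, as self-contained userspace C; read_chunk() and the constant are hypothetical stand-ins:

#include <stdbool.h>

#define ATOMIC_READ_MAX 128			/* stands in for HVC_ATOMIC_READ_MAX */

static int read_chunk(void) { return 0; }	/* stand-in for the hardware read */

/* Returns true when the caller must schedule another poll. */
static bool drain(bool may_sleep)
{
	int total = 0, n;

	for (;;) {
		n = read_chunk();		/* 0 means the source is empty */
		if (n <= 0)
			return false;
		total += n;
		if (!may_sleep && total >= ATOMIC_READ_MAX)
			return true;		/* cap atomic work, defer the rest */
		/* in sleepable context the driver yields here (cond_resched) */
	}
}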
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index fa8dcb470640..d31b975dd3fd 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -630,10 +630,6 @@ static int dw8250_probe(struct platform_device *pdev)
if (!data->skip_autocfg)
dw8250_setup_port(p);
-#ifdef CONFIG_PM
- uart.capabilities |= UART_CAP_RPM;
-#endif
-
/* If we have a valid fifosize, try hooking up DMA */
if (p->fifosize) {
data->dma.rxconf.src_maxburst = p->fifosize / 4;
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index 24a5f05e769b..e5389591bb4f 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -1054,8 +1054,8 @@ static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo)
/* Get the address of the host memory buffer.
*/
bdp = pinfo->rx_cur;
- while (bdp->cbd_sc & BD_SC_EMPTY)
- ;
+ if (bdp->cbd_sc & BD_SC_EMPTY)
+ return NO_POLL_CHAR;
/* If the buffer address is in the CPM DPRAM, don't
* convert it.
@@ -1090,7 +1090,11 @@ static int cpm_get_poll_char(struct uart_port *port)
poll_chars = 0;
}
if (poll_chars <= 0) {
- poll_chars = poll_wait_key(poll_buf, pinfo);
+ int ret = poll_wait_key(poll_buf, pinfo);
+
+ if (ret == NO_POLL_CHAR)
+ return ret;
+ poll_chars = ret;
pollp = poll_buf;
}
poll_chars--;
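A minimal sketch of the non-blocking poll shape adopted above, in self-contained C; rx_ready(), rx_byte(), and the sentinel value are hypothetical stand-ins:

#define NO_POLL_CHAR 0x00ff00		/* sentinel outside the 0..255 byte range */

static int rx_ready(void) { return 0; }	/* stand-in for the status check */
static int rx_byte(void) { return 0; }	/* stand-in for the data read */

static int poll_get_char(void)
{
	if (!rx_ready())		/* was: while (!rx_ready()) ; */
		return NO_POLL_CHAR;	/* let the polling caller retry */
	return rx_byte();
}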
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 51e47a63d61a..3f8d1274fc85 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -979,7 +979,8 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
struct circ_buf *ring = &sport->rx_ring;
int ret, nent;
int bits, baud;
- struct tty_struct *tty = tty_port_tty_get(&sport->port.state->port);
+ struct tty_port *port = &sport->port.state->port;
+ struct tty_struct *tty = port->tty;
struct ktermios *termios = &tty->termios;
baud = tty_get_baud_rate(tty);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 239c0fa2e981..0f67197a3783 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2351,6 +2351,14 @@ static int imx_uart_probe(struct platform_device *pdev)
ret);
return ret;
}
+
+ ret = devm_request_irq(&pdev->dev, rtsirq, imx_uart_rtsint, 0,
+ dev_name(&pdev->dev), sport);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request rts irq: %d\n",
+ ret);
+ return ret;
+ }
} else {
ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0,
dev_name(&pdev->dev), sport);
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index d04b5eeea3c6..170e446a2f62 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -511,6 +511,7 @@ static void mvebu_uart_set_termios(struct uart_port *port,
termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR);
termios->c_cflag &= CREAD | CBAUD;
termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD);
+ termios->c_cflag |= CS8;
}
spin_unlock_irqrestore(&port->lock, flags);
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 29ec34387246..1515074e18fb 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -868,8 +868,8 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
geni_se_init(&port->se, port->rx_wm, port->rx_rfr);
geni_se_select_mode(&port->se, port->xfer_mode);
if (!uart_console(uport)) {
- port->rx_fifo = devm_kzalloc(uport->dev,
- port->rx_fifo_depth * sizeof(u32), GFP_KERNEL);
+ port->rx_fifo = devm_kcalloc(uport->dev,
+ port->rx_fifo_depth, sizeof(u32), GFP_KERNEL);
if (!port->rx_fifo)
return -ENOMEM;
}
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index ac4424bf6b13..ab3f6e91853d 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -292,6 +292,33 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
},
/*
+ * The "SCIFA" that is in RZ/T and RZ/A2.
+ * It looks like a normal SCIF with FIFO data, but with a
+ * compressed address space. Also, the break out of interrupts
+ * are different: ERI/BRI, RXI, TXI, TEI, DRI.
+ */
+ [SCIx_RZ_SCIFA_REGTYPE] = {
+ .regs = {
+ [SCSMR] = { 0x00, 16 },
+ [SCBRR] = { 0x02, 8 },
+ [SCSCR] = { 0x04, 16 },
+ [SCxTDR] = { 0x06, 8 },
+ [SCxSR] = { 0x08, 16 },
+ [SCxRDR] = { 0x0A, 8 },
+ [SCFCR] = { 0x0C, 16 },
+ [SCFDR] = { 0x0E, 16 },
+ [SCSPTR] = { 0x10, 16 },
+ [SCLSR] = { 0x12, 16 },
+ },
+ .fifosize = 16,
+ .overrun_reg = SCLSR,
+ .overrun_mask = SCLSR_ORER,
+ .sampling_rate_mask = SCI_SR(32),
+ .error_mask = SCIF_DEFAULT_ERROR_MASK,
+ .error_clear = SCIF_ERROR_CLEAR,
+ },
+
+ /*
* Common SH-3 SCIF definitions.
*/
[SCIx_SH3_SCIF_REGTYPE] = {
@@ -319,15 +346,15 @@ static const struct sci_port_params sci_port_params[SCIx_NR_REGTYPES] = {
[SCIx_SH4_SCIF_REGTYPE] = {
.regs = {
[SCSMR] = { 0x00, 16 },
- [SCBRR] = { 0x02, 8 },
- [SCSCR] = { 0x04, 16 },
- [SCxTDR] = { 0x06, 8 },
- [SCxSR] = { 0x08, 16 },
- [SCxRDR] = { 0x0a, 8 },
- [SCFCR] = { 0x0c, 16 },
- [SCFDR] = { 0x0e, 16 },
- [SCSPTR] = { 0x10, 16 },
- [SCLSR] = { 0x12, 16 },
+ [SCBRR] = { 0x04, 8 },
+ [SCSCR] = { 0x08, 16 },
+ [SCxTDR] = { 0x0c, 8 },
+ [SCxSR] = { 0x10, 16 },
+ [SCxRDR] = { 0x14, 8 },
+ [SCFCR] = { 0x18, 16 },
+ [SCFDR] = { 0x1c, 16 },
+ [SCSPTR] = { 0x20, 16 },
+ [SCLSR] = { 0x24, 16 },
},
.fifosize = 16,
.overrun_reg = SCLSR,
@@ -2810,7 +2837,7 @@ static int sci_init_single(struct platform_device *dev,
{
struct uart_port *port = &sci_port->port;
const struct resource *res;
- unsigned int i, regtype;
+ unsigned int i;
int ret;
sci_port->cfg = p;
@@ -2847,7 +2874,6 @@ static int sci_init_single(struct platform_device *dev,
if (unlikely(sci_port->params == NULL))
return -EINVAL;
- regtype = sci_port->params - sci_port_params;
switch (p->type) {
case PORT_SCIFB:
sci_port->rx_trigger = 48;
@@ -2902,10 +2928,6 @@ static int sci_init_single(struct platform_device *dev,
port->regshift = 1;
}
- if (regtype == SCIx_SH4_SCIF_REGTYPE)
- if (sci_port->reg_size >= 0x20)
- port->regshift = 1;
-
/*
* The UART port needs an IRQ value, so we peg this to the RX IRQ
* for the multi-IRQ ports, which is where we are primarily
@@ -3110,6 +3132,10 @@ static const struct of_device_id of_sci_match[] = {
.compatible = "renesas,scif-r7s72100",
.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH2_SCIF_FIFODATA_REGTYPE),
},
+ {
+ .compatible = "renesas,scif-r7s9210",
+ .data = SCI_OF_DATA(PORT_SCIF, SCIx_RZ_SCIFA_REGTYPE),
+ },
/* Family-specific types */
{
.compatible = "renesas,rcar-gen1-scif",
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 32bc3e3fe4d3..5e5da9acaf0a 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1255,6 +1255,7 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
static int tty_reopen(struct tty_struct *tty)
{
struct tty_driver *driver = tty->driver;
+ int retval;
if (driver->type == TTY_DRIVER_TYPE_PTY &&
driver->subtype == PTY_TYPE_MASTER)
@@ -1268,10 +1269,14 @@ static int tty_reopen(struct tty_struct *tty)
tty->count++;
- if (!tty->ldisc)
- return tty_ldisc_reinit(tty, tty->termios.c_line);
+ if (tty->ldisc)
+ return 0;
- return 0;
+ retval = tty_ldisc_reinit(tty, tty->termios.c_line);
+ if (retval)
+ tty->count--;
+
+ return retval;
}
/**
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index a78ad10a119b..73cdc0d633dd 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -32,6 +32,8 @@
#include <asm/io.h>
#include <linux/uaccess.h>
+#include <linux/nospec.h>
+
#include <linux/kbd_kern.h>
#include <linux/vt_kern.h>
#include <linux/kbd_diacr.h>
@@ -700,6 +702,8 @@ int vt_ioctl(struct tty_struct *tty,
if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
ret = -ENXIO;
else {
+ vsa.console = array_index_nospec(vsa.console,
+ MAX_NR_CONSOLES + 1);
vsa.console--;
console_lock();
ret = vc_allocate(vsa.console);
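A minimal sketch of the Spectre-v1 clamping pattern applied above; lookup() and table are hypothetical, while array_index_nospec() is the real helper from <linux/nospec.h>:

#include <linux/errno.h>
#include <linux/nospec.h>

static int lookup(unsigned int idx, const int *table, unsigned int n)
{
	if (idx >= n)
		return -EINVAL;
	/* clamp so a mispredicted branch cannot index out of range */
	idx = array_index_nospec(idx, n);
	return table[idx];
}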
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 27346d69f393..9ede35cecb12 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -310,17 +310,17 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
if (difference & ACM_CTRL_DSR)
acm->iocount.dsr++;
- if (difference & ACM_CTRL_BRK)
- acm->iocount.brk++;
- if (difference & ACM_CTRL_RI)
- acm->iocount.rng++;
if (difference & ACM_CTRL_DCD)
acm->iocount.dcd++;
- if (difference & ACM_CTRL_FRAMING)
+ if (newctrl & ACM_CTRL_BRK)
+ acm->iocount.brk++;
+ if (newctrl & ACM_CTRL_RI)
+ acm->iocount.rng++;
+ if (newctrl & ACM_CTRL_FRAMING)
acm->iocount.frame++;
- if (difference & ACM_CTRL_PARITY)
+ if (newctrl & ACM_CTRL_PARITY)
acm->iocount.parity++;
- if (difference & ACM_CTRL_OVERRUN)
+ if (newctrl & ACM_CTRL_OVERRUN)
acm->iocount.overrun++;
spin_unlock_irqrestore(&acm->read_lock, flags);
@@ -355,7 +355,6 @@ static void acm_ctrl_irq(struct urb *urb)
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
- acm->nb_index = 0;
dev_dbg(&acm->control->dev,
"%s - urb shutting down with status: %d\n",
__func__, status);
@@ -780,20 +779,9 @@ static int acm_tty_write(struct tty_struct *tty,
}
if (acm->susp_count) {
- if (acm->putbuffer) {
- /* now to preserve order */
- usb_anchor_urb(acm->putbuffer->urb, &acm->delayed);
- acm->putbuffer = NULL;
- }
usb_anchor_urb(wb->urb, &acm->delayed);
spin_unlock_irqrestore(&acm->write_lock, flags);
return count;
- } else {
- if (acm->putbuffer) {
- /* at this point there is no good way to handle errors */
- acm_start_wb(acm, acm->putbuffer);
- acm->putbuffer = NULL;
- }
}
stat = acm_start_wb(acm, wb);
@@ -804,66 +792,6 @@ static int acm_tty_write(struct tty_struct *tty,
return count;
}
-static void acm_tty_flush_chars(struct tty_struct *tty)
-{
- struct acm *acm = tty->driver_data;
- struct acm_wb *cur;
- int err;
- unsigned long flags;
-
- spin_lock_irqsave(&acm->write_lock, flags);
-
- cur = acm->putbuffer;
- if (!cur) /* nothing to do */
- goto out;
-
- acm->putbuffer = NULL;
- err = usb_autopm_get_interface_async(acm->control);
- if (err < 0) {
- cur->use = 0;
- acm->putbuffer = cur;
- goto out;
- }
-
- if (acm->susp_count)
- usb_anchor_urb(cur->urb, &acm->delayed);
- else
- acm_start_wb(acm, cur);
-out:
- spin_unlock_irqrestore(&acm->write_lock, flags);
- return;
-}
-
-static int acm_tty_put_char(struct tty_struct *tty, unsigned char ch)
-{
- struct acm *acm = tty->driver_data;
- struct acm_wb *cur;
- int wbn;
- unsigned long flags;
-
-overflow:
- cur = acm->putbuffer;
- if (!cur) {
- spin_lock_irqsave(&acm->write_lock, flags);
- wbn = acm_wb_alloc(acm);
- if (wbn >= 0) {
- cur = &acm->wb[wbn];
- acm->putbuffer = cur;
- }
- spin_unlock_irqrestore(&acm->write_lock, flags);
- if (!cur)
- return 0;
- }
-
- if (cur->len == acm->writesize) {
- acm_tty_flush_chars(tty);
- goto overflow;
- }
-
- cur->buf[cur->len++] = ch;
- return 1;
-}
-
static int acm_tty_write_room(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
@@ -1585,6 +1513,7 @@ static void acm_disconnect(struct usb_interface *intf)
{
struct acm *acm = usb_get_intfdata(intf);
struct tty_struct *tty;
+ int i;
/* sibling interface is already cleaning up */
if (!acm)
@@ -1615,6 +1544,11 @@ static void acm_disconnect(struct usb_interface *intf)
tty_unregister_device(acm_tty_driver, acm->minor);
+ usb_free_urb(acm->ctrlurb);
+ for (i = 0; i < ACM_NW; i++)
+ usb_free_urb(acm->wb[i].urb);
+ for (i = 0; i < acm->rx_buflimit; i++)
+ usb_free_urb(acm->read_urbs[i]);
acm_write_buffers_free(acm);
usb_free_coherent(acm->dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
acm_read_buffers_free(acm);
@@ -1707,6 +1641,7 @@ static int acm_pre_reset(struct usb_interface *intf)
struct acm *acm = usb_get_intfdata(intf);
clear_bit(EVENT_RX_STALL, &acm->flags);
+ acm->nb_index = 0; /* pending control transfers are lost */
return 0;
}
@@ -1987,8 +1922,6 @@ static const struct tty_operations acm_ops = {
.cleanup = acm_tty_cleanup,
.hangup = acm_tty_hangup,
.write = acm_tty_write,
- .put_char = acm_tty_put_char,
- .flush_chars = acm_tty_flush_chars,
.write_room = acm_tty_write_room,
.ioctl = acm_tty_ioctl,
.throttle = acm_tty_throttle,
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index eacc116e83da..ca06b20d7af9 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -96,7 +96,6 @@ struct acm {
unsigned long read_urbs_free;
struct urb *read_urbs[ACM_NR];
struct acm_rb read_buffers[ACM_NR];
- struct acm_wb *putbuffer; /* for acm_tty_put_char() */
int rx_buflimit;
spinlock_t read_lock;
u8 *notification_buffer; /* to reassemble fragmented notifications */
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 50a2362ed3ea..48277bbc15e4 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -246,6 +246,31 @@ int of_usb_update_otg_caps(struct device_node *np,
}
EXPORT_SYMBOL_GPL(of_usb_update_otg_caps);
+/**
+ * usb_of_get_companion_dev - Find the companion device
+ * @dev: the device pointer to find a companion
+ *
+ * Find the companion device from platform bus.
+ *
+ * Takes a reference to the returned struct device which needs to be dropped
+ * after use.
+ *
+ * Return: On success, a pointer to the companion device, %NULL on failure.
+ */
+struct device *usb_of_get_companion_dev(struct device *dev)
+{
+ struct device_node *node;
+ struct platform_device *pdev = NULL;
+
+ node = of_parse_phandle(dev->of_node, "companion", 0);
+ if (node)
+ pdev = of_find_device_by_node(node);
+
+ of_node_put(node);
+
+ return pdev ? &pdev->dev : NULL;
+}
+EXPORT_SYMBOL_GPL(usb_of_get_companion_dev);
#endif
MODULE_LICENSE("GPL");
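A minimal sketch of the caller's side of the reference contract documented above; use_companion() is hypothetical:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/usb/of.h>

static int use_companion(struct device *dev)
{
	struct device *companion = usb_of_get_companion_dev(dev);

	if (!companion)
		return -ENODEV;
	/* ... use the companion ... */
	put_device(companion);	/* drop the reference taken by the lookup */
	return 0;
}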
diff --git a/drivers/usb/common/roles.c b/drivers/usb/common/roles.c
index 15cc76e22123..99116af07f1d 100644
--- a/drivers/usb/common/roles.c
+++ b/drivers/usb/common/roles.c
@@ -109,8 +109,15 @@ static void *usb_role_switch_match(struct device_connection *con, int ep,
*/
struct usb_role_switch *usb_role_switch_get(struct device *dev)
{
- return device_connection_find_match(dev, "usb-role-switch", NULL,
- usb_role_switch_match);
+ struct usb_role_switch *sw;
+
+ sw = device_connection_find_match(dev, "usb-role-switch", NULL,
+ usb_role_switch_match);
+
+ if (!IS_ERR_OR_NULL(sw))
+ WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+
+ return sw;
}
EXPORT_SYMBOL_GPL(usb_role_switch_get);
@@ -122,8 +129,10 @@ EXPORT_SYMBOL_GPL(usb_role_switch_get);
*/
void usb_role_switch_put(struct usb_role_switch *sw)
{
- if (!IS_ERR_OR_NULL(sw))
+ if (!IS_ERR_OR_NULL(sw)) {
put_device(&sw->dev);
+ module_put(sw->dev.parent->driver->owner);
+ }
}
EXPORT_SYMBOL_GPL(usb_role_switch_put);
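A minimal sketch of the module-pinning pattern added above; the provider structure and lookup are hypothetical, while try_module_get()/module_put() are the real helpers:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>

struct provider {
	struct module *owner;
	struct device *dev;
};

struct provider *provider_lookup(struct device *dev);	/* hypothetical */

static struct provider *provider_get(struct device *dev)
{
	struct provider *p = provider_lookup(dev);

	/* pin the providing module so it cannot be unloaded while held */
	if (!IS_ERR_OR_NULL(p))
		WARN_ON(!try_module_get(p->owner));
	return p;
}

static void provider_put(struct provider *p)
{
	if (!IS_ERR_OR_NULL(p)) {
		module_put(p->owner);
		put_device(p->dev);
	}
}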
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 6ce77b33da61..ffccd40ea67d 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1434,10 +1434,13 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
struct async *as = NULL;
struct usb_ctrlrequest *dr = NULL;
unsigned int u, totlen, isofrmlen;
- int i, ret, is_in, num_sgs = 0, ifnum = -1;
+ int i, ret, num_sgs = 0, ifnum = -1;
int number_of_packets = 0;
unsigned int stream_id = 0;
void *buf;
+ bool is_in;
+ bool allow_short = false;
+ bool allow_zero = false;
unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK |
USBDEVFS_URB_BULK_CONTINUATION |
USBDEVFS_URB_NO_FSBR |
@@ -1500,6 +1503,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
is_in = 0;
uurb->endpoint &= ~USB_DIR_IN;
}
+ if (is_in)
+ allow_short = true;
snoop(&ps->dev->dev, "control urb: bRequestType=%02x "
"bRequest=%02x wValue=%04x "
"wIndex=%04x wLength=%04x\n",
@@ -1511,6 +1516,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
break;
case USBDEVFS_URB_TYPE_BULK:
+ if (!is_in)
+ allow_zero = true;
+ else
+ allow_short = true;
switch (usb_endpoint_type(&ep->desc)) {
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_ISOC:
@@ -1531,6 +1540,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
if (!usb_endpoint_xfer_int(&ep->desc))
return -EINVAL;
interrupt_urb:
+ if (!is_in)
+ allow_zero = true;
+ else
+ allow_short = true;
break;
case USBDEVFS_URB_TYPE_ISO:
@@ -1676,14 +1689,19 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
u = (is_in ? URB_DIR_IN : URB_DIR_OUT);
if (uurb->flags & USBDEVFS_URB_ISO_ASAP)
u |= URB_ISO_ASAP;
- if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in)
+ if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
u |= URB_SHORT_NOT_OK;
- if (uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+ if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
u |= URB_ZERO_PACKET;
if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT)
u |= URB_NO_INTERRUPT;
as->urb->transfer_flags = u;
+ if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
+ dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n");
+ if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+ dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n");
+
as->urb->transfer_buffer_length = uurb->buffer_length;
as->urb->setup_packet = (unsigned char *)dr;
dr = NULL;
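A minimal sketch of the allow_short/allow_zero sanity-checking idea, as self-contained userspace C with hypothetical flag values:

#include <stdio.h>

#define F_SHORT_NOT_OK	0x01	/* hypothetical stand-ins for the URB flags */
#define F_ZERO_PACKET	0x02

static unsigned int sanitize_flags(unsigned int req, int is_in, int is_out_bulk)
{
	unsigned int allowed = 0;

	if (is_in)
		allowed |= F_SHORT_NOT_OK;	/* only IN transfers can be short */
	if (is_out_bulk)
		allowed |= F_ZERO_PACKET;	/* only OUT transfers can pad with a ZLP */

	/* warn about, rather than fail on, nonsensical requests */
	if (req & ~allowed)
		fprintf(stderr, "ignoring nonsensical transfer flags\n");
	return req & allowed;
}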
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index e76e95f62f76..a1f225f077cd 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -512,7 +512,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
struct device *dev;
struct usb_device *udev;
int retval = 0;
- int lpm_disable_error = -ENODEV;
if (!iface)
return -ENODEV;
@@ -533,16 +532,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
iface->condition = USB_INTERFACE_BOUND;
- /* See the comment about disabling LPM in usb_probe_interface(). */
- if (driver->disable_hub_initiated_lpm) {
- lpm_disable_error = usb_unlocked_disable_lpm(udev);
- if (lpm_disable_error) {
- dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n",
- __func__, driver->name);
- return -ENOMEM;
- }
- }
-
/* Claimed interfaces are initially inactive (suspended) and
* runtime-PM-enabled, but only if the driver has autosuspend
* support. Otherwise they are marked active, to prevent the
@@ -561,9 +550,20 @@ int usb_driver_claim_interface(struct usb_driver *driver,
if (device_is_registered(dev))
retval = device_bind_driver(dev);
- /* Attempt to re-enable USB3 LPM, if the disable was successful. */
- if (!lpm_disable_error)
- usb_unlocked_enable_lpm(udev);
+ if (retval) {
+ dev->driver = NULL;
+ usb_set_intfdata(iface, NULL);
+ iface->needs_remote_wakeup = 0;
+ iface->condition = USB_INTERFACE_UNBOUND;
+
+ /*
+ * Unbound interfaces are always runtime-PM-disabled
+ * and runtime-PM-suspended
+ */
+ if (driver->supports_autosuspend)
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ }
return retval;
}
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 66fe1b78d952..03432467b05f 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -515,8 +515,6 @@ static int resume_common(struct device *dev, int event)
event == PM_EVENT_RESTORE);
if (retval) {
dev_err(dev, "PCI post-resume error %d!\n", retval);
- if (hcd->shared_hcd)
- usb_hc_died(hcd->shared_hcd);
usb_hc_died(hcd);
}
}
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 228672f2c4a1..bfa5eda0cc26 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1341,6 +1341,11 @@ void usb_enable_interface(struct usb_device *dev,
* is submitted that needs that bandwidth. Some other operating systems
* allocate bandwidth early, when a configuration is chosen.
*
+ * xHCI reserves bandwidth and configures the alternate setting in
+ * usb_hcd_alloc_bandwidth(). If it fails the original interface altsetting
+ * may be disabled. Drivers cannot rely on any particular alternate
+ * setting being in effect after a failure.
+ *
* This call is synchronous, and may not be used in an interrupt context.
* Also, drivers must not change altsettings while urbs are scheduled for
* endpoints in that interface; all such urbs must first be completed
@@ -1376,6 +1381,12 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
alternate);
return -EINVAL;
}
+ /*
+ * usb3 hosts configure the interface in usb_hcd_alloc_bandwidth,
+ * including freeing dropped endpoint ring buffers.
+ * Make sure the interface endpoints are flushed before that happens.
+ */
+ usb_disable_interface(dev, iface, false);
/* Make sure we have enough bandwidth for this alternate interface.
* Remove the current alt setting and add the new alt setting.
diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c
index fd77442c2d12..651708d8c908 100644
--- a/drivers/usb/core/of.c
+++ b/drivers/usb/core/of.c
@@ -105,29 +105,3 @@ usb_of_get_interface_node(struct usb_device *udev, u8 config, u8 ifnum)
return NULL;
}
EXPORT_SYMBOL_GPL(usb_of_get_interface_node);
-
-/**
- * usb_of_get_companion_dev - Find the companion device
- * @dev: the device pointer to find a companion
- *
- * Find the companion device from platform bus.
- *
- * Takes a reference to the returned struct device which needs to be dropped
- * after use.
- *
- * Return: On success, a pointer to the companion device, %NULL on failure.
- */
-struct device *usb_of_get_companion_dev(struct device *dev)
-{
- struct device_node *node;
- struct platform_device *pdev = NULL;
-
- node = of_parse_phandle(dev->of_node, "companion", 0);
- if (node)
- pdev = of_find_device_by_node(node);
-
- of_node_put(node);
-
- return pdev ? &pdev->dev : NULL;
-}
-EXPORT_SYMBOL_GPL(usb_of_get_companion_dev);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 097057d2eacf..178d6c6063c0 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -58,6 +58,7 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp)
quirk_list = kcalloc(quirk_count, sizeof(struct quirk_entry),
GFP_KERNEL);
if (!quirk_list) {
+ quirk_count = 0;
mutex_unlock(&quirk_mutex);
return -ENOMEM;
}
@@ -154,7 +155,7 @@ static struct kparam_string quirks_param_string = {
.string = quirks_param,
};
-module_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644);
+device_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644);
MODULE_PARM_DESC(quirks, "Add/modify USB quirks by specifying quirks=vendorID:productID:quirks");
/* Lists of quirky USB devices, split in device quirks and interface quirks.
@@ -178,6 +179,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* CBM - Flash disk */
{ USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* WORLDE Controller KS49 or Prodipe MIDI 49C USB controller */
+ { USB_DEVICE(0x0218, 0x0201), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* WORLDE easy key (easykey.25) MIDI controller */
{ USB_DEVICE(0x0218, 0x0401), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
@@ -406,6 +411,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x2040, 0x7200), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
+ /* DJI CineSSD */
+ { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 623be3174fb3..79d8bd7a612e 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -228,6 +228,8 @@ struct usb_host_interface *usb_find_alt_setting(
struct usb_interface_cache *intf_cache = NULL;
int i;
+ if (!config)
+ return NULL;
for (i = 0; i < config->desc.bNumInterfaces; i++) {
if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber
== iface_num) {
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 9a53a58e676e..577642895b57 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -412,8 +412,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
dev_dbg(&dev->dev, "mapped PA %08lx to VA %p\n",
(unsigned long)res->start, hsotg->regs);
- hsotg->needs_byte_swap = dwc2_check_core_endianness(hsotg);
-
retval = dwc2_lowlevel_hw_init(hsotg);
if (retval)
return retval;
@@ -438,6 +436,8 @@ static int dwc2_driver_probe(struct platform_device *dev)
if (retval)
return retval;
+ hsotg->needs_byte_swap = dwc2_check_core_endianness(hsotg);
+
retval = dwc2_get_dr_mode(hsotg);
if (retval)
goto error;
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index 40bf9e0bbc59..4c2771c5e727 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -180,8 +180,7 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int dwc3_of_simple_runtime_suspend(struct device *dev)
+static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev)
{
struct dwc3_of_simple *simple = dev_get_drvdata(dev);
int i;
@@ -192,7 +191,7 @@ static int dwc3_of_simple_runtime_suspend(struct device *dev)
return 0;
}
-static int dwc3_of_simple_runtime_resume(struct device *dev)
+static int __maybe_unused dwc3_of_simple_runtime_resume(struct device *dev)
{
struct dwc3_of_simple *simple = dev_get_drvdata(dev);
int ret;
@@ -210,7 +209,7 @@ static int dwc3_of_simple_runtime_resume(struct device *dev)
return 0;
}
-static int dwc3_of_simple_suspend(struct device *dev)
+static int __maybe_unused dwc3_of_simple_suspend(struct device *dev)
{
struct dwc3_of_simple *simple = dev_get_drvdata(dev);
@@ -220,7 +219,7 @@ static int dwc3_of_simple_suspend(struct device *dev)
return 0;
}
-static int dwc3_of_simple_resume(struct device *dev)
+static int __maybe_unused dwc3_of_simple_resume(struct device *dev)
{
struct dwc3_of_simple *simple = dev_get_drvdata(dev);
@@ -229,7 +228,6 @@ static int dwc3_of_simple_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops dwc3_of_simple_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(dwc3_of_simple_suspend, dwc3_of_simple_resume)
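A minimal sketch of the #ifdef-free PM style the hunks above convert to; the foo_* names are hypothetical, while __maybe_unused and SET_SYSTEM_SLEEP_PM_OPS are the real kernel facilities:

#include <linux/pm.h>

/* compiles away cleanly when CONFIG_PM is off, with no #ifdef block */
static int __maybe_unused foo_suspend(struct device *dev)
{
	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
};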
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 5edd79470368..1286076a8890 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -85,8 +85,8 @@ static int dwc3_byt_enable_ulpi_refclock(struct pci_dev *pci)
u32 value;
reg = pcim_iomap(pci, GP_RWBAR, 0);
- if (IS_ERR(reg))
- return PTR_ERR(reg);
+ if (!reg)
+ return -ENOMEM;
value = readl(reg + GP_RWREG1);
if (!(value & GP_RWREG1_ULPI_REFCLK_DISABLE))
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 032ea7d709ba..2b53194081ba 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -473,7 +473,6 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
/**
* dwc3_gadget_start_config - configure ep resources
- * @dwc: pointer to our controller context structure
* @dep: endpoint that is being enabled
*
* Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index ca8a4b53c59f..1074cb82ec17 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -221,6 +221,8 @@
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>
+#include <linux/nospec.h>
+
#include "configfs.h"
@@ -3152,6 +3154,7 @@ static struct config_group *fsg_lun_make(struct config_group *group,
fsg_opts = to_fsg_opts(&group->cg_item);
if (num >= FSG_MAX_LUNS)
return ERR_PTR(-ERANGE);
+ num = array_index_nospec(num, FSG_MAX_LUNS);
mutex_lock(&fsg_opts->lock);
if (fsg_opts->refcnt || fsg_opts->common->luns[num]) {
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index 53a48f561458..587c5037ff07 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -1063,12 +1063,15 @@ static const struct usb_gadget_ops fotg210_gadget_ops = {
static int fotg210_udc_remove(struct platform_device *pdev)
{
struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
+ int i;
usb_del_gadget_udc(&fotg210->gadget);
iounmap(fotg210->reg);
free_irq(platform_get_irq(pdev, 0), fotg210);
fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
+ for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+ kfree(fotg210->ep[i]);
kfree(fotg210);
return 0;
@@ -1099,7 +1102,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
/* initialize udc */
fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
if (fotg210 == NULL)
- goto err_alloc;
+ goto err;
for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
_ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
@@ -1111,7 +1114,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
fotg210->reg = ioremap(res->start, resource_size(res));
if (fotg210->reg == NULL) {
pr_err("ioremap error.\n");
- goto err_map;
+ goto err_alloc;
}
spin_lock_init(&fotg210->lock);
@@ -1159,7 +1162,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
GFP_KERNEL);
if (fotg210->ep0_req == NULL)
- goto err_req;
+ goto err_map;
fotg210_init(fotg210);
@@ -1187,12 +1190,14 @@ err_req:
fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
err_map:
- if (fotg210->reg)
- iounmap(fotg210->reg);
+ iounmap(fotg210->reg);
err_alloc:
+ for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+ kfree(fotg210->ep[i]);
kfree(fotg210);
+err:
return ret;
}
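A minimal sketch of the goto unwind ladder the error paths above are rearranged into, as self-contained userspace C:

#include <stdlib.h>

static int setup(void **pa, void **pb, void **pc)
{
	void *a, *b, *c;

	a = malloc(16);
	if (!a)
		goto err;
	b = malloc(16);
	if (!b)
		goto err_free_a;
	c = malloc(16);
	if (!c)
		goto err_free_b;

	*pa = a;
	*pb = b;
	*pc = c;
	return 0;

	/* each label frees exactly what was set up before the failure */
err_free_b:
	free(b);
err_free_a:
	free(a);
err:
	return -1;
}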
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 318246d8b2e2..b02ab2a8d927 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -1545,11 +1545,14 @@ static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
} else {
writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
- stop_activity(dev, dev->driver);
+ stop_activity(dev, NULL);
}
spin_unlock_irqrestore(&dev->lock, flags);
+ if (!is_on && dev->driver)
+ dev->driver->disconnect(&dev->gadget);
+
return 0;
}
@@ -2466,8 +2469,11 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
nuke(&dev->ep[i]);
/* report disconnect; the driver is already quiesced */
- if (driver)
+ if (driver) {
+ spin_unlock(&dev->lock);
driver->disconnect(&dev->gadget);
+ spin_lock(&dev->lock);
+ }
usb_reinit(dev);
}
@@ -3341,6 +3347,8 @@ next_endpoints:
BIT(PCI_RETRY_ABORT_INTERRUPT))
static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
+__releases(dev->lock)
+__acquires(dev->lock)
{
struct net2280_ep *ep;
u32 tmp, num, mask, scratch;
@@ -3381,12 +3389,14 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
if (disconnect || reset) {
stop_activity(dev, dev->driver);
ep0_start(dev);
+ spin_unlock(&dev->lock);
if (reset)
usb_gadget_udc_reset
(&dev->gadget, dev->driver);
else
(dev->driver->disconnect)
(&dev->gadget);
+ spin_lock(&dev->lock);
return;
}
}
@@ -3405,6 +3415,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
if (stat & tmp) {
writel(tmp, &dev->regs->irqstat1);
+ spin_unlock(&dev->lock);
if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
if (dev->driver->suspend)
dev->driver->suspend(&dev->gadget);
@@ -3415,6 +3426,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
dev->driver->resume(&dev->gadget);
/* at high speed, note erratum 0133 */
}
+ spin_lock(&dev->lock);
stat &= ~tmp;
}
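A minimal sketch of the unlock-around-callback pattern the hunks above introduce; the context structure and ops are hypothetical, while the sparse annotations are the real ones:

#include <linux/spinlock.h>

struct dev_ops {
	void (*disconnect)(void *ctx);	/* may sleep or take its own locks */
};

struct dev_ctx {
	spinlock_t lock;
	const struct dev_ops *ops;
};

static void notify_disconnect(struct dev_ctx *ctx)
__releases(ctx->lock)
__acquires(ctx->lock)
{
	spin_unlock(&ctx->lock);
	ctx->ops->disconnect(ctx);	/* runs without the lock held */
	spin_lock(&ctx->lock);
}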
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 1f879b3f2c96..e1656f361e08 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -812,12 +812,15 @@ static void usb3_irq_epc_int_1_speed(struct renesas_usb3 *usb3)
switch (speed) {
case USB_STA_SPEED_SS:
usb3->gadget.speed = USB_SPEED_SUPER;
+ usb3->gadget.ep0->maxpacket = USB3_EP0_SS_MAX_PACKET_SIZE;
break;
case USB_STA_SPEED_HS:
usb3->gadget.speed = USB_SPEED_HIGH;
+ usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE;
break;
case USB_STA_SPEED_FS:
usb3->gadget.speed = USB_SPEED_FULL;
+ usb3->gadget.ep0->maxpacket = USB3_EP0_HSFS_MAX_PACKET_SIZE;
break;
default:
usb3->gadget.speed = USB_SPEED_UNKNOWN;
@@ -2513,7 +2516,7 @@ static int renesas_usb3_init_ep(struct renesas_usb3 *usb3, struct device *dev,
/* for control pipe */
usb3->gadget.ep0 = &usb3_ep->ep;
usb_ep_set_maxpacket_limit(&usb3_ep->ep,
- USB3_EP0_HSFS_MAX_PACKET_SIZE);
+ USB3_EP0_SS_MAX_PACKET_SIZE);
usb3_ep->ep.caps.type_control = true;
usb3_ep->ep.caps.dir_in = true;
usb3_ep->ep.caps.dir_out = true;
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 072bd5d5738e..5b8a3d9530c4 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -2555,7 +2555,7 @@ static int u132_get_frame(struct usb_hcd *hcd)
} else {
int frame = 0;
dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n");
- msleep(100);
+ mdelay(100);
return frame;
}
}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index ef350c33dc4a..b1f27aa38b10 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1613,6 +1613,10 @@ void xhci_endpoint_copy(struct xhci_hcd *xhci,
in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
in_ep_ctx->deq = out_ep_ctx->deq;
in_ep_ctx->tx_info = out_ep_ctx->tx_info;
+ if (xhci->quirks & XHCI_MTK_HOST) {
+ in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0];
+ in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1];
+ }
}
/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 7334da9e9779..71d0d33c3286 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -642,10 +642,10 @@ static int __maybe_unused xhci_mtk_resume(struct device *dev)
xhci_mtk_host_enable(mtk);
xhci_dbg(xhci, "%s: restart port polling\n", __func__);
- set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
- usb_hcd_poll_rh_status(hcd);
set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
usb_hcd_poll_rh_status(xhci->shared_hcd);
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ usb_hcd_poll_rh_status(hcd);
return 0;
}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 6372edf339d9..51dd8e00c4f8 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -179,12 +179,16 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_PME_STUCK_QUIRK;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
- pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
+ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)
xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
xhci->quirks |= XHCI_INTEL_USB_ROLE_SW;
- }
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
xhci->quirks |= XHCI_MISSING_CAS;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 8dc77e34a859..94e939249b2b 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -153,7 +153,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
{
const struct xhci_plat_priv *priv_match;
const struct hc_driver *driver;
- struct device *sysdev;
+ struct device *sysdev, *tmpdev;
struct xhci_hcd *xhci;
struct resource *res;
struct usb_hcd *hcd;
@@ -273,19 +273,24 @@ static int xhci_plat_probe(struct platform_device *pdev)
goto disable_clk;
}
- if (device_property_read_bool(sysdev, "usb2-lpm-disable"))
- xhci->quirks |= XHCI_HW_LPM_DISABLE;
+ /* imod_interval is the interrupt moderation value in nanoseconds. */
+ xhci->imod_interval = 40000;
- if (device_property_read_bool(sysdev, "usb3-lpm-capable"))
- xhci->quirks |= XHCI_LPM_SUPPORT;
+ /* Iterate over all parent nodes to find quirks */
+ for (tmpdev = &pdev->dev; tmpdev; tmpdev = tmpdev->parent) {
- if (device_property_read_bool(&pdev->dev, "quirk-broken-port-ped"))
- xhci->quirks |= XHCI_BROKEN_PORT_PED;
+ if (device_property_read_bool(tmpdev, "usb2-lpm-disable"))
+ xhci->quirks |= XHCI_HW_LPM_DISABLE;
- /* imod_interval is the interrupt moderation value in nanoseconds. */
- xhci->imod_interval = 40000;
- device_property_read_u32(sysdev, "imod-interval-ns",
- &xhci->imod_interval);
+ if (device_property_read_bool(tmpdev, "usb3-lpm-capable"))
+ xhci->quirks |= XHCI_LPM_SUPPORT;
+
+ if (device_property_read_bool(tmpdev, "quirk-broken-port-ped"))
+ xhci->quirks |= XHCI_BROKEN_PORT_PED;
+
+ device_property_read_u32(tmpdev, "imod-interval-ns",
+ &xhci->imod_interval);
+ }
hcd->usb_phy = devm_usb_get_phy_by_phandle(sysdev, "usb-phy", 0);
if (IS_ERR(hcd->usb_phy)) {
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 61f48b17e57b..0420eefa647a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -37,6 +37,21 @@ static unsigned long long quirks;
module_param(quirks, ullong, S_IRUGO);
MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
+static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
+{
+ struct xhci_segment *seg = ring->first_seg;
+
+ if (!td || !td->start_seg)
+ return false;
+ do {
+ if (seg == td->start_seg)
+ return true;
+ seg = seg->next;
+ } while (seg && seg != ring->first_seg);
+
+ return false;
+}
+
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
* xhci_handshake - spin reading hc until handshake completes or fails
@@ -1571,6 +1586,21 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
goto done;
}
+ /*
+ * Check that the ring has not been re-allocated since the URB was
+ * enqueued. If it has, make sure none of the ring-related pointers in
+ * this URB's private data are touched, such as td_list; otherwise we
+ * would overwrite freed data.
+ */
+ if (!td_on_ring(&urb_priv->td[0], ep_ring)) {
+ xhci_err(xhci, "Canceled URB td not found on endpoint ring");
+ for (i = urb_priv->num_tds_done; i < urb_priv->num_tds; i++) {
+ td = &urb_priv->td[i];
+ if (!list_empty(&td->cancelled_td_list))
+ list_del_init(&td->cancelled_td_list);
+ }
+ goto err_giveback;
+ }
+
if (xhci->xhc_state & XHCI_STATE_HALTED) {
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"HC halted, freeing TD manually.");
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 82f220631bd7..b5d661644263 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -369,7 +369,7 @@ static unsigned char parport_uss720_frob_control(struct parport *pp, unsigned ch
mask &= 0x0f;
val &= 0x0f;
d = (priv->reg[1] & (~mask)) ^ val;
- if (set_1284_register(pp, 2, d, GFP_KERNEL))
+ if (set_1284_register(pp, 2, d, GFP_ATOMIC))
return 0;
priv->reg[1] = d;
return d & 0xf;
@@ -379,7 +379,7 @@ static unsigned char parport_uss720_read_status(struct parport *pp)
{
unsigned char ret;
- if (get_1284_register(pp, 1, &ret, GFP_KERNEL))
+ if (get_1284_register(pp, 1, &ret, GFP_ATOMIC))
return 0;
return ret & 0xf8;
}
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 3be40eaa1ac9..6d9fd5f64903 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -413,6 +413,9 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->io_mutex);
+ if (WARN_ON_ONCE(len >= sizeof(in_buffer)))
+ return -EIO;
+
return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
}
@@ -421,13 +424,13 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
{
struct usb_yurex *dev;
int i, set = 0, retval = 0;
- char buffer[16];
+ char buffer[16 + 1];
char *data = buffer;
unsigned long long c, c2 = 0;
signed long timeout = 0;
DEFINE_WAIT(wait);
- count = min(sizeof(buffer), count);
+ count = min(sizeof(buffer) - 1, count);
dev = file->private_data;
/* verify that we actually have some data to write */
@@ -446,6 +449,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
retval = -EFAULT;
goto error;
}
+ buffer[count] = 0;
memset(dev->cntl_buffer, CMD_PADDING, YUREX_BUF_SIZE);
switch (buffer[0]) {
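A minimal sketch of the termination fix above: reserve a byte for the terminator, cap the copy, and NUL-terminate before the buffer is scanned as a string; read_cmd() is hypothetical:

#include <linux/kernel.h>
#include <linux/uaccess.h>

static int read_cmd(char *dst, size_t dstsize,
		    const char __user *src, size_t count)
{
	size_t n = min(dstsize - 1, count);	/* leave room for '\0' */

	if (copy_from_user(dst, src, n))
		return -EFAULT;
	dst[n] = '\0';				/* safe to parse as a string */
	return 0;
}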
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
index eecfd0671362..d045d8458f81 100644
--- a/drivers/usb/mtu3/mtu3_core.c
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -107,8 +107,12 @@ static int mtu3_device_enable(struct mtu3 *mtu)
(SSUSB_U2_PORT_DIS | SSUSB_U2_PORT_PDN |
SSUSB_U2_PORT_HOST_SEL));
- if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG)
+ if (mtu->ssusb->dr_mode == USB_DR_MODE_OTG) {
mtu3_setbits(ibase, SSUSB_U2_CTRL(0), SSUSB_U2_PORT_OTG_SEL);
+ if (mtu->is_u3_ip)
+ mtu3_setbits(ibase, SSUSB_U3_CTRL(0),
+ SSUSB_U3_PORT_DUAL_MODE);
+ }
return ssusb_check_clocks(mtu->ssusb, check_clk);
}
diff --git a/drivers/usb/mtu3/mtu3_hw_regs.h b/drivers/usb/mtu3/mtu3_hw_regs.h
index 6ee371478d89..a45bb253939f 100644
--- a/drivers/usb/mtu3/mtu3_hw_regs.h
+++ b/drivers/usb/mtu3/mtu3_hw_regs.h
@@ -459,6 +459,7 @@
/* U3D_SSUSB_U3_CTRL_0P */
#define SSUSB_U3_PORT_SSP_SPEED BIT(9)
+#define SSUSB_U3_PORT_DUAL_MODE BIT(7)
#define SSUSB_U3_PORT_HOST_SEL BIT(2)
#define SSUSB_U3_PORT_PDN BIT(1)
#define SSUSB_U3_PORT_DIS BIT(0)
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index df827ff57b0d..23a0df79ef21 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -658,16 +658,6 @@ dsps_dma_controller_create(struct musb *musb, void __iomem *base)
return controller;
}
-static void dsps_dma_controller_destroy(struct dma_controller *c)
-{
- struct musb *musb = c->musb;
- struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent);
- void __iomem *usbss_base = glue->usbss_base;
-
- musb_writel(usbss_base, USBSS_IRQ_CLEARR, USBSS_IRQ_PD_COMP);
- cppi41_dma_controller_destroy(c);
-}
-
#ifdef CONFIG_PM_SLEEP
static void dsps_dma_controller_suspend(struct dsps_glue *glue)
{
@@ -697,7 +687,7 @@ static struct musb_platform_ops dsps_ops = {
#ifdef CONFIG_USB_TI_CPPI41_DMA
.dma_init = dsps_dma_controller_create,
- .dma_exit = dsps_dma_controller_destroy,
+ .dma_exit = cppi41_dma_controller_destroy,
#endif
.enable = dsps_musb_enable,
.disable = dsps_musb_disable,
diff --git a/drivers/usb/roles/intel-xhci-usb-role-switch.c b/drivers/usb/roles/intel-xhci-usb-role-switch.c
index 1fb3dd0f1dfa..277de96181f9 100644
--- a/drivers/usb/roles/intel-xhci-usb-role-switch.c
+++ b/drivers/usb/roles/intel-xhci-usb-role-switch.c
@@ -161,6 +161,8 @@ static int intel_xhci_usb_remove(struct platform_device *pdev)
{
struct intel_xhci_usb_data *data = platform_get_drvdata(pdev);
+ pm_runtime_disable(&pdev->dev);
+
usb_role_switch_unregister(data->role_sw);
return 0;
}
diff --git a/drivers/usb/serial/io_ti.h b/drivers/usb/serial/io_ti.h
index e53c68261017..9bbcee37524e 100644
--- a/drivers/usb/serial/io_ti.h
+++ b/drivers/usb/serial/io_ti.h
@@ -173,7 +173,7 @@ struct ump_interrupt {
} __attribute__((packed));
-#define TIUMP_GET_PORT_FROM_CODE(c) (((c) >> 4) - 3)
+#define TIUMP_GET_PORT_FROM_CODE(c) (((c) >> 6) & 0x01)
#define TIUMP_GET_FUNC_FROM_CODE(c) ((c) & 0x0f)
#define TIUMP_INTERRUPT_CODE_LSR 0x03
#define TIUMP_INTERRUPT_CODE_MSR 0x04
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 0215b70c4efc..e72ad9f81c73 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -561,6 +561,9 @@ static void option_instat_callback(struct urb *urb);
/* Interface is reserved */
#define RSVD(ifnum) ((BIT(ifnum) & 0xff) << 0)
+/* Interface must have two endpoints */
+#define NUMEP2 BIT(16)
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
@@ -1081,8 +1084,9 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
.driver_info = RSVD(4) },
- { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06),
- .driver_info = RSVD(4) | RSVD(5) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
+ .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1999,6 +2003,13 @@ static int option_probe(struct usb_serial *serial,
if (device_flags & RSVD(iface_desc->bInterfaceNumber))
return -ENODEV;
+ /*
+ * Allow matching on bNumEndpoints for devices whose interface numbers
+ * can change (e.g. Quectel EP06).
+ */
+ if (device_flags & NUMEP2 && iface_desc->bNumEndpoints != 2)
+ return -ENODEV;
+
/* Store the device flags so we can use them during attach. */
usb_set_serial_data(serial, (void *)device_flags);
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 3010878f7f8e..e3c5832337e0 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1119,7 +1119,7 @@ static void ti_break(struct tty_struct *tty, int break_state)
static int ti_get_port_from_code(unsigned char code)
{
- return (code >> 4) - 3;
+ return (code >> 6) & 0x01;
}
static int ti_get_func_from_code(unsigned char code)
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 40864c2bd9dc..4d0273508043 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -84,7 +84,8 @@ DEVICE(moto_modem, MOTO_IDS);
/* Motorola Tetra driver */
#define MOTOROLA_TETRA_IDS() \
- { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */
+ { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
+ { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */
DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
/* Novatel Wireless GPS driver */
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index c267f2812a04..e227bb5b794f 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -376,6 +376,15 @@ static int queuecommand_lck(struct scsi_cmnd *srb,
return 0;
}
+ if ((us->fflags & US_FL_NO_ATA_1X) &&
+ (srb->cmnd[0] == ATA_12 || srb->cmnd[0] == ATA_16)) {
+ memcpy(srb->sense_buffer, usb_stor_sense_invalidCDB,
+ sizeof(usb_stor_sense_invalidCDB));
+ srb->result = SAM_STAT_CHECK_CONDITION;
+ done(srb);
+ return 0;
+ }
+
/* enqueue the command and wake up the control thread */
srb->scsi_done = done;
us->srb = srb;
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 9e9de5452860..1f7b401c4d04 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -842,6 +842,27 @@ static int uas_slave_configure(struct scsi_device *sdev)
sdev->skip_ms_page_8 = 1;
sdev->wce_default_on = 1;
}
+
+ /*
+ * Some disks return the total number of blocks in response
+ * to READ CAPACITY rather than the highest block number.
+ * If this device makes that mistake, tell the sd driver.
+ */
+ if (devinfo->flags & US_FL_FIX_CAPACITY)
+ sdev->fix_capacity = 1;
+
+ /*
+ * Some devices don't like MODE SENSE with page=0x3f,
+ * which is the command used for checking if a device
+ * is write-protected. Now that we tell the sd driver
+ * to do a 192-byte transfer with this command the
+ * majority of devices work fine, but a few still can't
+ * handle it. The sd driver will simply assume those
+ * devices are write-enabled.
+ */
+ if (devinfo->flags & US_FL_NO_WP_DETECT)
+ sdev->skip_ms_page_3f = 1;
+
scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
return 0;
}
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 22fcfccf453a..f7f83b21dc74 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2288,6 +2288,13 @@ UNUSUAL_DEV( 0x2735, 0x100b, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_GO_SLOW ),
+/* Reported-by: Tim Anderson <tsa@biglakesoftware.com> */
+UNUSUAL_DEV( 0x2ca3, 0x0031, 0x0000, 0x9999,
+ "DJI",
+ "CineSSD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
/*
* Reported by Frederic Marchal <frederic.marchal@wowcompany.com>
* Mio Moov 330
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index 95a2b10127db..76299b6ff06d 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -255,12 +255,13 @@ EXPORT_SYMBOL_GPL(typec_altmode_unregister_driver);
/* API for the port drivers */
/**
- * typec_match_altmode - Match SVID to an array of alternate modes
+ * typec_match_altmode - Match SVID and mode to an array of alternate modes
* @altmodes: Array of alternate modes
- * @n: Number of elements in the array, or -1 for NULL termiated arrays
+ * @n: Number of elements in the array, or -1 for NULL terminated arrays
* @svid: Standard or Vendor ID to match with
+ * @mode: Mode to match with
*
- * Return pointer to an alternate mode with SVID mathing @svid, or NULL when no
+ * Return pointer to an alternate mode with SVID matching @svid, or NULL when no
* match is found.
*/
struct typec_altmode *typec_match_altmode(struct typec_altmode **altmodes,
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index c202975f8097..e61dffb27a0c 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -1484,7 +1484,6 @@ EXPORT_SYMBOL_GPL(typec_set_mode);
* typec_port_register_altmode - Register USB Type-C Port Alternate Mode
* @port: USB Type-C Port that supports the alternate mode
* @desc: Description of the alternate mode
- * @drvdata: Private pointer to driver specific info
*
* This routine is used to register an alternate mode that @port is capable of
* supporting.
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index ddaac63ecf12..d990aa510fab 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/list.h>
+#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/usb/typec_mux.h>
@@ -49,8 +50,10 @@ struct typec_switch *typec_switch_get(struct device *dev)
mutex_lock(&switch_lock);
sw = device_connection_find_match(dev, "typec-switch", NULL,
typec_switch_match);
- if (!IS_ERR_OR_NULL(sw))
+ if (!IS_ERR_OR_NULL(sw)) {
+ WARN_ON(!try_module_get(sw->dev->driver->owner));
get_device(sw->dev);
+ }
mutex_unlock(&switch_lock);
return sw;
@@ -65,8 +68,10 @@ EXPORT_SYMBOL_GPL(typec_switch_get);
*/
void typec_switch_put(struct typec_switch *sw)
{
- if (!IS_ERR_OR_NULL(sw))
+ if (!IS_ERR_OR_NULL(sw)) {
+ module_put(sw->dev->driver->owner);
put_device(sw->dev);
+ }
}
EXPORT_SYMBOL_GPL(typec_switch_put);
@@ -136,8 +141,10 @@ struct typec_mux *typec_mux_get(struct device *dev, const char *name)
mutex_lock(&mux_lock);
mux = device_connection_find_match(dev, name, NULL, typec_mux_match);
- if (!IS_ERR_OR_NULL(mux))
+ if (!IS_ERR_OR_NULL(mux)) {
+ WARN_ON(!try_module_get(mux->dev->driver->owner));
get_device(mux->dev);
+ }
mutex_unlock(&mux_lock);
return mux;
@@ -152,8 +159,10 @@ EXPORT_SYMBOL_GPL(typec_mux_get);
*/
void typec_mux_put(struct typec_mux *mux)
{
- if (!IS_ERR_OR_NULL(mux))
+ if (!IS_ERR_OR_NULL(mux)) {
+ module_put(mux->dev->driver->owner);
put_device(mux->dev);
+ }
}
EXPORT_SYMBOL_GPL(typec_mux_put);
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index d11f3f8dad40..1e592ec94ba4 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -318,8 +318,9 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
struct vhci_hcd *vhci_hcd;
struct vhci *vhci;
int retval = 0;
- int rhport;
+ int rhport = -1;
unsigned long flags;
+ bool invalid_rhport = false;
u32 prev_port_status[VHCI_HC_PORTS];
@@ -334,9 +335,19 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
usbip_dbg_vhci_rh("typeReq %x wValue %x wIndex %x\n", typeReq, wValue,
wIndex);
- if (wIndex > VHCI_HC_PORTS)
- pr_err("invalid port number %d\n", wIndex);
- rhport = wIndex - 1;
+ /*
+ * wIndex can be 0 for some request types (typeReq). rhport is
+ * in the valid range when wIndex >= 1 and wIndex <= VHCI_HC_PORTS.
+ *
+ * Reference port_status[] only with a valid rhport, i.e. only
+ * when invalid_rhport is false.
+ */
+ if (wIndex < 1 || wIndex > VHCI_HC_PORTS) {
+ invalid_rhport = true;
+ if (wIndex > VHCI_HC_PORTS)
+ pr_err("invalid port number %d\n", wIndex);
+ } else
+ rhport = wIndex - 1;
vhci_hcd = hcd_to_vhci_hcd(hcd);
vhci = vhci_hcd->vhci;
@@ -345,8 +356,9 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* store old status and compare now and old later */
if (usbip_dbg_flag_vhci_rh) {
- memcpy(prev_port_status, vhci_hcd->port_status,
- sizeof(prev_port_status));
+ if (!invalid_rhport)
+ memcpy(prev_port_status, vhci_hcd->port_status,
+ sizeof(prev_port_status));
}
switch (typeReq) {
@@ -354,8 +366,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
usbip_dbg_vhci_rh(" ClearHubFeature\n");
break;
case ClearPortFeature:
- if (rhport < 0)
+ if (invalid_rhport) {
+ pr_err("invalid port number %d\n", wIndex);
goto error;
+ }
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
if (hcd->speed == HCD_USB3) {
@@ -415,9 +429,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
break;
case GetPortStatus:
usbip_dbg_vhci_rh(" GetPortStatus port %x\n", wIndex);
- if (wIndex < 1) {
+ if (invalid_rhport) {
pr_err("invalid port number %d\n", wIndex);
retval = -EPIPE;
+ goto error;
}
/* we do not care about resume. */
@@ -513,16 +528,20 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
}
- if (rhport < 0)
+ if (invalid_rhport) {
+ pr_err("invalid port number %d\n", wIndex);
goto error;
+ }
vhci_hcd->port_status[rhport] |= USB_PORT_STAT_SUSPEND;
break;
case USB_PORT_FEAT_POWER:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_POWER\n");
- if (rhport < 0)
+ if (invalid_rhport) {
+ pr_err("invalid port number %d\n", wIndex);
goto error;
+ }
if (hcd->speed == HCD_USB3)
vhci_hcd->port_status[rhport] |= USB_SS_PORT_STAT_POWER;
else
@@ -531,8 +550,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_BH_PORT_RESET:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_BH_PORT_RESET\n");
- if (rhport < 0)
+ if (invalid_rhport) {
+ pr_err("invalid port number %d\n", wIndex);
goto error;
+ }
/* Applicable only for USB3.0 hub */
if (hcd->speed != HCD_USB3) {
pr_err("USB_PORT_FEAT_BH_PORT_RESET req not "
@@ -543,8 +564,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_RESET:
usbip_dbg_vhci_rh(
" SetPortFeature: USB_PORT_FEAT_RESET\n");
- if (rhport < 0)
+ if (invalid_rhport) {
+ pr_err("invalid port number %d\n", wIndex);
goto error;
+ }
/* if it's already enabled, disable */
if (hcd->speed == HCD_USB3) {
vhci_hcd->port_status[rhport] = 0;
@@ -565,8 +588,10 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
default:
usbip_dbg_vhci_rh(" SetPortFeature: default %d\n",
wValue);
- if (rhport < 0)
+ if (invalid_rhport) {
+ pr_err("invalid port number %d\n", wIndex);
goto error;
+ }
if (hcd->speed == HCD_USB3) {
if ((vhci_hcd->port_status[rhport] &
USB_SS_PORT_STAT_POWER) != 0) {
@@ -608,7 +633,7 @@ error:
if (usbip_dbg_flag_vhci_rh) {
pr_debug("port %d\n", rhport);
/* Only dump valid port status */
- if (rhport >= 0) {
+ if (!invalid_rhport) {
dump_port_status_diff(prev_port_status[rhport],
vhci_hcd->port_status[rhport],
hcd->speed == HCD_USB3);
@@ -618,8 +643,10 @@ error:
spin_unlock_irqrestore(&vhci->lock, flags);
- if ((vhci_hcd->port_status[rhport] & PORT_C_MASK) != 0)
+ if (!invalid_rhport &&
+ (vhci_hcd->port_status[rhport] & PORT_C_MASK) != 0) {
usb_hcd_poll_rh_status(hcd);
+ }
return retval;
}
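
The vhci_hub_control() rework above validates wIndex once, records the result in invalid_rhport, and only then derives rhport, instead of re-checking rhport < 0 at every case label. A minimal userspace sketch of that validate-once pattern (HC_PORTS and the function name are illustrative, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    #define HC_PORTS 8                      /* stand-in for VHCI_HC_PORTS */

    /* Validate the 1-based wIndex once; return the 0-based port or -1. */
    static int parse_port(unsigned int wIndex, bool *invalid)
    {
            /* wIndex may legitimately be 0 for hub-wide requests. */
            if (wIndex < 1 || wIndex > HC_PORTS) {
                    *invalid = true;
                    return -1;
            }
            *invalid = false;
            return (int)wIndex - 1;         /* now in [0, HC_PORTS) */
    }

    int main(void)
    {
            bool invalid;
            int rhport = parse_port(9, &invalid);

            if (invalid)
                    fprintf(stderr, "invalid port number\n");
            else
                    printf("rhport %d\n", rhport);
            return 0;
    }

Keeping the flag separate from rhport means port_status[rhport] is only ever indexed after the one up-front check has passed.
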
diff --git a/drivers/video/fbdev/aty/atyfb.h b/drivers/video/fbdev/aty/atyfb.h
index 8235b285dbb2..d09bab3bf224 100644
--- a/drivers/video/fbdev/aty/atyfb.h
+++ b/drivers/video/fbdev/aty/atyfb.h
@@ -333,6 +333,8 @@ extern const struct aty_pll_ops aty_pll_ct; /* Integrated */
extern void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll);
extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
+extern const u8 aty_postdividers[8];
+
/*
* Hardware cursor support
@@ -359,7 +361,6 @@ static inline void wait_for_idle(struct atyfb_par *par)
extern void aty_reset_engine(const struct atyfb_par *par);
extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info);
-extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index a9a8272f7a6e..05111e90f168 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -3087,17 +3087,18 @@ static int atyfb_setup_sparc(struct pci_dev *pdev, struct fb_info *info,
/*
* PLL Reference Divider M:
*/
- M = pll_regs[2];
+ M = pll_regs[PLL_REF_DIV];
/*
* PLL Feedback Divider N (Dependent on CLOCK_CNTL):
*/
- N = pll_regs[7 + (clock_cntl & 3)];
+ N = pll_regs[VCLK0_FB_DIV + (clock_cntl & 3)];
/*
* PLL Post Divider P (Dependent on CLOCK_CNTL):
*/
- P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1));
+ P = aty_postdividers[((pll_regs[VCLK_POST_DIV] >> ((clock_cntl & 3) << 1)) & 3) |
+ ((pll_regs[PLL_EXT_CNTL] >> (2 + (clock_cntl & 3))) & 4)];
/*
* PLL Divider Q:
diff --git a/drivers/video/fbdev/aty/mach64_ct.c b/drivers/video/fbdev/aty/mach64_ct.c
index 74a62aa193c0..f87cc81f4fa2 100644
--- a/drivers/video/fbdev/aty/mach64_ct.c
+++ b/drivers/video/fbdev/aty/mach64_ct.c
@@ -115,7 +115,7 @@ static void aty_st_pll_ct(int offset, u8 val, const struct atyfb_par *par)
*/
#define Maximum_DSP_PRECISION 7
-static u8 postdividers[] = {1,2,4,8,3};
+const u8 aty_postdividers[8] = {1,2,4,8,3,5,6,12};
static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll)
{
@@ -222,7 +222,7 @@ static int aty_valid_pll_ct(const struct fb_info *info, u32 vclk_per, struct pll
pll->vclk_post_div += (q < 64*8);
pll->vclk_post_div += (q < 32*8);
}
- pll->vclk_post_div_real = postdividers[pll->vclk_post_div];
+ pll->vclk_post_div_real = aty_postdividers[pll->vclk_post_div];
// pll->vclk_post_div <<= 6;
pll->vclk_fb_div = q * pll->vclk_post_div_real / 8;
pllvclk = (1000000 * 2 * pll->vclk_fb_div) /
@@ -513,7 +513,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
u8 mclk_fb_div, pll_ext_cntl;
pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par);
pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par);
- pll->ct.xclk_post_div_real = postdividers[pll_ext_cntl & 0x07];
+ pll->ct.xclk_post_div_real = aty_postdividers[pll_ext_cntl & 0x07];
mclk_fb_div = aty_ld_pll_ct(MCLK_FB_DIV, par);
if (pll_ext_cntl & PLL_MFB_TIMES_4_2B)
mclk_fb_div <<= 1;
@@ -535,7 +535,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
xpost_div += (q < 64*8);
xpost_div += (q < 32*8);
}
- pll->ct.xclk_post_div_real = postdividers[xpost_div];
+ pll->ct.xclk_post_div_real = aty_postdividers[xpost_div];
pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8;
#ifdef CONFIG_PPC
@@ -584,7 +584,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
mpost_div += (q < 64*8);
mpost_div += (q < 32*8);
}
- sclk_post_div_real = postdividers[mpost_div];
+ sclk_post_div_real = aty_postdividers[mpost_div];
pll->ct.sclk_fb_div = q * sclk_post_div_real / 8;
pll->ct.spll_cntl2 = mpost_div << 4;
#ifdef DEBUG
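
The atyfb change stops assuming the post divider is a power of two (the old 1 << n) and instead assembles the full 3-bit table index — two low bits from VCLK_POST_DIV, one high bit from PLL_EXT_CNTL — before looking it up in the 8-entry aty_postdividers table that mach64_ct.c now exports. A standalone sketch of the bit assembly (the register values below are made up, not dumped from hardware):

    #include <stdint.h>
    #include <stdio.h>

    /* The table the patch exports from mach64_ct.c. */
    static const uint8_t aty_postdividers[8] = {1, 2, 4, 8, 3, 5, 6, 12};

    /*
     * Rebuild the post divider for VCLK 'vclk' (0-3): VCLK_POST_DIV
     * holds two bits per clock, PLL_EXT_CNTL supplies the third bit.
     */
    static uint8_t post_divider(uint8_t vclk_post_div, uint8_t pll_ext_cntl,
                                unsigned int vclk)
    {
            unsigned int idx = (vclk_post_div >> (vclk << 1)) & 3;

            idx |= (pll_ext_cntl >> (2 + vclk)) & 4;
            return aty_postdividers[idx];
    }

    int main(void)
    {
            printf("P = %u\n", post_divider(0xe4, 0x10, 1));
            return 0;
    }
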
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 3946649b85c8..ba906876cc45 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -42,6 +42,7 @@ struct bmp_dib_header {
u32 colors_important;
} __packed;
+static bool use_bgrt = true;
static bool request_mem_succeeded = false;
static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
@@ -160,6 +161,9 @@ static void efifb_show_boot_graphics(struct fb_info *info)
void *bgrt_image = NULL;
u8 *dst = info->screen_base;
+ if (!use_bgrt)
+ return;
+
if (!bgrt_tab.image_address) {
pr_info("efifb: No BGRT, not showing boot graphics\n");
return;
@@ -290,6 +294,8 @@ static int efifb_setup(char *options)
screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
else if (!strcmp(this_opt, "nowc"))
mem_flags &= ~EFI_MEMORY_WC;
+ else if (!strcmp(this_opt, "nobgrt"))
+ use_bgrt = false;
}
}
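
The new nobgrt token rides the same comma-separated efifb option string as nowc, so it should be reachable as something like video=efifb:nobgrt on the kernel command line (hypothetical usage; check your platform's efifb setup). A userspace sketch of that option-splitting loop:

    #define _DEFAULT_SOURCE
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool use_bgrt = true;

    /* Split a comma-separated option string, e.g. "nowc,nobgrt". */
    static void parse_options(char *options)
    {
            char *opt;

            while ((opt = strsep(&options, ",")) != NULL) {
                    if (!*opt)
                            continue;
                    if (!strcmp(opt, "nobgrt"))
                            use_bgrt = false;
            }
    }

    int main(void)
    {
            char opts[] = "nowc,nobgrt";

            parse_options(opts);
            printf("use_bgrt = %d\n", use_bgrt);
            return 0;
    }
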
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index ef69273074ba..a3edb20ea4c3 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -496,6 +496,9 @@ static int omapfb_memory_read(struct fb_info *fbi,
if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
return -EFAULT;
+ if (mr->w > 4096 || mr->h > 4096)
+ return -EINVAL;
+
if (mr->w * mr->h * 3 > mr->buffer_size)
return -EINVAL;
@@ -509,7 +512,7 @@ static int omapfb_memory_read(struct fb_info *fbi,
mr->x, mr->y, mr->w, mr->h);
if (r > 0) {
- if (copy_to_user(mr->buffer, buf, mr->buffer_size))
+ if (copy_to_user(mr->buffer, buf, r))
r = -EFAULT;
}
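
Two independent hardenings in the omapfb hunk: dimensions are capped before w * h * 3 is computed, so the multiplication cannot wrap, and only the r bytes the read actually produced are copied back, not the caller's whole buffer_size. A sketch of both checks (MAX_DIM and the helper are illustrative):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_DIM 4096

    /*
     * Produce up to w*h*3 bytes into 'out'.  Returns bytes produced or
     * a negative errno.  The dimension clamp runs before the multiply.
     */
    static long read_rect(unsigned int w, unsigned int h,
                          char *out, size_t out_size)
    {
            size_t need;

            if (w > MAX_DIM || h > MAX_DIM)
                    return -EINVAL;
            need = (size_t)w * h * 3;
            if (need > out_size)
                    return -EINVAL;
            memset(out, 0xab, need);        /* stand-in for the real read */
            return (long)need;
    }

    int main(void)
    {
            char buf[64];
            long r = read_rect(4, 4, buf, sizeof(buf));

            if (r < 0)
                    fprintf(stderr, "error %ld\n", r);
            else
                    printf("copy back %ld bytes, not sizeof(buf)\n", r);
            return 0;
    }
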
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index def3a501acd6..d059d04c63ac 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -712,7 +712,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
/*
* enable controller clock
*/
- clk_enable(fbi->clk);
+ clk_prepare_enable(fbi->clk);
pxa168fb_set_par(info);
@@ -767,7 +767,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
failed_free_cmap:
fb_dealloc_cmap(&info->cmap);
failed_free_clk:
- clk_disable(fbi->clk);
+ clk_disable_unprepare(fbi->clk);
failed_free_fbmem:
dma_free_coherent(fbi->dev, info->fix.smem_len,
info->screen_base, fbi->fb_start_dma);
@@ -807,7 +807,7 @@ static int pxa168fb_remove(struct platform_device *pdev)
dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
info->screen_base, info->fix.smem_start);
- clk_disable(fbi->clk);
+ clk_disable_unprepare(fbi->clk);
framebuffer_release(info);
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 045e8afe398b..9e88e3f594c2 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -1157,7 +1157,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
dev_name);
goto out_err0;
}
- /* fall though */
+ /* fall through */
case S9000_ID_ARTIST:
case S9000_ID_HCRX:
case S9000_ID_TIMBER:
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index b459edfacff3..90d387b50ab7 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -79,15 +79,19 @@ config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
This value is used to allocate enough space in internal
tables needed for physical memory administration.
-config XEN_SCRUB_PAGES
- bool "Scrub pages before returning them to system"
+config XEN_SCRUB_PAGES_DEFAULT
+ bool "Scrub pages before returning them to system by default"
depends on XEN_BALLOON
default y
help
Scrub pages before returning them to the system for reuse by
other domains. This makes sure that any confidential data
is not accidentally visible to other domains. It is more
- secure, but slightly less efficient.
+ secure, but slightly less efficient. This can be controlled with
+ xen_scrub_pages=0 parameter and
+ /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
+ This option only sets the default value.
+
If in doubt, say yes.
config XEN_DEV_EVTCHN
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index 55ed80c3a17c..f3fbb700f569 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bio.h>
-#include <linux/io.h>
#include <linux/export.h>
+#include <xen/xen.h>
#include <xen/page.h>
bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
@@ -20,4 +20,3 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
return false;
#endif
}
-EXPORT_SYMBOL(xen_biovec_phys_mergeable);
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index d4265c8ebb22..b1357aa4bc55 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -19,15 +19,16 @@ static void enable_hotplug_cpu(int cpu)
static void disable_hotplug_cpu(int cpu)
{
- if (cpu_online(cpu)) {
- lock_device_hotplug();
+ if (!cpu_is_hotpluggable(cpu))
+ return;
+ lock_device_hotplug();
+ if (cpu_online(cpu))
device_offline(get_cpu_device(cpu));
- unlock_device_hotplug();
- }
- if (cpu_present(cpu))
+ if (!cpu_online(cpu) && cpu_present(cpu)) {
xen_arch_unregister_cpu(cpu);
-
- set_cpu_present(cpu, false);
+ set_cpu_present(cpu, false);
+ }
+ unlock_device_hotplug();
}
static int vcpu_online(unsigned int cpu)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 08e4af04d6f2..e6c1934734b7 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -138,7 +138,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
clear_evtchn_to_irq_row(row);
}
- evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
+ evtchn_to_irq[row][col] = irq;
return 0;
}
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 57390c7666e5..b0b02a501167 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -492,12 +492,19 @@ static bool in_range(struct gntdev_grant_map *map,
return true;
}
-static void unmap_if_in_range(struct gntdev_grant_map *map,
- unsigned long start, unsigned long end)
+static int unmap_if_in_range(struct gntdev_grant_map *map,
+ unsigned long start, unsigned long end,
+ bool blockable)
{
unsigned long mstart, mend;
int err;
+ if (!in_range(map, start, end))
+ return 0;
+
+ if (!blockable)
+ return -EAGAIN;
+
mstart = max(start, map->vma->vm_start);
mend = min(end, map->vma->vm_end);
pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
@@ -508,6 +515,8 @@ static void unmap_if_in_range(struct gntdev_grant_map *map,
(mstart - map->vma->vm_start) >> PAGE_SHIFT,
(mend - mstart) >> PAGE_SHIFT);
WARN_ON(err);
+
+ return 0;
}
static int mn_invl_range_start(struct mmu_notifier *mn,
@@ -519,25 +528,20 @@ static int mn_invl_range_start(struct mmu_notifier *mn,
struct gntdev_grant_map *map;
int ret = 0;
- /* TODO do we really need a mutex here? */
if (blockable)
mutex_lock(&priv->lock);
else if (!mutex_trylock(&priv->lock))
return -EAGAIN;
list_for_each_entry(map, &priv->maps, next) {
- if (in_range(map, start, end)) {
- ret = -EAGAIN;
+ ret = unmap_if_in_range(map, start, end, blockable);
+ if (ret)
goto out_unlock;
- }
- unmap_if_in_range(map, start, end);
}
list_for_each_entry(map, &priv->freeable_maps, next) {
- if (in_range(map, start, end)) {
- ret = -EAGAIN;
+ ret = unmap_if_in_range(map, start, end, blockable);
+ if (ret)
goto out_unlock;
- }
- unmap_if_in_range(map, start, end);
}
out_unlock:
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 7bafa703a992..84575baceebc 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1040,18 +1040,33 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
return ret;
for (i = 0; i < count; i++) {
- /* Retry eagain maps */
- if (map_ops[i].status == GNTST_eagain)
- gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
- &map_ops[i].status, __func__);
-
- if (map_ops[i].status == GNTST_okay) {
+ switch (map_ops[i].status) {
+ case GNTST_okay:
+ {
struct xen_page_foreign *foreign;
SetPageForeign(pages[i]);
foreign = xen_page_foreign(pages[i]);
foreign->domid = map_ops[i].dom;
foreign->gref = map_ops[i].ref;
+ break;
+ }
+
+ case GNTST_no_device_space:
+ pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
+ break;
+
+ case GNTST_eagain:
+ /* Retry eagain maps */
+ gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
+ map_ops + i,
+ &map_ops[i].status, __func__);
+ /* Test status in next loop iteration. */
+ i--;
+ break;
+
+ default:
+ break;
}
}
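
The GNTST_eagain arm retries the slot in place and then decrements i, so the next loop iteration re-examines the same entry's status instead of skipping it. The same loop shape as a standalone sketch (the status codes are stand-ins):

    #include <stdio.h>

    enum { ST_OKAY, ST_EAGAIN, ST_FAIL };

    static void retry(int *status)
    {
            /* Stand-in for gnttab_retry_eagain_gop(); pretend it worked. */
            *status = ST_OKAY;
    }

    int main(void)
    {
            int status[4] = { ST_OKAY, ST_EAGAIN, ST_FAIL, ST_OKAY };

            for (int i = 0; i < 4; i++) {
                    switch (status[i]) {
                    case ST_OKAY:
                            printf("%d mapped\n", i);
                            break;
                    case ST_EAGAIN:
                            retry(&status[i]);
                            i--;    /* re-test this slot next iteration */
                            break;
                    default:
                            printf("%d failed\n", i);
                            break;
                    }
            }
            return 0;
    }
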
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index c93d8ef8df34..5bb01a62f214 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -280,9 +280,11 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path,
/*
* The Xenstore watch fires directly after registering it and
* after a suspend/resume cycle. So ENOENT is no error but
- * might happen in those cases.
+ * might happen in those cases. ERANGE is observed when we get
+ * an empty value (''); this happens when we acknowledge the
+ * request by writing '\0' below.
*/
- if (err != -ENOENT)
+ if (err != -ENOENT && err != -ERANGE)
pr_err("Error %d reading sysrq code in control/sysrq\n",
err);
xenbus_transaction_end(xbt, 1);
diff --git a/drivers/xen/mem-reservation.c b/drivers/xen/mem-reservation.c
index 084799c6180e..3782cf070338 100644
--- a/drivers/xen/mem-reservation.c
+++ b/drivers/xen/mem-reservation.c
@@ -14,6 +14,10 @@
#include <xen/interface/memory.h>
#include <xen/mem-reservation.h>
+#include <linux/moduleparam.h>
+
+bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);
+core_param(xen_scrub_pages, xen_scrub_pages, bool, 0);
/*
* Use one extent per PAGE_SIZE to avoid breaking down the page into
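
The scrub default now comes from Kconfig via IS_ENABLED() and can be flipped at boot with the xen_scrub_pages= core_param, or later through the scrub_pages sysfs attribute wired up below in xen-balloon.c. A compile-time-default, runtime-override sketch:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for CONFIG_XEN_SCRUB_PAGES_DEFAULT=y. */
    #define SCRUB_PAGES_DEFAULT 1

    static bool scrub_pages = SCRUB_PAGES_DEFAULT;

    int main(int argc, char **argv)
    {
            /* Stand-in for the xen_scrub_pages= boot parameter. */
            if (argc > 1)
                    scrub_pages = atoi(argv[1]) != 0;
            printf("scrub_pages = %d\n", scrub_pages);
            return 0;
    }
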
diff --git a/drivers/xen/xen-acpi-pad.c b/drivers/xen/xen-acpi-pad.c
index 23d1808fe027..e25ab76b9c99 100644
--- a/drivers/xen/xen-acpi-pad.c
+++ b/drivers/xen/xen-acpi-pad.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/acpi.h>
+#include <xen/xen.h>
#include <xen/interface/version.h>
#include <xen/xen-ops.h>
#include <asm/xen/hypercall.h>
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 294f35ce9e46..63c1494a8d73 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -44,6 +44,7 @@
#include <xen/xenbus.h>
#include <xen/features.h>
#include <xen/page.h>
+#include <xen/mem-reservation.h>
#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
@@ -137,6 +138,7 @@ static DEVICE_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay);
static DEVICE_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay);
static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count);
static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count);
+static DEVICE_BOOL_ATTR(scrub_pages, 0644, xen_scrub_pages);
static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -203,6 +205,7 @@ static struct attribute *balloon_attrs[] = {
&dev_attr_max_schedule_delay.attr.attr,
&dev_attr_retry_count.attr.attr,
&dev_attr_max_retry_count.attr.attr,
+ &dev_attr_scrub_pages.attr.attr,
NULL
};
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index f3d0bef16d78..6127f0fcd62c 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -514,6 +514,8 @@ static int afs_alloc_anon_key(struct afs_cell *cell)
*/
static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
{
+ struct hlist_node **p;
+ struct afs_cell *pcell;
int ret;
if (!cell->anonymous_key) {
@@ -534,7 +536,18 @@ static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
return ret;
mutex_lock(&net->proc_cells_lock);
- list_add_tail(&cell->proc_link, &net->proc_cells);
+ for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
+ pcell = hlist_entry(*p, struct afs_cell, proc_link);
+ if (strcmp(cell->name, pcell->name) < 0)
+ break;
+ }
+
+ cell->proc_link.pprev = p;
+ cell->proc_link.next = *p;
+ rcu_assign_pointer(*p, &cell->proc_link.next);
+ if (cell->proc_link.next)
+ cell->proc_link.next->pprev = &cell->proc_link.next;
+
afs_dynroot_mkdir(net, cell);
mutex_unlock(&net->proc_cells_lock);
return 0;
@@ -550,7 +563,7 @@ static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
afs_proc_cell_remove(cell);
mutex_lock(&net->proc_cells_lock);
- list_del_init(&cell->proc_link);
+ hlist_del_rcu(&cell->proc_link);
afs_dynroot_rmdir(net, cell);
mutex_unlock(&net->proc_cells_lock);
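
The open-coded insertion above keeps net->proc_cells sorted by cell name while preserving hlist's pprev threading, so removal stays O(1). A userspace rendering of the same walk-and-splice, with plain pointer stores where the kernel uses rcu_assign_pointer() (the node names are just examples):

    #include <stdio.h>
    #include <string.h>

    struct node {
            const char *name;
            struct node *next;
            struct node **pprev;    /* points at whatever points at us */
    };

    /* Insert 'cell' into the pprev-threaded list, kept in name order. */
    static void insert_sorted(struct node **head, struct node *cell)
    {
            struct node **p;

            for (p = head; *p; p = &(*p)->next)
                    if (strcmp(cell->name, (*p)->name) < 0)
                            break;

            cell->pprev = p;
            cell->next = *p;
            *p = cell;              /* rcu_assign_pointer() in the kernel */
            if (cell->next)
                    cell->next->pprev = &cell->next;
    }

    int main(void)
    {
            struct node *head = NULL;
            struct node a = { .name = "grand.central.org" };
            struct node b = { .name = "example.org" };

            insert_sorted(&head, &a);
            insert_sorted(&head, &b);
            for (struct node *n = head; n; n = n->next)
                    printf("%s\n", n->name);
            return 0;
    }
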
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index 1cde710a8013..f29c6dade7f6 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -265,7 +265,7 @@ int afs_dynroot_populate(struct super_block *sb)
return -ERESTARTSYS;
net->dynroot_sb = sb;
- list_for_each_entry(cell, &net->proc_cells, proc_link) {
+ hlist_for_each_entry(cell, &net->proc_cells, proc_link) {
ret = afs_dynroot_mkdir(net, cell);
if (ret < 0)
goto error;
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 871a228d7f37..34c02fdcc25f 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -242,7 +242,7 @@ struct afs_net {
seqlock_t cells_lock;
struct mutex proc_cells_lock;
- struct list_head proc_cells;
+ struct hlist_head proc_cells;
/* Known servers. Theoretically each fileserver can only be in one
* cell, but in practice, people create aliases and subsets and there's
@@ -320,7 +320,7 @@ struct afs_cell {
struct afs_net *net;
struct key *anonymous_key; /* anonymous user key for this cell */
struct work_struct manager; /* Manager for init/deinit/dns */
- struct list_head proc_link; /* /proc cell list link */
+ struct hlist_node proc_link; /* /proc cell list link */
#ifdef CONFIG_AFS_FSCACHE
struct fscache_cookie *cache; /* caching cookie */
#endif
diff --git a/fs/afs/main.c b/fs/afs/main.c
index e84fe822a960..107427688edd 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -87,7 +87,7 @@ static int __net_init afs_net_init(struct net *net_ns)
timer_setup(&net->cells_timer, afs_cells_timer, 0);
mutex_init(&net->proc_cells_lock);
- INIT_LIST_HEAD(&net->proc_cells);
+ INIT_HLIST_HEAD(&net->proc_cells);
seqlock_init(&net->fs_lock);
net->fs_servers = RB_ROOT;
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 476dcbb79713..9101f62707af 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -33,9 +33,8 @@ static inline struct afs_net *afs_seq2net_single(struct seq_file *m)
static int afs_proc_cells_show(struct seq_file *m, void *v)
{
struct afs_cell *cell = list_entry(v, struct afs_cell, proc_link);
- struct afs_net *net = afs_seq2net(m);
- if (v == &net->proc_cells) {
+ if (v == SEQ_START_TOKEN) {
/* display header on line 1 */
seq_puts(m, "USE NAME\n");
return 0;
@@ -50,12 +49,12 @@ static void *afs_proc_cells_start(struct seq_file *m, loff_t *_pos)
__acquires(rcu)
{
rcu_read_lock();
- return seq_list_start_head(&afs_seq2net(m)->proc_cells, *_pos);
+ return seq_hlist_start_head_rcu(&afs_seq2net(m)->proc_cells, *_pos);
}
static void *afs_proc_cells_next(struct seq_file *m, void *v, loff_t *pos)
{
- return seq_list_next(v, &afs_seq2net(m)->proc_cells, pos);
+ return seq_hlist_next_rcu(v, &afs_seq2net(m)->proc_cells, pos);
}
static void afs_proc_cells_stop(struct seq_file *m, void *v)
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 35f2ae30f31f..77a83790a31f 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -690,8 +690,6 @@ static void afs_process_async_call(struct work_struct *work)
}
if (call->state == AFS_CALL_COMPLETE) {
- call->reply[0] = NULL;
-
/* We have two refs to release - one from the alloc and one
* queued with the work item - and we can't just deallocate the
* call because the work item may be queued again.
diff --git a/fs/buffer.c b/fs/buffer.c
index 6f1ae3ac9789..109f55196866 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3060,11 +3060,6 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
*/
bio = bio_alloc(GFP_NOIO, 1);
- if (wbc) {
- wbc_init_bio(wbc, bio);
- wbc_account_io(wbc, bh->b_page, bh->b_size);
- }
-
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio_set_dev(bio, bh->b_bdev);
bio->bi_write_hint = write_hint;
@@ -3084,6 +3079,11 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
op_flags |= REQ_PRIO;
bio_set_op_attrs(bio, op, op_flags);
+ if (wbc) {
+ wbc_init_bio(wbc, bio);
+ wbc_account_io(wbc, bh->b_page, bh->b_size);
+ }
+
submit_bio(bio);
return 0;
}
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index af2b17b21b94..95983c744164 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -343,7 +343,7 @@ try_again:
trap = lock_rename(cache->graveyard, dir);
/* do some checks before getting the grave dentry */
- if (rep->d_parent != dir) {
+ if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
/* the entry was probably culled when we dropped the parent dir
* lock */
unlock_rename(cache->graveyard, dir);
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 35c83fe7dba0..abcd78e332fe 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -6,6 +6,7 @@ config CIFS
select CRYPTO_MD4
select CRYPTO_MD5
select CRYPTO_SHA256
+ select CRYPTO_SHA512
select CRYPTO_CMAC
select CRYPTO_HMAC
select CRYPTO_ARC4
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 0c9ab62c3df4..9dcaed031843 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1553,6 +1553,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
/* Flags */
#define MID_WAIT_CANCELLED 1 /* Cancelled while waiting for response */
+#define MID_DELETED 2 /* Mid has been dequeued/deleted */
/* Types of response buffer returned from SendReceive2 */
#define CIFS_NO_BUFFER 0 /* Response buffer not returned */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index dc2f4cf08fe9..5657b79dbc99 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -601,10 +601,15 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
}
count = 0;
+ /*
+ * We know that all the name entries in the protocols array
+ * are short (< 16 bytes anyway) and are NUL terminated.
+ */
for (i = 0; i < CIFS_NUM_PROT; i++) {
- strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
- count += strlen(protocols[i].name) + 1;
- /* null at end of source and target buffers anyway */
+ size_t len = strlen(protocols[i].name) + 1;
+
+ memcpy(pSMB->DialectsArray+count, protocols[i].name, len);
+ count += len;
}
inc_rfc1001_len(pSMB, count);
pSMB->ByteCount = cpu_to_le16(count);
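
Because each dialect name's length is known at copy time, memcpy of strlen()+1 bytes moves exactly the string plus its terminator; the old strncpy(..., 16) silently truncated anything longer and depended on the target being pre-zeroed. A sketch with stand-in dialect strings:

    #include <stdio.h>
    #include <string.h>

    /* Illustrative SMB dialect names, not the driver's full table. */
    static const char *const protocols[] = { "NT LM 0.12", "POSIX 2" };

    int main(void)
    {
            char dialects[64];
            size_t count = 0;

            for (size_t i = 0; i < sizeof(protocols) / sizeof(protocols[0]); i++) {
                    /* Length is known and small: copy string + NUL exactly. */
                    size_t len = strlen(protocols[i]) + 1;

                    memcpy(dialects + count, protocols[i], len);
                    count += len;
            }
            printf("%zu bytes of dialect strings\n", count);
            return 0;
    }
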
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 7aa08dba4719..52d71b64c0c6 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -659,7 +659,15 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
mid->mid_state = MID_RESPONSE_RECEIVED;
else
mid->mid_state = MID_RESPONSE_MALFORMED;
- list_del_init(&mid->qhead);
+ /*
+ * Trying to handle/dequeue a mid after the send_recv()
+ * function has finished processing it is a bug.
+ */
+ if (mid->mid_flags & MID_DELETED)
+ printk_once(KERN_WARNING
+ "trying to dequeue a deleted mid\n");
+ else
+ list_del_init(&mid->qhead);
spin_unlock(&GlobalMid_Lock);
}
@@ -938,8 +946,7 @@ next_pdu:
} else {
mids[0] = server->ops->find_mid(server, buf);
bufs[0] = buf;
- if (mids[0])
- num_mids = 1;
+ num_mids = 1;
if (!mids[0] || !mids[0]->receive)
length = standard_receive3(server, mids[0]);
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index dacb2c05674c..6926685e513c 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -402,9 +402,17 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
(struct smb_com_transaction_change_notify_rsp *)buf;
struct file_notify_information *pnotify;
__u32 data_offset = 0;
+ size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);
+
if (get_bcc(buf) > sizeof(struct file_notify_information)) {
data_offset = le32_to_cpu(pSMBr->DataOffset);
+ if (data_offset >
+ len - sizeof(struct file_notify_information)) {
+ cifs_dbg(FYI, "invalid data_offset %u\n",
+ data_offset);
+ return true;
+ }
pnotify = (struct file_notify_information *)
((char *)&pSMBr->hdr.Protocol + data_offset);
cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index eeab81c9452f..e169e1a5fd35 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -376,8 +376,15 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) +
pfData->FileNameLength;
- } else
- new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset);
+ } else {
+ u32 next_offset = le32_to_cpu(pDirInfo->NextEntryOffset);
+
+ if (old_entry + next_offset < old_entry) {
+ cifs_dbg(VFS, "invalid offset %u\n", next_offset);
+ return NULL;
+ }
+ new_entry = old_entry + next_offset;
+ }
cifs_dbg(FYI, "new entry %p old entry %p\n", new_entry, old_entry);
/* validate that new_entry is not past end of SMB */
if (new_entry >= end_of_smb) {
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index d954ce36b473..89985a0a6819 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1477,7 +1477,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
}
srch_inf->entries_in_buffer = 0;
- srch_inf->index_of_last_entry = 0;
+ srch_inf->index_of_last_entry = 2;
rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
fid->volatile_fid, 0, srch_inf);
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index c08acfc77abc..f54d07bda067 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -2459,14 +2459,14 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
/* We check for obvious errors in the output buffer length and offset */
if (*plen == 0)
goto ioctl_exit; /* server returned no data */
- else if (*plen > 0xFF00) {
+ else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) {
cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
*plen = 0;
rc = -EIO;
goto ioctl_exit;
}
- if (rsp_iov.iov_len < le32_to_cpu(rsp->OutputOffset) + *plen) {
+ if (rsp_iov.iov_len - *plen < le32_to_cpu(rsp->OutputOffset)) {
cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
le32_to_cpu(rsp->OutputOffset));
*plen = 0;
@@ -3577,33 +3577,38 @@ num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
int len;
unsigned int entrycount = 0;
unsigned int next_offset = 0;
- FILE_DIRECTORY_INFO *entryptr;
+ char *entryptr;
+ FILE_DIRECTORY_INFO *dir_info;
if (bufstart == NULL)
return 0;
- entryptr = (FILE_DIRECTORY_INFO *)bufstart;
+ entryptr = bufstart;
while (1) {
- entryptr = (FILE_DIRECTORY_INFO *)
- ((char *)entryptr + next_offset);
-
- if ((char *)entryptr + size > end_of_buf) {
+ if (entryptr + next_offset < entryptr ||
+ entryptr + next_offset > end_of_buf ||
+ entryptr + next_offset + size > end_of_buf) {
cifs_dbg(VFS, "malformed search entry would overflow\n");
break;
}
- len = le32_to_cpu(entryptr->FileNameLength);
- if ((char *)entryptr + len + size > end_of_buf) {
+ entryptr = entryptr + next_offset;
+ dir_info = (FILE_DIRECTORY_INFO *)entryptr;
+
+ len = le32_to_cpu(dir_info->FileNameLength);
+ if (entryptr + len < entryptr ||
+ entryptr + len > end_of_buf ||
+ entryptr + len + size > end_of_buf) {
cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
end_of_buf);
break;
}
- *lastentry = (char *)entryptr;
+ *lastentry = entryptr;
entrycount++;
- next_offset = le32_to_cpu(entryptr->NextEntryOffset);
+ next_offset = le32_to_cpu(dir_info->NextEntryOffset);
if (!next_offset)
break;
}
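
Each step of the rewritten walk checks the server-supplied offset three ways before anything is dereferenced: pointer wrap-around, frame end, and room for the fixed-size header. A standalone sketch of the same defensive walk (the struct is a stand-in; the p + next < p wrap test mirrors the kernel idiom, though it is formally undefined in portable C):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct dir_info {
            uint32_t next_offset;   /* 0 terminates the chain */
            uint32_t name_len;
    };

    /* Count entries in [buf, end) without trusting the offsets. */
    static unsigned int count_entries(char *buf, char *end)
    {
            char *p = buf;
            uint32_t next = 0;
            unsigned int n = 0;

            for (;;) {
                    struct dir_info *di;
                    uint32_t len;

                    /* Reject wrap-around or anything leaving the frame. */
                    if (p + next < p || p + next > end ||
                        p + next + sizeof(*di) > end)
                            break;
                    p += next;
                    di = (struct dir_info *)p;

                    len = di->name_len;
                    if (p + len < p || p + len + sizeof(*di) > end)
                            break;

                    n++;
                    next = di->next_offset;
                    if (!next)
                            break;
            }
            return n;
    }

    int main(void)
    {
            char frame[64] = {0};
            struct dir_info di = { .next_offset = 0, .name_len = 4 };

            memcpy(frame, &di, sizeof(di));
            printf("%u entries\n", count_entries(frame, frame + sizeof(frame)));
            return 0;
    }
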
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 78f96fa3d7d9..b48f43963da6 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -142,7 +142,8 @@ void
cifs_delete_mid(struct mid_q_entry *mid)
{
spin_lock(&GlobalMid_Lock);
- list_del(&mid->qhead);
+ list_del_init(&mid->qhead);
+ mid->mid_flags |= MID_DELETED;
spin_unlock(&GlobalMid_Lock);
DeleteMidQEntry(mid);
@@ -772,6 +773,11 @@ cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
return mid;
}
+static void
+cifs_noop_callback(struct mid_q_entry *mid)
+{
+}
+
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
const int flags, const int num_rqst, struct smb_rqst *rqst,
@@ -826,8 +832,13 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
}
midQ[i]->mid_state = MID_REQUEST_SUBMITTED;
+ /*
+ * We don't invoke the callback for compounds unless it is the last
+ * request.
+ */
+ if (i < num_rqst - 1)
+ midQ[i]->callback = cifs_noop_callback;
}
-
cifs_in_send_inc(ses->server);
rc = smb_send_rqst(ses->server, num_rqst, rqst, flags);
cifs_in_send_dec(ses->server);
@@ -908,6 +919,12 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
midQ[i]->resp_buf = NULL;
}
out:
+ /*
+ * This will dequeue all mids. After this it is important that the
+ * demultiplex_thread will not process any of these mids any further.
+ * This is prevented above by using a noop callback that will not
+ * wake this thread except for the very last PDU.
+ */
for (i = 0; i < num_rqst; i++)
cifs_delete_mid(midQ[i]);
add_credits(ses->server, credits, optype);
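
Giving every mid except the last a no-op callback is what makes the final cifs_delete_mid() sweep safe: the demultiplex thread can mark intermediate responses complete without waking the sender early. The shape of the pattern:

    #include <stdio.h>

    struct req {
            int id;
            void (*callback)(struct req *);
    };

    static void wake_waiter(struct req *r)
    {
            printf("req %d: wake the sending thread\n", r->id);
    }

    static void noop_callback(struct req *r)
    {
            /* Intentionally empty: mid-compound responses must not
             * wake the waiter; only the last one does. */
            (void)r;
    }

    int main(void)
    {
            struct req reqs[3];
            int num = 3;

            for (int i = 0; i < num; i++) {
                    reqs[i].id = i;
                    reqs[i].callback =
                            (i < num - 1) ? noop_callback : wake_waiter;
            }
            for (int i = 0; i < num; i++)
                    reqs[i].callback(&reqs[i]);
            return 0;
    }
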
diff --git a/fs/dax.c b/fs/dax.c
index f32d7125ad0f..0fb270f0a0ef 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -447,6 +447,7 @@ bool dax_lock_mapping_entry(struct page *page)
xa_unlock_irq(&mapping->i_pages);
break;
} else if (IS_ERR(entry)) {
+ xa_unlock_irq(&mapping->i_pages);
WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
continue;
}
@@ -665,6 +666,8 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
min(end - index, (pgoff_t)PAGEVEC_SIZE),
indices)) {
+ pgoff_t nr_pages = 1;
+
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *pvec_ent = pvec.pages[i];
void *entry;
@@ -679,8 +682,15 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
xa_lock_irq(&mapping->i_pages);
entry = get_unlocked_mapping_entry(mapping, index, NULL);
- if (entry)
+ if (entry) {
page = dax_busy_page(entry);
+ /*
+ * Account for multi-order entries at
+ * the end of the pagevec.
+ */
+ if (i + 1 >= pagevec_count(&pvec))
+ nr_pages = 1UL << dax_radix_order(entry);
+ }
put_unlocked_mapping_entry(mapping, index, entry);
xa_unlock_irq(&mapping->i_pages);
if (page)
@@ -695,7 +705,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
*/
pagevec_remove_exceptionals(&pvec);
pagevec_release(&pvec);
- index++;
+ index += nr_pages;
if (page)
break;
@@ -1120,21 +1130,12 @@ static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
{
struct inode *inode = mapping->host;
unsigned long vaddr = vmf->address;
- vm_fault_t ret = VM_FAULT_NOPAGE;
- struct page *zero_page;
- pfn_t pfn;
-
- zero_page = ZERO_PAGE(0);
- if (unlikely(!zero_page)) {
- ret = VM_FAULT_OOM;
- goto out;
- }
+ pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
+ vm_fault_t ret;
- pfn = page_to_pfn_t(zero_page);
dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
false);
ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
-out:
trace_dax_load_hole(inode, vmf, ret);
return ret;
}
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 7f7ee18fe179..e4bb9386c045 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1448,6 +1448,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
}
inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
ei->i_flags = le32_to_cpu(raw_inode->i_flags);
+ ext2_set_inode_flags(inode);
ei->i_faddr = le32_to_cpu(raw_inode->i_faddr);
ei->i_frag_no = raw_inode->i_frag;
ei->i_frag_size = raw_inode->i_fsize;
@@ -1517,7 +1518,6 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
}
brelse (bh);
- ext2_set_inode_flags(inode);
unlock_new_inode(inode);
return inode;
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index e2902d394f1b..f93f9881ec18 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -76,7 +76,7 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
error_msg = "rec_len is too small for name_len";
else if (unlikely(((char *) de - buf) + rlen > size))
- error_msg = "directory entry across range";
+ error_msg = "directory entry overrun";
else if (unlikely(le32_to_cpu(de->inode) >
le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
error_msg = "inode out of bounds";
@@ -85,18 +85,16 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
if (filp)
ext4_error_file(filp, function, line, bh->b_blocknr,
- "bad entry in directory: %s - offset=%u(%u), "
- "inode=%u, rec_len=%d, name_len=%d",
- error_msg, (unsigned) (offset % size),
- offset, le32_to_cpu(de->inode),
- rlen, de->name_len);
+ "bad entry in directory: %s - offset=%u, "
+ "inode=%u, rec_len=%d, name_len=%d, size=%d",
+ error_msg, offset, le32_to_cpu(de->inode),
+ rlen, de->name_len, size);
else
ext4_error_inode(dir, function, line, bh->b_blocknr,
- "bad entry in directory: %s - offset=%u(%u), "
- "inode=%u, rec_len=%d, name_len=%d",
- error_msg, (unsigned) (offset % size),
- offset, le32_to_cpu(de->inode),
- rlen, de->name_len);
+ "bad entry in directory: %s - offset=%u, "
+ "inode=%u, rec_len=%d, name_len=%d, size=%d",
+ error_msg, offset, le32_to_cpu(de->inode),
+ rlen, de->name_len, size);
return 1;
}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 0f0edd1cd0cd..caff935fbeb8 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -43,6 +43,17 @@
#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_EXT4_FS_ENCRYPTION)
#include <linux/fscrypt.h>
+#include <linux/compiler.h>
+
+/* Until this gets included into linux/compiler-gcc.h */
+#ifndef __nonstring
+#if defined(GCC_VERSION) && (GCC_VERSION >= 80000)
+#define __nonstring __attribute__((nonstring))
+#else
+#define __nonstring
+#endif
+#endif
+
/*
* The fourth extended filesystem constants/structures
*/
@@ -675,6 +686,9 @@ enum {
/* Max physical block we can address w/o extents */
#define EXT4_MAX_BLOCK_FILE_PHYS 0xFFFFFFFF
+/* Max logical block we can support */
+#define EXT4_MAX_LOGICAL_BLOCK 0xFFFFFFFF
+
/*
* Structure of an inode on the disk
*/
@@ -1226,7 +1240,7 @@ struct ext4_super_block {
__le32 s_feature_ro_compat; /* readonly-compatible feature set */
/*68*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */
/*78*/ char s_volume_name[16]; /* volume name */
-/*88*/ char s_last_mounted[64]; /* directory where last mounted */
+/*88*/ char s_last_mounted[64] __nonstring; /* directory where last mounted */
/*C8*/ __le32 s_algorithm_usage_bitmap; /* For compression */
/*
* Performance hints. Directory preallocation should only
@@ -1277,13 +1291,13 @@ struct ext4_super_block {
__le32 s_first_error_time; /* first time an error happened */
__le32 s_first_error_ino; /* inode involved in first error */
__le64 s_first_error_block; /* block involved of first error */
- __u8 s_first_error_func[32]; /* function where the error happened */
+ __u8 s_first_error_func[32] __nonstring; /* function where the error happened */
__le32 s_first_error_line; /* line number where error happened */
__le32 s_last_error_time; /* most recent time of an error */
__le32 s_last_error_ino; /* inode involved in last error */
__le32 s_last_error_line; /* line number where error happened */
__le64 s_last_error_block; /* block involved of last error */
- __u8 s_last_error_func[32]; /* function where the error happened */
+ __u8 s_last_error_func[32] __nonstring; /* function where the error happened */
#define EXT4_S_ERR_END offsetof(struct ext4_super_block, s_mount_opts)
__u8 s_mount_opts[64];
__le32 s_usr_quota_inum; /* inode for tracking user quota */
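
The nonstring attribute (GCC 8+) marks fixed-width char arrays that may legitimately lack a NUL terminator, which silences -Wstringop-truncation on intentional bounded copies into fields like the ones annotated above. A minimal compile-and-run example:

    #include <string.h>

    #if defined(__GNUC__) && __GNUC__ >= 8
    #define __nonstring __attribute__((nonstring))
    #else
    #define __nonstring
    #endif

    struct sb_like {
            char volume_name[16] __nonstring; /* fixed field, NUL optional */
    };

    static void set_name(struct sb_like *sb, const char *src)
    {
            /* Intentional bounded copy; without the attribute, GCC 8
             * at -O2 warns the result may be unterminated. */
            strncpy(sb->volume_name, src, sizeof(sb->volume_name));
    }

    int main(void)
    {
            struct sb_like sb;

            set_name(&sb, "a-name-longer-than-sixteen-bytes");
            return sb.volume_name[0] != 'a';
    }
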
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 3543fe80a3c4..7b4736022761 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1753,6 +1753,7 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
{
int err, inline_size;
struct ext4_iloc iloc;
+ size_t inline_len;
void *inline_pos;
unsigned int offset;
struct ext4_dir_entry_2 *de;
@@ -1780,8 +1781,9 @@ bool empty_inline_dir(struct inode *dir, int *has_inline_data)
goto out;
}
+ inline_len = ext4_get_inline_size(dir);
offset = EXT4_INLINE_DOTDOT_SIZE;
- while (offset < dir->i_size) {
+ while (offset < inline_len) {
de = ext4_get_inline_entry(dir, &iloc, offset,
&inline_pos, &inline_size);
if (ext4_check_dir_entry(dir, NULL, de,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index d0dd585add6a..d767e993591d 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3413,12 +3413,16 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
unsigned int blkbits = inode->i_blkbits;
- unsigned long first_block = offset >> blkbits;
- unsigned long last_block = (offset + length - 1) >> blkbits;
+ unsigned long first_block, last_block;
struct ext4_map_blocks map;
bool delalloc = false;
int ret;
+ if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
+ return -EINVAL;
+ first_block = offset >> blkbits;
+ last_block = min_t(loff_t, (offset + length - 1) >> blkbits,
+ EXT4_MAX_LOGICAL_BLOCK);
if (flags & IOMAP_REPORT) {
if (ext4_has_inline_data(inode)) {
@@ -3948,6 +3952,7 @@ static const struct address_space_operations ext4_dax_aops = {
.writepages = ext4_dax_writepages,
.direct_IO = noop_direct_IO,
.set_page_dirty = noop_set_page_dirty,
+ .bmap = ext4_bmap,
.invalidatepage = noop_invalidatepage,
};
@@ -4192,9 +4197,8 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
return 0;
}
-static void ext4_wait_dax_page(struct ext4_inode_info *ei, bool *did_unlock)
+static void ext4_wait_dax_page(struct ext4_inode_info *ei)
{
- *did_unlock = true;
up_write(&ei->i_mmap_sem);
schedule();
down_write(&ei->i_mmap_sem);
@@ -4204,14 +4208,12 @@ int ext4_break_layouts(struct inode *inode)
{
struct ext4_inode_info *ei = EXT4_I(inode);
struct page *page;
- bool retry;
int error;
if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem)))
return -EINVAL;
do {
- retry = false;
page = dax_layout_busy_page(inode->i_mapping);
if (!page)
return 0;
@@ -4219,8 +4221,8 @@ int ext4_break_layouts(struct inode *inode)
error = ___wait_var_event(&page->_refcount,
atomic_read(&page->_refcount) == 1,
TASK_INTERRUPTIBLE, 0, 0,
- ext4_wait_dax_page(ei, &retry));
- } while (error == 0 && retry);
+ ext4_wait_dax_page(ei));
+ } while (error == 0);
return error;
}
@@ -4895,6 +4897,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
* not initialized on a new filesystem. */
}
ei->i_flags = le32_to_cpu(raw_inode->i_flags);
+ ext4_set_inode_flags(inode);
inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
if (ext4_has_feature_64bit(sb))
@@ -5041,7 +5044,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
goto bad_inode;
}
brelse(iloc.bh);
- ext4_set_inode_flags(inode);
unlock_new_inode(inode);
return inode;
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 39b07c2d3384..2305b4374fd3 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -49,7 +49,6 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
*/
sb_start_write(sb);
ext4_mmp_csum_set(sb, mmp);
- mark_buffer_dirty(bh);
lock_buffer(bh);
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 116ff68c5bd4..377d516c475f 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -3478,6 +3478,12 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
int credits;
u8 old_file_type;
+ if (new.inode && new.inode->i_nlink == 0) {
+ EXT4_ERROR_INODE(new.inode,
+ "target of rename is already freed");
+ return -EFSCORRUPTED;
+ }
+
if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT)) &&
(!projid_eq(EXT4_I(new_dir)->i_projid,
EXT4_I(old_dentry->d_inode)->i_projid)))
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index db7590178dfc..2aa62d58d8dd 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -374,13 +374,13 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
if (!bio)
return -ENOMEM;
- wbc_init_bio(io->io_wbc, bio);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio_set_dev(bio, bh->b_bdev);
bio->bi_end_io = ext4_end_bio;
bio->bi_private = ext4_get_io_end(io->io_end);
io->io_bio = bio;
io->io_next_block = bh->b_blocknr;
+ wbc_init_bio(io->io_wbc, bio);
return 0;
}
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index e5fb38451a73..ebbc663d0798 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -19,6 +19,7 @@
int ext4_resize_begin(struct super_block *sb)
{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
int ret = 0;
if (!capable(CAP_SYS_RESOURCE))
@@ -29,7 +30,7 @@ int ext4_resize_begin(struct super_block *sb)
* because the user tools have no way of handling this. Probably a
* bad time to do it anyways.
*/
- if (EXT4_SB(sb)->s_sbh->b_blocknr !=
+ if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
ext4_warning(sb, "won't resize using backup superblock at %llu",
(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
@@ -1986,6 +1987,26 @@ retry:
}
}
+ /*
+ * Make sure the last group is large enough so that it's
+ * guaranteed to have room for all the metadata blocks
+ * that it might need to hold. (We might not need to store
+ * the inode table blocks in the last block group, but there
+ * will be cases where this might be needed.)
+ */
+ if ((ext4_group_first_block_no(sb, n_group) +
+ ext4_group_overhead_blocks(sb, n_group) + 2 +
+ sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
+ n_blocks_count = ext4_group_first_block_no(sb, n_group);
+ n_group--;
+ n_blocks_count_retry = 0;
+ if (resize_inode) {
+ iput(resize_inode);
+ resize_inode = NULL;
+ }
+ goto retry;
+ }
+
/* extend the last group */
if (n_group == o_group)
add = n_blocks_count - o_blocks_count;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 5863fd22e90b..1145109968ef 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2145,6 +2145,8 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb,
SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
if (test_opt(sb, DATA_ERR_ABORT))
SEQ_OPTS_PUTS("data_err=abort");
+ if (DUMMY_ENCRYPTION_ENABLED(sbi))
+ SEQ_OPTS_PUTS("test_dummy_encryption");
ext4_show_quota_options(seq, sb);
return 0;
@@ -4378,11 +4380,13 @@ no_journal:
block = ext4_count_free_clusters(sb);
ext4_free_blocks_count_set(sbi->s_es,
EXT4_C2B(sbi, block));
+ ext4_superblock_csum_set(sb);
err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
GFP_KERNEL);
if (!err) {
unsigned long freei = ext4_count_free_inodes(sb);
sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
+ ext4_superblock_csum_set(sb);
err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
GFP_KERNEL);
}
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index defc2168de91..f58c0cacc531 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -682,6 +682,7 @@ int fat_count_free_clusters(struct super_block *sb)
if (ops->ent_get(&fatent) == FAT_ENT_FREE)
free++;
} while (fat_ent_next(sbi, &fatent));
+ cond_resched();
}
sbi->free_clusters = free;
sbi->free_clus_valid = 1;
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 83bfe04456b6..c550512ce335 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -70,20 +70,7 @@ void fscache_free_cookie(struct fscache_cookie *cookie)
}
/*
- * initialise an cookie jar slab element prior to any use
- */
-void fscache_cookie_init_once(void *_cookie)
-{
- struct fscache_cookie *cookie = _cookie;
-
- memset(cookie, 0, sizeof(*cookie));
- spin_lock_init(&cookie->lock);
- spin_lock_init(&cookie->stores_lock);
- INIT_HLIST_HEAD(&cookie->backing_objects);
-}
-
-/*
- * Set the index key in a cookie. The cookie struct has space for a 12-byte
+ * Set the index key in a cookie. The cookie struct has space for a 16-byte
* key plus length and hash, but if that's not big enough, it's instead a
* pointer to a buffer containing 3 bytes of hash, 1 byte of length and then
* the key data.
@@ -93,20 +80,18 @@ static int fscache_set_key(struct fscache_cookie *cookie,
{
unsigned long long h;
u32 *buf;
+ int bufs;
int i;
- cookie->key_len = index_key_len;
+ bufs = DIV_ROUND_UP(index_key_len, sizeof(*buf));
if (index_key_len > sizeof(cookie->inline_key)) {
- buf = kzalloc(index_key_len, GFP_KERNEL);
+ buf = kcalloc(bufs, sizeof(*buf), GFP_KERNEL);
if (!buf)
return -ENOMEM;
cookie->key = buf;
} else {
buf = (u32 *)cookie->inline_key;
- buf[0] = 0;
- buf[1] = 0;
- buf[2] = 0;
}
memcpy(buf, index_key, index_key_len);
@@ -116,7 +101,8 @@ static int fscache_set_key(struct fscache_cookie *cookie,
*/
h = (unsigned long)cookie->parent;
h += index_key_len + cookie->type;
- for (i = 0; i < (index_key_len + sizeof(u32) - 1) / sizeof(u32); i++)
+
+ for (i = 0; i < bufs; i++)
h += buf[i];
cookie->key_hash = h ^ (h >> 32);
@@ -161,7 +147,7 @@ struct fscache_cookie *fscache_alloc_cookie(
struct fscache_cookie *cookie;
/* allocate and initialise a cookie */
- cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
+ cookie = kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL);
if (!cookie)
return NULL;
@@ -192,6 +178,9 @@ struct fscache_cookie *fscache_alloc_cookie(
cookie->netfs_data = netfs_data;
cookie->flags = (1 << FSCACHE_COOKIE_NO_DATA_YET);
cookie->type = def->type;
+ spin_lock_init(&cookie->lock);
+ spin_lock_init(&cookie->stores_lock);
+ INIT_HLIST_HEAD(&cookie->backing_objects);
/* radix tree insertion won't use the preallocation pool unless it's
* told it may not wait */
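
Rounding the key allocation up to whole u32 words (and zero-filling it) is what makes the word-summing hash well defined: the final partial word now reads zero padding instead of uninitialized bytes. A userspace sketch of the same rounding:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /*
     * Sum whole u32 words of a key.  Allocating a whole number of
     * zeroed words up front means the tail of the last word is
     * deterministic padding, not leftover heap contents.
     */
    static uint32_t hash_key(const void *key, size_t len)
    {
            size_t words = DIV_ROUND_UP(len, sizeof(uint32_t));
            uint32_t *buf = calloc(words, sizeof(uint32_t));
            uint64_t h = len;

            if (!buf)
                    return 0;
            memcpy(buf, key, len);
            for (size_t i = 0; i < words; i++)
                    h += buf[i];
            free(buf);
            return (uint32_t)(h ^ (h >> 32));
    }

    int main(void)
    {
            printf("%08x\n", hash_key("example.com", strlen("example.com")));
            return 0;
    }
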
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index f83328a7f048..d6209022e965 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -51,7 +51,6 @@ extern struct fscache_cache *fscache_select_cache_for_object(
extern struct kmem_cache *fscache_cookie_jar;
extern void fscache_free_cookie(struct fscache_cookie *);
-extern void fscache_cookie_init_once(void *);
extern struct fscache_cookie *fscache_alloc_cookie(struct fscache_cookie *,
const struct fscache_cookie_def *,
const void *, size_t,
diff --git a/fs/fscache/main.c b/fs/fscache/main.c
index 7dce110bf17d..30ad89db1efc 100644
--- a/fs/fscache/main.c
+++ b/fs/fscache/main.c
@@ -143,9 +143,7 @@ static int __init fscache_init(void)
fscache_cookie_jar = kmem_cache_create("fscache_cookie_jar",
sizeof(struct fscache_cookie),
- 0,
- 0,
- fscache_cookie_init_once);
+ 0, 0, NULL);
if (!fscache_cookie_jar) {
pr_notice("Failed to allocate a cookie jar\n");
ret = -ENOMEM;
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 03128ed1f34e..84544a4f012d 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1057,7 +1057,7 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
}
}
release_metapath(&mp);
- if (gfs2_is_jdata(ip))
+ if (!gfs2_is_stuffed(ip) && gfs2_is_jdata(ip))
iomap->page_done = gfs2_iomap_journaled_page_done;
return 0;
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 3212c29235ce..2005529af560 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -230,7 +230,7 @@ static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
ret = -EXDEV;
if (src_file.file->f_path.mnt != dst_file->f_path.mnt)
goto fdput;
- ret = do_clone_file_range(src_file.file, off, dst_file, destoff, olen);
+ ret = vfs_clone_file_range(src_file.file, off, dst_file, destoff, olen);
fdput:
fdput(src_file);
return ret;
diff --git a/fs/iomap.c b/fs/iomap.c
index 74762b1ec233..ec15cf2ec696 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -1051,6 +1051,7 @@ iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
} else {
WARN_ON_ONCE(!PageUptodate(page));
iomap_page_create(inode, page);
+ set_page_dirty(page);
}
return length;
@@ -1090,7 +1091,6 @@ int iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
length -= ret;
}
- set_page_dirty(page);
wait_for_stable_page(page);
return VM_FAULT_LOCKED;
out_unlock:
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 34830f6457ea..8220a168282e 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1637,6 +1637,14 @@ static void nfs_state_set_delegation(struct nfs4_state *state,
write_sequnlock(&state->seqlock);
}
+static void nfs_state_clear_delegation(struct nfs4_state *state)
+{
+ write_seqlock(&state->seqlock);
+ nfs4_stateid_copy(&state->stateid, &state->open_stateid);
+ clear_bit(NFS_DELEGATED_STATE, &state->flags);
+ write_sequnlock(&state->seqlock);
+}
+
static int update_open_stateid(struct nfs4_state *state,
const nfs4_stateid *open_stateid,
const nfs4_stateid *delegation,
@@ -2145,10 +2153,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
if (IS_ERR(opendata))
return PTR_ERR(opendata);
nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
- write_seqlock(&state->seqlock);
- nfs4_stateid_copy(&state->stateid, &state->open_stateid);
- write_sequnlock(&state->seqlock);
- clear_bit(NFS_DELEGATED_STATE, &state->flags);
+ nfs_state_clear_delegation(state);
switch (type & (FMODE_READ|FMODE_WRITE)) {
case FMODE_READ|FMODE_WRITE:
case FMODE_WRITE:
@@ -2601,10 +2606,7 @@ static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state,
const nfs4_stateid *stateid)
{
nfs_remove_bad_delegation(state->inode, stateid);
- write_seqlock(&state->seqlock);
- nfs4_stateid_copy(&state->stateid, &state->open_stateid);
- write_sequnlock(&state->seqlock);
- clear_bit(NFS_DELEGATED_STATE, &state->flags);
+ nfs_state_clear_delegation(state);
}
static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
@@ -2672,15 +2674,20 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
delegation = rcu_dereference(NFS_I(state->inode)->delegation);
if (delegation == NULL) {
rcu_read_unlock();
+ nfs_state_clear_delegation(state);
return;
}
nfs4_stateid_copy(&stateid, &delegation->stateid);
- if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) ||
- !test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
- &delegation->flags)) {
+ if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
+ rcu_read_unlock();
+ nfs_state_clear_delegation(state);
+ return;
+ }
+
+ if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
+ &delegation->flags)) {
rcu_read_unlock();
- nfs_finish_clear_delegation_stateid(state, &stateid);
return;
}
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 3df0eb52da1c..40a08cd483f0 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1390,6 +1390,8 @@ int nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_
if (!nfs4_state_mark_reclaim_nograce(clp, state))
return -EBADF;
+ nfs_inode_find_delegation_state_and_recover(state->inode,
+ &state->stateid);
dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
clp->cl_hostname);
nfs4_schedule_state_manager(clp);
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index a275fba93170..b1483b303e0b 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -1137,7 +1137,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
TP_fast_assign(
__entry->error = error;
__entry->fhandle = nfs_fhandle_hash(fhandle);
- if (inode != NULL) {
+ if (!IS_ERR_OR_NULL(inode)) {
__entry->fileid = NFS_FILEID(inode);
__entry->dev = inode->i_sb->s_dev;
} else {
@@ -1194,7 +1194,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
TP_fast_assign(
__entry->error = error;
__entry->fhandle = nfs_fhandle_hash(fhandle);
- if (inode != NULL) {
+ if (!IS_ERR_OR_NULL(inode)) {
__entry->fileid = NFS_FILEID(inode);
__entry->dev = inode->i_sb->s_dev;
} else {
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index e8f232de484f..7d9a51e6b847 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1740,16 +1740,16 @@ static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
return ret;
}
-static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
+static int pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
{
/*
* send layoutcommit as it can hold up layoutreturn due to lseg
* reference
*/
pnfs_layoutcommit_inode(lo->plh_inode, false);
- return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
+ return wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
nfs_wait_bit_killable,
- TASK_UNINTERRUPTIBLE);
+ TASK_KILLABLE);
}
static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)
@@ -1830,7 +1830,9 @@ pnfs_update_layout(struct inode *ino,
}
lookup_again:
- nfs4_client_recover_expired_lease(clp);
+ lseg = ERR_PTR(nfs4_client_recover_expired_lease(clp));
+ if (IS_ERR(lseg))
+ goto out;
first = false;
spin_lock(&ino->i_lock);
lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
@@ -1863,9 +1865,9 @@ lookup_again:
if (list_empty(&lo->plh_segs) &&
atomic_read(&lo->plh_outstanding) != 0) {
spin_unlock(&ino->i_lock);
- if (wait_var_event_killable(&lo->plh_outstanding,
- atomic_read(&lo->plh_outstanding) == 0
- || !list_empty(&lo->plh_segs)))
+ lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
+ !atomic_read(&lo->plh_outstanding)));
+ if (IS_ERR(lseg) || !list_empty(&lo->plh_segs))
goto out_put_layout_hdr;
pnfs_put_layout_hdr(lo);
goto lookup_again;
@@ -1898,8 +1900,11 @@ lookup_again:
if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
&lo->plh_flags)) {
spin_unlock(&ino->i_lock);
- wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET,
- TASK_UNINTERRUPTIBLE);
+ lseg = ERR_PTR(wait_on_bit(&lo->plh_flags,
+ NFS_LAYOUT_FIRST_LAYOUTGET,
+ TASK_KILLABLE));
+ if (IS_ERR(lseg))
+ goto out_put_layout_hdr;
pnfs_put_layout_hdr(lo);
dprintk("%s retrying\n", __func__);
goto lookup_again;
@@ -1925,7 +1930,8 @@ lookup_again:
if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
spin_unlock(&ino->i_lock);
dprintk("%s wait for layoutreturn\n", __func__);
- if (pnfs_prepare_to_retry_layoutget(lo)) {
+ lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
+ if (!IS_ERR(lseg)) {
if (first)
pnfs_clear_first_layoutget(lo);
pnfs_put_layout_hdr(lo);
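
The pnfs changes funnel every wait's return code through ERR_PTR(), so a fatal signal during any of the waits surfaces as an error pointer in lseg rather than being silently retried. A minimal sketch of the ERR_PTR/IS_ERR encoding (the kernel reserves the top 4095 pointer values for errnos; success, i.e. 0, maps to NULL and falls through):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error)
    {
            return (void *)error;
    }

    static inline long PTR_ERR(const void *ptr)
    {
            return (long)ptr;
    }

    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static int killable_wait(void)
    {
            return -512;    /* pretend a fatal signal: -ERESTARTSYS */
    }

    int main(void)
    {
            void *lseg = ERR_PTR(killable_wait());

            if (IS_ERR(lseg)) {
                    printf("abort lookup, error %ld\n", PTR_ERR(lseg));
                    return 1;
            }
            printf("wait completed, keep going\n");
            return 0;
    }
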
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 55a099e47ba2..b53e76391e52 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -541,7 +541,8 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
__be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
u64 dst_pos, u64 count)
{
- return nfserrno(do_clone_file_range(src, src_pos, dst, dst_pos, count));
+ return nfserrno(vfs_clone_file_range(src, src_pos, dst, dst_pos,
+ count));
}
ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index d9ebe11c8990..1d098c3c00e0 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -342,6 +342,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
* for this bh as it's not marked locally
* uptodate. */
status = -EIO;
+ clear_buffer_needs_validate(bh);
put_bh(bh);
bhs[i] = NULL;
continue;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index aaca0949fe53..826f0567ec43 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -584,9 +584,9 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
res->last_used = 0;
- spin_lock(&dlm->spinlock);
+ spin_lock(&dlm->track_lock);
list_add_tail(&res->tracking, &dlm->tracking_list);
- spin_unlock(&dlm->spinlock);
+ spin_unlock(&dlm->track_lock);
memset(res->lvb, 0, DLM_LVB_LEN);
memset(res->refmap, 0, sizeof(res->refmap));
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 8e712b614e6e..933aac5da193 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -96,7 +96,9 @@ struct ocfs2_unblock_ctl {
};
/* Lockdep class keys */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES];
+#endif
static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
int new_level);
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 7869622af22a..7a5ee145c733 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2946,6 +2946,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
if (map_end & (PAGE_SIZE - 1))
to = map_end & (PAGE_SIZE - 1);
+retry:
page = find_or_create_page(mapping, page_index, GFP_NOFS);
if (!page) {
ret = -ENOMEM;
@@ -2954,11 +2955,18 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
}
/*
- * In case PAGE_SIZE <= CLUSTER_SIZE, This page
- * can't be dirtied before we CoW it out.
+ * In case PAGE_SIZE <= CLUSTER_SIZE, we do not expect a dirty
+ * page here; if one shows up anyway, write it back and retry.
*/
- if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize)
- BUG_ON(PageDirty(page));
+ if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) {
+ if (PageDirty(page)) {
+ /*
+ * write_one_page() will unlock the page on return
+ */
+ ret = write_one_page(page);
+ goto retry;
+ }
+ }
if (!PageUptodate(page)) {
ret = block_read_full_page(page, ocfs2_get_block);
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 296037afecdb..1cc797a08a5b 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -141,7 +141,7 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
}
/* Try to use clone_file_range to clone up within the same fs */
- error = vfs_clone_file_range(old_file, 0, new_file, 0, len);
+ error = do_clone_file_range(old_file, 0, new_file, 0, len);
if (!error)
goto out;
/* Couldn't clone, so now we try to copy the data */
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 32e9282893c9..986313da0c88 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -131,9 +131,6 @@ static int ovl_open(struct inode *inode, struct file *file)
if (IS_ERR(realfile))
return PTR_ERR(realfile);
- /* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
- file->f_mapping = realfile->f_mapping;
-
file->private_data = realfile;
return 0;
@@ -243,8 +240,10 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
goto out_unlock;
old_cred = ovl_override_creds(file_inode(file)->i_sb);
+ file_start_write(real.file);
ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
ovl_iocb_to_rwf(iocb));
+ file_end_write(real.file);
revert_creds(old_cred);
/* Update size */
@@ -334,6 +333,25 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len
return ret;
}
+static int ovl_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
+{
+ struct fd real;
+ const struct cred *old_cred;
+ int ret;
+
+ ret = ovl_real_fdget(file, &real);
+ if (ret)
+ return ret;
+
+ old_cred = ovl_override_creds(file_inode(file)->i_sb);
+ ret = vfs_fadvise(real.file, offset, len, advice);
+ revert_creds(old_cred);
+
+ fdput(real);
+
+ return ret;
+}
+
static long ovl_real_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -502,6 +520,7 @@ const struct file_operations ovl_file_operations = {
.fsync = ovl_fsync,
.mmap = ovl_mmap,
.fallocate = ovl_fallocate,
+ .fadvise = ovl_fadvise,
.unlocked_ioctl = ovl_ioctl,
.compat_ioctl = ovl_compat_ioctl,
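With ->fadvise wired up, advice issued on an overlayfs file descriptor is forwarded to the real underlying file rather than being applied to the overlay's own (empty) page cache. A hedged userspace illustration (the path is hypothetical):

    #define _XOPEN_SOURCE 600   /* for posix_fadvise() */
    #include <fcntl.h>

    int main(void)
    {
        int fd = open("/mnt/overlay/bigfile", O_RDONLY);    /* hypothetical path */

        if (fd < 0)
            return 1;
        /* Now reaches the underlying filesystem via ovl_fadvise(). */
        posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
        return 0;
    }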
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index e0bb217c01e2..3b7ed5d2279c 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -467,6 +467,10 @@ static int ovl_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
return -EOPNOTSUPP;
old_cred = ovl_override_creds(inode->i_sb);
+
+ if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC)
+ filemap_write_and_wait(realinode->i_mapping);
+
err = realinode->i_op->fiemap(realinode, fieinfo, start, len);
revert_creds(old_cred);
@@ -500,6 +504,11 @@ static const struct inode_operations ovl_special_inode_operations = {
.update_time = ovl_update_time,
};
+static const struct address_space_operations ovl_aops = {
+ /* For O_DIRECT dentry_open() checks f_mapping->a_ops->direct_IO */
+ .direct_IO = noop_direct_IO,
+};
+
/*
 * It is possible to stack an overlayfs instance on top of another
 * overlayfs instance as the lower layer. We need to annotate the
@@ -571,6 +580,7 @@ static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev,
case S_IFREG:
inode->i_op = &ovl_file_inode_operations;
inode->i_fop = &ovl_file_operations;
+ inode->i_mapping->a_ops = &ovl_aops;
break;
case S_IFDIR:
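The new ovl_aops exists only to satisfy the VFS's open-time probe: dentry_open() rejects O_DIRECT when f_mapping->a_ops->direct_IO is NULL, and the f_mapping override formerly done in ovl_open() is gone. A small sketch of the userspace-visible effect (path hypothetical):

    #define _GNU_SOURCE     /* for O_DIRECT */
    #include <fcntl.h>

    int main(void)
    {
        /* Passes the dentry_open() probe because ovl_aops provides
         * noop_direct_IO; actual I/O still goes to the real file. */
        int fd = open("/mnt/overlay/data", O_RDONLY | O_DIRECT);

        return fd < 0;
    }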
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index f28711846dd6..9c0ca6a7becf 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -686,7 +686,7 @@ struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
index = NULL;
goto out;
}
- pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n"
+ pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%.*s, err=%i);\n"
"overlayfs: mount with '-o index=off' to disable inodes index.\n",
d_inode(origin)->i_ino, name.len, name.name,
err);
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index f61839e1054c..a3c0d9584312 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -152,8 +152,8 @@ static inline int ovl_do_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
int err = vfs_setxattr(dentry, name, value, size, flags);
- pr_debug("setxattr(%pd2, \"%s\", \"%*s\", 0x%x) = %i\n",
- dentry, name, (int) size, (char *) value, flags, err);
+ pr_debug("setxattr(%pd2, \"%s\", \"%*pE\", %zu, 0x%x) = %i\n",
+ dentry, name, min((int)size, 48), value, size, flags, err);
return err;
}
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 2e0fc93c2c06..30adc9d408a0 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -982,16 +982,6 @@ static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath)
if (err)
goto out;
- err = -EBUSY;
- if (ovl_inuse_trylock(upperpath->dentry)) {
- ofs->upperdir_locked = true;
- } else if (ofs->config.index) {
- pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n");
- goto out;
- } else {
- pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
- }
-
upper_mnt = clone_private_mount(upperpath);
err = PTR_ERR(upper_mnt);
if (IS_ERR(upper_mnt)) {
@@ -1002,6 +992,17 @@ static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath)
/* Don't inherit atime flags */
upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME);
ofs->upper_mnt = upper_mnt;
+
+ err = -EBUSY;
+ if (ovl_inuse_trylock(ofs->upper_mnt->mnt_root)) {
+ ofs->upperdir_locked = true;
+ } else if (ofs->config.index) {
+ pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n");
+ goto out;
+ } else {
+ pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
+ }
+
err = 0;
out:
return err;
@@ -1101,8 +1102,10 @@ static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath)
goto out;
}
+ ofs->workbasedir = dget(workpath.dentry);
+
err = -EBUSY;
- if (ovl_inuse_trylock(workpath.dentry)) {
+ if (ovl_inuse_trylock(ofs->workbasedir)) {
ofs->workdir_locked = true;
} else if (ofs->config.index) {
pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n");
@@ -1111,7 +1114,6 @@ static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath)
pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
}
- ofs->workbasedir = dget(workpath.dentry);
err = ovl_make_workdir(ofs, &workpath);
if (err)
goto out;
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 8cfb62cc8672..ace4fe4c39a9 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -683,7 +683,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
struct dentry *upperdentry = ovl_dentry_upper(dentry);
struct dentry *index = NULL;
struct inode *inode;
- struct qstr name;
+ struct qstr name = { };
int err;
err = ovl_get_index_name(lowerdentry, &name);
@@ -726,6 +726,7 @@ static void ovl_cleanup_index(struct dentry *dentry)
goto fail;
out:
+ kfree(name.name);
dput(index);
return;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index ccf86f16d9f0..7e9f07bf260d 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -407,6 +407,20 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
unsigned long *entries;
int err;
+ /*
+ * The ability to racily run the kernel stack unwinder on a running task
+ * and then observe the unwinder output is scary; while it is useful for
+ * debugging kernel issues, it can also allow an attacker to leak kernel
+ * stack contents.
+ * Doing this in a manner that is at least safe from races would require
+ * some work to ensure that the remote task cannot be scheduled; and
+ * even then, this would still expose the unwinder as a local attack
+ * surface.
+ * Therefore, this interface is restricted to root.
+ */
+ if (!file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN))
+ return -EACCES;
+
entries = kmalloc_array(MAX_STACK_TRACE_DEPTH, sizeof(*entries),
GFP_KERNEL);
if (!entries)
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index ad72261ee3fe..d297fe4472a9 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -464,6 +464,7 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
ret = -EFAULT;
goto out;
}
+ m = NULL; /* skip the list anchor */
} else if (m->type == KCORE_VMALLOC) {
vread(buf, (char *)start, tsz);
/* we have to zero-fill user buffer even if no read */
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index bbd1e357c23d..f4fd2e72add4 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -898,8 +898,22 @@ static struct platform_driver ramoops_driver = {
},
};
-static void ramoops_register_dummy(void)
+static inline void ramoops_unregister_dummy(void)
{
+ platform_device_unregister(dummy);
+ dummy = NULL;
+
+ kfree(dummy_data);
+ dummy_data = NULL;
+}
+
+static void __init ramoops_register_dummy(void)
+{
+ /*
+ * Prepare a dummy platform data structure to carry the module
+ * parameters. If mem_size isn't set, then there are no module
+ * parameters, and we can skip this.
+ */
if (!mem_size)
return;
@@ -932,21 +946,28 @@ static void ramoops_register_dummy(void)
if (IS_ERR(dummy)) {
pr_info("could not create platform device: %ld\n",
PTR_ERR(dummy));
+ dummy = NULL;
+ ramoops_unregister_dummy();
}
}
static int __init ramoops_init(void)
{
+ int ret;
+
ramoops_register_dummy();
- return platform_driver_register(&ramoops_driver);
+ ret = platform_driver_register(&ramoops_driver);
+ if (ret != 0)
+ ramoops_unregister_dummy();
+
+ return ret;
}
late_initcall(ramoops_init);
static void __exit ramoops_exit(void)
{
platform_driver_unregister(&ramoops_driver);
- platform_device_unregister(dummy);
- kfree(dummy_data);
+ ramoops_unregister_dummy();
}
module_exit(ramoops_exit);
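ramoops_unregister_dummy() is deliberately idempotent: kfree(NULL) is a no-op, the pointers are re-NULLed after freeing, and dummy is cleared before the helper runs on the ERR_PTR path, so one function serves every failure path and module exit. A minimal sketch of the same teardown pattern (names invented):

    #include <stdlib.h>

    static int *res_a;  /* invented stand-ins for dummy / dummy_data */
    static int *res_b;

    static void teardown(void)  /* safe to call any number of times */
    {
        free(res_a);            /* free(NULL) is a no-op, like kfree(NULL) */
        res_a = NULL;
        free(res_b);
        res_b = NULL;
    }

    static int setup(void)
    {
        res_a = malloc(sizeof(*res_a));
        res_b = malloc(sizeof(*res_b));
        if (!res_a || !res_b) {
            teardown();         /* one cleanup path for every failure */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        if (setup())
            return 1;
        teardown();
        return 0;
    }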
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 951a14edcf51..0792595ebcfb 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -429,7 +429,12 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size,
vaddr = vmap(pages, page_count, VM_MAP, prot);
kfree(pages);
- return vaddr;
+ /*
+ * Since vmap() uses page granularity, we must add the offset
+ * into the page here, to get the byte granularity address
+ * into the mapping to represent the actual "start" location.
+ */
+ return vaddr + offset_in_page(start);
}
static void *persistent_ram_iomap(phys_addr_t start, size_t size,
@@ -448,6 +453,11 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size,
else
va = ioremap_wc(start, size);
+ /*
+ * Since request_mem_region() and ioremap() are byte-granularity,
+ * there is no need to handle anything special here, as we do for
+ * the vmap() case in persistent_ram_vmap() above.
+ */
return va;
}
@@ -468,7 +478,7 @@ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
return -ENOMEM;
}
- prz->buffer = prz->vaddr + offset_in_page(start);
+ prz->buffer = prz->vaddr;
prz->buffer_size = size - sizeof(struct persistent_ram_buffer);
return 0;
@@ -515,7 +525,8 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
if (prz->vaddr) {
if (pfn_valid(prz->paddr >> PAGE_SHIFT)) {
- vunmap(prz->vaddr);
+ /* We must vunmap() at page-granularity. */
+ vunmap(prz->vaddr - offset_in_page(prz->paddr));
} else {
iounmap(prz->vaddr);
release_mem_region(prz->paddr, prz->size);
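A worked example of the granularity bookkeeping above, with invented numbers: vmap()/vunmap() deal only in whole pages, while the ramoops region may start mid-page.

    #include <stdio.h>

    #define PAGE_MASK           (~0xfffUL)  /* 4 KiB pages, illustrative */
    #define offset_in_page(p)   ((unsigned long)(p) & ~PAGE_MASK)

    int main(void)
    {
        unsigned long start = 0x40000200UL;             /* hypothetical prz->paddr */
        unsigned long page_base = start & PAGE_MASK;    /* all vmap() can map */

        /* The byte-accurate mapping is the page mapping + 0x200, which is
         * what persistent_ram_vmap() now returns; persistent_ram_free()
         * must hand the page-aligned address (vaddr - offset) to vunmap(). */
        printf("page base 0x%lx, in-page offset 0x%lx\n",
               page_base, offset_in_page(start));
        return 0;
    }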
diff --git a/fs/read_write.c b/fs/read_write.c
index 39b4a21dd933..8a2737f0d61d 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1818,8 +1818,8 @@ int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
}
EXPORT_SYMBOL(vfs_clone_file_prep_inodes);
-int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out, u64 len)
+int do_clone_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out, u64 len)
{
struct inode *inode_in = file_inode(file_in);
struct inode *inode_out = file_inode(file_out);
@@ -1866,6 +1866,19 @@ int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
return ret;
}
+EXPORT_SYMBOL(do_clone_file_range);
+
+int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out, u64 len)
+{
+ int ret;
+
+ file_start_write(file_out);
+ ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len);
+ file_end_write(file_out);
+
+ return ret;
+}
EXPORT_SYMBOL(vfs_clone_file_range);
/*
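The rename gives two entry points with distinct locking contracts: vfs_clone_file_range() takes freeze protection on the destination itself, while do_clone_file_range() is for callers that already hold it, which is why overlayfs copy-up (holding mnt_want_write() on the upper layer) now calls the do_ variant and nfsd calls the vfs_ one. Illustration only, with an invented function name:

    /* Caller already inside a write section on "out": */
    static int clone_while_write_held(struct file *in, struct file *out, u64 len)
    {
        int err;

        file_start_write(out);  /* stands in for the caller's own protection */
        err = do_clone_file_range(in, 0, out, 0, len);
        /* ... further writes to "out" under the same write section ... */
        file_end_write(out);
        return err;
    }

    /* Callers without their own write section use the wrapper instead:
     *     err = vfs_clone_file_range(in, 0, out, 0, len);
     */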
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 23e7042666a7..fec62e9dfbe6 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1912,7 +1912,9 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
mutex_unlock(&c->bu_mutex);
}
- ubifs_assert(c, c->lst.taken_empty_lebs > 0);
+ if (!c->need_recovery)
+ ubifs_assert(c, c->lst.taken_empty_lebs > 0);
+
return 0;
}
@@ -1954,6 +1956,9 @@ static struct ubi_volume_desc *open_ubi(const char *name, int mode)
int dev, vol;
char *endptr;
+ if (!name || !*name)
+ return ERR_PTR(-EINVAL);
+
/* First, try to open using the device node path method */
ubi = ubi_open_volume_path(name, mode);
if (!IS_ERR(ubi))
@@ -2332,8 +2337,8 @@ late_initcall(ubifs_init);
static void __exit ubifs_exit(void)
{
- WARN_ON(list_empty(&ubifs_infos));
- WARN_ON(atomic_long_read(&ubifs_clean_zn_cnt) == 0);
+ WARN_ON(!list_empty(&ubifs_infos));
+ WARN_ON(atomic_long_read(&ubifs_clean_zn_cnt) != 0);
dbg_debugfs_exit();
ubifs_compressors_exit();
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index 61afdfee4b28..f5ad1ede7990 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -152,12 +152,6 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
ui->data_len = size;
mutex_lock(&host_ui->ui_mutex);
-
- if (!host->i_nlink) {
- err = -ENOENT;
- goto out_noent;
- }
-
host->i_ctime = current_time(host);
host_ui->xattr_cnt += 1;
host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
@@ -190,7 +184,6 @@ out_cancel:
host_ui->xattr_size -= CALC_XATTR_BYTES(size);
host_ui->xattr_names -= fname_len(nm);
host_ui->flags &= ~UBIFS_CRYPT_FL;
-out_noent:
mutex_unlock(&host_ui->ui_mutex);
out_free:
make_bad_inode(inode);
@@ -242,12 +235,6 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
mutex_unlock(&ui->ui_mutex);
mutex_lock(&host_ui->ui_mutex);
-
- if (!host->i_nlink) {
- err = -ENOENT;
- goto out_noent;
- }
-
host->i_ctime = current_time(host);
host_ui->xattr_size -= CALC_XATTR_BYTES(old_size);
host_ui->xattr_size += CALC_XATTR_BYTES(size);
@@ -269,7 +256,6 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
out_cancel:
host_ui->xattr_size -= CALC_XATTR_BYTES(size);
host_ui->xattr_size += CALC_XATTR_BYTES(old_size);
-out_noent:
mutex_unlock(&host_ui->ui_mutex);
make_bad_inode(inode);
out_free:
@@ -496,12 +482,6 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host,
return err;
mutex_lock(&host_ui->ui_mutex);
-
- if (!host->i_nlink) {
- err = -ENOENT;
- goto out_noent;
- }
-
host->i_ctime = current_time(host);
host_ui->xattr_cnt -= 1;
host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm));
@@ -521,7 +501,6 @@ out_cancel:
host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm));
host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len);
host_ui->xattr_names += fname_len(nm);
-out_noent:
mutex_unlock(&host_ui->ui_mutex);
ubifs_release_budget(c, &req);
make_bad_inode(inode);
@@ -561,9 +540,6 @@ static int ubifs_xattr_remove(struct inode *host, const char *name)
ubifs_assert(c, inode_is_locked(host));
- if (!host->i_nlink)
- return -ENOENT;
-
if (fname_len(&nm) > UBIFS_MAX_NLEN)
return -ENAMETOOLONG;
diff --git a/fs/xattr.c b/fs/xattr.c
index daa732550088..0d6a6a4af861 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -948,17 +948,19 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
int err = 0;
#ifdef CONFIG_FS_POSIX_ACL
- if (inode->i_acl) {
- err = xattr_list_one(&buffer, &remaining_size,
- XATTR_NAME_POSIX_ACL_ACCESS);
- if (err)
- return err;
- }
- if (inode->i_default_acl) {
- err = xattr_list_one(&buffer, &remaining_size,
- XATTR_NAME_POSIX_ACL_DEFAULT);
- if (err)
- return err;
+ if (IS_POSIXACL(inode)) {
+ if (inode->i_acl) {
+ err = xattr_list_one(&buffer, &remaining_size,
+ XATTR_NAME_POSIX_ACL_ACCESS);
+ if (err)
+ return err;
+ }
+ if (inode->i_default_acl) {
+ err = xattr_list_one(&buffer, &remaining_size,
+ XATTR_NAME_POSIX_ACL_DEFAULT);
+ if (err)
+ return err;
+ }
}
#endif
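Seen from userspace, the fix means listxattr() only advertises the POSIX ACL names when the superblock actually has ACL support enabled, so every listed name can be fetched afterwards. A hedged illustration (the path is hypothetical):

    #include <stdio.h>
    #include <string.h>
    #include <sys/xattr.h>

    int main(void)
    {
        char names[1024];
        ssize_t i, len = listxattr("/mnt/tmpfs/file", names, sizeof(names));

        /* names holds consecutive NUL-terminated strings; after the fix,
         * system.posix_acl_* entries appear only if IS_POSIXACL() held. */
        for (i = 0; i < len; i += strlen(names + i) + 1)
            puts(names + i);
        return 0;
    }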
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 1e671d4eb6fa..c6299f82a6e4 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -587,7 +587,7 @@ xfs_attr_leaf_addname(
*/
error = xfs_attr3_leaf_to_node(args);
if (error)
- goto out_defer_cancel;
+ return error;
error = xfs_defer_finish(&args->trans);
if (error)
return error;
@@ -675,7 +675,7 @@ xfs_attr_leaf_addname(
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (error)
- goto out_defer_cancel;
+ return error;
error = xfs_defer_finish(&args->trans);
if (error)
return error;
@@ -693,9 +693,6 @@ xfs_attr_leaf_addname(
error = xfs_attr3_leaf_clearflag(args);
}
return error;
-out_defer_cancel:
- xfs_defer_cancel(args->trans);
- return error;
}
/*
@@ -738,15 +735,12 @@ xfs_attr_leaf_removename(
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (error)
- goto out_defer_cancel;
+ return error;
error = xfs_defer_finish(&args->trans);
if (error)
return error;
}
return 0;
-out_defer_cancel:
- xfs_defer_cancel(args->trans);
- return error;
}
/*
@@ -864,7 +858,7 @@ restart:
state = NULL;
error = xfs_attr3_leaf_to_node(args);
if (error)
- goto out_defer_cancel;
+ goto out;
error = xfs_defer_finish(&args->trans);
if (error)
goto out;
@@ -888,7 +882,7 @@ restart:
*/
error = xfs_da3_split(state);
if (error)
- goto out_defer_cancel;
+ goto out;
error = xfs_defer_finish(&args->trans);
if (error)
goto out;
@@ -984,7 +978,7 @@ restart:
if (retval && (state->path.active > 1)) {
error = xfs_da3_join(state);
if (error)
- goto out_defer_cancel;
+ goto out;
error = xfs_defer_finish(&args->trans);
if (error)
goto out;
@@ -1013,9 +1007,6 @@ out:
if (error)
return error;
return retval;
-out_defer_cancel:
- xfs_defer_cancel(args->trans);
- goto out;
}
/*
@@ -1107,7 +1098,7 @@ xfs_attr_node_removename(
if (retval && (state->path.active > 1)) {
error = xfs_da3_join(state);
if (error)
- goto out_defer_cancel;
+ goto out;
error = xfs_defer_finish(&args->trans);
if (error)
goto out;
@@ -1138,7 +1129,7 @@ xfs_attr_node_removename(
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (error)
- goto out_defer_cancel;
+ goto out;
error = xfs_defer_finish(&args->trans);
if (error)
goto out;
@@ -1150,9 +1141,6 @@ xfs_attr_node_removename(
out:
xfs_da_state_free(state);
return error;
-out_defer_cancel:
- xfs_defer_cancel(args->trans);
- goto out;
}
/*
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index af094063e402..d89363c6b523 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -485,7 +485,7 @@ xfs_attr_rmtval_set(
blkcnt, XFS_BMAPI_ATTRFORK, args->total, &map,
&nmap);
if (error)
- goto out_defer_cancel;
+ return error;
error = xfs_defer_finish(&args->trans);
if (error)
return error;
@@ -553,9 +553,6 @@ xfs_attr_rmtval_set(
}
ASSERT(valuelen == 0);
return 0;
-out_defer_cancel:
- xfs_defer_cancel(args->trans);
- return error;
}
/*
@@ -625,7 +622,7 @@ xfs_attr_rmtval_remove(
error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
XFS_BMAPI_ATTRFORK, 1, &done);
if (error)
- goto out_defer_cancel;
+ return error;
error = xfs_defer_finish(&args->trans);
if (error)
return error;
@@ -638,7 +635,4 @@ xfs_attr_rmtval_remove(
return error;
}
return 0;
-out_defer_cancel:
- xfs_defer_cancel(args->trans);
- return error;
}
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 2760314fdf7f..a47670332326 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -673,7 +673,8 @@ xfs_bmap_extents_to_btree(
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
/*
- * Make space in the inode incore.
+ * Make space in the inode incore. This needs to be undone if we fail
+ * to expand the root.
*/
xfs_iroot_realloc(ip, 1, whichfork);
ifp->if_flags |= XFS_IFBROOT;
@@ -711,16 +712,15 @@ xfs_bmap_extents_to_btree(
args.minlen = args.maxlen = args.prod = 1;
args.wasdel = wasdel;
*logflagsp = 0;
- if ((error = xfs_alloc_vextent(&args))) {
- ASSERT(ifp->if_broot == NULL);
- goto err1;
- }
+ error = xfs_alloc_vextent(&args);
+ if (error)
+ goto out_root_realloc;
if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
- ASSERT(ifp->if_broot == NULL);
error = -ENOSPC;
- goto err1;
+ goto out_root_realloc;
}
+
/*
* Allocation can't fail, the space was reserved.
*/
@@ -732,9 +732,10 @@ xfs_bmap_extents_to_btree(
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
if (!abp) {
- error = -ENOSPC;
- goto err2;
+ error = -EFSCORRUPTED;
+ goto out_unreserve_dquot;
}
+
/*
* Fill in the child block.
*/
@@ -775,11 +776,12 @@ xfs_bmap_extents_to_btree(
*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
return 0;
-err2:
+out_unreserve_dquot:
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
-err1:
+out_root_realloc:
xfs_iroot_realloc(ip, -1, whichfork);
XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
+ ASSERT(ifp->if_broot == NULL);
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
return error;
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 059bc44c27e8..afbe336600e1 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -1016,6 +1016,8 @@ static inline void xfs_dinode_put_rdev(struct xfs_dinode *dip, xfs_dev_t rdev)
#define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */
#define XFS_DIFLAG_NODEFRAG_BIT 13 /* do not reorganize/defragment */
#define XFS_DIFLAG_FILESTREAM_BIT 14 /* use filestream allocator */
+/* Do not use bit 15, di_flags is legacy and unchanging now */
+
#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT)
#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT)
#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT)
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 30d1d60f1d46..09d9c8cfa4a0 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -415,6 +415,31 @@ xfs_dinode_verify_fork(
return NULL;
}
+static xfs_failaddr_t
+xfs_dinode_verify_forkoff(
+ struct xfs_dinode *dip,
+ struct xfs_mount *mp)
+{
+ if (!XFS_DFORK_Q(dip))
+ return NULL;
+
+ switch (dip->di_format) {
+ case XFS_DINODE_FMT_DEV:
+ if (dip->di_forkoff != (roundup(sizeof(xfs_dev_t), 8) >> 3))
+ return __this_address;
+ break;
+ case XFS_DINODE_FMT_LOCAL: /* fall through ... */
+ case XFS_DINODE_FMT_EXTENTS: /* fall through ... */
+ case XFS_DINODE_FMT_BTREE:
+ if (dip->di_forkoff >= (XFS_LITINO(mp, dip->di_version) >> 3))
+ return __this_address;
+ break;
+ default:
+ return __this_address;
+ }
+ return NULL;
+}
+
xfs_failaddr_t
xfs_dinode_verify(
struct xfs_mount *mp,
@@ -470,6 +495,11 @@ xfs_dinode_verify(
if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
return __this_address;
+ /* check for illegal values of forkoff */
+ fa = xfs_dinode_verify_forkoff(dip, mp);
+ if (fa)
+ return fa;
+
/* Do we have appropriate data fork formats for the mode? */
switch (mode & S_IFMT) {
case S_IFIFO:
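Worked numbers for the DEV case, assuming sizeof(xfs_dev_t) == 4: di_forkoff counts 8-byte units, so the only legal attr-fork offset is roundup(4, 8) >> 3 = 8 >> 3 = 1. For the LOCAL/EXTENTS/BTREE formats the fork merely has to start inside the inode literal area, i.e. di_forkoff < XFS_LITINO(mp, version) >> 3; anything else is reported as corruption via __this_address.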
diff --git a/fs/xfs/scrub/alloc.c b/fs/xfs/scrub/alloc.c
index 036b5c7021eb..376bcb585ae6 100644
--- a/fs/xfs/scrub/alloc.c
+++ b/fs/xfs/scrub/alloc.c
@@ -17,7 +17,6 @@
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
-#include "xfs_alloc.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c
index 5b3b177c0fc9..e386c9b0b4ab 100644
--- a/fs/xfs/scrub/inode.c
+++ b/fs/xfs/scrub/inode.c
@@ -126,6 +126,7 @@ xchk_inode_flags(
{
struct xfs_mount *mp = sc->mp;
+ /* di_flags are all taken, last bit cannot be used */
if (flags & ~XFS_DIFLAG_ANY)
goto bad;
@@ -172,8 +173,9 @@ xchk_inode_flags2(
{
struct xfs_mount *mp = sc->mp;
+ /* Unknown di_flags2 could be from a future kernel */
if (flags2 & ~XFS_DIFLAG2_ANY)
- goto bad;
+ xchk_ino_set_warning(sc, ino);
/* reflink flag requires reflink feature */
if ((flags2 & XFS_DIFLAG2_REFLINK) &&
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index addbd74ecd8e..6de8d90041ff 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -702,13 +702,9 @@ xfs_bmap_punch_delalloc_range(
struct xfs_iext_cursor icur;
int error = 0;
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- if (!(ifp->if_flags & XFS_IFEXTENTS)) {
- error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
- if (error)
- goto out_unlock;
- }
+ ASSERT(ifp->if_flags & XFS_IFEXTENTS);
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
goto out_unlock;
@@ -1584,7 +1580,7 @@ xfs_swap_extent_rmap(
tirec.br_blockcount, &irec,
&nimaps, 0);
if (error)
- goto out_defer;
+ goto out;
ASSERT(nimaps == 1);
ASSERT(tirec.br_startoff == irec.br_startoff);
trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
@@ -1599,22 +1595,22 @@ xfs_swap_extent_rmap(
/* Remove the mapping from the donor file. */
error = xfs_bmap_unmap_extent(tp, tip, &uirec);
if (error)
- goto out_defer;
+ goto out;
/* Remove the mapping from the source file. */
error = xfs_bmap_unmap_extent(tp, ip, &irec);
if (error)
- goto out_defer;
+ goto out;
/* Map the donor file's blocks into the source file. */
error = xfs_bmap_map_extent(tp, ip, &uirec);
if (error)
- goto out_defer;
+ goto out;
/* Map the source file's blocks into the donor file. */
error = xfs_bmap_map_extent(tp, tip, &irec);
if (error)
- goto out_defer;
+ goto out;
error = xfs_defer_finish(tpp);
tp = *tpp;
@@ -1636,8 +1632,6 @@ xfs_swap_extent_rmap(
tip->i_d.di_flags2 = tip_flags2;
return 0;
-out_defer:
- xfs_defer_cancel(tp);
out:
trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
tip->i_d.di_flags2 = tip_flags2;
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 1c9d1398980b..12d8455bfbb2 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -532,6 +532,49 @@ xfs_buf_item_push(
}
/*
+ * Drop the buffer log item refcount and take appropriate action. This helper
+ * determines whether the bli must be freed or not, since a decrement to zero
+ * does not necessarily mean the bli is unused.
+ *
+ * Return true if the bli is freed, false otherwise.
+ */
+bool
+xfs_buf_item_put(
+ struct xfs_buf_log_item *bip)
+{
+ struct xfs_log_item *lip = &bip->bli_item;
+ bool aborted;
+ bool dirty;
+
+ /* drop the bli ref and return if it wasn't the last one */
+ if (!atomic_dec_and_test(&bip->bli_refcount))
+ return false;
+
+ /*
+ * We dropped the last ref and must free the item if clean or aborted.
+ * If the bli is dirty and non-aborted, the buffer was clean in the
+ * transaction but still awaiting writeback from previous changes. In
+ * that case, the bli is freed on buffer writeback completion.
+ */
+ aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
+ XFS_FORCED_SHUTDOWN(lip->li_mountp);
+ dirty = bip->bli_flags & XFS_BLI_DIRTY;
+ if (dirty && !aborted)
+ return false;
+
+ /*
+ * The bli is aborted or clean. An aborted item may be in the AIL
+ * regardless of dirty state. For example, consider an aborted
+ * transaction that invalidated a dirty bli and cleared the dirty
+ * state.
+ */
+ if (aborted)
+ xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
+ xfs_buf_item_relse(bip->bli_buf);
+ return true;
+}
+
+/*
* Release the buffer associated with the buf log item. If there is no dirty
* logged data associated with the buffer recorded in the buf log item, then
* free the buf log item and remove the reference to it in the buffer.
@@ -556,76 +599,42 @@ xfs_buf_item_unlock(
{
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
struct xfs_buf *bp = bip->bli_buf;
- bool aborted;
- bool hold = !!(bip->bli_flags & XFS_BLI_HOLD);
- bool dirty = !!(bip->bli_flags & XFS_BLI_DIRTY);
+ bool released;
+ bool hold = bip->bli_flags & XFS_BLI_HOLD;
+ bool stale = bip->bli_flags & XFS_BLI_STALE;
#if defined(DEBUG) || defined(XFS_WARN)
- bool ordered = !!(bip->bli_flags & XFS_BLI_ORDERED);
+ bool ordered = bip->bli_flags & XFS_BLI_ORDERED;
+ bool dirty = bip->bli_flags & XFS_BLI_DIRTY;
#endif
- aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags);
-
- /* Clear the buffer's association with this transaction. */
- bp->b_transp = NULL;
-
- /*
- * The per-transaction state has been copied above so clear it from the
- * bli.
- */
- bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);
-
- /*
- * If the buf item is marked stale, then don't do anything. We'll
- * unlock the buffer and free the buf item when the buffer is unpinned
- * for the last time.
- */
- if (bip->bli_flags & XFS_BLI_STALE) {
- trace_xfs_buf_item_unlock_stale(bip);
- ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
- if (!aborted) {
- atomic_dec(&bip->bli_refcount);
- return;
- }
- }
-
trace_xfs_buf_item_unlock(bip);
/*
- * If the buf item isn't tracking any data, free it, otherwise drop the
- * reference we hold to it. If we are aborting the transaction, this may
- * be the only reference to the buf item, so we free it anyway
- * regardless of whether it is dirty or not. A dirty abort implies a
- * shutdown, anyway.
- *
* The bli dirty state should match whether the blf has logged segments
* except for ordered buffers, where only the bli should be dirty.
*/
ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
(ordered && dirty && !xfs_buf_item_dirty_format(bip)));
+ ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
/*
- * Clean buffers, by definition, cannot be in the AIL. However, aborted
- * buffers may be in the AIL regardless of dirty state. An aborted
- * transaction that invalidates a buffer already in the AIL may have
- * marked it stale and cleared the dirty state, for example.
- *
- * Therefore if we are aborting a buffer and we've just taken the last
- * reference away, we have to check if it is in the AIL before freeing
- * it. We need to free it in this case, because an aborted transaction
- * has already shut the filesystem down and this is the last chance we
- * will have to do so.
+ * Clear the buffer's association with this transaction and
+ * per-transaction state from the bli, which has been copied above.
*/
- if (atomic_dec_and_test(&bip->bli_refcount)) {
- if (aborted) {
- ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
- xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
- xfs_buf_item_relse(bp);
- } else if (!dirty)
- xfs_buf_item_relse(bp);
- }
+ bp->b_transp = NULL;
+ bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);
- if (!hold)
- xfs_buf_relse(bp);
+ /*
+ * Unref the item and unlock the buffer unless held or stale. Stale
+ * buffers remain locked until final unpin unless the bli is freed by
+ * the unref call. The latter implies shutdown because buffer
+ * invalidation dirties the bli and transaction.
+ */
+ released = xfs_buf_item_put(bip);
+ if (hold || (stale && !released))
+ return;
+ ASSERT(!stale || test_bit(XFS_LI_ABORTED, &lip->li_flags));
+ xfs_buf_relse(bp);
}
/*
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 3f7d7b72e7e6..90f65f891fab 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -51,6 +51,7 @@ struct xfs_buf_log_item {
int xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
void xfs_buf_item_relse(struct xfs_buf *);
+bool xfs_buf_item_put(struct xfs_buf_log_item *);
void xfs_buf_item_log(struct xfs_buf_log_item *, uint, uint);
bool xfs_buf_item_dirty_format(struct xfs_buf_log_item *);
void xfs_buf_attach_iodone(struct xfs_buf *,
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index d957a46dc1cb..05db9540e459 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1563,7 +1563,7 @@ xfs_itruncate_extents_flags(
error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, flags,
XFS_ITRUNC_MAX_EXTENTS, &done);
if (error)
- goto out_bmap_cancel;
+ goto out;
/*
* Duplicate the transaction that has the permanent
@@ -1599,14 +1599,6 @@ xfs_itruncate_extents_flags(
out:
*tpp = tp;
return error;
-out_bmap_cancel:
- /*
- * If the bunmapi call encounters an error, return to the caller where
- * the transaction can be properly aborted. We just need to make sure
- * we're not holding any resources that we were not when we came in.
- */
- xfs_defer_cancel(tp);
- goto out;
}
int
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index c3e74f9128e8..f48ffd7a8d3e 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -471,8 +471,18 @@ xfs_vn_get_link_inline(
struct inode *inode,
struct delayed_call *done)
{
+ char *link;
+
ASSERT(XFS_I(inode)->i_df.if_flags & XFS_IFINLINE);
- return XFS_I(inode)->i_df.if_u1.if_data;
+
+ /*
+ * The VFS crashes on a NULL pointer, so return -EFSCORRUPTED if
+ * if_data is junk.
+ */
+ link = XFS_I(inode)->i_df.if_u1.if_data;
+ if (!link)
+ return ERR_PTR(-EFSCORRUPTED);
+ return link;
}
STATIC int
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index a21dc61ec09e..1fc9e9042e0e 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1570,16 +1570,6 @@ xlog_find_zeroed(
if (last_cycle != 0) { /* log completely written to */
xlog_put_bp(bp);
return 0;
- } else if (first_cycle != 1) {
- /*
- * If the cycle of the last block is zero, the cycle of
- * the first block must be 1. If it's not, maybe we're
- * not looking at a log... Bail out.
- */
- xfs_warn(log->l_mp,
- "Log inconsistent or not a log (last==0, first!=1)");
- error = -EINVAL;
- goto bp_err;
}
/* we have a partially zeroed log */
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 38f405415b88..42ea7bab9144 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -352,6 +352,47 @@ xfs_reflink_convert_cow(
return error;
}
+/*
+ * Find the extent that maps the given range in the COW fork. Even if the extent
+ * is not shared we might have a preallocation for it in the COW fork. If so we
+ * use that rather than trigger a new allocation.
+ */
+static int
+xfs_find_trim_cow_extent(
+ struct xfs_inode *ip,
+ struct xfs_bmbt_irec *imap,
+ bool *shared,
+ bool *found)
+{
+ xfs_fileoff_t offset_fsb = imap->br_startoff;
+ xfs_filblks_t count_fsb = imap->br_blockcount;
+ struct xfs_iext_cursor icur;
+ struct xfs_bmbt_irec got;
+ bool trimmed;
+
+ *found = false;
+
+ /*
+ * If we don't find an overlapping extent, trim the range we need to
+ * allocate to fit the hole we found.
+ */
+ if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got) ||
+ got.br_startoff > offset_fsb)
+ return xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
+
+ *shared = true;
+ if (isnullstartblock(got.br_startblock)) {
+ xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
+ return 0;
+ }
+
+ /* real extent found - no need to allocate */
+ xfs_trim_extent(&got, offset_fsb, count_fsb);
+ *imap = got;
+ *found = true;
+ return 0;
+}
+
/* Allocate all CoW reservations covering a range of blocks in a file. */
int
xfs_reflink_allocate_cow(
@@ -363,78 +404,64 @@ xfs_reflink_allocate_cow(
struct xfs_mount *mp = ip->i_mount;
xfs_fileoff_t offset_fsb = imap->br_startoff;
xfs_filblks_t count_fsb = imap->br_blockcount;
- struct xfs_bmbt_irec got;
- struct xfs_trans *tp = NULL;
+ struct xfs_trans *tp;
int nimaps, error = 0;
- bool trimmed;
+ bool found;
xfs_filblks_t resaligned;
xfs_extlen_t resblks = 0;
- struct xfs_iext_cursor icur;
-retry:
- ASSERT(xfs_is_reflink_inode(ip));
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+ ASSERT(xfs_is_reflink_inode(ip));
- /*
- * Even if the extent is not shared we might have a preallocation for
- * it in the COW fork. If so use it.
- */
- if (xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got) &&
- got.br_startoff <= offset_fsb) {
- *shared = true;
-
- /* If we have a real allocation in the COW fork we're done. */
- if (!isnullstartblock(got.br_startblock)) {
- xfs_trim_extent(&got, offset_fsb, count_fsb);
- *imap = got;
- goto convert;
- }
+ error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
+ if (error || !*shared)
+ return error;
+ if (found)
+ goto convert;
- xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
- } else {
- error = xfs_reflink_trim_around_shared(ip, imap, shared, &trimmed);
- if (error || !*shared)
- goto out;
- }
+ resaligned = xfs_aligned_fsb_count(imap->br_startoff,
+ imap->br_blockcount, xfs_get_cowextsz_hint(ip));
+ resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
- if (!tp) {
- resaligned = xfs_aligned_fsb_count(imap->br_startoff,
- imap->br_blockcount, xfs_get_cowextsz_hint(ip));
- resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
+ xfs_iunlock(ip, *lockmode);
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
+ *lockmode = XFS_ILOCK_EXCL;
+ xfs_ilock(ip, *lockmode);
- xfs_iunlock(ip, *lockmode);
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
- *lockmode = XFS_ILOCK_EXCL;
- xfs_ilock(ip, *lockmode);
+ if (error)
+ return error;
- if (error)
- return error;
+ error = xfs_qm_dqattach_locked(ip, false);
+ if (error)
+ goto out_trans_cancel;
- error = xfs_qm_dqattach_locked(ip, false);
- if (error)
- goto out;
- goto retry;
+ /*
+ * Check for an overlapping extent again now that we dropped the ilock.
+ */
+ error = xfs_find_trim_cow_extent(ip, imap, shared, &found);
+ if (error || !*shared)
+ goto out_trans_cancel;
+ if (found) {
+ xfs_trans_cancel(tp);
+ goto convert;
}
error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
XFS_QMOPT_RES_REGBLKS);
if (error)
- goto out;
+ goto out_trans_cancel;
xfs_trans_ijoin(tp, ip, 0);
- nimaps = 1;
-
/* Allocate the entire reservation as unwritten blocks. */
+ nimaps = 1;
error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC,
resblks, imap, &nimaps);
if (error)
- goto out_trans_cancel;
+ goto out_unreserve;
xfs_inode_set_cowblocks_tag(ip);
-
- /* Finish up. */
error = xfs_trans_commit(tp);
if (error)
return error;
@@ -447,12 +474,12 @@ retry:
return -ENOSPC;
convert:
return xfs_reflink_convert_cow_extent(ip, imap, offset_fsb, count_fsb);
-out_trans_cancel:
+
+out_unreserve:
xfs_trans_unreserve_quota_nblks(tp, ip, (long)resblks, 0,
XFS_QMOPT_RES_REGBLKS);
-out:
- if (tp)
- xfs_trans_cancel(tp);
+out_trans_cancel:
+ xfs_trans_cancel(tp);
return error;
}
@@ -666,14 +693,12 @@ xfs_reflink_end_cow(
if (!del.br_blockcount)
goto prev_extent;
- ASSERT(!isnullstartblock(got.br_startblock));
-
/*
- * Don't remap unwritten extents; these are
- * speculatively preallocated CoW extents that have been
- * allocated but have not yet been involved in a write.
+ * Only remap real extents that contain data. With AIO,
+ * speculative preallocations can leak into the range we
+ * are called upon, and we need to skip them.
*/
- if (got.br_state == XFS_EXT_UNWRITTEN)
+ if (!xfs_bmap_is_real_extent(&got))
goto prev_extent;
/* Unmap the old blocks in the data fork. */
@@ -1195,35 +1220,92 @@ retry:
return 0;
}
+/* Unlock both inodes after they've been prepped for a range clone. */
+STATIC void
+xfs_reflink_remap_unlock(
+ struct file *file_in,
+ struct file *file_out)
+{
+ struct inode *inode_in = file_inode(file_in);
+ struct xfs_inode *src = XFS_I(inode_in);
+ struct inode *inode_out = file_inode(file_out);
+ struct xfs_inode *dest = XFS_I(inode_out);
+ bool same_inode = (inode_in == inode_out);
+
+ xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
+ if (!same_inode)
+ xfs_iunlock(src, XFS_MMAPLOCK_SHARED);
+ inode_unlock(inode_out);
+ if (!same_inode)
+ inode_unlock_shared(inode_in);
+}
+
/*
- * Link a range of blocks from one file to another.
+ * If we're reflinking to a point past the destination file's EOF, we must
+ * zero any speculative post-EOF preallocations that sit between the old EOF
+ * and the destination file offset.
*/
-int
-xfs_reflink_remap_range(
+static int
+xfs_reflink_zero_posteof(
+ struct xfs_inode *ip,
+ loff_t pos)
+{
+ loff_t isize = i_size_read(VFS_I(ip));
+
+ if (pos <= isize)
+ return 0;
+
+ trace_xfs_zero_eof(ip, isize, pos - isize);
+ return iomap_zero_range(VFS_I(ip), isize, pos - isize, NULL,
+ &xfs_iomap_ops);
+}
+
+/*
+ * Prepare two files for range cloning. Upon a successful return both inodes
+ * will have the iolock and mmaplock held, the page cache of the out file will
+ * be truncated, and any leases on the out file will have been broken. This
+ * function borrows heavily from xfs_file_aio_write_checks.
+ *
+ * The VFS allows partial EOF blocks to "match" for dedupe even though it hasn't
+ * checked that the bytes beyond EOF physically match. Hence we cannot use the
+ * EOF block in the source dedupe range because it's not a complete block match,
+ * and hence can introduce corruption into the file that has its block replaced.
+ *
+ * In similar fashion, the VFS file cloning also allows partial EOF blocks to be
+ * "block aligned" for the purposes of cloning entire files. However, if the
+ * source file range includes the EOF block and it lands within the existing EOF
+ * of the destination file, then we can expose stale data from beyond the source
+ * file EOF in the destination file.
+ *
+ * XFS doesn't support partial block sharing, so in both cases we have to check
+ * these cases ourselves. For dedupe, we can simply round the dedupe length
+ * down to the previous whole block and ignore the partial EOF block. While this
+ * means we can't dedupe the last block of a file, this is an acceptable
+ * tradeoff for simplicity of implementation.
+ *
+ * For cloning, we want to share the partial EOF block if it is also the new EOF
+ * block of the destination file. If the partial EOF block lies inside the
+ * existing destination EOF, then we have to abort the clone to avoid exposing
+ * stale data in the destination file. Hence we reject these clone attempts with
+ * -EINVAL in this case.
+ */
+STATIC int
+xfs_reflink_remap_prep(
struct file *file_in,
loff_t pos_in,
struct file *file_out,
loff_t pos_out,
- u64 len,
+ u64 *len,
bool is_dedupe)
{
struct inode *inode_in = file_inode(file_in);
struct xfs_inode *src = XFS_I(inode_in);
struct inode *inode_out = file_inode(file_out);
struct xfs_inode *dest = XFS_I(inode_out);
- struct xfs_mount *mp = src->i_mount;
bool same_inode = (inode_in == inode_out);
- xfs_fileoff_t sfsbno, dfsbno;
- xfs_filblks_t fsblen;
- xfs_extlen_t cowextsize;
+ u64 blkmask = i_blocksize(inode_in) - 1;
ssize_t ret;
- if (!xfs_sb_version_hasreflink(&mp->m_sb))
- return -EOPNOTSUPP;
-
- if (XFS_FORCED_SHUTDOWN(mp))
- return -EIO;
-
/* Lock both files against IO */
ret = xfs_iolock_two_inodes_and_break_layout(inode_in, inode_out);
if (ret)
@@ -1245,33 +1327,115 @@ xfs_reflink_remap_range(
goto out_unlock;
ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out,
- &len, is_dedupe);
+ len, is_dedupe);
if (ret <= 0)
goto out_unlock;
+ /*
+ * If the dedupe data matches, chop off the partial EOF block
+ * from the source file so we don't try to dedupe a partial
+ * block.
+ */
+ if (is_dedupe) {
+ *len &= ~blkmask;
+ } else if (*len & blkmask) {
+ /*
+ * The user is attempting to share a partial EOF block,
+ * if it's inside the destination EOF then reject it.
+ */
+ if (pos_out + *len < i_size_read(inode_out)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
/* Attach dquots to dest inode before changing block map */
ret = xfs_qm_dqattach(dest);
if (ret)
goto out_unlock;
- trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
-
/*
- * Clear out post-eof preallocations because we don't have page cache
- * backing the delayed allocations and they'll never get freed on
- * their own.
+ * Zero existing post-eof speculative preallocations in the destination
+ * file.
*/
- if (xfs_can_free_eofblocks(dest, true)) {
- ret = xfs_free_eofblocks(dest);
- if (ret)
- goto out_unlock;
- }
+ ret = xfs_reflink_zero_posteof(dest, pos_out);
+ if (ret)
+ goto out_unlock;
/* Set flags and remap blocks. */
ret = xfs_reflink_set_inode_flag(src, dest);
if (ret)
goto out_unlock;
+ /* Zap any page cache for the destination file's range. */
+ truncate_inode_pages_range(&inode_out->i_data, pos_out,
+ PAGE_ALIGN(pos_out + *len) - 1);
+
+ /* If we're altering the file contents... */
+ if (!is_dedupe) {
+ /*
+ * ...update the timestamps (which will grab the ilock again
+ * from xfs_fs_dirty_inode, so we have to call it before we
+ * take the ilock).
+ */
+ if (!(file_out->f_mode & FMODE_NOCMTIME)) {
+ ret = file_update_time(file_out);
+ if (ret)
+ goto out_unlock;
+ }
+
+ /*
+ * ...clear the security bits if the process is not being run
+ * by root. This keeps people from modifying setuid and setgid
+ * binaries.
+ */
+ ret = file_remove_privs(file_out);
+ if (ret)
+ goto out_unlock;
+ }
+
+ return 1;
+out_unlock:
+ xfs_reflink_remap_unlock(file_in, file_out);
+ return ret;
+}
+
+/*
+ * Link a range of blocks from one file to another.
+ */
+int
+xfs_reflink_remap_range(
+ struct file *file_in,
+ loff_t pos_in,
+ struct file *file_out,
+ loff_t pos_out,
+ u64 len,
+ bool is_dedupe)
+{
+ struct inode *inode_in = file_inode(file_in);
+ struct xfs_inode *src = XFS_I(inode_in);
+ struct inode *inode_out = file_inode(file_out);
+ struct xfs_inode *dest = XFS_I(inode_out);
+ struct xfs_mount *mp = src->i_mount;
+ xfs_fileoff_t sfsbno, dfsbno;
+ xfs_filblks_t fsblen;
+ xfs_extlen_t cowextsize;
+ ssize_t ret;
+
+ if (!xfs_sb_version_hasreflink(&mp->m_sb))
+ return -EOPNOTSUPP;
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -EIO;
+
+ /* Prepare and then clone file data. */
+ ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
+ &len, is_dedupe);
+ if (ret <= 0)
+ return ret;
+
+ trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
+
dfsbno = XFS_B_TO_FSBT(mp, pos_out);
sfsbno = XFS_B_TO_FSBT(mp, pos_in);
fsblen = XFS_B_TO_FSB(mp, len);
@@ -1280,10 +1444,6 @@ xfs_reflink_remap_range(
if (ret)
goto out_unlock;
- /* Zap any page cache for the destination file's range. */
- truncate_inode_pages_range(&inode_out->i_data, pos_out,
- PAGE_ALIGN(pos_out + len) - 1);
-
/*
* Carry the cowextsize hint from src to dest if we're sharing the
* entire source file to the entire destination file, the source file
@@ -1300,12 +1460,7 @@ xfs_reflink_remap_range(
is_dedupe);
out_unlock:
- xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
- if (!same_inode)
- xfs_iunlock(src, XFS_MMAPLOCK_SHARED);
- inode_unlock(inode_out);
- if (!same_inode)
- inode_unlock_shared(inode_in);
+ xfs_reflink_remap_unlock(file_in, file_out);
if (ret)
trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
return ret;
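To make the partial-EOF policy concrete (hypothetical 4096-byte blocks, so blkmask = 0xfff): a dedupe request of len = 10000 bytes is quietly trimmed to 10000 & ~0xfff = 8192, dropping the partial tail block from consideration; a clone of the same 10000 bytes is allowed only when pos_out + 10000 reaches or passes the destination's i_size (the partial block becomes the new EOF block), otherwise it would land inside existing data and the request is rejected with -EINVAL.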
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index ad315e83bc02..3043e5ed6495 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -473,7 +473,6 @@ DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pin);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unpin_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock);
-DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index bedc5a5133a5..912b42f5fe4a 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -259,6 +259,14 @@ xfs_trans_alloc(
struct xfs_trans *tp;
int error;
+ /*
+ * Allocate the handle before we do our freeze accounting and set up the
+ * GFP_NOFS allocation context so that we avoid lockdep false positives
+ * by doing GFP_KERNEL allocations inside sb_start_intwrite().
+ */
+ tp = kmem_zone_zalloc(xfs_trans_zone,
+ (flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
+
if (!(flags & XFS_TRANS_NO_WRITECOUNT))
sb_start_intwrite(mp->m_super);
@@ -270,8 +278,6 @@ xfs_trans_alloc(
mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
atomic_inc(&mp->m_active_trans);
- tp = kmem_zone_zalloc(xfs_trans_zone,
- (flags & XFS_TRANS_NOFS) ? KM_NOFS : KM_SLEEP);
tp->t_magic = XFS_TRANS_HEADER_MAGIC;
tp->t_flags = flags;
tp->t_mountp = mp;
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 15919f67a88f..286a287ac57a 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -322,49 +322,38 @@ xfs_trans_read_buf_map(
}
/*
- * Release the buffer bp which was previously acquired with one of the
- * xfs_trans_... buffer allocation routines if the buffer has not
- * been modified within this transaction. If the buffer is modified
- * within this transaction, do decrement the recursion count but do
- * not release the buffer even if the count goes to 0. If the buffer is not
- * modified within the transaction, decrement the recursion count and
- * release the buffer if the recursion count goes to 0.
+ * Release a buffer previously joined to the transaction. If the buffer is
+ * modified within this transaction, decrement the recursion count but do not
+ * release the buffer even if the count goes to 0. If the buffer is not modified
+ * within the transaction, decrement the recursion count and release the buffer
+ * if the recursion count goes to 0.
*
- * If the buffer is to be released and it was not modified before
- * this transaction began, then free the buf_log_item associated with it.
+ * If the buffer is to be released and it was not already dirty before this
+ * transaction began, then also free the buf_log_item associated with it.
*
- * If the transaction pointer is NULL, make this just a normal
- * brelse() call.
+ * If the transaction pointer is NULL, this is a normal xfs_buf_relse() call.
*/
void
xfs_trans_brelse(
- xfs_trans_t *tp,
- xfs_buf_t *bp)
+ struct xfs_trans *tp,
+ struct xfs_buf *bp)
{
- struct xfs_buf_log_item *bip;
- int freed;
+ struct xfs_buf_log_item *bip = bp->b_log_item;
- /*
- * Default to a normal brelse() call if the tp is NULL.
- */
- if (tp == NULL) {
- ASSERT(bp->b_transp == NULL);
+ ASSERT(bp->b_transp == tp);
+
+ if (!tp) {
xfs_buf_relse(bp);
return;
}
- ASSERT(bp->b_transp == tp);
- bip = bp->b_log_item;
+ trace_xfs_trans_brelse(bip);
ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
- ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
- ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
ASSERT(atomic_read(&bip->bli_refcount) > 0);
- trace_xfs_trans_brelse(bip);
-
/*
- * If the release is just for a recursive lock,
- * then decrement the count and return.
+ * If the release is for a recursive lookup, then decrement the count
+ * and return.
*/
if (bip->bli_recur > 0) {
bip->bli_recur--;
@@ -372,64 +361,24 @@ xfs_trans_brelse(
}
/*
- * If the buffer is dirty within this transaction, we can't
+ * If the buffer is invalidated or dirty in this transaction, we can't
* release it until we commit.
*/
if (test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags))
return;
-
- /*
- * If the buffer has been invalidated, then we can't release
- * it until the transaction commits to disk unless it is re-dirtied
- * as part of this transaction. This prevents us from pulling
- * the item from the AIL before we should.
- */
if (bip->bli_flags & XFS_BLI_STALE)
return;
- ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
-
/*
- * Free up the log item descriptor tracking the released item.
+ * Unlink the log item from the transaction and clear the hold flag, if
+ * set. We wouldn't want the next user of the buffer to get confused.
*/
+ ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
xfs_trans_del_item(&bip->bli_item);
+ bip->bli_flags &= ~XFS_BLI_HOLD;
- /*
- * Clear the hold flag in the buf log item if it is set.
- * We wouldn't want the next user of the buffer to
- * get confused.
- */
- if (bip->bli_flags & XFS_BLI_HOLD) {
- bip->bli_flags &= ~XFS_BLI_HOLD;
- }
-
- /*
- * Drop our reference to the buf log item.
- */
- freed = atomic_dec_and_test(&bip->bli_refcount);
-
- /*
- * If the buf item is not tracking data in the log, then we must free it
- * before releasing the buffer back to the free pool.
- *
- * If the fs has shutdown and we dropped the last reference, it may fall
- * on us to release a (possibly dirty) bli if it never made it to the
- * AIL (e.g., the aborted unpin already happened and didn't release it
- * due to our reference). Since we're already shutdown and need
- * ail_lock, just force remove from the AIL and release the bli here.
- */
- if (XFS_FORCED_SHUTDOWN(tp->t_mountp) && freed) {
- xfs_trans_ail_remove(&bip->bli_item, SHUTDOWN_LOG_IO_ERROR);
- xfs_buf_item_relse(bp);
- } else if (!(bip->bli_flags & XFS_BLI_DIRTY)) {
-/***
- ASSERT(bp->b_pincount == 0);
-***/
- ASSERT(atomic_read(&bip->bli_refcount) == 0);
- ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));
- ASSERT(!(bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF));
- xfs_buf_item_relse(bp);
- }
+ /* drop the reference to the bli */
+ xfs_buf_item_put(bip);
bp->b_transp = NULL;
xfs_buf_relse(bp);
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 66d1d45fa2e1..d356f802945a 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -1026,7 +1026,8 @@ static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
#define ioport_map ioport_map
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
- return PCI_IOBASE + (port & MMIO_UPPER_LIMIT);
+ port &= IO_SPACE_LIMIT;
+ return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
#endif
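The rewritten helper first masks the port into the legal I/O space and then refuses, rather than silently aliases, ports beyond the MMIO window. A standalone model with invented limits:

    #include <stdio.h>

    #define IO_SPACE_LIMIT   0xffffUL   /* invented for illustration */
    #define MMIO_UPPER_LIMIT 0xefffUL
    #define PCI_IOBASE       ((unsigned char *)0x100000UL)

    static void *ioport_map_model(unsigned long port)
    {
        port &= IO_SPACE_LIMIT;
        /* out-of-window ports now yield NULL instead of a bogus alias */
        return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
    }

    int main(void)
    {
        printf("%p\n", ioport_map_model(0xf100));   /* NULL */
        printf("%p\n", ioport_map_model(0x0060));   /* base + 0x60 */
        return 0;
    }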
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index b3353e21f3b3..6be86c1c5c58 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -20,6 +20,8 @@
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
+#ifdef CONFIG_MMU
+
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
* Semi RCU freeing of the page directories.
@@ -97,12 +99,30 @@ struct mmu_gather {
#endif
unsigned long start;
unsigned long end;
- /* we are in the middle of an operation to clear
- * a full mm and can make some optimizations */
- unsigned int fullmm : 1,
- /* we have performed an operation which
- * requires a complete flush of the tlb */
- need_flush_all : 1;
+ /*
+ * we are in the middle of an operation to clear
+ * a full mm and can make some optimizations
+ */
+ unsigned int fullmm : 1;
+
+ /*
+ * we have performed an operation which
+ * requires a complete flush of the tlb
+ */
+ unsigned int need_flush_all : 1;
+
+ /*
+ * we have removed page directories
+ */
+ unsigned int freed_tables : 1;
+
+ /*
+ * at which levels have we cleared entries?
+ */
+ unsigned int cleared_ptes : 1;
+ unsigned int cleared_pmds : 1;
+ unsigned int cleared_puds : 1;
+ unsigned int cleared_p4ds : 1;
struct mmu_gather_batch *active;
struct mmu_gather_batch local;
@@ -118,6 +138,7 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb,
void tlb_flush_mmu(struct mmu_gather *tlb);
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end, bool force);
+void tlb_flush_mmu_free(struct mmu_gather *tlb);
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
int page_size);
@@ -137,6 +158,11 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
tlb->start = TASK_SIZE;
tlb->end = 0;
}
+ tlb->freed_tables = 0;
+ tlb->cleared_ptes = 0;
+ tlb->cleared_pmds = 0;
+ tlb->cleared_puds = 0;
+ tlb->cleared_p4ds = 0;
}
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
@@ -186,6 +212,25 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
}
#endif
+static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
+{
+ if (tlb->cleared_ptes)
+ return PAGE_SHIFT;
+ if (tlb->cleared_pmds)
+ return PMD_SHIFT;
+ if (tlb->cleared_puds)
+ return PUD_SHIFT;
+ if (tlb->cleared_p4ds)
+ return P4D_SHIFT;
+
+ return PAGE_SHIFT;
+}
+
+static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
+{
+ return 1UL << tlb_get_unmap_shift(tlb);
+}
+
/*
* In the case of tlb vma handling, we can optimise these away in the
* case where we're doing a full MM flush. When we're doing a munmap,
@@ -219,13 +264,19 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define tlb_remove_tlb_entry(tlb, ptep, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb->cleared_ptes = 1; \
__tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
- do { \
- __tlb_adjust_range(tlb, address, huge_page_size(h)); \
- __tlb_remove_tlb_entry(tlb, ptep, address); \
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
+ do { \
+ unsigned long _sz = huge_page_size(h); \
+ __tlb_adjust_range(tlb, address, _sz); \
+ if (_sz == PMD_SIZE) \
+ tlb->cleared_pmds = 1; \
+ else if (_sz == PUD_SIZE) \
+ tlb->cleared_puds = 1; \
+ __tlb_remove_tlb_entry(tlb, ptep, address); \
} while (0)
/**
@@ -239,6 +290,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
do { \
__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE); \
+ tlb->cleared_pmds = 1; \
__tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
} while (0)
@@ -253,6 +305,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \
do { \
__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \
+ tlb->cleared_puds = 1; \
__tlb_remove_pud_tlb_entry(tlb, pudp, address); \
} while (0)
@@ -278,6 +331,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define pte_free_tlb(tlb, ptep, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb->freed_tables = 1; \
+ tlb->cleared_pmds = 1; \
__pte_free_tlb(tlb, ptep, address); \
} while (0)
#endif
@@ -285,7 +340,9 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb->freed_tables = 1; \
+ tlb->cleared_puds = 1; \
__pmd_free_tlb(tlb, pmdp, address); \
} while (0)
#endif
@@ -295,6 +352,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define pud_free_tlb(tlb, pudp, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb->freed_tables = 1; \
+ tlb->cleared_p4ds = 1; \
__pud_free_tlb(tlb, pudp, address); \
} while (0)
#endif
@@ -304,12 +363,15 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address) \
do { \
- __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ __tlb_adjust_range(tlb, address, PAGE_SIZE); \
+ tlb->freed_tables = 1; \
__p4d_free_tlb(tlb, pudp, address); \
} while (0)
#endif
#endif
+#endif /* CONFIG_MMU */
+
#define tlb_migrate_finish(mm) do {} while (0)
#endif /* _ASM_GENERIC__TLB_H */
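
The tlb.h changes above let an architecture see both which page-table levels were touched (cleared_ptes .. cleared_p4ds, surfaced through tlb_get_unmap_shift()/tlb_get_unmap_size()) and whether table pages themselves were freed (freed_tables). A hedged sketch of how an arch tlb_flush() could consume this; flush_tlb_page_stride() is a hypothetical per-arch helper, not an existing API:

/* Illustration only: pick the cheapest flush the gathered state allows. */
static inline void example_arch_tlb_flush(struct mmu_gather *tlb)
{
	unsigned long stride = tlb_get_unmap_size(tlb);
	unsigned long addr;

	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
		return;
	}

	/*
	 * freed_tables set means intermediate table pages were freed, so
	 * walk-cache entries must be invalidated as well (arch specific).
	 */
	for (addr = tlb->start; addr < tlb->end; addr += stride)
		flush_tlb_page_stride(tlb->mm, addr, tlb->freed_tables);
}
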
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 7b75ff6e2fce..d7701d466b60 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -68,7 +68,7 @@
*/
#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
-#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
+#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
@@ -613,8 +613,8 @@
#define EXIT_DATA \
*(.exit.data .exit.data.*) \
- *(.fini_array) \
- *(.dtors) \
+ *(.fini_array .fini_array.*) \
+ *(.dtors .dtors.*) \
MEM_DISCARD(exit.data*) \
MEM_DISCARD(exit.rodata*)
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index da9d95a19580..1e713154f00e 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -153,6 +153,17 @@ struct __drm_planes_state {
struct __drm_crtcs_state {
struct drm_crtc *ptr;
struct drm_crtc_state *state, *old_state, *new_state;
+
+ /**
+ * @commit:
+ *
+ * A reference to the CRTC commit object that is kept for use by
+ * drm_atomic_helper_wait_for_flip_done() after
+ * drm_atomic_helper_commit_hw_done() is called. This ensures that a
+ * concurrent commit won't free a commit object that is still in use.
+ */
+ struct drm_crtc_commit *commit;
+
s32 __user *out_fence_ptr;
u64 last_vblank_count;
};
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index 989f8e52864d..971bb7853776 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -87,9 +87,10 @@ struct drm_client_dev {
struct drm_file *file;
};
-int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
- const char *name, const struct drm_client_funcs *funcs);
+int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
+ const char *name, const struct drm_client_funcs *funcs);
void drm_client_release(struct drm_client_dev *client);
+void drm_client_add(struct drm_client_dev *client);
void drm_client_dev_unregister(struct drm_device *dev);
void drm_client_dev_hotplug(struct drm_device *dev);
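
The drm_client change splits one-shot drm_client_new() into drm_client_init() plus drm_client_add(), so a client can finish its own setup before hotplug callbacks can reach it. A hedged usage sketch; my_client_funcs is an assumed driver-side table:

/* Sketch: two-step registration with the split client API. */
static int example_client_setup(struct drm_device *dev,
				struct drm_client_dev *client)
{
	int ret;

	ret = drm_client_init(dev, client, "example", &my_client_funcs);
	if (ret)
		return ret;

	/* ... allocate buffers, set up state: not yet visible ... */

	drm_client_add(client);	/* from here on, hotplug may fire */
	return 0;
}
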
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 46a8009784df..152b3055e9e1 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -675,7 +675,7 @@ static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
{
return drm_core_check_feature(dev, DRIVER_ATOMIC) ||
- dev->mode_config.funcs->atomic_commit != NULL;
+ (dev->mode_config.funcs && dev->mode_config.funcs->atomic_commit != NULL);
}
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index b25d12ef120a..e3c404833115 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -214,9 +214,9 @@ struct detailed_timing {
#define DRM_EDID_HDMI_DC_Y444 (1 << 3)
/* YCBCR 420 deep color modes */
-#define DRM_EDID_YCBCR420_DC_48 (1 << 6)
-#define DRM_EDID_YCBCR420_DC_36 (1 << 5)
-#define DRM_EDID_YCBCR420_DC_30 (1 << 4)
+#define DRM_EDID_YCBCR420_DC_48 (1 << 2)
+#define DRM_EDID_YCBCR420_DC_36 (1 << 1)
+#define DRM_EDID_YCBCR420_DC_30 (1 << 0)
#define DRM_EDID_YCBCR420_DC_MASK (DRM_EDID_YCBCR420_DC_48 | \
DRM_EDID_YCBCR420_DC_36 | \
DRM_EDID_YCBCR420_DC_30)
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 582a0ec0aa70..777814755fa6 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -89,7 +89,6 @@ struct drm_panel {
struct drm_device *drm;
struct drm_connector *connector;
struct device *dev;
- struct device_link *link;
const struct drm_panel_funcs *funcs;
diff --git a/include/linux/amifd.h b/include/linux/amifd.h
deleted file mode 100644
index 202a77dbe46d..000000000000
--- a/include/linux/amifd.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _AMIFD_H
-#define _AMIFD_H
-
-/* Definitions for the Amiga floppy driver */
-
-#include <linux/fd.h>
-
-#define FD_MAX_UNITS 4 /* Max. Number of drives */
-#define FLOPPY_MAX_SECTORS 22 /* Max. Number of sectors per track */
-
-#ifndef ASSEMBLER
-
-struct fd_data_type {
- char *name; /* description of data type */
- int sects; /* sectors per track */
-#ifdef __STDC__
- int (*read_fkt)(int);
- void (*write_fkt)(int);
-#else
- int (*read_fkt)(); /* read whole track */
- void (*write_fkt)(); /* write whole track */
-#endif
-};
-
-/*
-** Floppy type descriptions
-*/
-
-struct fd_drive_type {
- unsigned long code; /* code returned from drive */
- char *name; /* description of drive */
- unsigned int tracks; /* number of tracks */
- unsigned int heads; /* number of heads */
- unsigned int read_size; /* raw read size for one track */
- unsigned int write_size; /* raw write size for one track */
- unsigned int sect_mult; /* sectors and gap multiplier (HD = 2) */
- unsigned int precomp1; /* start track for precomp 1 */
- unsigned int precomp2; /* start track for precomp 2 */
- unsigned int step_delay; /* time (in ms) for delay after step */
- unsigned int settle_time; /* time to settle after dir change */
- unsigned int side_time; /* time needed to change sides */
-};
-
-struct amiga_floppy_struct {
- struct fd_drive_type *type; /* type of floppy for this unit */
- struct fd_data_type *dtype; /* type of floppy for this unit */
- int track; /* current track (-1 == unknown) */
- unsigned char *trackbuf; /* current track (kmaloc()'d */
-
- int blocks; /* total # blocks on disk */
-
- int changed; /* true when not known */
- int disk; /* disk in drive (-1 == unknown) */
- int motor; /* true when motor is at speed */
- int busy; /* true when drive is active */
- int dirty; /* true when trackbuf is not on disk */
- int status; /* current error code for unit */
- struct gendisk *gendisk;
-};
-#endif
-
-#endif
diff --git a/include/linux/amifdreg.h b/include/linux/amifdreg.h
deleted file mode 100644
index 9b514d05ec70..000000000000
--- a/include/linux/amifdreg.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_AMIFDREG_H
-#define _LINUX_AMIFDREG_H
-
-/*
-** CIAAPRA bits (read only)
-*/
-
-#define DSKRDY (0x1<<5) /* disk ready when low */
-#define DSKTRACK0 (0x1<<4) /* head at track zero when low */
-#define DSKPROT (0x1<<3) /* disk protected when low */
-#define DSKCHANGE (0x1<<2) /* low when disk removed */
-
-/*
-** CIAAPRB bits (read/write)
-*/
-
-#define DSKMOTOR (0x1<<7) /* motor on when low */
-#define DSKSEL3 (0x1<<6) /* select drive 3 when low */
-#define DSKSEL2 (0x1<<5) /* select drive 2 when low */
-#define DSKSEL1 (0x1<<4) /* select drive 1 when low */
-#define DSKSEL0 (0x1<<3) /* select drive 0 when low */
-#define DSKSIDE (0x1<<2) /* side selection: 0 = upper, 1 = lower */
-#define DSKDIREC (0x1<<1) /* step direction: 0=in, 1=out (to trk 0) */
-#define DSKSTEP (0x1) /* pulse low to step head 1 track */
-
-/*
-** DSKBYTR bits (read only)
-*/
-
-#define DSKBYT (1<<15) /* register contains valid byte when set */
-#define DMAON (1<<14) /* disk DMA enabled */
-#define DISKWRITE (1<<13) /* disk write bit in DSKLEN enabled */
-#define WORDEQUAL (1<<12) /* DSKSYNC register match when true */
-/* bits 7-0 are data */
-
-/*
-** ADKCON/ADKCONR bits
-*/
-
-#ifndef SETCLR
-#define ADK_SETCLR (1<<15) /* control bit */
-#endif
-#define ADK_PRECOMP1 (1<<14) /* precompensation selection */
-#define ADK_PRECOMP0 (1<<13) /* 00=none, 01=140ns, 10=280ns, 11=500ns */
-#define ADK_MFMPREC (1<<12) /* 0=GCR precomp., 1=MFM precomp. */
-#define ADK_WORDSYNC (1<<10) /* enable DSKSYNC auto DMA */
-#define ADK_MSBSYNC (1<<9) /* when 1, enable sync on MSbit (for GCR) */
-#define ADK_FAST (1<<8) /* bit cell: 0=2us (GCR), 1=1us (MFM) */
-
-/*
-** DSKLEN bits
-*/
-
-#define DSKLEN_DMAEN (1<<15)
-#define DSKLEN_WRITE (1<<14)
-
-/*
-** INTENA/INTREQ bits
-*/
-
-#define DSKINDEX (0x1<<4) /* DSKINDEX bit */
-
-/*
-** Misc
-*/
-
-#define MFM_SYNC 0x4489 /* standard MFM sync value */
-
-/* Values for FD_COMMAND */
-#define FD_RECALIBRATE 0x07 /* move to track 0 */
-#define FD_SEEK 0x0F /* seek track */
-#define FD_READ 0xE6 /* read with MT, MFM, SKip deleted */
-#define FD_WRITE 0xC5 /* write with MT, MFM */
-#define FD_SENSEI 0x08 /* Sense Interrupt Status */
-#define FD_SPECIFY 0x03 /* specify HUT etc */
-#define FD_FORMAT 0x4D /* format one track */
-#define FD_VERSION 0x10 /* get version code */
-#define FD_CONFIGURE 0x13 /* configure FIFO operation */
-#define FD_PERPENDICULAR 0x12 /* perpendicular r/w mode */
-
-#endif /* _LINUX_AMIFDREG_H */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 51371740d2a8..b47c7f716731 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -21,12 +21,8 @@
#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
-#include <linux/bug.h>
#ifdef CONFIG_BLOCK
-
-#include <asm/io.h>
-
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
@@ -133,32 +129,6 @@ static inline bool bio_full(struct bio *bio)
}
/*
- * will die
- */
-#define bvec_to_phys(bv) (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
-
-/*
- * merge helpers etc
- */
-
-/* Default implementation of BIOVEC_PHYS_MERGEABLE */
-#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
- ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
-
-/*
- * allow arch override, for eg virtualized architectures (put in asm/io.h)
- */
-#ifndef BIOVEC_PHYS_MERGEABLE
-#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
- __BIOVEC_PHYS_MERGEABLE(vec1, vec2)
-#endif
-
-#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
- (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
-#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
- __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
-
-/*
* drivers should _never_ use the all version - the bio may have been split
* before it got to the driver and the driver won't own all of it
*/
@@ -170,27 +140,11 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
{
iter->bi_sector += bytes >> 9;
- if (bio_no_advance_iter(bio)) {
+ if (bio_no_advance_iter(bio))
iter->bi_size -= bytes;
- iter->bi_done += bytes;
- } else {
+ else
bvec_iter_advance(bio->bi_io_vec, iter, bytes);
/* TODO: It is reasonable to complete bio with error here. */
- }
-}
-
-static inline bool bio_rewind_iter(struct bio *bio, struct bvec_iter *iter,
- unsigned int bytes)
-{
- iter->bi_sector -= bytes >> 9;
-
- if (bio_no_advance_iter(bio)) {
- iter->bi_size += bytes;
- iter->bi_done -= bytes;
- return true;
- }
-
- return bvec_iter_rewind(bio->bi_io_vec, iter, bytes);
}
#define __bio_for_each_segment(bvl, bio, iter, start) \
@@ -353,6 +307,8 @@ struct bio_integrity_payload {
unsigned short bip_max_vcnt; /* integrity bio_vec slots */
unsigned short bip_flags; /* control flags */
+ struct bvec_iter bio_iter; /* for rewinding parent bio */
+
struct work_struct bip_work; /* I/O completion */
struct bio_vec *bip_vec;
@@ -547,23 +503,31 @@ do { \
disk_devt((bio)->bi_disk)
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
-int bio_associate_blkcg_from_page(struct bio *bio, struct page *page);
+int bio_associate_blkg_from_page(struct bio *bio, struct page *page);
#else
-static inline int bio_associate_blkcg_from_page(struct bio *bio,
- struct page *page) { return 0; }
+static inline int bio_associate_blkg_from_page(struct bio *bio,
+ struct page *page) { return 0; }
#endif
#ifdef CONFIG_BLK_CGROUP
-int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg);
+int bio_associate_blkg_from_css(struct bio *bio,
+ struct cgroup_subsys_state *css);
+int bio_associate_create_blkg(struct request_queue *q, struct bio *bio);
+int bio_reassociate_blkg(struct request_queue *q, struct bio *bio);
void bio_disassociate_task(struct bio *bio);
-void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
+void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else /* CONFIG_BLK_CGROUP */
-static inline int bio_associate_blkcg(struct bio *bio,
- struct cgroup_subsys_state *blkcg_css) { return 0; }
+static inline int bio_associate_blkg_from_css(struct bio *bio,
+ struct cgroup_subsys_state *css)
+{ return 0; }
+static inline int bio_associate_create_blkg(struct request_queue *q,
+ struct bio *bio) { return 0; }
+static inline int bio_reassociate_blkg(struct request_queue *q, struct bio *bio)
+{ return 0; }
static inline void bio_disassociate_task(struct bio *bio) { }
-static inline void bio_clone_blkcg_association(struct bio *dst,
- struct bio *src) { }
+static inline void bio_clone_blkg_association(struct bio *dst,
+ struct bio *src) { }
#endif /* CONFIG_BLK_CGROUP */
#ifdef CONFIG_HIGHMEM
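
With bio_rewind_iter() and bi_done gone, the integrity code instead snapshots the parent bio's iterator into the new bip->bio_iter field before the bio advances, and verifies against that copy later. A hedged sketch of the save side of that pattern (the function name is hypothetical):

/* Illustration: snapshot the pristine iterator instead of rewinding. */
static void example_integrity_save(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	bip->bio_iter = bio->bi_iter;	/* replayed at verify time */
}
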
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 6d766a19f2bb..1e76ceebeb5d 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -126,7 +126,7 @@ struct blkcg_gq {
struct request_list rl;
/* reference count */
- atomic_t refcnt;
+ struct percpu_ref refcnt;
/* is this blkg online? protected by both blkcg and q locks */
bool online;
@@ -184,6 +184,8 @@ extern struct cgroup_subsys_state * const blkcg_root_css;
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
struct request_queue *q, bool update_hint);
+struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+ struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
@@ -230,22 +232,59 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
+/**
+ * blkcg_css - find the current css
+ *
+ * Find the css associated with either the kthread or the current task.
+ * This may return a dying css, so it is up to the caller to use tryget logic
+ * to confirm it is alive and well.
+ */
+static inline struct cgroup_subsys_state *blkcg_css(void)
+{
+ struct cgroup_subsys_state *css;
+
+ css = kthread_blkcg();
+ if (css)
+ return css;
+ return task_css(current, io_cgrp_id);
+}
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
return css ? container_of(css, struct blkcg, css) : NULL;
}
-static inline struct blkcg *bio_blkcg(struct bio *bio)
+/**
+ * __bio_blkcg - internal version of bio_blkcg for bfq and cfq
+ *
+ * DO NOT USE.
+ * There is a flaw using this version of the function. In particular, this was
+ * used in a broken paradigm where association was called on the given css. It
+ * is possible though that the returned css from task_css() is in the process
+ * of dying due to migration of the current task. So it is improper to assume
+ * *_get() is going to succeed. Both BFQ and CFQ rely on this logic and will
+ * take additional work to handle more gracefully.
+ */
+static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
- struct cgroup_subsys_state *css;
+ if (bio && bio->bi_blkg)
+ return bio->bi_blkg->blkcg;
+ return css_to_blkcg(blkcg_css());
+}
- if (bio && bio->bi_css)
- return css_to_blkcg(bio->bi_css);
- css = kthread_blkcg();
- if (css)
- return css_to_blkcg(css);
- return css_to_blkcg(task_css(current, io_cgrp_id));
+/**
+ * bio_blkcg - grab the blkcg associated with a bio
+ * @bio: target bio
+ *
+ * This returns the blkcg associated with a bio, NULL if not associated.
+ * Callers are expected to either handle NULL or know association has been
+ * done prior to calling this.
+ */
+static inline struct blkcg *bio_blkcg(struct bio *bio)
+{
+ if (bio && bio->bi_blkg)
+ return bio->bi_blkg->blkcg;
+ return NULL;
}
static inline bool blk_cgroup_congested(void)
@@ -451,26 +490,35 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
*/
static inline void blkg_get(struct blkcg_gq *blkg)
{
- WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
- atomic_inc(&blkg->refcnt);
+ percpu_ref_get(&blkg->refcnt);
}
/**
- * blkg_try_get - try and get a blkg reference
+ * blkg_tryget - try and get a blkg reference
* @blkg: blkg to get
*
* This is for use when doing an RCU lookup of the blkg. We may be in the midst
* of freeing this blkg, so we can only use it if the refcnt is not zero.
*/
-static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
+static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
- if (atomic_inc_not_zero(&blkg->refcnt))
- return blkg;
- return NULL;
+ return percpu_ref_tryget(&blkg->refcnt);
}
+/**
+ * blkg_tryget_closest - try and get a blkg ref on the closest blkg
+ * @blkg: blkg to get
+ *
+ * This walks up the blkg tree to find the closest non-dying blkg and returns
+ * the blkg it actually took a reference on, which may not be the blkg that
+ * was passed in.
+ */
+static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
+{
+ while (!percpu_ref_tryget(&blkg->refcnt))
+ blkg = blkg->parent;
-void __blkg_release_rcu(struct rcu_head *rcu);
+ return blkg;
+}
/**
* blkg_put - put a blkg reference
@@ -478,9 +526,7 @@ void __blkg_release_rcu(struct rcu_head *rcu);
*/
static inline void blkg_put(struct blkcg_gq *blkg)
{
- WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
- if (atomic_dec_and_test(&blkg->refcnt))
- call_rcu(&blkg->rcu_head, __blkg_release_rcu);
+ percpu_ref_put(&blkg->refcnt);
}
/**
@@ -533,25 +579,36 @@ static inline struct request_list *blk_get_rl(struct request_queue *q,
rcu_read_lock();
- blkcg = bio_blkcg(bio);
+ if (bio && bio->bi_blkg) {
+ blkcg = bio->bi_blkg->blkcg;
+ if (blkcg == &blkcg_root)
+ goto rl_use_root;
+
+ blkg_get(bio->bi_blkg);
+ rcu_read_unlock();
+ return &bio->bi_blkg->rl;
+ }
- /* bypass blkg lookup and use @q->root_rl directly for root */
+ blkcg = css_to_blkcg(blkcg_css());
if (blkcg == &blkcg_root)
- goto root_rl;
+ goto rl_use_root;
- /*
- * Try to use blkg->rl. blkg lookup may fail under memory pressure
- * or if either the blkcg or queue is going away. Fall back to
- * root_rl in such cases.
- */
blkg = blkg_lookup(blkcg, q);
if (unlikely(!blkg))
- goto root_rl;
+ blkg = __blkg_lookup_create(blkcg, q);
+
+ if (blkg->blkcg == &blkcg_root || !blkg_tryget(blkg))
+ goto rl_use_root;
- blkg_get(blkg);
rcu_read_unlock();
return &blkg->rl;
-root_rl:
+
+ /*
+ * Each blkg has its own request_list; however, the root blkcg
+ * uses the request_queue's root_rl. This avoids most of the
+ * request_list overhead for the root blkcg.
+ */
+rl_use_root:
rcu_read_unlock();
return &q->root_rl;
}
@@ -797,32 +854,26 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg
struct bio *bio) { return false; }
#endif
+
+static inline void blkcg_bio_issue_init(struct bio *bio)
+{
+ bio_issue_init(&bio->bi_issue, bio_sectors(bio));
+}
+
static inline bool blkcg_bio_issue_check(struct request_queue *q,
struct bio *bio)
{
- struct blkcg *blkcg;
struct blkcg_gq *blkg;
bool throtl = false;
rcu_read_lock();
- blkcg = bio_blkcg(bio);
-
- /* associate blkcg if bio hasn't attached one */
- bio_associate_blkcg(bio, &blkcg->css);
- blkg = blkg_lookup(blkcg, q);
- if (unlikely(!blkg)) {
- spin_lock_irq(q->queue_lock);
- blkg = blkg_lookup_create(blkcg, q);
- if (IS_ERR(blkg))
- blkg = NULL;
- spin_unlock_irq(q->queue_lock);
- }
+ bio_associate_create_blkg(q, bio);
+ blkg = bio->bi_blkg;
throtl = blk_throtl_bio(q, blkg, bio);
if (!throtl) {
- blkg = blkg ?: q->root_blkg;
/*
* If the bio is flagged with BIO_QUEUE_ENTERED it means this
* is a split bio and we would have already accounted for the
@@ -834,6 +885,8 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
}
+ blkcg_bio_issue_init(bio);
+
rcu_read_unlock();
return !throtl;
}
@@ -930,6 +983,7 @@ static inline int blkcg_activate_policy(struct request_queue *q,
static inline void blkcg_deactivate_policy(struct request_queue *q,
const struct blkcg_policy *pol) { }
+static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
@@ -945,6 +999,7 @@ static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
+static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline bool blkcg_bio_issue_check(struct request_queue *q,
struct bio *bio) { return true; }
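
Two contracts change in blk-cgroup.h above: bio_blkcg() now returns NULL when no association exists (the old fall-back-to-current-task behavior survives only as the deprecated __bio_blkcg()), and references are percpu_ref based, with blkg_tryget_closest() walking toward the root until it finds a live blkg. A hedged caller sketch; my_account() stands in for whatever the caller does with the reference:

/* Sketch: NULL-aware lookup plus closest-live-blkg referencing. */
static void example_account_bio(struct bio *bio)
{
	struct blkcg_gq *blkg;

	if (!bio_blkcg(bio))
		return;			/* nothing associated yet */

	rcu_read_lock();
	blkg = blkg_tryget_closest(bio->bi_blkg); /* may be an ancestor */
	my_account(blkg);		/* placeholder accounting hook */
	blkg_put(blkg);
	rcu_read_unlock();
}
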
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 1da59c16f637..2286dc12c6bc 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -203,6 +203,10 @@ enum {
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q);
+struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
+ const struct blk_mq_ops *ops,
+ unsigned int queue_depth,
+ unsigned int set_flags);
int blk_mq_register_dev(struct device *, struct request_queue *);
void blk_mq_unregister_dev(struct device *, struct request_queue *);
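
blk_mq_init_sq_queue() above rolls tag-set allocation and queue creation into one call for simple single-hw-queue drivers. A hedged usage sketch; struct my_dev, my_mq_ops and the depth of 64 are illustrative assumptions:

/* Sketch: single-queue driver setup with the new helper. */
static int example_create_queue(struct my_dev *dev)
{
	struct request_queue *q;

	q = blk_mq_init_sq_queue(&dev->tag_set, &my_mq_ops,
				 64, BLK_MQ_F_SHOULD_MERGE);
	if (IS_ERR(q))
		return PTR_ERR(q);

	dev->queue = q;
	return 0;
}
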
diff --git a/include/linux/blk-pm.h b/include/linux/blk-pm.h
new file mode 100644
index 000000000000..b80c65aba249
--- /dev/null
+++ b/include/linux/blk-pm.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _BLK_PM_H_
+#define _BLK_PM_H_
+
+struct device;
+struct request_queue;
+
+/*
+ * block layer runtime pm functions
+ */
+#ifdef CONFIG_PM
+extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
+extern int blk_pre_runtime_suspend(struct request_queue *q);
+extern void blk_post_runtime_suspend(struct request_queue *q, int err);
+extern void blk_pre_runtime_resume(struct request_queue *q);
+extern void blk_post_runtime_resume(struct request_queue *q, int err);
+extern void blk_set_runtime_active(struct request_queue *q);
+#else
+static inline void blk_pm_runtime_init(struct request_queue *q,
+ struct device *dev) {}
+#endif
+
+#endif /* _BLK_PM_H_ */
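
These runtime-PM helpers move out of blkdev.h into the new dedicated header, so users now need an explicit #include <linux/blk-pm.h>. A hedged sketch of the typical probe-time call; everything except blk_pm_runtime_init() is an assumed driver detail:

#include <linux/blk-pm.h>

/* Sketch: attach a request queue to its device's runtime PM. */
static void example_setup_rpm(struct my_dev *dev)
{
	blk_pm_runtime_init(dev->queue, &dev->pdev->dev);
	/* blk_{pre,post}_runtime_{suspend,resume}() then run from PM callbacks */
}
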
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index f6dfb30737d8..9578c7ab1eb6 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -178,7 +178,6 @@ struct bio {
* release. Read comment on top of bio_associate_current().
*/
struct io_context *bi_ioc;
- struct cgroup_subsys_state *bi_css;
struct blkcg_gq *bi_blkg;
struct bio_issue bi_issue;
#endif
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d6869e0e2b64..61207560e826 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -54,7 +54,7 @@ struct blk_stat_callback;
* Maximum number of blkcg policies allowed to be registered concurrently.
* Defined here to simplify include dependency.
*/
-#define BLKCG_MAX_POLS 3
+#define BLKCG_MAX_POLS 5
typedef void (rq_end_io_fn)(struct request *, blk_status_t);
@@ -108,7 +108,7 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_QUIET ((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV ((__force req_flags_t)(1 << 12))
-/* account I/O stat */
+/* account into disk and partition IO statistics */
#define RQF_IO_STAT ((__force req_flags_t)(1 << 13))
/* request came from our alloc pool */
#define RQF_ALLOCED ((__force req_flags_t)(1 << 14))
@@ -116,7 +116,7 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_PM ((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED ((__force req_flags_t)(1 << 16))
-/* IO stats tracking on */
+/* track IO completion time */
#define RQF_STATS ((__force req_flags_t)(1 << 17))
/* Look at ->special_vec for the actual data payload instead of the
bio chain. */
@@ -504,6 +504,12 @@ struct request_queue {
* various queue flags, see QUEUE_* below
*/
unsigned long queue_flags;
+ /*
+ * Number of contexts that have called blk_set_pm_only(). If this
+ * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
+ * processed.
+ */
+ atomic_t pm_only;
/*
* ida allocated id for this queue. Used to index queues from
@@ -679,7 +685,7 @@ struct request_queue {
#define QUEUE_FLAG_FAIL_IO 7 /* fake timeout */
#define QUEUE_FLAG_NONROT 9 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT 10 /* do IO stats */
+#define QUEUE_FLAG_IO_STAT 10 /* do disk/partitions IO accounting */
#define QUEUE_FLAG_DISCARD 11 /* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES 12 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 13 /* Contributes to random pool */
@@ -693,12 +699,11 @@ struct request_queue {
#define QUEUE_FLAG_FUA 21 /* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ 22 /* flush not queueuable */
#define QUEUE_FLAG_DAX 23 /* device supports DAX */
-#define QUEUE_FLAG_STATS 24 /* track rq completion times */
+#define QUEUE_FLAG_STATS 24 /* track IO start and completion times */
#define QUEUE_FLAG_POLL_STATS 25 /* collecting stats for hybrid polling */
#define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */
#define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */
-#define QUEUE_FLAG_PREEMPT_ONLY 29 /* only process REQ_PREEMPT requests */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_SAME_COMP) | \
@@ -736,12 +741,11 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
-#define blk_queue_preempt_only(q) \
- test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
+#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
-extern int blk_set_preempt_only(struct request_queue *q);
-extern void blk_clear_preempt_only(struct request_queue *q);
+extern void blk_set_pm_only(struct request_queue *q);
+extern void blk_clear_pm_only(struct request_queue *q);
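
Note the semantic shift here: the old QUEUE_FLAG_PREEMPT_ONLY bit becomes an atomic pm_only counter, so independent callers nest instead of clobbering each other, and while it is non-zero only RQF_PM/RQF_PREEMPT requests are admitted. A hedged sketch of the balanced pairing a suspend path would use:

/* Sketch: pm_only nests; every set needs a matching clear. */
blk_set_pm_only(q);	/* only RQF_PM/RQF_PREEMPT requests pass now */
/* ... issue power-management requests, suspend the device ... */
blk_clear_pm_only(q);	/* normal I/O resumes when the count hits zero */
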
static inline int queue_in_flight(struct request_queue *q)
{
@@ -1281,29 +1285,6 @@ extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);
/*
- * block layer runtime pm functions
- */
-#ifdef CONFIG_PM
-extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
-extern int blk_pre_runtime_suspend(struct request_queue *q);
-extern void blk_post_runtime_suspend(struct request_queue *q, int err);
-extern void blk_pre_runtime_resume(struct request_queue *q);
-extern void blk_post_runtime_resume(struct request_queue *q, int err);
-extern void blk_set_runtime_active(struct request_queue *q);
-#else
-static inline void blk_pm_runtime_init(struct request_queue *q,
- struct device *dev) {}
-static inline int blk_pre_runtime_suspend(struct request_queue *q)
-{
- return -ENOSYS;
-}
-static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
-static inline void blk_pre_runtime_resume(struct request_queue *q) {}
-static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
-static inline void blk_set_runtime_active(struct request_queue *q) {}
-#endif
-
-/*
* blk_plug permits building a queue of related requests by holding the I/O
* fragments for a short period. This allows merging of sequential requests
* into single larger request. As the requests are moved from a per-task list to
@@ -1676,94 +1657,6 @@ static inline void put_dev_sector(Sector p)
put_page(p.v);
}
-static inline bool __bvec_gap_to_prev(struct request_queue *q,
- struct bio_vec *bprv, unsigned int offset)
-{
- return offset ||
- ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
-}
-
-/*
- * Check if adding a bio_vec after bprv with offset would create a gap in
- * the SG list. Most drivers don't care about this, but some do.
- */
-static inline bool bvec_gap_to_prev(struct request_queue *q,
- struct bio_vec *bprv, unsigned int offset)
-{
- if (!queue_virt_boundary(q))
- return false;
- return __bvec_gap_to_prev(q, bprv, offset);
-}
-
-/*
- * Check if the two bvecs from two bios can be merged to one segment.
- * If yes, no need to check gap between the two bios since the 1st bio
- * and the 1st bvec in the 2nd bio can be handled in one segment.
- */
-static inline bool bios_segs_mergeable(struct request_queue *q,
- struct bio *prev, struct bio_vec *prev_last_bv,
- struct bio_vec *next_first_bv)
-{
- if (!BIOVEC_PHYS_MERGEABLE(prev_last_bv, next_first_bv))
- return false;
- if (!BIOVEC_SEG_BOUNDARY(q, prev_last_bv, next_first_bv))
- return false;
- if (prev->bi_seg_back_size + next_first_bv->bv_len >
- queue_max_segment_size(q))
- return false;
- return true;
-}
-
-static inline bool bio_will_gap(struct request_queue *q,
- struct request *prev_rq,
- struct bio *prev,
- struct bio *next)
-{
- if (bio_has_data(prev) && queue_virt_boundary(q)) {
- struct bio_vec pb, nb;
-
- /*
- * don't merge if the 1st bio starts with non-zero
- * offset, otherwise it is quite difficult to respect
- * sg gap limit. We work hard to merge a huge number of small
- * single bios in case of mkfs.
- */
- if (prev_rq)
- bio_get_first_bvec(prev_rq->bio, &pb);
- else
- bio_get_first_bvec(prev, &pb);
- if (pb.bv_offset)
- return true;
-
- /*
- * We don't need to worry about the situation that the
- * merged segment ends in unaligned virt boundary:
- *
- * - if 'pb' ends aligned, the merged segment ends aligned
- * - if 'pb' ends unaligned, the next bio must include
- * one single bvec of 'nb', otherwise the 'nb' can't
- * merge with 'pb'
- */
- bio_get_last_bvec(prev, &pb);
- bio_get_first_bvec(next, &nb);
-
- if (!bios_segs_mergeable(q, prev, &pb, &nb))
- return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
- }
-
- return false;
-}
-
-static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
-{
- return bio_will_gap(req->q, req, req->biotail, bio);
-}
-
-static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
-{
- return bio_will_gap(req->q, NULL, bio, req->bio);
-}
-
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
@@ -1843,26 +1736,6 @@ queue_max_integrity_segments(struct request_queue *q)
return q->limits.max_integrity_segments;
}
-static inline bool integrity_req_gap_back_merge(struct request *req,
- struct bio *next)
-{
- struct bio_integrity_payload *bip = bio_integrity(req->bio);
- struct bio_integrity_payload *bip_next = bio_integrity(next);
-
- return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
- bip_next->bip_vec[0].bv_offset);
-}
-
-static inline bool integrity_req_gap_front_merge(struct request *req,
- struct bio *bio)
-{
- struct bio_integrity_payload *bip = bio_integrity(bio);
- struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
-
- return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
- bip_next->bip_vec[0].bv_offset);
-}
-
/**
* bio_integrity_intervals - Return number of integrity intervals for a bio
* @bi: blk_integrity profile for device
@@ -1947,17 +1820,6 @@ static inline bool blk_integrity_merge_bio(struct request_queue *rq,
return true;
}
-static inline bool integrity_req_gap_back_merge(struct request *req,
- struct bio *next)
-{
- return false;
-}
-static inline bool integrity_req_gap_front_merge(struct request *req,
- struct bio *bio)
-{
- return false;
-}
-
static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
unsigned int sectors)
{
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index fe7a22dd133b..02c73c6aa805 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -40,8 +40,6 @@ struct bvec_iter {
unsigned int bi_idx; /* current index into bvl_vec */
- unsigned int bi_done; /* number of bytes completed */
-
unsigned int bi_bvec_done; /* number of bytes completed in
current bvec */
};
@@ -85,7 +83,6 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv,
bytes -= len;
iter->bi_size -= len;
iter->bi_bvec_done += len;
- iter->bi_done += len;
if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
iter->bi_bvec_done = 0;
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index ff20b677fb9f..22254c1fe1c5 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -412,6 +412,7 @@ struct cgroup {
* specific task are charged to the dom_cgrp.
*/
struct cgroup *dom_cgrp;
+ struct cgroup *old_dom_cgrp; /* used while enabling threaded */
/* per-cpu recursive resource statistics */
struct cgroup_rstat_cpu __percpu *rstat_cpu;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 32c553556bbd..b8bcbdeb2eac 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -93,6 +93,8 @@ extern struct css_set init_css_set;
bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
+struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
+ struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 1a3c4f37e908..de0c13bdcd2c 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -103,6 +103,9 @@ typedef struct compat_sigaltstack {
compat_size_t ss_size;
} compat_stack_t;
#endif
+#ifndef COMPAT_MINSIGSTKSZ
+#define COMPAT_MINSIGSTKSZ MINSIGSTKSZ
+#endif
#define compat_jiffies_to_clock_t(x) \
(((unsigned long)(x) * COMPAT_USER_HZ) / HZ)
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 763bbad1e258..4d36b27214fd 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -79,20 +79,6 @@
#define __noretpoline __attribute__((indirect_branch("keep")))
#endif
-/*
- * it doesn't make sense on ARM (currently the only user of __naked)
- * to trace naked functions because then mcount is called without
- * stack and frame pointer being set up and there is no chance to
- * restore the lr register to the value before mcount was called.
- *
- * The asm() bodies of naked functions often depend on standard calling
- * conventions, therefore they must be noinline and noclone.
- *
- * GCC 4.[56] currently fail to enforce this, so we must do so ourselves.
- * See GCC PR44290.
- */
-#define __naked __attribute__((naked)) noinline __noclone notrace
-
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
#define __optimize(level) __attribute__((__optimize__(level)))
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 3525c179698c..db192becfec4 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -226,6 +226,14 @@ struct ftrace_likely_data {
#define notrace __attribute__((no_instrument_function))
#endif
+/*
+ * it doesn't make sense on ARM (currently the only user of __naked)
+ * to trace naked functions because then mcount is called without
+ * stack and frame pointer being set up and there is no chance to
+ * restore the lr register to the value before mcount was called.
+ */
+#define __naked __attribute__((naked)) notrace
+
#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
/*
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index a02deea30185..015bb59c0331 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -111,7 +111,7 @@ struct elevator_mq_ops {
void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
bool (*has_work)(struct blk_mq_hw_ctx *);
- void (*completed_request)(struct request *);
+ void (*completed_request)(struct request *, u64);
void (*started_request)(struct request *);
void (*requeue_request)(struct request *);
struct request *(*former_request)(struct request_queue *, struct request *);
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index 8942e61f0028..8ab5df769923 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -53,12 +53,20 @@ enum fpga_mgr_states {
FPGA_MGR_STATE_OPERATING,
};
-/*
- * FPGA Manager flags
- * FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
- * FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
- * FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
- * FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
+/**
+ * DOC: FPGA Manager flags
+ *
+ * Flags used in the &fpga_image_info->flags field
+ *
+ * %FPGA_MGR_PARTIAL_RECONFIG: do partial reconfiguration if supported
+ *
+ * %FPGA_MGR_EXTERNAL_CONFIG: FPGA has been configured prior to Linux booting
+ *
+ * %FPGA_MGR_ENCRYPTED_BITSTREAM: indicates bitstream is encrypted
+ *
+ * %FPGA_MGR_BITSTREAM_LSB_FIRST: SPI bitstream bit order is LSB first
+ *
+ * %FPGA_MGR_COMPRESSED_BITSTREAM: FPGA bitstream is compressed
*/
#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
#define FPGA_MGR_EXTERNAL_CONFIG BIT(1)
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 33322702c910..897eae8faee1 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1763,6 +1763,7 @@ struct file_operations {
u64);
int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t,
u64);
+ int (*fadvise)(struct file *, loff_t, loff_t, int);
} __randomize_layout;
struct inode_operations {
@@ -1827,8 +1828,10 @@ extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
struct inode *inode_out, loff_t pos_out,
u64 *len, bool is_dedupe);
+extern int do_clone_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out, u64 len);
extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out, u64 len);
+ struct file *file_out, loff_t pos_out, u64 len);
extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
struct inode *dest, loff_t destoff,
loff_t len, bool *is_same);
@@ -2772,19 +2775,6 @@ static inline void file_end_write(struct file *file)
__sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
}
-static inline int do_clone_file_range(struct file *file_in, loff_t pos_in,
- struct file *file_out, loff_t pos_out,
- u64 len)
-{
- int ret;
-
- file_start_write(file_out);
- ret = vfs_clone_file_range(file_in, pos_in, file_out, pos_out, len);
- file_end_write(file_out);
-
- return ret;
-}
-
/*
* get_write_access() gets write permission for a file.
* put_write_access() releases this write permission.
@@ -3459,4 +3449,8 @@ static inline bool dir_relax_shared(struct inode *inode)
extern bool path_noexec(const struct path *path);
extern void inode_nohighmem(struct inode *inode);
+/* mm/fadvise.c */
+extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
+ int advice);
+
#endif /* _LINUX_FS_H */
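
The fs.h changes add a ->fadvise() file operation and a vfs_fadvise() entry point, letting a filesystem intercept advice instead of always getting the generic handling; do_clone_file_range() also moves out of line. A hedged sketch of a stacking filesystem forwarding advice to its underlying file; my_real_file() is a hypothetical helper:

/* Sketch: forward fadvise to the backing file of a stacked fs. */
static int example_fadvise(struct file *file, loff_t offset, loff_t len,
			   int advice)
{
	struct file *realfile = my_real_file(file);

	return vfs_fadvise(realfile, offset, len, advice);
}

static const struct file_operations example_fops = {
	/* ... other ops ... */
	.fadvise	= example_fadvise,
};
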
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 57864422a2c8..70fc838e6773 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -83,10 +83,10 @@ struct partition {
} __attribute__((packed));
struct disk_stats {
+ u64 nsecs[NR_STAT_GROUPS];
unsigned long sectors[NR_STAT_GROUPS];
unsigned long ios[NR_STAT_GROUPS];
unsigned long merges[NR_STAT_GROUPS];
- unsigned long ticks[NR_STAT_GROUPS];
unsigned long io_ticks;
unsigned long time_in_queue;
};
@@ -354,6 +354,9 @@ static inline void free_part_stats(struct hd_struct *part)
#endif /* CONFIG_SMP */
+#define part_stat_read_msecs(part, which) \
+ div_u64(part_stat_read(part, nsecs[which]), NSEC_PER_MSEC)
+
#define part_stat_read_accum(part, field) \
(part_stat_read(part, field[STAT_READ]) + \
part_stat_read(part, field[STAT_WRITE]) + \
@@ -399,10 +402,11 @@ static inline void free_part_info(struct hd_struct *part)
extern void part_round_stats(struct request_queue *q, int cpu, struct hd_struct *part);
/* block/genhd.c */
-extern void device_add_disk(struct device *parent, struct gendisk *disk);
+extern void device_add_disk(struct device *parent, struct gendisk *disk,
+ const struct attribute_group **groups);
static inline void add_disk(struct gendisk *disk)
{
- device_add_disk(NULL, disk);
+ device_add_disk(NULL, disk, NULL);
}
extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk);
static inline void add_disk_no_queue_reg(struct gendisk *disk)
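
Two related genhd.h changes above: disk_stats now records I/O time in nanoseconds (nsecs[], read back via part_stat_read_msecs()), and device_add_disk() grew an attribute-group argument so driver sysfs files exist before the disk's KOBJ_ADD uevent. A hedged sketch; the group contents are assumed:

/* Sketch: register disk plus sysfs groups atomically w.r.t. the uevent. */
static const struct attribute_group *example_disk_groups[] = {
	&example_attr_group,	/* hypothetical driver attributes */
	NULL
};

static void example_register(struct device *parent, struct gendisk *disk,
			     struct hd_struct *part)
{
	device_add_disk(parent, disk, example_disk_groups);

	/* nsecs[] converts at read time, e.g. milliseconds spent reading: */
	pr_info("read ms: %llu\n",
		(unsigned long long)part_stat_read_msecs(part, STAT_READ));
}
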
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 0ea328e71ec9..a4d5eb37744a 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -95,6 +95,13 @@ struct gpio_irq_chip {
unsigned int num_parents;
/**
+ * @parent_irq:
+ *
+ * For use by gpiochip_set_cascaded_irqchip()
+ */
+ unsigned int parent_irq;
+
+ /**
* @parents:
*
* A list of interrupt parents of a GPIO chip. This is owned by the
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 834e6461a690..d44a78362942 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -526,6 +526,7 @@ struct hid_input {
const char *name;
bool registered;
struct list_head reports; /* the list of reports */
+ unsigned int application; /* application usage for this input */
};
enum hid_type {
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 99c19b06d9a4..fdcb45999b26 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -43,7 +43,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, unsigned long old_end,
- pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
+ pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot,
int prot_numa);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 6b68e345f0ca..087fd5f48c91 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -140,6 +140,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
pte_t *huge_pte_offset(struct mm_struct *mm,
unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
@@ -170,6 +172,18 @@ static inline unsigned long hugetlb_total_pages(void)
return 0;
}
+static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
+ pte_t *ptep)
+{
+ return 0;
+}
+
+static inline void adjust_range_if_pmd_sharing_possible(
+ struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+}
+
#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
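
The new adjust_range_if_pmd_sharing_possible() (stubbed out above when hugetlb is disabled) exists because unsharing a shared PMD tears down a PUD-sized mapping, so the caller's TLB flush window must be widened before it can race. A hedged sketch of the caller pattern:

/* Sketch: widen the flush range in case a shared PMD gets unshared. */
static void example_unmap_one(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long start = addr, end = addr + PAGE_SIZE;

	adjust_range_if_pmd_sharing_possible(vma, &start, &end);
	/* ... clear entries under the page-table lock ... */
	flush_tlb_range(vma, start, end);	/* may now span a full PUD */
}
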
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 0205aee44ded..c926698040e0 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -733,8 +733,6 @@ bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
-void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
-void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index e9e0d1c7eaf5..2fdeac1a420d 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -86,8 +86,8 @@ struct nvm_chk_meta;
typedef int (nvm_id_fn)(struct nvm_dev *);
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
-typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, struct nvm_chk_meta *,
- sector_t, int);
+typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int,
+ struct nvm_chk_meta *);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
typedef int (nvm_submit_io_sync_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
@@ -305,6 +305,8 @@ struct nvm_rq {
u64 ppa_status; /* ppa media status */
int error;
+ int is_seq; /* Sequential hint flag. 1.2 only */
+
void *private;
};
@@ -318,6 +320,11 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
return rqdata + 1;
}
+static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd)
+{
+ return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr;
+}
+
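
nvm_rq_to_ppa_list() above hides the single-vs-vector PPA layout of an nvm_rq, so target code can iterate one way regardless of nr_ppas. A hedged sketch:

/* Sketch: uniform PPA walk whether the request carries 1 or N PPAs. */
static void example_walk_ppas(struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	int i;

	for (i = 0; i < rqd->nr_ppas; i++)
		pr_debug("ppa %d: 0x%llx\n", i,
			 (unsigned long long)ppa_list[i].ppa);
}
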
enum {
NVM_BLK_ST_FREE = 0x1, /* Free block */
NVM_BLK_ST_TGT = 0x2, /* Block in use by target */
@@ -485,6 +492,144 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev,
return l;
}
+static inline u64 dev_to_chunk_addr(struct nvm_dev *dev, void *addrf,
+ struct ppa_addr p)
+{
+ struct nvm_geo *geo = &dev->geo;
+ u64 caddr;
+
+ if (geo->version == NVM_OCSSD_SPEC_12) {
+ struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)addrf;
+
+ caddr = (u64)p.g.pg << ppaf->pg_offset;
+ caddr |= (u64)p.g.pl << ppaf->pln_offset;
+ caddr |= (u64)p.g.sec << ppaf->sec_offset;
+ } else {
+ caddr = p.m.sec;
+ }
+
+ return caddr;
+}
+
+static inline struct ppa_addr nvm_ppa32_to_ppa64(struct nvm_dev *dev,
+ void *addrf, u32 ppa32)
+{
+ struct ppa_addr ppa64;
+
+ ppa64.ppa = 0;
+
+ if (ppa32 == -1) {
+ ppa64.ppa = ADDR_EMPTY;
+ } else if (ppa32 & (1U << 31)) {
+ ppa64.c.line = ppa32 & ((~0U) >> 1);
+ ppa64.c.is_cached = 1;
+ } else {
+ struct nvm_geo *geo = &dev->geo;
+
+ if (geo->version == NVM_OCSSD_SPEC_12) {
+ struct nvm_addrf_12 *ppaf = addrf;
+
+ ppa64.g.ch = (ppa32 & ppaf->ch_mask) >>
+ ppaf->ch_offset;
+ ppa64.g.lun = (ppa32 & ppaf->lun_mask) >>
+ ppaf->lun_offset;
+ ppa64.g.blk = (ppa32 & ppaf->blk_mask) >>
+ ppaf->blk_offset;
+ ppa64.g.pg = (ppa32 & ppaf->pg_mask) >>
+ ppaf->pg_offset;
+ ppa64.g.pl = (ppa32 & ppaf->pln_mask) >>
+ ppaf->pln_offset;
+ ppa64.g.sec = (ppa32 & ppaf->sec_mask) >>
+ ppaf->sec_offset;
+ } else {
+ struct nvm_addrf *lbaf = addrf;
+
+ ppa64.m.grp = (ppa32 & lbaf->ch_mask) >>
+ lbaf->ch_offset;
+ ppa64.m.pu = (ppa32 & lbaf->lun_mask) >>
+ lbaf->lun_offset;
+ ppa64.m.chk = (ppa32 & lbaf->chk_mask) >>
+ lbaf->chk_offset;
+ ppa64.m.sec = (ppa32 & lbaf->sec_mask) >>
+ lbaf->sec_offset;
+ }
+ }
+
+ return ppa64;
+}
+
+static inline u32 nvm_ppa64_to_ppa32(struct nvm_dev *dev,
+ void *addrf, struct ppa_addr ppa64)
+{
+ u32 ppa32 = 0;
+
+ if (ppa64.ppa == ADDR_EMPTY) {
+ ppa32 = ~0U;
+ } else if (ppa64.c.is_cached) {
+ ppa32 |= ppa64.c.line;
+ ppa32 |= 1U << 31;
+ } else {
+ struct nvm_geo *geo = &dev->geo;
+
+ if (geo->version == NVM_OCSSD_SPEC_12) {
+ struct nvm_addrf_12 *ppaf = addrf;
+
+ ppa32 |= ppa64.g.ch << ppaf->ch_offset;
+ ppa32 |= ppa64.g.lun << ppaf->lun_offset;
+ ppa32 |= ppa64.g.blk << ppaf->blk_offset;
+ ppa32 |= ppa64.g.pg << ppaf->pg_offset;
+ ppa32 |= ppa64.g.pl << ppaf->pln_offset;
+ ppa32 |= ppa64.g.sec << ppaf->sec_offset;
+ } else {
+ struct nvm_addrf *lbaf = addrf;
+
+ ppa32 |= ppa64.m.grp << lbaf->ch_offset;
+ ppa32 |= ppa64.m.pu << lbaf->lun_offset;
+ ppa32 |= ppa64.m.chk << lbaf->chk_offset;
+ ppa32 |= ppa64.m.sec << lbaf->sec_offset;
+ }
+ }
+
+ return ppa32;
+}
+
+static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev,
+ struct ppa_addr *ppa)
+{
+ struct nvm_geo *geo = &dev->geo;
+ int last = 0;
+
+ if (geo->version == NVM_OCSSD_SPEC_12) {
+ int sec = ppa->g.sec;
+
+ sec++;
+ if (sec == geo->ws_min) {
+ int pg = ppa->g.pg;
+
+ sec = 0;
+ pg++;
+ if (pg == geo->num_pg) {
+ int pl = ppa->g.pl;
+
+ pg = 0;
+ pl++;
+ if (pl == geo->num_pln)
+ last = 1;
+
+ ppa->g.pl = pl;
+ }
+ ppa->g.pg = pg;
+ }
+ ppa->g.sec = sec;
+ } else {
+ ppa->m.sec++;
+ if (ppa->m.sec == geo->clba)
+ last = 1;
+ }
+
+ return last;
+}
+
typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
@@ -493,9 +638,15 @@ typedef void (nvm_tgt_exit_fn)(void *, bool);
typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);
+enum {
+ NVM_TGT_F_DEV_L2P = 0,
+ NVM_TGT_F_HOST_L2P = 1 << 0,
+};
+
struct nvm_tgt_type {
const char *name;
unsigned int version[3];
+ int flags;
/* target entry points */
nvm_tgt_make_rq_fn *make_rq;
@@ -524,18 +675,13 @@ extern struct nvm_dev *nvm_alloc_dev(int);
extern int nvm_register(struct nvm_dev *);
extern void nvm_unregister(struct nvm_dev *);
-
-extern int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev,
- struct nvm_chk_meta *meta, struct ppa_addr ppa,
- int nchks);
-
-extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
+extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr,
+ int, struct nvm_chk_meta *);
+extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *,
int, int);
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *);
extern void nvm_end_io(struct nvm_rq *);
-extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int);
-extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *);
#else /* CONFIG_NVM */
struct nvm_dev_ops;
diff --git a/include/linux/mfd/da9063/pdata.h b/include/linux/mfd/da9063/pdata.h
index 8a125701ef7b..50bed4f89c1a 100644
--- a/include/linux/mfd/da9063/pdata.h
+++ b/include/linux/mfd/da9063/pdata.h
@@ -21,7 +21,7 @@
/*
* Regulator configuration
*/
-/* DA9063 regulator IDs */
+/* DA9063 and DA9063L regulator IDs */
enum {
/* BUCKs */
DA9063_ID_BCORE1,
@@ -37,18 +37,20 @@ enum {
DA9063_ID_BMEM_BIO_MERGED,
/* When two BUCKs are merged, they cannot be reused separately */
- /* LDOs */
+ /* LDOs on both DA9063 and DA9063L */
+ DA9063_ID_LDO3,
+ DA9063_ID_LDO7,
+ DA9063_ID_LDO8,
+ DA9063_ID_LDO9,
+ DA9063_ID_LDO11,
+
+ /* DA9063-only LDOs */
DA9063_ID_LDO1,
DA9063_ID_LDO2,
- DA9063_ID_LDO3,
DA9063_ID_LDO4,
DA9063_ID_LDO5,
DA9063_ID_LDO6,
- DA9063_ID_LDO7,
- DA9063_ID_LDO8,
- DA9063_ID_LDO9,
DA9063_ID_LDO10,
- DA9063_ID_LDO11,
};
/* Regulators platform data */
diff --git a/include/linux/mfd/rohm-bd718x7.h b/include/linux/mfd/rohm-bd718x7.h
index a528747f8aed..e8338e5dc10b 100644
--- a/include/linux/mfd/rohm-bd718x7.h
+++ b/include/linux/mfd/rohm-bd718x7.h
@@ -78,9 +78,9 @@ enum {
BD71837_REG_TRANS_COND0 = 0x1F,
BD71837_REG_TRANS_COND1 = 0x20,
BD71837_REG_VRFAULTEN = 0x21,
- BD71837_REG_MVRFLTMASK0 = 0x22,
- BD71837_REG_MVRFLTMASK1 = 0x23,
- BD71837_REG_MVRFLTMASK2 = 0x24,
+ BD718XX_REG_MVRFLTMASK0 = 0x22,
+ BD718XX_REG_MVRFLTMASK1 = 0x23,
+ BD718XX_REG_MVRFLTMASK2 = 0x24,
BD71837_REG_RCVCFG = 0x25,
BD71837_REG_RCVNUM = 0x26,
BD71837_REG_PWRONCONFIG0 = 0x27,
@@ -159,6 +159,33 @@ enum {
#define BUCK8_MASK 0x3F
#define BUCK8_DEFAULT 0x1E
+/* BD718XX Voltage monitoring masks */
+#define BD718XX_BUCK1_VRMON80 0x1
+#define BD718XX_BUCK1_VRMON130 0x2
+#define BD718XX_BUCK2_VRMON80 0x4
+#define BD718XX_BUCK2_VRMON130 0x8
+#define BD718XX_1ST_NODVS_BUCK_VRMON80 0x1
+#define BD718XX_1ST_NODVS_BUCK_VRMON130 0x2
+#define BD718XX_2ND_NODVS_BUCK_VRMON80 0x4
+#define BD718XX_2ND_NODVS_BUCK_VRMON130 0x8
+#define BD718XX_3RD_NODVS_BUCK_VRMON80 0x10
+#define BD718XX_3RD_NODVS_BUCK_VRMON130 0x20
+#define BD718XX_4TH_NODVS_BUCK_VRMON80 0x40
+#define BD718XX_4TH_NODVS_BUCK_VRMON130 0x80
+#define BD718XX_LDO1_VRMON80 0x1
+#define BD718XX_LDO2_VRMON80 0x2
+#define BD718XX_LDO3_VRMON80 0x4
+#define BD718XX_LDO4_VRMON80 0x8
+#define BD718XX_LDO5_VRMON80 0x10
+#define BD718XX_LDO6_VRMON80 0x20
+
+/* BD71837 specific voltage monitoring masks */
+#define BD71837_BUCK3_VRMON80 0x10
+#define BD71837_BUCK3_VRMON130 0x20
+#define BD71837_BUCK4_VRMON80 0x40
+#define BD71837_BUCK4_VRMON130 0x80
+#define BD71837_LDO7_VRMON80 0x40
+
/* BD71837_REG_IRQ bits */
#define IRQ_SWRST 0x40
#define IRQ_PWRON_S 0x20
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 7a452716de4b..88a041b73abf 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -362,8 +362,8 @@ struct mlx5_frag_buf {
struct mlx5_frag_buf_ctrl {
struct mlx5_frag_buf frag_buf;
u32 sz_m1;
- u32 frag_sz_m1;
- u32 strides_offset;
+ u16 frag_sz_m1;
+ u16 strides_offset;
u8 log_sz;
u8 log_stride;
u8 log_frag_strides;
@@ -995,7 +995,7 @@ static inline u32 mlx5_base_mkey(const u32 key)
}
static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
- u32 strides_offset,
+ u16 strides_offset,
struct mlx5_frag_buf_ctrl *fbc)
{
fbc->log_stride = log_stride;
@@ -1032,6 +1032,14 @@ static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
((fbc->frag_sz_m1 & ix) << fbc->log_stride);
}
+static inline u32
+mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
+{
+ u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1;
+
+ return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
+}
+
int mlx5_cmd_init(struct mlx5_core_dev *dev);
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
@@ -1052,7 +1060,7 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
-void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index 83a33a1873a6..7f5ca2cd3a32 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -90,6 +90,8 @@ struct mlx5_hairpin {
u32 *rqn;
u32 *sqn;
+
+ bool peer_gone;
};
struct mlx5_hairpin *
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a61ebe8ad4ca..0416a7204be3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2455,6 +2455,12 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
return vma;
}
+static inline bool range_in_vma(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ return (vma && vma->vm_start <= start && end <= vma->vm_end);
+}
+
#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index cd2bc939efd0..5ed8f6292a53 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -341,7 +341,7 @@ struct mm_struct {
struct {
struct vm_area_struct *mmap; /* list of VMAs */
struct rb_root mm_rb;
- u32 vmacache_seqnum; /* per-thread vmacache */
+ u64 vmacache_seqnum; /* per-thread vmacache */
#ifdef CONFIG_MMU
unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index 5fe87687664c..d7016dcb245e 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -32,7 +32,7 @@
#define VMACACHE_MASK (VMACACHE_SIZE - 1)
struct vmacache {
- u32 seqnum;
+ u64 seqnum;
struct vm_area_struct *vmas[VMACACHE_SIZE];
};
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 1e22d96734e0..d4b0c79d2924 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -668,16 +668,6 @@ typedef struct pglist_data {
wait_queue_head_t kcompactd_wait;
struct task_struct *kcompactd;
#endif
-#ifdef CONFIG_NUMA_BALANCING
- /* Lock serializing the migrate rate limiting window */
- spinlock_t numabalancing_migrate_lock;
-
- /* Rate limiting time interval */
- unsigned long numabalancing_migrate_next_window;
-
- /* Number of pages migrated during the rate limiting time interval */
- unsigned long numabalancing_migrate_nr_pages;
-#endif
/*
* This is a per-node reserve of pages that are not available
* to userspace allocations.
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 1298a7daa57d..01797cb4587e 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -754,6 +754,7 @@ struct tb_service_id {
* struct typec_device_id - USB Type-C alternate mode identifiers
* @svid: Standard or Vendor ID
* @mode: Mode index
+ * @driver_data: Driver specific data
*/
struct typec_device_id {
__u16 svid;
diff --git a/include/linux/module.h b/include/linux/module.h
index f807f15bebbe..e19ae08c7fb8 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -20,6 +20,7 @@
#include <linux/export.h>
#include <linux/rbtree_latch.h>
#include <linux/error-injection.h>
+#include <linux/tracepoint-defs.h>
#include <linux/percpu.h>
#include <asm/module.h>
@@ -430,7 +431,7 @@ struct module {
#ifdef CONFIG_TRACEPOINTS
unsigned int num_tracepoints;
- struct tracepoint * const *tracepoints_ptrs;
+ tracepoint_ptr_t *tracepoints_ptrs;
#endif
#ifdef HAVE_JUMP_LABEL
struct jump_entry *jump_entries;
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h
index e93837f647de..1d3ade69d39a 100644
--- a/include/linux/mtd/blktrans.h
+++ b/include/linux/mtd/blktrans.h
@@ -23,7 +23,6 @@
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
-#include <linux/workqueue.h>
struct hd_geometry;
struct mtd_info;
@@ -44,9 +43,9 @@ struct mtd_blktrans_dev {
struct kref ref;
struct gendisk *disk;
struct attribute_group *disk_attributes;
- struct workqueue_struct *wq;
- struct work_struct work;
struct request_queue *rq;
+ struct list_head rq_list;
+ struct blk_mq_tag_set *tag_set;
spinlock_t queue_lock;
void *priv;
fmode_t file_mode;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ca5ab98053c8..d837dad24b4c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1730,6 +1730,8 @@ enum netdev_priv_flags {
* switch driver and used to set the phys state of the
* switch port.
*
+ * @wol_enabled: Wake-on-LAN is enabled
+ *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
@@ -2014,6 +2016,7 @@ struct net_device {
struct lock_class_key *qdisc_tx_busylock;
struct lock_class_key *qdisc_running_key;
bool proto_down;
+ unsigned wol_enabled:1;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -2455,6 +2458,13 @@ struct netdev_notifier_info {
struct netlink_ext_ack *extack;
};
+struct netdev_notifier_info_ext {
+ struct netdev_notifier_info info; /* must be first */
+ union {
+ u32 mtu;
+ } ext;
+};
+
struct netdev_notifier_change_info {
struct netdev_notifier_info info; /* must be first */
unsigned int flags_changed;
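
A kernel-style sketch of how a notifier handler might consume the new netdev_notifier_info_ext; the embedded info member must stay first so container_of() works. The handler name is illustrative and this is not code from the patch, only an assumed usage pattern:

    static int my_netdev_event(struct notifier_block *nb,
                               unsigned long event, void *ptr)
    {
            struct netdev_notifier_info *info = ptr;
            struct netdev_notifier_info_ext *info_ext;

            if (event == NETDEV_CHANGEMTU) {
                    info_ext = container_of(info,
                                            struct netdev_notifier_info_ext,
                                            info);
                    /* info_ext->ext.mtu carries the extra MTU payload */
            }
            return NOTIFY_DONE;
    }
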
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 07efffd0c759..bbe99d2b28b4 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -215,6 +215,8 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
break;
case NFPROTO_ARP:
#ifdef CONFIG_NETFILTER_FAMILY_ARP
+ if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp)))
+ break;
hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
#endif
break;
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 67662d01130a..3ef82d3a78db 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -49,8 +49,9 @@ struct netpoll_info {
};
#ifdef CONFIG_NETPOLL
-extern void netpoll_poll_disable(struct net_device *dev);
-extern void netpoll_poll_enable(struct net_device *dev);
+void netpoll_poll_dev(struct net_device *dev);
+void netpoll_poll_disable(struct net_device *dev);
+void netpoll_poll_enable(struct net_device *dev);
#else
static inline void netpoll_poll_disable(struct net_device *dev) { return; }
static inline void netpoll_poll_enable(struct net_device *dev) { return; }
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 68e91ef5494c..818dbe9331be 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -1241,6 +1241,7 @@ enum {
NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
NVME_SC_ANA_INACCESSIBLE = 0x302,
NVME_SC_ANA_TRANSITION = 0x303,
+ NVME_SC_HOST_PATH_ERROR = 0x370,
NVME_SC_DNR = 0x4000,
};
diff --git a/include/linux/pci.h b/include/linux/pci.h
index e72ca8dd6241..6925828f9f25 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1235,6 +1235,9 @@ void pci_bus_remove_resources(struct pci_bus *bus);
int devm_request_pci_bus_resources(struct device *dev,
struct list_head *resources);
+/* Temporary until a new, working PCI SBR API is in place */
+int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
+
#define pci_bus_for_each_resource(bus, res, i) \
for (i = 0; \
(res = pci_bus_resource_n(bus, i)) || i < PCI_BRIDGE_RESOURCE_NUM; \
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 009cdf3d65b6..b297cd1cd4f1 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -108,6 +108,7 @@ void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill);
+void percpu_ref_resurrect(struct percpu_ref *ref);
void percpu_ref_reinit(struct percpu_ref *ref);
/**
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 10f92e1d8e7b..bf309ff6f244 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -99,6 +99,7 @@ struct arm_pmu {
void (*stop)(struct arm_pmu *);
void (*reset)(void *);
int (*map_event)(struct perf_event *event);
+ int (*filter_match)(struct perf_event *event);
int num_events;
bool secure_access; /* 32-bit ARM only */
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 3468703d663a..a459a5e973a7 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -48,9 +48,9 @@ struct regulator;
* DISABLE_IN_SUSPEND - turn off regulator in suspend states
* ENABLE_IN_SUSPEND - keep regulator on in suspend states
*/
-#define DO_NOTHING_IN_SUSPEND (-1)
-#define DISABLE_IN_SUSPEND 0
-#define ENABLE_IN_SUSPEND 1
+#define DO_NOTHING_IN_SUSPEND 0
+#define DISABLE_IN_SUSPEND 1
+#define ENABLE_IN_SUSPEND 2
/* Regulator active discharge flags */
enum regulator_active_discharge {
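
The renumbering above makes DO_NOTHING_IN_SUSPEND the zero value, so a suspend-state description that was never filled in (kernel structs are routinely zero-initialized) now defaults to leaving the regulator untouched rather than silently meaning "disable". A trivial standalone illustration:

    #include <stdio.h>

    #define DO_NOTHING_IN_SUSPEND 0
    #define DISABLE_IN_SUSPEND    1
    #define ENABLE_IN_SUSPEND     2

    struct suspend_state { int enabled; };

    int main(void)
    {
            struct suspend_state s = {0};   /* as from kzalloc() */

            if (s.enabled == DO_NOTHING_IN_SUSPEND)
                    puts("unconfigured state defaults to no action");
            return 0;
    }
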
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index c0e795d95477..1c89611e0e06 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -36,6 +36,7 @@ enum {
SCIx_SH4_SCIF_FIFODATA_REGTYPE,
SCIx_SH7705_SCIF_REGTYPE,
SCIx_HSCIF_REGTYPE,
+ SCIx_RZ_SCIFA_REGTYPE,
SCIx_NR_REGTYPES,
};
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index b2bd4b4127c4..69ee30456864 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -81,8 +81,10 @@ enum spi_mem_data_dir {
* @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
* @data.buswidth: number of IO lanes used to send/receive the data
* @data.dir: direction of the transfer
- * @data.buf.in: input buffer
- * @data.buf.out: output buffer
+ * @data.nbytes: number of data bytes to send/receive. Can be zero if the
+ * operation does not involve transferring data
+ * @data.buf.in: input buffer (must be DMA-able)
+ * @data.buf.out: output buffer (must be DMA-able)
*/
struct spi_mem_op {
struct {
@@ -105,7 +107,6 @@ struct spi_mem_op {
u8 buswidth;
enum spi_mem_data_dir dir;
unsigned int nbytes;
- /* buf.{in,out} must be DMA-able. */
union {
void *in;
const void *out;
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index c43e9a01b892..7ddfc65586b0 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -30,6 +30,7 @@
#define MTL_MAX_RX_QUEUES 8
#define MTL_MAX_TX_QUEUES 8
+#define STMMAC_CH_MAX 8
#define STMMAC_RX_COE_NONE 0
#define STMMAC_RX_COE_TYPE1 1
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 5a28ac9284f0..3f529ad9a9d2 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -251,6 +251,7 @@ static inline bool idle_should_enter_s2idle(void)
return unlikely(s2idle_state == S2IDLE_STATE_ENTER);
}
+extern bool pm_suspend_via_s2idle(void);
extern void __init pm_states_init(void);
extern void s2idle_set_ops(const struct platform_s2idle_ops *ops);
extern void s2idle_wake(void);
@@ -282,6 +283,7 @@ static inline void pm_set_suspend_via_firmware(void) {}
static inline void pm_set_resume_via_firmware(void) {}
static inline bool pm_suspend_via_firmware(void) { return false; }
static inline bool pm_resume_via_firmware(void) { return false; }
+static inline bool pm_suspend_via_s2idle(void) { return false; }
static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 5d738804e3d6..a5a3cfc3c2fa 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -258,8 +258,8 @@ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
extern int persistent_clock_is_local;
extern void read_persistent_clock64(struct timespec64 *ts);
-void read_persistent_clock_and_boot_offset(struct timespec64 *wall_clock,
- struct timespec64 *boot_offset);
+void read_persistent_wall_and_boot_offset(struct timespec64 *wall_clock,
+ struct timespec64 *boot_offset);
extern int update_persistent_clock64(struct timespec64 now);
/*
diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h
index 22c5a46e9693..49ba9cde7e4b 100644
--- a/include/linux/tracepoint-defs.h
+++ b/include/linux/tracepoint-defs.h
@@ -35,6 +35,12 @@ struct tracepoint {
struct tracepoint_func __rcu *funcs;
};
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+typedef const int tracepoint_ptr_t;
+#else
+typedef struct tracepoint * const tracepoint_ptr_t;
+#endif
+
struct bpf_raw_event_map {
struct tracepoint *tp;
void *bpf_func;
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 041f7e56a289..538ba1a58f5b 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -99,6 +99,29 @@ extern void syscall_unregfunc(void);
#define TRACE_DEFINE_ENUM(x)
#define TRACE_DEFINE_SIZEOF(x)
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
+{
+ return offset_to_ptr(p);
+}
+
+#define __TRACEPOINT_ENTRY(name) \
+ asm(" .section \"__tracepoints_ptrs\", \"a\" \n" \
+ " .balign 4 \n" \
+ " .long __tracepoint_" #name " - . \n" \
+ " .previous \n")
+#else
+static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
+{
+ return *p;
+}
+
+#define __TRACEPOINT_ENTRY(name) \
+ static tracepoint_ptr_t __tracepoint_ptr_##name __used \
+ __attribute__((section("__tracepoints_ptrs"))) = \
+ &__tracepoint_##name
+#endif
+
#endif /* _LINUX_TRACEPOINT_H */
/*
@@ -253,19 +276,6 @@ extern void syscall_unregfunc(void);
return static_key_false(&__tracepoint_##name.key); \
}
-#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
-#define __TRACEPOINT_ENTRY(name) \
- asm(" .section \"__tracepoints_ptrs\", \"a\" \n" \
- " .balign 4 \n" \
- " .long __tracepoint_" #name " - . \n" \
- " .previous \n")
-#else
-#define __TRACEPOINT_ENTRY(name) \
- static struct tracepoint * const __tracepoint_ptr_##name __used \
- __attribute__((section("__tracepoints_ptrs"))) = \
- &__tracepoint_##name
-#endif
-
/*
* We have no guarantee that gcc and the linker won't up-align the tracepoint
* structures, so we create an array of pointers that will be used for iteration
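
Under CONFIG_HAVE_ARCH_PREL32_RELOCATIONS the tracepoint table stores 32-bit self-relative offsets (".long __tracepoint_x - .") instead of full pointers, which halves the table on 64-bit and avoids load-time relocations; tracepoint_ptr_deref() recovers the pointer via offset_to_ptr(). A runnable userspace model of the scheme (a sketch, not the kernel implementation):

    #include <stdint.h>
    #include <stdio.h>

    static int target = 42;     /* stands in for a struct tracepoint */
    static int32_t slot;        /* stands in for a __tracepoints_ptrs entry */

    /* offset_to_ptr()-style recovery: slot address plus stored offset */
    static void *offset_to_ptr(const int32_t *off)
    {
            return (void *)((uintptr_t)off + *off);
    }

    int main(void)
    {
            /* what ".long __tracepoint_x - ." records at link time */
            slot = (int32_t)((intptr_t)&target - (intptr_t)&slot);

            printf("%d\n", *(int *)offset_to_ptr(&slot));   /* 42 */
            return 0;
    }
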
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 409c845d4cd3..422b1c01ee0d 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -172,7 +172,7 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
static __always_inline __must_check
size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
{
- if (unlikely(!check_copy_size(addr, bytes, false)))
+ if (unlikely(!check_copy_size(addr, bytes, true)))
return 0;
else
return _copy_to_iter_mcsafe(addr, bytes, i);
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index a34539b7f750..7e6ac0114d55 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -133,15 +133,18 @@ struct vga_switcheroo_handler {
* @can_switch: check if the device is in a position to switch now.
* Mandatory. The client should return false if a user space process
* has one of its device files open
+ * @gpu_bound: notify the audio client of the client id once the GPU is bound.
*
* Client callbacks. A client can be either a GPU or an audio device on a GPU.
* The @set_gpu_state and @can_switch methods are mandatory, @reprobe may be
* set to NULL. For audio clients, the @reprobe member is bogus.
+ * Conversely, @gpu_bound is only for audio clients and is not used for GPU clients.
*/
struct vga_switcheroo_client_ops {
void (*set_gpu_state)(struct pci_dev *dev, enum vga_switcheroo_state);
void (*reprobe)(struct pci_dev *dev);
bool (*can_switch)(struct pci_dev *dev);
+ void (*gpu_bound)(struct pci_dev *dev, enum vga_switcheroo_client_id);
};
#if defined(CONFIG_VGA_SWITCHEROO)
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 9397628a1967..cb462f9ab7dd 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -5,6 +5,24 @@
#include <linux/if_vlan.h>
#include <uapi/linux/virtio_net.h>
+static inline int virtio_net_hdr_set_proto(struct sk_buff *skb,
+ const struct virtio_net_hdr *hdr)
+{
+ switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+ case VIRTIO_NET_HDR_GSO_TCPV4:
+ case VIRTIO_NET_HDR_GSO_UDP:
+ skb->protocol = cpu_to_be16(ETH_P_IP);
+ break;
+ case VIRTIO_NET_HDR_GSO_TCPV6:
+ skb->protocol = cpu_to_be16(ETH_P_IPV6);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
const struct virtio_net_hdr *hdr,
bool little_endian)
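
The new virtio_net_hdr_set_proto() infers the L3 ethertype from the GSO type when the header did not supply a protocol. A runnable userspace model of the mapping with the relevant constant values inlined (the kernel uses the VIRTIO_NET_HDR_GSO_* and ETH_P_* definitions):

    #include <stdint.h>
    #include <stdio.h>

    #define GSO_TCPV4  1        /* VIRTIO_NET_HDR_GSO_TCPV4 */
    #define GSO_UDP    3        /* VIRTIO_NET_HDR_GSO_UDP   */
    #define GSO_TCPV6  4        /* VIRTIO_NET_HDR_GSO_TCPV6 */
    #define GSO_ECN    0x80     /* VIRTIO_NET_HDR_GSO_ECN   */
    #define ETH_P_IP   0x0800
    #define ETH_P_IPV6 0x86DD

    static int gso_to_proto(uint8_t gso_type, uint16_t *proto)
    {
            switch (gso_type & ~GSO_ECN) {  /* ECN bit is orthogonal */
            case GSO_TCPV4:
            case GSO_UDP:
                    *proto = ETH_P_IP;
                    return 0;
            case GSO_TCPV6:
                    *proto = ETH_P_IPV6;
                    return 0;
            default:
                    return -1;              /* -EINVAL in the kernel */
            }
    }

    int main(void)
    {
            uint16_t proto;

            if (!gso_to_proto(GSO_TCPV4 | GSO_ECN, &proto))
                    printf("%#x\n", proto); /* 0x800 */
            return 0;
    }
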
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 5c7f010676a7..47a3441cf4c4 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -105,7 +105,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifdef CONFIG_DEBUG_VM_VMACACHE
VMACACHE_FIND_CALLS,
VMACACHE_FIND_HITS,
- VMACACHE_FULL_FLUSHES,
#endif
#ifdef CONFIG_SWAP
SWAP_RA,
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
index 3e9a963edd6a..6fce268a4588 100644
--- a/include/linux/vmacache.h
+++ b/include/linux/vmacache.h
@@ -10,7 +10,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
}
-extern void vmacache_flush_all(struct mm_struct *mm);
extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
unsigned long addr);
@@ -24,10 +23,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
static inline void vmacache_invalidate(struct mm_struct *mm)
{
mm->vmacache_seqnum++;
-
- /* deal with overflows */
- if (unlikely(mm->vmacache_seqnum == 0))
- vmacache_flush_all(mm);
}
#endif /* __LINUX_VMACACHE_H */
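
Widening the seqnum to u64 (in mm_types.h and mm_types_task.h above) is what lets the overflow path, vmacache_flush_all() and the VMACACHE_FULL_FLUSHES counter, disappear: a 64-bit counter cannot realistically wrap. A back-of-envelope check:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            double per_sec = 1e9;   /* a very generous bump rate */
            double secs = (double)UINT64_MAX / per_sec;

            printf("u64 wrap at 1e9/s: ~%.0f years\n",
                   secs / (365.25 * 24 * 3600));   /* ~584 years */
            return 0;
    }
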
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index fdfd04e348f6..738a0c24874f 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -246,7 +246,8 @@ static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
*
* @bio is a part of the writeback in progress controlled by @wbc. Perform
* writeback specific initialization. This is used to apply the cgroup
- * writeback context.
+ * writeback context. Must be called after the bio has been associated with
+ * a device.
*/
static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
{
@@ -257,7 +258,7 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
* regular writeback instead of writing things out itself.
*/
if (wbc->wb)
- bio_associate_blkcg(bio, wbc->wb->blkcg_css);
+ bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css);
}
#else /* CONFIG_CGROUP_WRITEBACK */
diff --git a/include/media/v4l2-fh.h b/include/media/v4l2-fh.h
index ea73fef8bdc0..8586cfb49828 100644
--- a/include/media/v4l2-fh.h
+++ b/include/media/v4l2-fh.h
@@ -38,10 +38,13 @@ struct v4l2_ctrl_handler;
* @prio: priority of the file handler, as defined by &enum v4l2_priority
*
* @wait: event's wait queue
+ * @subscribe_lock: serialise changes to the subscribed list; guarantees that
+ * the add and del event callbacks are called in order
* @subscribed: list of subscribed events
* @available: list of events waiting to be dequeued
* @navailable: number of available events at @available list
* @sequence: event sequence number
+ *
* @m2m_ctx: pointer to &struct v4l2_m2m_ctx
*/
struct v4l2_fh {
@@ -52,6 +55,7 @@ struct v4l2_fh {
/* Events */
wait_queue_head_t wait;
+ struct mutex subscribe_lock;
struct list_head subscribed;
struct list_head available;
unsigned int navailable;
diff --git a/include/net/bonding.h b/include/net/bonding.h
index a2d058170ea3..b46d68acf701 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -139,12 +139,6 @@ struct bond_parm_tbl {
int mode;
};
-struct netdev_notify_work {
- struct delayed_work work;
- struct net_device *dev;
- struct netdev_bonding_info bonding_info;
-};
-
struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct bonding *bond; /* our master */
@@ -172,6 +166,7 @@ struct slave {
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *np;
#endif
+ struct delayed_work notify_work;
struct kobject kobj;
struct rtnl_link_stats64 slave_stats;
};
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 8ebabc9873d1..4de121e24ce5 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4852,8 +4852,6 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
*
* @alpha2: the ISO/IEC 3166 alpha2 wmm rule to be queried.
* @freq: the freqency(in MHz) to be queried.
- * @ptr: pointer where the regdb wmm data is to be stored (or %NULL if
- * irrelevant). This can be used later for deduplication.
* @rule: pointer to store the wmm rule from the regulatory db.
*
* Self-managed wireless drivers can use this function to query
diff --git a/include/net/devlink.h b/include/net/devlink.h
index b9b89d6604d4..99efc156a309 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -298,7 +298,7 @@ struct devlink_resource {
#define DEVLINK_RESOURCE_ID_PARENT_TOP 0
-#define DEVLINK_PARAM_MAX_STRING_VALUE 32
+#define __DEVLINK_PARAM_MAX_STRING_VALUE 32
enum devlink_param_type {
DEVLINK_PARAM_TYPE_U8,
DEVLINK_PARAM_TYPE_U16,
@@ -311,7 +311,7 @@ union devlink_param_value {
u8 vu8;
u16 vu16;
u32 vu32;
- const char *vstr;
+ char vstr[__DEVLINK_PARAM_MAX_STRING_VALUE];
bool vbool;
};
@@ -553,6 +553,8 @@ int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
union devlink_param_value init_val);
void devlink_param_value_changed(struct devlink *devlink, u32 param_id);
+void devlink_param_value_str_fill(union devlink_param_value *dst_val,
+ const char *src);
struct devlink_region *devlink_region_create(struct devlink *devlink,
const char *region_name,
u32 region_max_snapshots,
@@ -789,6 +791,12 @@ devlink_param_value_changed(struct devlink *devlink, u32 param_id)
{
}
+static inline void
+devlink_param_value_str_fill(union devlink_param_value *dst_val,
+ const char *src)
+{
+}
+
static inline struct devlink_region *
devlink_region_create(struct devlink *devlink,
const char *region_name,
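
A userspace model of the devlink string-parameter change above (a sketch, not the devlink API itself): embedding a bounded array in the union, instead of a const char *, lets values be copied by plain assignment with no lifetime issues, and the fill helper gives a single place to enforce the length cap.

    #include <stdio.h>

    #define __DEVLINK_PARAM_MAX_STRING_VALUE 32

    union param_value {
            unsigned char vu8;
            unsigned int vu32;
            char vstr[__DEVLINK_PARAM_MAX_STRING_VALUE];
    };

    static void param_value_str_fill(union param_value *dst, const char *src)
    {
            /* strlcpy-style: always NUL-terminates, never overruns */
            snprintf(dst->vstr, sizeof(dst->vstr), "%s", src);
    }

    int main(void)
    {
            union param_value v;

            param_value_str_fill(&v, "example-value");   /* illustrative */
            printf("%s\n", v.vstr);
            return 0;
    }
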
diff --git a/include/net/dst.h b/include/net/dst.h
index 7f735e76ca73..6cf0870414c7 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -527,4 +527,14 @@ static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
dst->ops->update_pmtu(dst, NULL, skb, mtu);
}
+static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
+ struct dst_entry *encap_dst,
+ int headroom)
+{
+ u32 encap_mtu = dst_mtu(encap_dst);
+
+ if (skb->len > encap_mtu - headroom)
+ skb_dst_update_pmtu(skb, encap_mtu - headroom);
+}
+
#endif /* _NET_DST_H */
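
skb_tunnel_check_pmtu() compares the packet length against the underlay route's MTU minus the encapsulation headroom and, if the packet does not fit, propagates the reduced MTU to the inner path. A standalone illustration with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
            int encap_mtu = 1500;   /* MTU of the underlay route      */
            int headroom  = 50;     /* e.g. outer IP + tunnel headers */
            int skb_len   = 1480;   /* inner packet length            */

            if (skb_len > encap_mtu - headroom)
                    printf("update inner PMTU to %d\n", encap_mtu - headroom);
            return 0;
    }
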
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index e03b93360f33..a80fd0ac4563 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -130,12 +130,6 @@ static inline int inet_request_bound_dev_if(const struct sock *sk,
return sk->sk_bound_dev_if;
}
-static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
-{
- return rcu_dereference_check(ireq->ireq_opt,
- refcount_read(&ireq->req.rsk_refcnt) > 0);
-}
-
struct inet_cork {
unsigned int flags;
__be32 addr;
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 3d4930528db0..2d31e22babd8 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -159,6 +159,10 @@ struct fib6_info {
struct rt6_info * __percpu *rt6i_pcpu;
struct rt6_exception_bucket __rcu *rt6i_exception_bucket;
+#ifdef CONFIG_IPV6_ROUTER_PREF
+ unsigned long last_probe;
+#endif
+
u32 fib6_metric;
u8 fib6_protocol;
u8 fib6_type;
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 69c91d1934c1..c9b7b136939d 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -394,6 +394,7 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev);
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
int fib_sync_down_addr(struct net_device *dev, __be32 local);
int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
+void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index d5f62cc6c2ae..3394d75e1c80 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -30,7 +30,7 @@ struct nf_conn_timeout {
};
static inline unsigned int *
-nf_ct_timeout_data(struct nf_conn_timeout *t)
+nf_ct_timeout_data(const struct nf_conn_timeout *t)
{
struct nf_ct_timeout *timeout;
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 0c154f98e987..39e1d875d507 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -153,7 +153,7 @@
* nla_find() find attribute in stream of attributes
* nla_find_nested() find attribute in nested attributes
* nla_parse() parse and validate stream of attrs
- * nla_parse_nested() parse nested attribuets
+ * nla_parse_nested() parse nested attributes
* nla_for_each_attr() loop over all attributes
* nla_for_each_nested() loop over the nested attributes
*=========================================================================
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 316694dafa5b..008f466d1da7 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -87,7 +87,7 @@ struct nfc_hci_pipe {
* According to specification 102 622 chapter 4.4 Pipes,
* the pipe identifier is 7 bits long.
*/
-#define NFC_HCI_MAX_PIPES 127
+#define NFC_HCI_MAX_PIPES 128
struct nfc_hci_init_data {
u8 gate_count;
struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 5ef1bad81ef5..9e3d32746430 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -347,7 +347,7 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
__u16 size;
size = ntohs(chunk->chunk_hdr->length);
- size -= sctp_datahdr_len(&chunk->asoc->stream);
+ size -= sctp_datachk_len(&chunk->asoc->stream);
return size;
}
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 28a7c8e44636..a11f93790476 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -876,6 +876,8 @@ struct sctp_transport {
unsigned long sackdelay;
__u32 sackfreq;
+ atomic_t mtu_info;
+
/* When was the last time that we heard from this transport? We use
* this to pick new active and retran paths.
*/
diff --git a/include/net/tls.h b/include/net/tls.h
index d5c683e8bb22..0a769cf2f5f3 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -171,15 +171,14 @@ struct cipher_context {
char *rec_seq;
};
+union tls_crypto_context {
+ struct tls_crypto_info info;
+ struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
+};
+
struct tls_context {
- union {
- struct tls_crypto_info crypto_send;
- struct tls12_crypto_info_aes_gcm_128 crypto_send_aes_gcm_128;
- };
- union {
- struct tls_crypto_info crypto_recv;
- struct tls12_crypto_info_aes_gcm_128 crypto_recv_aes_gcm_128;
- };
+ union tls_crypto_context crypto_send;
+ union tls_crypto_context crypto_recv;
struct list_head list;
struct net_device *netdev;
@@ -367,8 +366,8 @@ static inline void tls_fill_prepend(struct tls_context *ctx,
* size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
*/
buf[0] = record_type;
- buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.version);
- buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.version);
+ buf[1] = TLS_VERSION_MINOR(ctx->crypto_send.info.version);
+ buf[2] = TLS_VERSION_MAJOR(ctx->crypto_send.info.version);
/* we can use IV for nonce explicit according to spec */
buf[3] = pkt_len >> 8;
buf[4] = pkt_len & 0xFF;
diff --git a/include/soc/fsl/bman.h b/include/soc/fsl/bman.h
index eaaf56df4086..5b99cb2ea5ef 100644
--- a/include/soc/fsl/bman.h
+++ b/include/soc/fsl/bman.h
@@ -126,4 +126,12 @@ int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num);
*/
int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num);
+/**
+ * bman_is_probed - Check if bman is probed
+ *
+ * Returns 1 if the bman driver probed successfully, -1 if the bman driver
+ * failed to probe, or 0 if the bman driver has not been probed yet.
+ */
+int bman_is_probed(void);
+
#endif /* __FSL_BMAN_H */
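
A kernel-style sketch (not part of this patch) of how a dependent driver would consume the tri-state bman_is_probed() result, deferring its own probe until bman has settled one way or the other; everything except bman_is_probed() is illustrative:

    static int my_probe(struct platform_device *pdev)
    {
            int ret = bman_is_probed();

            if (!ret)
                    return -EPROBE_DEFER;   /* bman not probed yet: retry */
            if (ret < 0)
                    return -ENODEV;         /* bman probe failed: give up */

            /* ... bman is ready, proceed with our own setup ... */
            return 0;
    }
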
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index d4dfefdee6c1..597783b8a3a0 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -1186,4 +1186,12 @@ int qman_alloc_cgrid_range(u32 *result, u32 count);
*/
int qman_release_cgrid(u32 id);
+/**
+ * qman_is_probed - Check if qman is probed
+ *
+ * Returns 1 if the qman driver probed successfully, -1 if the qman driver
+ * failed to probe, or 0 if the qman driver has not been probed yet.
+ */
+int qman_is_probed(void);
+
#endif /* __FSL_QMAN_H */
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 6f1e1f3b3063..cd1773d0e08f 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -412,6 +412,7 @@ void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus);
void snd_hdac_bus_stop_cmd_io(struct hdac_bus *bus);
void snd_hdac_bus_enter_link_reset(struct hdac_bus *bus);
void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus);
+int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset);
void snd_hdac_bus_update_rirb(struct hdac_bus *bus);
int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index af9ef16cc34d..fdaaafdc7a00 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -407,6 +407,7 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card);
void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
+ struct snd_soc_pcm_runtime *rtd,
const struct snd_soc_pcm_stream *params,
unsigned int num_params,
struct snd_soc_dapm_widget *source,
diff --git a/include/trace/events/kyber.h b/include/trace/events/kyber.h
new file mode 100644
index 000000000000..a9834c37ac40
--- /dev/null
+++ b/include/trace/events/kyber.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kyber
+
+#if !defined(_TRACE_KYBER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KYBER_H
+
+#include <linux/blkdev.h>
+#include <linux/tracepoint.h>
+
+#define DOMAIN_LEN 16
+#define LATENCY_TYPE_LEN 8
+
+TRACE_EVENT(kyber_latency,
+
+ TP_PROTO(struct request_queue *q, const char *domain, const char *type,
+ unsigned int percentile, unsigned int numerator,
+ unsigned int denominator, unsigned int samples),
+
+ TP_ARGS(q, domain, type, percentile, numerator, denominator, samples),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __array( char, domain, DOMAIN_LEN )
+ __array( char, type, LATENCY_TYPE_LEN )
+ __field( u8, percentile )
+ __field( u8, numerator )
+ __field( u8, denominator )
+ __field( unsigned int, samples )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
+ strlcpy(__entry->domain, domain, DOMAIN_LEN);
+ strlcpy(__entry->type, type, LATENCY_TYPE_LEN);
+ __entry->percentile = percentile;
+ __entry->numerator = numerator;
+ __entry->denominator = denominator;
+ __entry->samples = samples;
+ ),
+
+ TP_printk("%d,%d %s %s p%u %u/%u samples=%u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->domain,
+ __entry->type, __entry->percentile, __entry->numerator,
+ __entry->denominator, __entry->samples)
+);
+
+TRACE_EVENT(kyber_adjust,
+
+ TP_PROTO(struct request_queue *q, const char *domain,
+ unsigned int depth),
+
+ TP_ARGS(q, domain, depth),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __array( char, domain, DOMAIN_LEN )
+ __field( unsigned int, depth )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
+ strlcpy(__entry->domain, domain, DOMAIN_LEN);
+ __entry->depth = depth;
+ ),
+
+ TP_printk("%d,%d %s %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->domain,
+ __entry->depth)
+);
+
+TRACE_EVENT(kyber_throttled,
+
+ TP_PROTO(struct request_queue *q, const char *domain),
+
+ TP_ARGS(q, domain),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __array( char, domain, DOMAIN_LEN )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
+ strlcpy(__entry->domain, domain, DOMAIN_LEN);
+ ),
+
+ TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->domain)
+);
+
+#define _TRACE_KYBER_H
+#endif /* _TRACE_KYBER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/migrate.h b/include/trace/events/migrate.h
index 711372845945..705b33d1e395 100644
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -70,33 +70,6 @@ TRACE_EVENT(mm_migrate_pages,
__print_symbolic(__entry->mode, MIGRATE_MODE),
__print_symbolic(__entry->reason, MIGRATE_REASON))
);
-
-TRACE_EVENT(mm_numa_migrate_ratelimit,
-
- TP_PROTO(struct task_struct *p, int dst_nid, unsigned long nr_pages),
-
- TP_ARGS(p, dst_nid, nr_pages),
-
- TP_STRUCT__entry(
- __array( char, comm, TASK_COMM_LEN)
- __field( pid_t, pid)
- __field( int, dst_nid)
- __field( unsigned long, nr_pages)
- ),
-
- TP_fast_assign(
- memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
- __entry->pid = p->pid;
- __entry->dst_nid = dst_nid;
- __entry->nr_pages = nr_pages;
- ),
-
- TP_printk("comm=%s pid=%d dst_nid=%d nr_pages=%lu",
- __entry->comm,
- __entry->pid,
- __entry->dst_nid,
- __entry->nr_pages)
-);
#endif /* _TRACE_MIGRATE_H */
/* This part must be outside protection */
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 196587b8f204..573d5b901fb1 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -56,7 +56,6 @@ enum rxrpc_peer_trace {
rxrpc_peer_new,
rxrpc_peer_processing,
rxrpc_peer_put,
- rxrpc_peer_queued_error,
};
enum rxrpc_conn_trace {
@@ -257,8 +256,7 @@ enum rxrpc_tx_point {
EM(rxrpc_peer_got, "GOT") \
EM(rxrpc_peer_new, "NEW") \
EM(rxrpc_peer_processing, "PRO") \
- EM(rxrpc_peer_put, "PUT") \
- E_(rxrpc_peer_queued_error, "QER")
+ E_(rxrpc_peer_put, "PUT")
#define rxrpc_conn_traces \
EM(rxrpc_conn_got, "GOT") \
@@ -933,6 +931,7 @@ TRACE_EVENT(rxrpc_tx_packet,
TP_fast_assign(
__entry->call = call_id;
memcpy(&__entry->whdr, whdr, sizeof(__entry->whdr));
+ __entry->where = where;
),
TP_printk("c=%08x %08x:%08x:%08x:%04x %08x %08x %02x %02x %s %s",
diff --git a/include/uapi/asm-generic/hugetlb_encode.h b/include/uapi/asm-generic/hugetlb_encode.h
index e4732d3c2998..b0f8e87235bd 100644
--- a/include/uapi/asm-generic/hugetlb_encode.h
+++ b/include/uapi/asm-generic/hugetlb_encode.h
@@ -26,7 +26,9 @@
#define HUGETLB_FLAG_ENCODE_2MB (21 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_8MB (23 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_16MB (24 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_32MB (25 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_256MB (28 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_512MB (29 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_1GB (30 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_2GB (31 << HUGETLB_FLAG_ENCODE_SHIFT)
#define HUGETLB_FLAG_ENCODE_16GB (34 << HUGETLB_FLAG_ENCODE_SHIFT)
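
The new 32MB and 512MB entries follow the existing encoding: the log2 of the page size is placed in the high bits of the flags word (HUGETLB_FLAG_ENCODE_SHIFT is 26 in this header), so 25 encodes 2^25 = 32MB and 29 encodes 2^29 = 512MB. A quick standalone check:

    #include <stdio.h>

    #define HUGETLB_FLAG_ENCODE_SHIFT 26

    static unsigned long long encode(unsigned int log2_size)
    {
            return (unsigned long long)log2_size << HUGETLB_FLAG_ENCODE_SHIFT;
    }

    int main(void)
    {
            printf("32MB  -> %#llx\n", encode(25)); /* 2^25 bytes */
            printf("512MB -> %#llx\n", encode(29)); /* 2^29 bytes */
            return 0;
    }
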
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
index 910cc4334b21..7b8c9e19bad1 100644
--- a/include/uapi/linux/keyctl.h
+++ b/include/uapi/linux/keyctl.h
@@ -65,7 +65,7 @@
/* keyctl structures */
struct keyctl_dh_params {
- __s32 dh_private;
+ __s32 private;
__s32 prime;
__s32 base;
};
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 07548de5c988..251be353f950 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -952,6 +952,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_S390_HPAGE_1M 156
#define KVM_CAP_NESTED_STATE 157
#define KVM_CAP_ARM_INJECT_SERROR_ESR 158
+#define KVM_CAP_MSR_PLATFORM_INFO 159
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/uapi/linux/memfd.h b/include/uapi/linux/memfd.h
index 015a4c0bbb47..7a8a26751c23 100644
--- a/include/uapi/linux/memfd.h
+++ b/include/uapi/linux/memfd.h
@@ -25,7 +25,9 @@
#define MFD_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB
#define MFD_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB
#define MFD_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB
+#define MFD_HUGE_32MB HUGETLB_FLAG_ENCODE_32MB
#define MFD_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define MFD_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB
#define MFD_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB
#define MFD_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
#define MFD_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
diff --git a/include/uapi/linux/mman.h b/include/uapi/linux/mman.h
index bfd5938fede6..d0f515d53299 100644
--- a/include/uapi/linux/mman.h
+++ b/include/uapi/linux/mman.h
@@ -28,7 +28,9 @@
#define MAP_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB
#define MAP_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB
#define MAP_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB
+#define MAP_HUGE_32MB HUGETLB_FLAG_ENCODE_32MB
#define MAP_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define MAP_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB
#define MAP_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB
#define MAP_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
#define MAP_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index eeb787b1c53c..f35eb72739c0 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -144,7 +144,7 @@ enum perf_event_sample_format {
PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */
- __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63,
+ __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */
};
/*
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index b479db5c71d9..34dd3d497f2c 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -301,6 +301,7 @@ enum sctp_sinfo_flags {
SCTP_SACK_IMMEDIATELY = (1 << 3), /* SACK should be sent without delay. */
/* 2 bits here have been used by SCTP_PR_SCTP_MASK */
SCTP_SENDALL = (1 << 6),
+ SCTP_PR_SCTP_ALL = (1 << 7),
SCTP_NOTIFICATION = MSG_NOTIFICATION, /* Next message is not user msg but notification. */
SCTP_EOF = MSG_FIN, /* Initiate graceful shutdown process. */
};
diff --git a/include/uapi/linux/shm.h b/include/uapi/linux/shm.h
index dde1344f047c..6507ad0afc81 100644
--- a/include/uapi/linux/shm.h
+++ b/include/uapi/linux/shm.h
@@ -65,7 +65,9 @@ struct shmid_ds {
#define SHM_HUGE_2MB HUGETLB_FLAG_ENCODE_2MB
#define SHM_HUGE_8MB HUGETLB_FLAG_ENCODE_8MB
#define SHM_HUGE_16MB HUGETLB_FLAG_ENCODE_16MB
+#define SHM_HUGE_32MB HUGETLB_FLAG_ENCODE_32MB
#define SHM_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define SHM_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB
#define SHM_HUGE_1GB HUGETLB_FLAG_ENCODE_1GB
#define SHM_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB
#define SHM_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB
diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h
index ac9e8c96d9bd..8cb3a6fef553 100644
--- a/include/uapi/linux/smc_diag.h
+++ b/include/uapi/linux/smc_diag.h
@@ -18,14 +18,17 @@ struct smc_diag_req {
* on the internal clcsock, and more SMC-related socket data
*/
struct smc_diag_msg {
- __u8 diag_family;
- __u8 diag_state;
- __u8 diag_mode;
- __u8 diag_shutdown;
+ __u8 diag_family;
+ __u8 diag_state;
+ union {
+ __u8 diag_mode;
+ __u8 diag_fallback; /* the old name of the field */
+ };
+ __u8 diag_shutdown;
struct inet_diag_sockid id;
- __u32 diag_uid;
- __u64 diag_inode;
+ __u32 diag_uid;
+ __aligned_u64 diag_inode;
};
/* Mode of a connection */
@@ -99,11 +102,11 @@ struct smc_diag_fallback {
};
struct smcd_diag_dmbinfo { /* SMC-D Socket internals */
- __u32 linkid; /* Link identifier */
- __u64 peer_gid; /* Peer GID */
- __u64 my_gid; /* My GID */
- __u64 token; /* Token of DMB */
- __u64 peer_token; /* Token of remote DMBE */
+ __u32 linkid; /* Link identifier */
+ __aligned_u64 peer_gid; /* Peer GID */
+ __aligned_u64 my_gid; /* My GID */
+ __aligned_u64 token; /* Token of DMB */
+ __aligned_u64 peer_token; /* Token of remote DMBE */
};
#endif /* _UAPI_SMC_DIAG_H_ */
diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h
index 09d00f8c442b..09502de447f5 100644
--- a/include/uapi/linux/udp.h
+++ b/include/uapi/linux/udp.h
@@ -40,5 +40,6 @@ struct udphdr {
#define UDP_ENCAP_L2TPINUDP 3 /* rfc2661 */
#define UDP_ENCAP_GTP0 4 /* GSM TS 09.60 */
#define UDP_ENCAP_GTP1U 5 /* 3GPP TS 29.060 */
+#define UDP_ENCAP_RXRPC 6
#endif /* _UAPI_LINUX_UDP_H */
diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h
index f58cafa42f18..f39352cef382 100644
--- a/include/uapi/sound/skl-tplg-interface.h
+++ b/include/uapi/sound/skl-tplg-interface.h
@@ -10,6 +10,8 @@
#ifndef __HDA_TPLG_INTERFACE_H__
#define __HDA_TPLG_INTERFACE_H__
+#include <linux/types.h>
+
/*
* Default types range from 0~12. type can range from 0 to 0xff
* SST types start higher to avoid any overlap in future
@@ -143,10 +145,10 @@ enum skl_module_param_type {
};
struct skl_dfw_algo_data {
- u32 set_params:2;
- u32 rsvd:30;
- u32 param_id;
- u32 max;
+ __u32 set_params:2;
+ __u32 rsvd:30;
+ __u32 param_id;
+ __u32 max;
char params[0];
} __packed;
@@ -163,68 +165,68 @@ enum skl_tuple_type {
/* v4 configuration data */
struct skl_dfw_v4_module_pin {
- u16 module_id;
- u16 instance_id;
+ __u16 module_id;
+ __u16 instance_id;
} __packed;
struct skl_dfw_v4_module_fmt {
- u32 channels;
- u32 freq;
- u32 bit_depth;
- u32 valid_bit_depth;
- u32 ch_cfg;
- u32 interleaving_style;
- u32 sample_type;
- u32 ch_map;
+ __u32 channels;
+ __u32 freq;
+ __u32 bit_depth;
+ __u32 valid_bit_depth;
+ __u32 ch_cfg;
+ __u32 interleaving_style;
+ __u32 sample_type;
+ __u32 ch_map;
} __packed;
struct skl_dfw_v4_module_caps {
- u32 set_params:2;
- u32 rsvd:30;
- u32 param_id;
- u32 caps_size;
- u32 caps[HDA_SST_CFG_MAX];
+ __u32 set_params:2;
+ __u32 rsvd:30;
+ __u32 param_id;
+ __u32 caps_size;
+ __u32 caps[HDA_SST_CFG_MAX];
} __packed;
struct skl_dfw_v4_pipe {
- u8 pipe_id;
- u8 pipe_priority;
- u16 conn_type:4;
- u16 rsvd:4;
- u16 memory_pages:8;
+ __u8 pipe_id;
+ __u8 pipe_priority;
+ __u16 conn_type:4;
+ __u16 rsvd:4;
+ __u16 memory_pages:8;
} __packed;
struct skl_dfw_v4_module {
char uuid[SKL_UUID_STR_SZ];
- u16 module_id;
- u16 instance_id;
- u32 max_mcps;
- u32 mem_pages;
- u32 obs;
- u32 ibs;
- u32 vbus_id;
-
- u32 max_in_queue:8;
- u32 max_out_queue:8;
- u32 time_slot:8;
- u32 core_id:4;
- u32 rsvd1:4;
-
- u32 module_type:8;
- u32 conn_type:4;
- u32 dev_type:4;
- u32 hw_conn_type:4;
- u32 rsvd2:12;
-
- u32 params_fixup:8;
- u32 converter:8;
- u32 input_pin_type:1;
- u32 output_pin_type:1;
- u32 is_dynamic_in_pin:1;
- u32 is_dynamic_out_pin:1;
- u32 is_loadable:1;
- u32 rsvd3:11;
+ __u16 module_id;
+ __u16 instance_id;
+ __u32 max_mcps;
+ __u32 mem_pages;
+ __u32 obs;
+ __u32 ibs;
+ __u32 vbus_id;
+
+ __u32 max_in_queue:8;
+ __u32 max_out_queue:8;
+ __u32 time_slot:8;
+ __u32 core_id:4;
+ __u32 rsvd1:4;
+
+ __u32 module_type:8;
+ __u32 conn_type:4;
+ __u32 dev_type:4;
+ __u32 hw_conn_type:4;
+ __u32 rsvd2:12;
+
+ __u32 params_fixup:8;
+ __u32 converter:8;
+ __u32 input_pin_type:1;
+ __u32 output_pin_type:1;
+ __u32 is_dynamic_in_pin:1;
+ __u32 is_dynamic_out_pin:1;
+ __u32 is_loadable:1;
+ __u32 rsvd3:11;
struct skl_dfw_v4_pipe pipe;
struct skl_dfw_v4_module_fmt in_fmt[MAX_IN_QUEUE];
diff --git a/include/xen/mem-reservation.h b/include/xen/mem-reservation.h
index 80b52b4945e9..a2ab516fcd2c 100644
--- a/include/xen/mem-reservation.h
+++ b/include/xen/mem-reservation.h
@@ -17,11 +17,12 @@
#include <xen/page.h>
+extern bool xen_scrub_pages;
+
static inline void xenmem_reservation_scrub_page(struct page *page)
{
-#ifdef CONFIG_XEN_SCRUB_PAGES
- clear_highpage(page);
-#endif
+ if (xen_scrub_pages)
+ clear_highpage(page);
}
#ifdef CONFIG_XEN_HAVE_PVMMU
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 1e1d9bd0bd37..d7a2678da77f 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -39,4 +39,8 @@ extern uint32_t xen_start_flags;
#define xen_initial_domain() (0)
#endif /* CONFIG_XEN_DOM0 */
+struct bio_vec;
+bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+ const struct bio_vec *vec2);
+
#endif /* _XEN_XEN_H */
diff --git a/ipc/shm.c b/ipc/shm.c
index 4cd402e4cfeb..1c65fb357395 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -206,7 +206,7 @@ err:
* Callers of shm_lock() must validate the status of the returned ipc
* object pointer and error out as appropriate.
*/
- return (void *)ipcp;
+ return ERR_CAST(ipcp);
}
static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 2590700237c1..138f0302692e 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -1844,7 +1844,7 @@ static int btf_check_all_metas(struct btf_verifier_env *env)
hdr = &btf->hdr;
cur = btf->nohdr_data + hdr->type_off;
- end = btf->nohdr_data + hdr->type_len;
+ end = cur + hdr->type_len;
env->log_type_id = 1;
while (cur < end) {
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index 22ad967d1e5f..830d7f095748 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -129,7 +129,7 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
struct bpf_cgroup_storage *storage;
struct bpf_storage_buffer *new;
- if (flags & BPF_NOEXIST)
+ if (flags != BPF_ANY && flags != BPF_EXIST)
return -EINVAL;
storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
@@ -195,6 +195,9 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
if (attr->key_size != sizeof(struct bpf_cgroup_storage_key))
return ERR_PTR(-EINVAL);
+ if (attr->value_size == 0)
+ return ERR_PTR(-EINVAL);
+
if (attr->value_size > PAGE_SIZE)
return ERR_PTR(-E2BIG);
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 488ef9663c01..0a0f2ec75370 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -132,6 +132,7 @@ struct smap_psock {
struct work_struct gc_work;
struct proto *sk_proto;
+ void (*save_unhash)(struct sock *sk);
void (*save_close)(struct sock *sk, long timeout);
void (*save_data_ready)(struct sock *sk);
void (*save_write_space)(struct sock *sk);
@@ -143,6 +144,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
+static void bpf_tcp_unhash(struct sock *sk);
static void bpf_tcp_close(struct sock *sk, long timeout);
static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
@@ -184,6 +186,7 @@ static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS],
struct proto *base)
{
prot[SOCKMAP_BASE] = *base;
+ prot[SOCKMAP_BASE].unhash = bpf_tcp_unhash;
prot[SOCKMAP_BASE].close = bpf_tcp_close;
prot[SOCKMAP_BASE].recvmsg = bpf_tcp_recvmsg;
prot[SOCKMAP_BASE].stream_memory_read = bpf_tcp_stream_read;
@@ -217,6 +220,7 @@ static int bpf_tcp_init(struct sock *sk)
return -EBUSY;
}
+ psock->save_unhash = sk->sk_prot->unhash;
psock->save_close = sk->sk_prot->close;
psock->sk_proto = sk->sk_prot;
@@ -305,30 +309,12 @@ static struct smap_psock_map_entry *psock_map_pop(struct sock *sk,
return e;
}
-static void bpf_tcp_close(struct sock *sk, long timeout)
+static void bpf_tcp_remove(struct sock *sk, struct smap_psock *psock)
{
- void (*close_fun)(struct sock *sk, long timeout);
struct smap_psock_map_entry *e;
struct sk_msg_buff *md, *mtmp;
- struct smap_psock *psock;
struct sock *osk;
- lock_sock(sk);
- rcu_read_lock();
- psock = smap_psock_sk(sk);
- if (unlikely(!psock)) {
- rcu_read_unlock();
- release_sock(sk);
- return sk->sk_prot->close(sk, timeout);
- }
-
- /* The psock may be destroyed anytime after exiting the RCU critial
- * section so by the time we use close_fun the psock may no longer
- * be valid. However, bpf_tcp_close is called with the sock lock
- * held so the close hook and sk are still valid.
- */
- close_fun = psock->save_close;
-
if (psock->cork) {
free_start_sg(psock->sock, psock->cork, true);
kfree(psock->cork);
@@ -379,6 +365,42 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
kfree(e);
e = psock_map_pop(sk, psock);
}
+}
+
+static void bpf_tcp_unhash(struct sock *sk)
+{
+ void (*unhash_fun)(struct sock *sk);
+ struct smap_psock *psock;
+
+ rcu_read_lock();
+ psock = smap_psock_sk(sk);
+ if (unlikely(!psock)) {
+ rcu_read_unlock();
+ if (sk->sk_prot->unhash)
+ sk->sk_prot->unhash(sk);
+ return;
+ }
+ unhash_fun = psock->save_unhash;
+ bpf_tcp_remove(sk, psock);
+ rcu_read_unlock();
+ unhash_fun(sk);
+}
+
+static void bpf_tcp_close(struct sock *sk, long timeout)
+{
+ void (*close_fun)(struct sock *sk, long timeout);
+ struct smap_psock *psock;
+
+ lock_sock(sk);
+ rcu_read_lock();
+ psock = smap_psock_sk(sk);
+ if (unlikely(!psock)) {
+ rcu_read_unlock();
+ release_sock(sk);
+ return sk->sk_prot->close(sk, timeout);
+ }
+ close_fun = psock->save_close;
+ bpf_tcp_remove(sk, psock);
rcu_read_unlock();
release_sock(sk);
close_fun(sk, timeout);
@@ -2097,8 +2119,12 @@ static int sock_map_update_elem(struct bpf_map *map,
return -EINVAL;
}
+ /* ULPs are currently supported only for TCP sockets in ESTABLISHED
+ * state.
+ */
if (skops.sk->sk_type != SOCK_STREAM ||
- skops.sk->sk_protocol != IPPROTO_TCP) {
+ skops.sk->sk_protocol != IPPROTO_TCP ||
+ skops.sk->sk_state != TCP_ESTABLISHED) {
fput(socket->file);
return -EOPNOTSUPP;
}
@@ -2453,6 +2479,16 @@ static int sock_hash_update_elem(struct bpf_map *map,
return -EINVAL;
}
+ /* ULPs are currently supported only for TCP sockets in ESTABLISHED
+ * state.
+ */
+ if (skops.sk->sk_type != SOCK_STREAM ||
+ skops.sk->sk_protocol != IPPROTO_TCP ||
+ skops.sk->sk_state != TCP_ESTABLISHED) {
+ fput(socket->file);
+ return -EOPNOTSUPP;
+ }
+
lock_sock(skops.sk);
preempt_disable();
rcu_read_lock();
@@ -2543,10 +2579,22 @@ const struct bpf_map_ops sock_hash_ops = {
.map_check_btf = map_check_no_btf,
};
+static bool bpf_is_valid_sock_op(struct bpf_sock_ops_kern *ops)
+{
+ return ops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB ||
+ ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB;
+}
BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
struct bpf_map *, map, void *, key, u64, flags)
{
WARN_ON_ONCE(!rcu_read_lock_held());
+
+ /* ULPs are currently supported only for TCP sockets in ESTABLISHED
+ * state. This checks that the sock op triggering the update is
+ * one indicating we are (or will soon be) in an ESTABLISHED state.
+ */
+ if (!bpf_is_valid_sock_op(bpf_sock))
+ return -EOPNOTSUPP;
return sock_map_ctx_update_elem(bpf_sock, map, key, flags);
}
@@ -2565,6 +2613,9 @@ BPF_CALL_4(bpf_sock_hash_update, struct bpf_sock_ops_kern *, bpf_sock,
struct bpf_map *, map, void *, key, u64, flags)
{
WARN_ON_ONCE(!rcu_read_lock_held());
+
+ if (!bpf_is_valid_sock_op(bpf_sock))
+ return -EOPNOTSUPP;
return sock_hash_ctx_update_elem(bpf_sock, map, key, flags);
}
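
The unhash/close rework above uses the usual save-and-chain pattern: the psock records the socket's original proto callback, installs its own, runs the shared teardown (bpf_tcp_remove()), then invokes the saved callback. A runnable userspace model of the pattern, with all names illustrative:

    #include <stdio.h>

    struct proto { void (*unhash)(void); };

    static void tcp_unhash(void) { puts("tcp unhash"); }
    static struct proto tcp_proto = { .unhash = tcp_unhash };

    static void (*save_unhash)(void);

    static void bpf_unhash(void)
    {
            puts("psock cleanup");   /* bpf_tcp_remove() equivalent */
            save_unhash();           /* chain to the original hook  */
    }

    int main(void)
    {
            save_unhash = tcp_proto.unhash;  /* as in bpf_tcp_init() */
            tcp_proto.unhash = bpf_unhash;   /* as in build_protos() */

            tcp_proto.unhash();
            return 0;
    }
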
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 92246117d2b0..465952a8e465 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2896,6 +2896,15 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
u64 umin_val, umax_val;
u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
+ if (insn_bitness == 32) {
+ /* Relevant for 32-bit RSH: Information can propagate towards
+ * LSB, so it isn't sufficient to only truncate the output to
+ * 32 bits.
+ */
+ coerce_reg_to_size(dst_reg, 4);
+ coerce_reg_to_size(&src_reg, 4);
+ }
+
smin_val = src_reg.smin_value;
smax_val = src_reg.smax_value;
umin_val = src_reg.umin_value;
@@ -3131,7 +3140,6 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
if (BPF_CLASS(insn->code) != BPF_ALU64) {
/* 32-bit ALU ops are (32,32)->32 */
coerce_reg_to_size(dst_reg, 4);
- coerce_reg_to_size(&src_reg, 4);
}
__reg_deduce_bounds(dst_reg);
@@ -3163,7 +3171,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
* an arbitrary scalar. Disallow all math except
* pointer subtraction
*/
- if (opcode == BPF_SUB){
+ if (opcode == BPF_SUB && env->allow_ptr_leaks) {
mark_reg_unknown(env, regs, insn->dst_reg);
return 0;
}
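
Why both operands must be coerced before a 32-bit shift rather than only truncating the result afterwards: for BPF_RSH, stale bits in the upper half of the 64-bit tracked value shift down into the live lower 32 bits. A standalone demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t reg = 0xdeadbeef00000000ULL | 0x1234u; /* upper half stale */

            uint32_t after  = (uint32_t)(reg >> 8);           /* truncate after  */
            uint32_t before = (uint32_t)((uint32_t)reg >> 8); /* truncate before */

            printf("after:  %#x\n", after);  /* 0xef000012 - polluted */
            printf("before: %#x\n", before); /* 0x12       - correct  */
            return 0;
    }
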
diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c
index 9f8463afda9c..47147c9e184d 100644
--- a/kernel/bpf/xskmap.c
+++ b/kernel/bpf/xskmap.c
@@ -192,11 +192,8 @@ static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
sock_hold(sock->sk);
old_xs = xchg(&m->xsk_map[i], xs);
- if (old_xs) {
- /* Make sure we've flushed everything. */
- synchronize_net();
+ if (old_xs)
sock_put((struct sock *)old_xs);
- }
sockfd_put(sock);
return 0;
@@ -212,11 +209,8 @@ static int xsk_map_delete_elem(struct bpf_map *map, void *key)
return -EINVAL;
old_xs = xchg(&m->xsk_map[k], NULL);
- if (old_xs) {
- /* Make sure we've flushed everything. */
- synchronize_net();
+ if (old_xs)
sock_put((struct sock *)old_xs);
- }
return 0;
}
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index aae10baf1902..4c1cf0969a80 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -492,7 +492,7 @@ static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
}
/**
- * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
+ * cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss
* @cgrp: the cgroup of interest
* @ss: the subsystem of interest (%NULL returns @cgrp->self)
*
@@ -501,8 +501,8 @@ static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
* enabled. If @ss is associated with the hierarchy @cgrp is on, this
* function is guaranteed to return non-NULL css.
*/
-static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
- struct cgroup_subsys *ss)
+static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp,
+ struct cgroup_subsys *ss)
{
lockdep_assert_held(&cgroup_mutex);
@@ -523,6 +523,35 @@ static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
}
/**
+ * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
+ * @cgrp: the cgroup of interest
+ * @ss: the subsystem of interest
+ *
+ * Find and get the effective css of @cgrp for @ss. The effective css is
+ * defined as the matching css of the nearest ancestor including self which
+ * has @ss enabled. If @ss is not mounted on the hierarchy @cgrp is on,
+ * the root css is returned, so this function always returns a valid css.
+ *
+ * The returned css is not guaranteed to be online, and therefore it is the
+ * caller's responsibility to tryget a reference for it.
+ */
+struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
+ struct cgroup_subsys *ss)
+{
+ struct cgroup_subsys_state *css;
+
+ do {
+ css = cgroup_css(cgrp, ss);
+
+ if (css)
+ return css;
+ cgrp = cgroup_parent(cgrp);
+ } while (cgrp);
+
+ return init_css_set.subsys[ss->id];
+}
+
+/**
* cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
* @cgrp: the cgroup of interest
* @ss: the subsystem of interest
@@ -604,10 +633,11 @@ EXPORT_SYMBOL_GPL(of_css);
*
* Should be called under cgroup_[tree_]mutex.
*/
-#define for_each_e_css(css, ssid, cgrp) \
- for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
- if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
- ; \
+#define for_each_e_css(css, ssid, cgrp) \
+ for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++) \
+ if (!((css) = cgroup_e_css_by_mask(cgrp, \
+ cgroup_subsys[(ssid)]))) \
+ ; \
else
/**
@@ -1006,7 +1036,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset,
* @ss is in this hierarchy, so we want the
* effective css from @cgrp.
*/
- template[i] = cgroup_e_css(cgrp, ss);
+ template[i] = cgroup_e_css_by_mask(cgrp, ss);
} else {
/*
* @ss is not in this hierarchy, so we don't want
@@ -2836,11 +2866,12 @@ restart:
}
/**
- * cgroup_save_control - save control masks of a subtree
+ * cgroup_save_control - save control masks and dom_cgrp of a subtree
* @cgrp: root of the target subtree
*
- * Save ->subtree_control and ->subtree_ss_mask to the respective old_
- * prefixed fields for @cgrp's subtree including @cgrp itself.
+ * Save ->subtree_control, ->subtree_ss_mask and ->dom_cgrp to the
+ * respective old_ prefixed fields for @cgrp's subtree including @cgrp
+ * itself.
*/
static void cgroup_save_control(struct cgroup *cgrp)
{
@@ -2850,6 +2881,7 @@ static void cgroup_save_control(struct cgroup *cgrp)
cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
dsct->old_subtree_control = dsct->subtree_control;
dsct->old_subtree_ss_mask = dsct->subtree_ss_mask;
+ dsct->old_dom_cgrp = dsct->dom_cgrp;
}
}
@@ -2875,11 +2907,12 @@ static void cgroup_propagate_control(struct cgroup *cgrp)
}
/**
- * cgroup_restore_control - restore control masks of a subtree
+ * cgroup_restore_control - restore control masks and dom_cgrp of a subtree
* @cgrp: root of the target subtree
*
- * Restore ->subtree_control and ->subtree_ss_mask from the respective old_
- * prefixed fields for @cgrp's subtree including @cgrp itself.
+ * Restore ->subtree_control, ->subtree_ss_mask and ->dom_cgrp from the
+ * respective old_ prefixed fields for @cgrp's subtree including @cgrp
+ * itself.
*/
static void cgroup_restore_control(struct cgroup *cgrp)
{
@@ -2889,6 +2922,7 @@ static void cgroup_restore_control(struct cgroup *cgrp)
cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
dsct->subtree_control = dsct->old_subtree_control;
dsct->subtree_ss_mask = dsct->old_subtree_ss_mask;
+ dsct->dom_cgrp = dsct->old_dom_cgrp;
}
}
@@ -3019,7 +3053,7 @@ static int cgroup_apply_control(struct cgroup *cgrp)
return ret;
/*
- * At this point, cgroup_e_css() results reflect the new csses
+ * At this point, cgroup_e_css_by_mask() results reflect the new csses
* making the following cgroup_update_dfl_csses() properly update
* css associations of all tasks in the subtree.
*/
@@ -3196,6 +3230,8 @@ static int cgroup_enable_threaded(struct cgroup *cgrp)
{
struct cgroup *parent = cgroup_parent(cgrp);
struct cgroup *dom_cgrp = parent->dom_cgrp;
+ struct cgroup *dsct;
+ struct cgroup_subsys_state *d_css;
int ret;
lockdep_assert_held(&cgroup_mutex);
@@ -3225,12 +3261,13 @@ static int cgroup_enable_threaded(struct cgroup *cgrp)
*/
cgroup_save_control(cgrp);
- cgrp->dom_cgrp = dom_cgrp;
+ cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)
+ if (dsct == cgrp || cgroup_is_threaded(dsct))
+ dsct->dom_cgrp = dom_cgrp;
+
ret = cgroup_apply_control(cgrp);
if (!ret)
parent->nr_threaded_children++;
- else
- cgrp->dom_cgrp = cgrp;
cgroup_finalize_control(cgrp, ret);
return ret;
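
The replacement cgroup_e_css() above is a plain nearest-ancestor lookup: walk toward the root until a cgroup has the subsystem's css, else fall back to the root css so the caller always gets something valid. A minimal userspace sketch of the same pattern, with hypothetical node/feature names standing in for cgroup_css() and init_css_set:

#include <stddef.h>

struct node {
	struct node *parent;
	int feature_enabled;		/* stands in for cgroup_css() != NULL */
};

static struct node root;		/* stands in for init_css_set.subsys[] */

/* Walk toward the root; return the nearest node with the feature on. */
static struct node *effective_node(struct node *n)
{
	do {
		if (n->feature_enabled)
			return n;
		n = n->parent;
	} while (n);

	return &root;			/* always a valid fallback */
}
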
diff --git a/kernel/cpu.c b/kernel/cpu.c
index aa7fe85ad62e..0097acec1c71 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -607,15 +607,15 @@ static void cpuhp_thread_fun(unsigned int cpu)
bool bringup = st->bringup;
enum cpuhp_state state;
+ if (WARN_ON_ONCE(!st->should_run))
+ return;
+
/*
* ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
* that if we see ->should_run we also see the rest of the state.
*/
smp_mb();
- if (WARN_ON_ONCE(!st->should_run))
- return;
-
cpuhp_lock_acquire(bringup);
if (st->single) {
@@ -916,7 +916,8 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
if (ret) {
st->target = prev_state;
- undo_cpu_down(cpu, st);
+ if (st->state < prev_state)
+ undo_cpu_down(cpu, st);
break;
}
}
@@ -969,7 +970,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
* to do the further cleanups.
*/
ret = cpuhp_down_callbacks(cpu, st, target);
- if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
+ if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
cpuhp_reset_state(st, prev_state);
__cpuhp_kick_ap(st);
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2a62b96600ad..5a97f34bc14c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2867,16 +2867,11 @@ static int perf_event_modify_breakpoint(struct perf_event *bp,
_perf_event_disable(bp);
err = modify_user_hw_breakpoint_check(bp, attr, true);
- if (err) {
- if (!bp->attr.disabled)
- _perf_event_enable(bp);
- return err;
- }
-
- if (!attr->disabled)
+ if (!bp->attr.disabled)
_perf_event_enable(bp);
- return 0;
+
+ return err;
}
static int perf_event_modify_attr(struct perf_event *event,
@@ -3940,6 +3935,12 @@ int perf_event_read_local(struct perf_event *event, u64 *value,
goto out;
}
+ /* If this is a pinned event it must be running on this CPU */
+ if (event->attr.pinned && event->oncpu != smp_processor_id()) {
+ ret = -EBUSY;
+ goto out;
+ }
+
/*
* If the event is currently on this CPU, it's either a per-task event,
* or local to this CPU. Furthermore it means it's ACTIVE (otherwise
@@ -5948,6 +5949,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
unsigned long sp;
unsigned int rem;
u64 dyn_size;
+ mm_segment_t fs;
/*
* We dump:
@@ -5965,7 +5967,10 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
/* Data. */
sp = perf_user_stack_pointer(regs);
+ fs = get_fs();
+ set_fs(USER_DS);
rem = __output_copy_user(handle, (void *) sp, dump_size);
+ set_fs(fs);
dyn_size = dump_size - rem;
perf_output_skip(handle, rem);
@@ -8309,6 +8314,8 @@ void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
goto unlock;
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
+ if (event->cpu != smp_processor_id())
+ continue;
if (event->attr.type != PERF_TYPE_TRACEPOINT)
continue;
if (event->attr.config != entry->type)
@@ -9426,9 +9433,7 @@ static void free_pmu_context(struct pmu *pmu)
if (pmu->task_ctx_nr > perf_invalid_context)
return;
- mutex_lock(&pmus_lock);
free_percpu(pmu->pmu_cpu_context);
- mutex_unlock(&pmus_lock);
}
/*
@@ -9684,12 +9689,8 @@ EXPORT_SYMBOL_GPL(perf_pmu_register);
void perf_pmu_unregister(struct pmu *pmu)
{
- int remove_device;
-
mutex_lock(&pmus_lock);
- remove_device = pmu_bus_running;
list_del_rcu(&pmu->entry);
- mutex_unlock(&pmus_lock);
/*
* We dereference the pmu list under both SRCU and regular RCU, so
@@ -9701,13 +9702,14 @@ void perf_pmu_unregister(struct pmu *pmu)
free_percpu(pmu->pmu_disable_count);
if (pmu->type >= PERF_TYPE_MAX)
idr_remove(&pmu_idr, pmu->type);
- if (remove_device) {
+ if (pmu_bus_running) {
if (pmu->nr_addr_filters)
device_remove_file(pmu->dev, &dev_attr_nr_addr_filters);
device_del(pmu->dev);
put_device(pmu->dev);
}
free_pmu_context(pmu);
+ mutex_unlock(&pmus_lock);
}
EXPORT_SYMBOL_GPL(perf_pmu_unregister);
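
The perf_pmu_unregister() hunk widens the pmus_lock critical section so that pmu_bus_running is read under the same lock the device teardown runs under, instead of being sampled into remove_device and acted on after the unlock. A runnable pthread sketch of that fix (names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pmus_lock = PTHREAD_MUTEX_INITIALIZER;
static int bus_running = 1;		/* stands in for pmu_bus_running */

static void unregister_pmu(const char *name)
{
	pthread_mutex_lock(&pmus_lock);
	/* The flag is read and acted on inside one critical section,
	 * so it cannot change mid-teardown. */
	if (bus_running)
		printf("tearing down device for %s\n", name);
	pthread_mutex_unlock(&pmus_lock);
}

int main(void)
{
	unregister_pmu("demo");
	return 0;
}
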
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index b3814fce5ecb..d6b56180827c 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -509,6 +509,8 @@ modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *a
*/
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
+ int err;
+
/*
* modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it
* will not be possible to raise IPIs that invoke __perf_event_disable.
@@ -520,15 +522,12 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att
else
perf_event_disable(bp);
- if (!attr->disabled) {
- int err = modify_user_hw_breakpoint_check(bp, attr, false);
+ err = modify_user_hw_breakpoint_check(bp, attr, false);
- if (err)
- return err;
+ if (!bp->attr.disabled)
perf_event_enable(bp);
- bp->attr.disabled = 0;
- }
- return 0;
+
+ return err;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 01ebdf1f9f40..2e62503bea0d 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -678,7 +678,7 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
case MODULE_STATE_COMING:
ret = jump_label_add_module(mod);
if (ret) {
- WARN(1, "Failed to allocatote memory: jump_label may not work properly.\n");
+ WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
jump_label_del_module(mod);
}
break;
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index e406c5fdb41e..dd13f865ad40 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -55,7 +55,6 @@
#include "lockdep_internals.h"
-#include <trace/events/preemptirq.h>
#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 1a81a1257b3f..3f8a35104285 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -389,7 +389,7 @@ static bool __ww_mutex_wound(struct mutex *lock,
/*
* wake_up_process() paired with set_current_state()
* inserts sufficient barriers to make sure @owner either sees
- * it's wounded in __ww_mutex_lock_check_stamp() or has a
+ * it's wounded in __ww_mutex_check_kill() or has a
* wakeup pending to re-read the wounded state.
*/
if (owner != current)
@@ -946,7 +946,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
}
debug_mutex_lock_common(lock, &waiter);
- debug_mutex_add_waiter(lock, &waiter, current);
lock_contended(&lock->dep_map, ip);
diff --git a/kernel/locking/test-ww_mutex.c b/kernel/locking/test-ww_mutex.c
index 5b915b370d5a..65a3b7e55b9f 100644
--- a/kernel/locking/test-ww_mutex.c
+++ b/kernel/locking/test-ww_mutex.c
@@ -260,7 +260,7 @@ static void test_cycle_work(struct work_struct *work)
{
struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
struct ww_acquire_ctx ctx;
- int err;
+ int err, erra = 0;
ww_acquire_init(&ctx, &ww_class);
ww_mutex_lock(&cycle->a_mutex, &ctx);
@@ -270,17 +270,19 @@ static void test_cycle_work(struct work_struct *work)
err = ww_mutex_lock(cycle->b_mutex, &ctx);
if (err == -EDEADLK) {
+ err = 0;
ww_mutex_unlock(&cycle->a_mutex);
ww_mutex_lock_slow(cycle->b_mutex, &ctx);
- err = ww_mutex_lock(&cycle->a_mutex, &ctx);
+ erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
}
if (!err)
ww_mutex_unlock(cycle->b_mutex);
- ww_mutex_unlock(&cycle->a_mutex);
+ if (!erra)
+ ww_mutex_unlock(&cycle->a_mutex);
ww_acquire_fini(&ctx);
- cycle->result = err;
+ cycle->result = err ?: erra;
}
static int __test_cycle(unsigned int nthreads)
@@ -324,7 +326,7 @@ static int __test_cycle(unsigned int nthreads)
if (!cycle->result)
continue;
- pr_err("cylic deadlock not resolved, ret[%d/%d] = %d\n",
+ pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
n, nthreads, cycle->result);
ret = -EINVAL;
break;
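
The test fix above tracks the result of the post-backoff reacquire separately so the cleanup path never unlocks a mutex it failed to take. For context, a hedged sketch of the wound/wait backoff protocol the test exercises; a production version would loop until both locks are held (see Documentation/locking/ww-mutex-design.txt):

static int lock_both(struct ww_mutex *a, struct ww_mutex *b,
		     struct ww_class *class)
{
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, class);

	ww_mutex_lock(a, &ctx);			/* first lock cannot deadlock */
	err = ww_mutex_lock(b, &ctx);
	if (err == -EDEADLK) {
		/* Wounded: drop what we hold, sleep on the contended lock. */
		ww_mutex_unlock(a);
		ww_mutex_lock_slow(b, &ctx);
		err = ww_mutex_lock(a, &ctx);
		if (err)			/* as in the fix: per-lock result */
			ww_mutex_unlock(b);
	}

	if (!err) {
		/* ... both held: do the work ... */
		ww_mutex_unlock(b);
		ww_mutex_unlock(a);
	}
	ww_acquire_fini(&ctx);
	return err;
}
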
diff --git a/kernel/pid.c b/kernel/pid.c
index de1cfc4f75a2..cdf63e53a014 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -195,7 +195,7 @@ struct pid *alloc_pid(struct pid_namespace *ns)
idr_preload_end();
if (nr < 0) {
- retval = nr;
+ retval = (nr == -ENOSPC) ? -EAGAIN : nr;
goto out_free;
}
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 5342f6fc022e..0bd595a0b610 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -63,6 +63,12 @@ static DECLARE_SWAIT_QUEUE_HEAD(s2idle_wait_head);
enum s2idle_states __read_mostly s2idle_state;
static DEFINE_RAW_SPINLOCK(s2idle_lock);
+bool pm_suspend_via_s2idle(void)
+{
+ return mem_sleep_current == PM_SUSPEND_TO_IDLE;
+}
+EXPORT_SYMBOL_GPL(pm_suspend_via_s2idle);
+
void s2idle_set_ops(const struct platform_s2idle_ops *ops)
{
lock_system_sleep();
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index fd6f8ed28e01..9bf5404397e0 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -351,7 +351,6 @@ static int console_msg_format = MSG_FORMAT_DEFAULT;
*/
enum log_flags {
- LOG_NOCONS = 1, /* suppress print, do not print to console */
LOG_NEWLINE = 2, /* text ended with a newline */
LOG_PREFIX = 4, /* text started with a prefix */
LOG_CONT = 8, /* text is a fragment of a continuation line */
@@ -1881,9 +1880,6 @@ int vprintk_store(int facility, int level,
if (dict)
lflags |= LOG_PREFIX|LOG_NEWLINE;
- if (suppress_message_printing(level))
- lflags |= LOG_NOCONS;
-
return log_output(facility, level, lflags,
dict, dictlen, text, text_len);
}
@@ -2032,6 +2028,7 @@ static void call_console_drivers(const char *ext_text, size_t ext_len,
const char *text, size_t len) {}
static size_t msg_print_text(const struct printk_log *msg,
bool syslog, char *buf, size_t size) { return 0; }
+static bool suppress_message_printing(int level) { return false; }
#endif /* CONFIG_PRINTK */
@@ -2368,10 +2365,11 @@ skip:
break;
msg = log_from_idx(console_idx);
- if (msg->flags & LOG_NOCONS) {
+ if (suppress_message_printing(msg->level)) {
/*
- * Skip record if !ignore_loglevel, and
- * record has level above the console loglevel.
+ * Skip records that we have buffered and already printed
+ * directly to the console when we received them, and
+ * records with a level above the console loglevel.
*/
console_idx = log_next(console_idx);
console_seq++;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 625bc9897f62..ad97f3ba5ec5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1167,7 +1167,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
if (task_cpu(p) != new_cpu) {
if (p->sched_class->migrate_task_rq)
- p->sched_class->migrate_task_rq(p);
+ p->sched_class->migrate_task_rq(p, new_cpu);
p->se.nr_migrations++;
rseq_migrate(p);
perf_event_task_migrate(p);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 997ea7b839fa..91e4202b0634 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1607,7 +1607,7 @@ out:
return cpu;
}
-static void migrate_task_rq_dl(struct task_struct *p)
+static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
{
struct rq *rq;
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 60caf1fb94e0..6383aa6a60ca 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -89,12 +89,12 @@ struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
static void sched_feat_disable(int i)
{
- static_key_disable(&sched_feat_keys[i]);
+ static_key_disable_cpuslocked(&sched_feat_keys[i]);
}
static void sched_feat_enable(int i)
{
- static_key_enable(&sched_feat_keys[i]);
+ static_key_enable_cpuslocked(&sched_feat_keys[i]);
}
#else
static void sched_feat_disable(int i) { };
@@ -146,9 +146,11 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
/* Ensure the static_key remains in a consistent state */
inode = file_inode(filp);
+ cpus_read_lock();
inode_lock(inode);
ret = sched_feat_set(cmp);
inode_unlock(inode);
+ cpus_read_unlock();
if (ret < 0)
return ret;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b39fb596f6c1..908c9cdae2f0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1392,6 +1392,17 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
int last_cpupid, this_cpupid;
this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
+ last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
+
+ /*
+ * Allow first faults or private faults to migrate immediately early in
+ * the lifetime of a task. The magic number 4 is based on waiting for
+ * two full passes of the "multi-stage node selection" test that is
+ * executed below.
+ */
+ if ((p->numa_preferred_nid == -1 || p->numa_scan_seq <= 4) &&
+ (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid)))
+ return true;
/*
* Multi-stage node selection is used in conjunction with a periodic
@@ -1410,7 +1421,6 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
* This quadratic squishes small probabilities, making it less likely we
* act on an unlikely task<->page relation.
*/
- last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
if (!cpupid_pid_unset(last_cpupid) &&
cpupid_to_nid(last_cpupid) != dst_nid)
return false;
@@ -1514,6 +1524,21 @@ struct task_numa_env {
static void task_numa_assign(struct task_numa_env *env,
struct task_struct *p, long imp)
{
+ struct rq *rq = cpu_rq(env->dst_cpu);
+
+ /* Bail out if the run queue is part of an active NUMA balance. */
+ if (xchg(&rq->numa_migrate_on, 1))
+ return;
+
+ /*
+ * Clear the previous best_cpu/rq numa-migrate flag, since the task
+ * has now found a better CPU to move to or swap with.
+ */
+ if (env->best_cpu != -1) {
+ rq = cpu_rq(env->best_cpu);
+ WRITE_ONCE(rq->numa_migrate_on, 0);
+ }
+
if (env->best_task)
put_task_struct(env->best_task);
if (p)
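
The xchg() in task_numa_assign() atomically sets numa_migrate_on and returns its previous value, so exactly one task can claim a given run queue as a migration target. A runnable C11 sketch of that test-and-set claim (names hypothetical):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int migrate_on;		/* stands in for rq->numa_migrate_on */

/* Returns 1 if we won the claim, 0 if another task already holds it. */
static int claim(void)
{
	return atomic_exchange(&migrate_on, 1) == 0;
}

static void release(void)
{
	atomic_store(&migrate_on, 0);	/* WRITE_ONCE(...) in the hunk */
}

int main(void)
{
	printf("first claim: %d\n", claim());	/* 1: we own the rq */
	printf("second claim: %d\n", claim());	/* 0: already claimed */
	release();
	return 0;
}
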
@@ -1553,6 +1578,13 @@ static bool load_too_imbalanced(long src_load, long dst_load,
}
/*
+ * Maximum NUMA importance can be 1998 (2*999);
+ * SMALLIMP @ 30 would be close to 1998/64.
+ * Used to deter task migration.
+ */
+#define SMALLIMP 30
+
+/*
* This checks if the overall compute and NUMA accesses of the system would
* be improved if the source task was migrated to the target dst_cpu, taking
* into account that it might be best if the task running on the dst_cpu should
@@ -1569,6 +1601,9 @@ static void task_numa_compare(struct task_numa_env *env,
long moveimp = imp;
int dist = env->dist;
+ if (READ_ONCE(dst_rq->numa_migrate_on))
+ return;
+
rcu_read_lock();
cur = task_rcu_dereference(&dst_rq->curr);
if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
@@ -1582,7 +1617,7 @@ static void task_numa_compare(struct task_numa_env *env,
goto unlock;
if (!cur) {
- if (maymove || imp > env->best_imp)
+ if (maymove && moveimp >= env->best_imp)
goto assign;
else
goto unlock;
@@ -1625,16 +1660,22 @@ static void task_numa_compare(struct task_numa_env *env,
task_weight(cur, env->dst_nid, dist);
}
- if (imp <= env->best_imp)
- goto unlock;
-
if (maymove && moveimp > imp && moveimp > env->best_imp) {
- imp = moveimp - 1;
+ imp = moveimp;
cur = NULL;
goto assign;
}
/*
+ * If the NUMA importance is less than SMALLIMP,
+ * task migration might only result in ping pong
+ * of tasks and also hurt performance due to cache
+ * misses.
+ */
+ if (imp < SMALLIMP || imp <= env->best_imp + SMALLIMP / 2)
+ goto unlock;
+
+ /*
* In the overloaded case, try and keep the load balanced.
*/
load = task_h_load(env->p) - task_h_load(cur);
@@ -1710,6 +1751,7 @@ static int task_numa_migrate(struct task_struct *p)
.best_cpu = -1,
};
struct sched_domain *sd;
+ struct rq *best_rq;
unsigned long taskweight, groupweight;
int nid, ret, dist;
long taskimp, groupimp;
@@ -1805,20 +1847,17 @@ static int task_numa_migrate(struct task_struct *p)
if (env.best_cpu == -1)
return -EAGAIN;
- /*
- * Reset the scan period if the task is being rescheduled on an
- * alternative node to recheck if the tasks is now properly placed.
- */
- p->numa_scan_period = task_scan_start(p);
-
+ best_rq = cpu_rq(env.best_cpu);
if (env.best_task == NULL) {
ret = migrate_task_to(p, env.best_cpu);
+ WRITE_ONCE(best_rq->numa_migrate_on, 0);
if (ret != 0)
trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
return ret;
}
ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
+ WRITE_ONCE(best_rq->numa_migrate_on, 0);
if (ret != 0)
trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
@@ -2596,6 +2635,39 @@ void task_tick_numa(struct rq *rq, struct task_struct *curr)
}
}
+static void update_scan_period(struct task_struct *p, int new_cpu)
+{
+ int src_nid = cpu_to_node(task_cpu(p));
+ int dst_nid = cpu_to_node(new_cpu);
+
+ if (!static_branch_likely(&sched_numa_balancing))
+ return;
+
+ if (!p->mm || !p->numa_faults || (p->flags & PF_EXITING))
+ return;
+
+ if (src_nid == dst_nid)
+ return;
+
+ /*
+ * Allow resets if faults have been trapped before one scan
+ * has completed. This is most likely due to a new task that
+ * is pulled cross-node due to wakeups or load balancing.
+ */
+ if (p->numa_scan_seq) {
+ /*
+ * Avoid scan adjustments if moving to the preferred
+ * node or if the task was not previously running on
+ * the preferred node.
+ */
+ if (dst_nid == p->numa_preferred_nid ||
+ (p->numa_preferred_nid != -1 && src_nid != p->numa_preferred_nid))
+ return;
+ }
+
+ p->numa_scan_period = task_scan_start(p);
+}
+
#else
static void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
@@ -2609,6 +2681,10 @@ static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
}
+static inline void update_scan_period(struct task_struct *p, int new_cpu)
+{
+}
+
#endif /* CONFIG_NUMA_BALANCING */
static void
@@ -3362,6 +3438,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
* attach_entity_load_avg - attach this entity to its cfs_rq load avg
* @cfs_rq: cfs_rq to attach to
* @se: sched_entity to attach
+ * @flags: migration hints
*
* Must call update_cfs_rq_load_avg() before this, since we rely on
* cfs_rq->avg.last_update_time being current.
@@ -3924,7 +4001,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* put back on, and if we advance min_vruntime, we'll be placed back
* further than we started -- ie. we'll be penalized.
*/
- if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
+ if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE)
update_min_vruntime(cfs_rq);
}
@@ -4399,9 +4476,13 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
/*
* Add to the _head_ of the list, so that an already-started
- * distribute_cfs_runtime will not see us
+ * distribute_cfs_runtime will not see us. If distribute_cfs_runtime is
+ * not running, add to the tail so that later runqueues don't get starved.
*/
- list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+ if (cfs_b->distribute_running)
+ list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+ else
+ list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
/*
* If we're the first throttled task, make sure the bandwidth
@@ -4545,14 +4626,16 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
* in us over-using our runtime if it is all used during this loop, but
* only by limited amounts in that extreme case.
*/
- while (throttled && cfs_b->runtime > 0) {
+ while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
runtime = cfs_b->runtime;
+ cfs_b->distribute_running = 1;
raw_spin_unlock(&cfs_b->lock);
/* we can't nest cfs_b->lock while distributing bandwidth */
runtime = distribute_cfs_runtime(cfs_b, runtime,
runtime_expires);
raw_spin_lock(&cfs_b->lock);
+ cfs_b->distribute_running = 0;
throttled = !list_empty(&cfs_b->throttled_cfs_rq);
cfs_b->runtime -= min(runtime, cfs_b->runtime);
@@ -4663,6 +4746,11 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
/* confirm we're still not at a refresh boundary */
raw_spin_lock(&cfs_b->lock);
+ if (cfs_b->distribute_running) {
+ raw_spin_unlock(&cfs_b->lock);
+ return;
+ }
+
if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
raw_spin_unlock(&cfs_b->lock);
return;
@@ -4672,6 +4760,9 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
runtime = cfs_b->runtime;
expires = cfs_b->runtime_expires;
+ if (runtime)
+ cfs_b->distribute_running = 1;
+
raw_spin_unlock(&cfs_b->lock);
if (!runtime)
@@ -4682,6 +4773,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
raw_spin_lock(&cfs_b->lock);
if (expires == cfs_b->runtime_expires)
cfs_b->runtime -= min(runtime, cfs_b->runtime);
+ cfs_b->distribute_running = 0;
raw_spin_unlock(&cfs_b->lock);
}
@@ -4790,6 +4882,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
cfs_b->period_timer.function = sched_cfs_period_timer;
hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cfs_b->slack_timer.function = sched_cfs_slack_timer;
+ cfs_b->distribute_running = 0;
}
static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
@@ -6274,7 +6367,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se);
* cfs_rq_of(p) references at time of call are still valid and identify the
* previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
*/
-static void migrate_task_rq_fair(struct task_struct *p)
+static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
{
/*
* As blocked tasks retain absolute vruntime the migration needs to
@@ -6327,6 +6420,8 @@ static void migrate_task_rq_fair(struct task_struct *p)
/* We have migrated, no longer consider this task hot */
p->se.exec_start = 0;
+
+ update_scan_period(p, new_cpu);
}
static void task_dead_fair(struct task_struct *p)
@@ -7263,6 +7358,7 @@ static void update_blocked_averages(int cpu)
{
struct rq *rq = cpu_rq(cpu);
struct cfs_rq *cfs_rq, *pos;
+ const struct sched_class *curr_class;
struct rq_flags rf;
bool done = true;
@@ -7299,8 +7395,10 @@ static void update_blocked_averages(int cpu)
if (cfs_rq_has_blocked(cfs_rq))
done = false;
}
- update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
- update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+
+ curr_class = rq->curr->sched_class;
+ update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
+ update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
update_irq_load_avg(rq, 0);
/* Don't need periodic decay once load/util_avg are null */
if (others_have_blocked(rq))
@@ -7365,13 +7463,16 @@ static inline void update_blocked_averages(int cpu)
{
struct rq *rq = cpu_rq(cpu);
struct cfs_rq *cfs_rq = &rq->cfs;
+ const struct sched_class *curr_class;
struct rq_flags rf;
rq_lock_irqsave(rq, &rf);
update_rq_clock(rq);
update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
- update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
- update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
+
+ curr_class = rq->curr->sched_class;
+ update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
+ update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
update_irq_load_avg(rq, 0);
#ifdef CONFIG_NO_HZ_COMMON
rq->last_blocked_load_update_tick = jiffies;
@@ -7482,10 +7583,10 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
return load_idx;
}
-static unsigned long scale_rt_capacity(int cpu)
+static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu)
{
struct rq *rq = cpu_rq(cpu);
- unsigned long max = arch_scale_cpu_capacity(NULL, cpu);
+ unsigned long max = arch_scale_cpu_capacity(sd, cpu);
unsigned long used, free;
unsigned long irq;
@@ -7507,7 +7608,7 @@ static unsigned long scale_rt_capacity(int cpu)
static void update_cpu_capacity(struct sched_domain *sd, int cpu)
{
- unsigned long capacity = scale_rt_capacity(cpu);
+ unsigned long capacity = scale_rt_capacity(sd, cpu);
struct sched_group *sdg = sd->groups;
cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu);
@@ -8269,7 +8370,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
force_balance:
/* Looks like there is an imbalance. Compute it */
calculate_imbalance(env, &sds);
- return sds.busiest;
+ return env->imbalance ? sds.busiest : NULL;
out_balanced:
env->imbalance = 0;
@@ -9638,7 +9739,8 @@ static inline bool vruntime_normalized(struct task_struct *p)
* - A task which has been woken up by try_to_wake_up() and
* waiting for actually being woken up by sched_ttwu_pending().
*/
- if (!se->sum_exec_runtime || p->state == TASK_WAKING)
+ if (!se->sum_exec_runtime ||
+ (p->state == TASK_WAKING && p->sched_remote_wakeup))
return true;
return false;
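
The distribute_running flag added through these cfs_bandwidth hunks is set under cfs_b->lock just before the lock is dropped to call distribute_cfs_runtime(), which keeps the period and slack timers from distributing runtime concurrently. A runnable pthread sketch of that "flag guards a lock-dropping section" pattern (names hypothetical):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool distribute_running;

static void distribute(void)
{
	/* ... hand out runtime; must run without the lock held ... */
}

static void timer_tick(void)
{
	pthread_mutex_lock(&lock);
	if (distribute_running) {	/* another timer is mid-distribution */
		pthread_mutex_unlock(&lock);
		return;
	}
	distribute_running = true;
	pthread_mutex_unlock(&lock);

	distribute();			/* safe: the flag excludes other timers */

	pthread_mutex_lock(&lock);
	distribute_running = false;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	timer_tick();
	return 0;
}
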
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4a2e8cae63c4..9683f458aec7 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -346,6 +346,8 @@ struct cfs_bandwidth {
int nr_periods;
int nr_throttled;
u64 throttled_time;
+
+ bool distribute_running;
#endif
};
@@ -783,6 +785,7 @@ struct rq {
#ifdef CONFIG_NUMA_BALANCING
unsigned int nr_numa_running;
unsigned int nr_preferred_running;
+ unsigned int numa_migrate_on;
#endif
#define CPU_LOAD_IDX_MAX 5
unsigned long cpu_load[CPU_LOAD_IDX_MAX];
@@ -1523,7 +1526,7 @@ struct sched_class {
#ifdef CONFIG_SMP
int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
- void (*migrate_task_rq)(struct task_struct *p);
+ void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
void (*task_woken)(struct rq *this_rq, struct task_struct *task);
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 56a0fed30c0a..505a41c42b96 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1295,7 +1295,7 @@ static void init_numa_topology_type(void)
n = sched_max_numa_distance;
- if (sched_domains_numa_levels <= 1) {
+ if (sched_domains_numa_levels <= 2) {
sched_numa_topology_type = NUMA_DIRECT;
return;
}
@@ -1380,9 +1380,6 @@ void sched_init_numa(void)
break;
}
- if (!level)
- return;
-
/*
* 'level' contains the number of unique distances
*
diff --git a/kernel/signal.c b/kernel/signal.c
index 5843c541fda9..e4aad0e90882 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3460,7 +3460,8 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
}
static int
-do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp)
+do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
+ size_t min_ss_size)
{
struct task_struct *t = current;
@@ -3490,7 +3491,7 @@ do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp)
ss_size = 0;
ss_sp = NULL;
} else {
- if (unlikely(ss_size < MINSIGSTKSZ))
+ if (unlikely(ss_size < min_ss_size))
return -ENOMEM;
}
@@ -3508,7 +3509,8 @@ SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
return -EFAULT;
err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
- current_user_stack_pointer());
+ current_user_stack_pointer(),
+ MINSIGSTKSZ);
if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
err = -EFAULT;
return err;
@@ -3519,7 +3521,8 @@ int restore_altstack(const stack_t __user *uss)
stack_t new;
if (copy_from_user(&new, uss, sizeof(stack_t)))
return -EFAULT;
- (void)do_sigaltstack(&new, NULL, current_user_stack_pointer());
+ (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
+ MINSIGSTKSZ);
/* squash all but EFAULT for now */
return 0;
}
@@ -3553,7 +3556,8 @@ static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
uss.ss_size = uss32.ss_size;
}
ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
- compat_user_stack_pointer());
+ compat_user_stack_pointer(),
+ COMPAT_MINSIGSTKSZ);
if (ret >= 0 && uoss_ptr) {
compat_stack_t old;
memset(&old, 0, sizeof(old));
diff --git a/kernel/sys.c b/kernel/sys.c
index cf5c67533ff1..123bd73046ec 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -71,9 +71,6 @@
#include <asm/io.h>
#include <asm/unistd.h>
-/* Hardening for Spectre-v1 */
-#include <linux/nospec.h>
-
#include "uid16.h"
#ifndef SET_UNALIGN_CTL
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index f74fb00d8064..0e6e97a01942 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -133,19 +133,40 @@ static void inline clocksource_watchdog_unlock(unsigned long *flags)
spin_unlock_irqrestore(&watchdog_lock, *flags);
}
+static int clocksource_watchdog_kthread(void *data);
+static void __clocksource_change_rating(struct clocksource *cs, int rating);
+
/*
* Interval: 0.5sec Threshold: 0.0625s
*/
#define WATCHDOG_INTERVAL (HZ >> 1)
#define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
+static void clocksource_watchdog_work(struct work_struct *work)
+{
+ /*
+ * We cannot directly run clocksource_watchdog_kthread() here, because
+ * clocksource_select() calls timekeeping_notify() which uses
+ * stop_machine(). One cannot use stop_machine() from a workqueue() due
+ * to lock inversions wrt CPU hotplug.
+ *
+ * Also, we only ever run this work once or twice during the lifetime
+ * of the kernel, so there is no point in creating a more permanent
+ * kthread for this.
+ *
+ * If kthread_run() fails, the next watchdog scan over the
+ * watchdog_list will find the unstable clock again.
+ */
+ kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
+}
+
static void __clocksource_unstable(struct clocksource *cs)
{
cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
cs->flags |= CLOCK_SOURCE_UNSTABLE;
/*
- * If the clocksource is registered clocksource_watchdog_work() will
+ * If the clocksource is registered clocksource_watchdog_kthread() will
* re-rate and re-select.
*/
if (list_empty(&cs->list)) {
@@ -156,7 +177,7 @@ static void __clocksource_unstable(struct clocksource *cs)
if (cs->mark_unstable)
cs->mark_unstable(cs);
- /* kick clocksource_watchdog_work() */
+ /* kick clocksource_watchdog_kthread() */
if (finished_booting)
schedule_work(&watchdog_work);
}
@@ -166,7 +187,7 @@ static void __clocksource_unstable(struct clocksource *cs)
* @cs: clocksource to be marked unstable
*
* This function is called by the x86 TSC code to mark clocksources as unstable;
- * it defers demotion and re-selection to a work.
+ * it defers demotion and re-selection to a kthread.
*/
void clocksource_mark_unstable(struct clocksource *cs)
{
@@ -391,9 +412,7 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
}
}
-static void __clocksource_change_rating(struct clocksource *cs, int rating);
-
-static int __clocksource_watchdog_work(void)
+static int __clocksource_watchdog_kthread(void)
{
struct clocksource *cs, *tmp;
unsigned long flags;
@@ -418,12 +437,13 @@ static int __clocksource_watchdog_work(void)
return select;
}
-static void clocksource_watchdog_work(struct work_struct *work)
+static int clocksource_watchdog_kthread(void *data)
{
mutex_lock(&clocksource_mutex);
- if (__clocksource_watchdog_work())
+ if (__clocksource_watchdog_kthread())
clocksource_select();
mutex_unlock(&clocksource_mutex);
+ return 0;
}
static bool clocksource_is_watchdog(struct clocksource *cs)
@@ -442,7 +462,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
static void clocksource_select_watchdog(bool fallback) { }
static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
static inline void clocksource_resume_watchdog(void) { }
-static inline int __clocksource_watchdog_work(void) { return 0; }
+static inline int __clocksource_watchdog_kthread(void) { return 0; }
static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
void clocksource_mark_unstable(struct clocksource *cs) { }
@@ -810,7 +830,7 @@ static int __init clocksource_done_booting(void)
/*
* Run the watchdog first to eliminate unstable clock sources
*/
- __clocksource_watchdog_work();
+ __clocksource_watchdog_kthread();
clocksource_select();
mutex_unlock(&clocksource_mutex);
return 0;
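
The clocksource change bounces the heavy work through two contexts: atomic callers schedule a workqueue item, and the work item spawns a one-shot kthread because stop_machine() may not be used from a workqueue. kthread_run() both creates and wakes the thread, and a failed spawn is tolerated since the next watchdog pass retries. A hedged kernel-style sketch of the bounce (all names hypothetical):

static DEFINE_MUTEX(fixup_mutex);

static int fixup_thread(void *unused)
{
	mutex_lock(&fixup_mutex);
	/* ... heavy re-rating work; may use stop_machine() here ... */
	mutex_unlock(&fixup_mutex);
	return 0;
}

static void fixup_work_fn(struct work_struct *work)
{
	/* A failed spawn is fine: the next watchdog scan retries. */
	kthread_run(fixup_thread, NULL, "kfixup");
}

static DECLARE_WORK(fixup_work, fixup_work_fn);

static void mark_broken(void)		/* safe from atomic context */
{
	schedule_work(&fixup_work);
}
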
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 2868d85f1fb1..fac0ddf8a8e2 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -764,9 +764,9 @@ blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
return NULL;
- if (!bio->bi_css)
+ if (!bio->bi_blkg)
return NULL;
- return cgroup_get_kernfs_id(bio->bi_css->cgroup);
+ return cgroup_get_kernfs_id(bio_blkcg(bio)->css.cgroup);
}
#else
static union kernfs_node_id *
diff --git a/kernel/trace/preemptirq_delay_test.c b/kernel/trace/preemptirq_delay_test.c
index f704390db9fc..d8765c952fab 100644
--- a/kernel/trace/preemptirq_delay_test.c
+++ b/kernel/trace/preemptirq_delay_test.c
@@ -5,12 +5,12 @@
* Copyright (C) 2018 Joel Fernandes (Google) <joel@joelfernandes.org>
*/
+#include <linux/trace_clock.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
-#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/string.h>
@@ -25,13 +25,13 @@ MODULE_PARM_DESC(test_mode, "Mode of the test such as preempt or irq (default ir
static void busy_wait(ulong time)
{
- ktime_t start, end;
- start = ktime_get();
+ u64 start, end;
+ start = trace_clock_local();
do {
- end = ktime_get();
+ end = trace_clock_local();
if (kthread_should_stop())
break;
- } while (ktime_to_ns(ktime_sub(end, start)) < (time * 1000));
+ } while ((end - start) < (time * 1000));
}
static int preemptirq_delay_run(void *data)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 1d92d4a982fd..65bd4616220d 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1546,6 +1546,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
tmp_iter_page = first_page;
do {
+ cond_resched();
+
to_remove_page = tmp_iter_page;
rb_inc_page(cpu_buffer, &tmp_iter_page);
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 85f6b01431c7..d239004aaf29 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -738,16 +738,30 @@ static void free_synth_field(struct synth_field *field)
kfree(field);
}
-static struct synth_field *parse_synth_field(char *field_type,
- char *field_name)
+static struct synth_field *parse_synth_field(int argc, char **argv,
+ int *consumed)
{
struct synth_field *field;
+ const char *prefix = NULL;
+ char *field_type = argv[0], *field_name;
int len, ret = 0;
char *array;
if (field_type[0] == ';')
field_type++;
+ if (!strcmp(field_type, "unsigned")) {
+ if (argc < 3)
+ return ERR_PTR(-EINVAL);
+ prefix = "unsigned ";
+ field_type = argv[1];
+ field_name = argv[2];
+ *consumed = 3;
+ } else {
+ field_name = argv[1];
+ *consumed = 2;
+ }
+
len = strlen(field_name);
if (field_name[len - 1] == ';')
field_name[len - 1] = '\0';
@@ -760,11 +774,15 @@ static struct synth_field *parse_synth_field(char *field_type,
array = strchr(field_name, '[');
if (array)
len += strlen(array);
+ if (prefix)
+ len += strlen(prefix);
field->type = kzalloc(len, GFP_KERNEL);
if (!field->type) {
ret = -ENOMEM;
goto free;
}
+ if (prefix)
+ strcat(field->type, prefix);
strcat(field->type, field_type);
if (array) {
strcat(field->type, array);
@@ -1009,7 +1027,7 @@ static int create_synth_event(int argc, char **argv)
struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
struct synth_event *event = NULL;
bool delete_event = false;
- int i, n_fields = 0, ret = 0;
+ int i, consumed = 0, n_fields = 0, ret = 0;
char *name;
mutex_lock(&synth_event_mutex);
@@ -1061,16 +1079,16 @@ static int create_synth_event(int argc, char **argv)
goto err;
}
- field = parse_synth_field(argv[i], argv[i + 1]);
+ field = parse_synth_field(argc - i, &argv[i], &consumed);
if (IS_ERR(field)) {
ret = PTR_ERR(field);
goto err;
}
- fields[n_fields] = field;
- i++; n_fields++;
+ fields[n_fields++] = field;
+ i += consumed - 1;
}
- if (i < argc) {
+ if (i < argc && strcmp(argv[i], ";") != 0) {
ret = -EINVAL;
goto err;
}
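
parse_synth_field() now consumes a variable number of argv tokens and reports the count, which lets a two-token "u64 lat;" field and a three-token "unsigned long len;" field share one parsing loop. A runnable sketch of consumed-count parsing (names hypothetical):

#include <stdio.h>
#include <string.h>

/* Parse one "type name" or "unsigned type name" field; report tokens used. */
static int parse_field(int argc, char **argv, int *consumed)
{
	const char *prefix = "", *type, *name;

	if (argc >= 3 && strcmp(argv[0], "unsigned") == 0) {
		prefix = "unsigned ";
		type = argv[1];
		name = argv[2];
		*consumed = 3;
	} else if (argc >= 2) {
		type = argv[0];
		name = argv[1];
		*consumed = 2;
	} else {
		return -1;
	}

	printf("field: %s%s %s\n", prefix, type, name);
	return 0;
}

int main(void)
{
	char *argv[] = { "unsigned", "long", "len;", "u64", "lat;" };
	int argc = 5, i = 0, consumed;

	while (i < argc && parse_field(argc - i, &argv[i], &consumed) == 0)
		i += consumed;
	return 0;
}
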
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index bf2c06ef9afc..a3be42304485 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -28,8 +28,8 @@
#include <linux/sched/task.h>
#include <linux/static_key.h>
-extern struct tracepoint * const __start___tracepoints_ptrs[];
-extern struct tracepoint * const __stop___tracepoints_ptrs[];
+extern tracepoint_ptr_t __start___tracepoints_ptrs[];
+extern tracepoint_ptr_t __stop___tracepoints_ptrs[];
DEFINE_SRCU(tracepoint_srcu);
EXPORT_SYMBOL_GPL(tracepoint_srcu);
@@ -371,25 +371,17 @@ int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
-static void for_each_tracepoint_range(struct tracepoint * const *begin,
- struct tracepoint * const *end,
+static void for_each_tracepoint_range(
+ tracepoint_ptr_t *begin, tracepoint_ptr_t *end,
void (*fct)(struct tracepoint *tp, void *priv),
void *priv)
{
+ tracepoint_ptr_t *iter;
+
if (!begin)
return;
-
- if (IS_ENABLED(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)) {
- const int *iter;
-
- for (iter = (const int *)begin; iter < (const int *)end; iter++)
- fct(offset_to_ptr(iter), priv);
- } else {
- struct tracepoint * const *iter;
-
- for (iter = begin; iter < end; iter++)
- fct(*iter, priv);
- }
+ for (iter = begin; iter < end; iter++)
+ fct(tracepoint_ptr_deref(iter), priv);
}
#ifdef CONFIG_MODULES
diff --git a/lib/Makefile b/lib/Makefile
index ca3f7ebb900d..423876446810 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -119,7 +119,6 @@ obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/
obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
obj-$(CONFIG_BCH) += bch.o
-CFLAGS_bch.o := $(call cc-option,-Wframe-larger-than=4500)
obj-$(CONFIG_LZO_COMPRESS) += lzo/
obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
obj-$(CONFIG_LZ4_COMPRESS) += lz4/
diff --git a/lib/bch.c b/lib/bch.c
index 7b0f2006698b..5db6d3a4c8a6 100644
--- a/lib/bch.c
+++ b/lib/bch.c
@@ -79,20 +79,19 @@
#define GF_T(_p) (CONFIG_BCH_CONST_T)
#define GF_N(_p) ((1 << (CONFIG_BCH_CONST_M))-1)
#define BCH_MAX_M (CONFIG_BCH_CONST_M)
+#define BCH_MAX_T (CONFIG_BCH_CONST_T)
#else
#define GF_M(_p) ((_p)->m)
#define GF_T(_p) ((_p)->t)
#define GF_N(_p) ((_p)->n)
-#define BCH_MAX_M 15
+#define BCH_MAX_M 15 /* 2KB */
+#define BCH_MAX_T 64 /* 64 bit correction */
#endif
-#define BCH_MAX_T (((1 << BCH_MAX_M) - 1) / BCH_MAX_M)
-
#define BCH_ECC_WORDS(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 32)
#define BCH_ECC_BYTES(_p) DIV_ROUND_UP(GF_M(_p)*GF_T(_p), 8)
#define BCH_ECC_MAX_WORDS DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 32)
-#define BCH_ECC_MAX_BYTES DIV_ROUND_UP(BCH_MAX_M * BCH_MAX_T, 8)
#ifndef dbg
#define dbg(_fmt, args...) do {} while (0)
@@ -202,6 +201,9 @@ void encode_bch(struct bch_control *bch, const uint8_t *data,
const uint32_t * const tab3 = tab2 + 256*(l+1);
const uint32_t *pdata, *p0, *p1, *p2, *p3;
+ if (WARN_ON(r_bytes > sizeof(r)))
+ return;
+
if (ecc) {
/* load ecc parity bytes into internal 32-bit buffer */
load_ecc8(bch, bch->ecc_buf, ecc);
@@ -1285,6 +1287,13 @@ struct bch_control *init_bch(int m, int t, unsigned int prim_poly)
*/
goto fail;
+ if (t > BCH_MAX_T)
+ /*
+ * we can support larger than 64 bits if necessary, at the
+ * cost of higher stack usage.
+ */
+ goto fail;
+
/* sanity checks */
if ((t < 1) || (m*t >= ((1 << m)-1)))
/* invalid t value */
diff --git a/lib/crc32.c b/lib/crc32.c
index a6c9afafc8c8..45b1d67a1767 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -183,21 +183,21 @@ static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
}
#if CRC_LE_BITS == 1
-u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
+u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
{
return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE);
}
-u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
+u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
return crc32_le_generic(crc, p, len, NULL, CRC32C_POLY_LE);
}
#else
-u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
+u32 __pure __weak crc32_le(u32 crc, unsigned char const *p, size_t len)
{
return crc32_le_generic(crc, p, len,
(const u32 (*)[256])crc32table_le, CRC32_POLY_LE);
}
-u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
+u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len)
{
return crc32_le_generic(crc, p, len,
(const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
@@ -206,6 +206,9 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
EXPORT_SYMBOL(crc32_le);
EXPORT_SYMBOL(__crc32c_le);
+u32 crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le);
+u32 __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le);
+
/*
* This multiplies the polynomials x and y modulo the given modulus.
* This follows the "little-endian" CRC convention that the lsbit
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 9f96fa7bc000..de10b8c0bff6 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -356,11 +356,35 @@ EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
*/
void percpu_ref_reinit(struct percpu_ref *ref)
{
+ WARN_ON_ONCE(!percpu_ref_is_zero(ref));
+
+ percpu_ref_resurrect(ref);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+
+/**
+ * percpu_ref_resurrect - modify a percpu refcount from dead to live
+ * @ref: percpu_ref to resurrect
+ *
+ * Modify @ref so that it's in the same state as before percpu_ref_kill() was
+ * called. @ref must be dead but must not yet have exited.
+ *
+ * If @ref->release() frees @ref then the caller is responsible for
+ * guaranteeing that @ref->release() does not get called while this
+ * function is in progress.
+ *
+ * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
+ * this function is in progress.
+ */
+void percpu_ref_resurrect(struct percpu_ref *ref)
+{
+ unsigned long __percpu *percpu_count;
unsigned long flags;
spin_lock_irqsave(&percpu_ref_switch_lock, flags);
- WARN_ON_ONCE(!percpu_ref_is_zero(ref));
+ WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
+ WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));
ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
percpu_ref_get(ref);
@@ -368,4 +392,4 @@ void percpu_ref_reinit(struct percpu_ref *ref)
spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
}
-EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+EXPORT_SYMBOL_GPL(percpu_ref_resurrect);
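
percpu_ref_reinit() keeps its stricter "count reached zero" contract and now simply delegates to percpu_ref_resurrect(), which only requires that the ref was killed but not yet released. A hedged usage sketch; @q, cancel_requested() and wait_for_release() are hypothetical:

static void freeze_or_cancel(struct my_queue *q)
{
	percpu_ref_kill(&q->ref);		/* switch to atomic, mark DEAD */

	if (cancel_requested(q)) {
		/* Resurrect is legal while dead, before the count drains. */
		percpu_ref_resurrect(&q->ref);
		return;
	}

	wait_for_release(q);			/* count reached zero */
	percpu_ref_reinit(&q->ref);		/* stricter: WARNs if nonzero */
}
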
diff --git a/lib/test_ida.c b/lib/test_ida.c
index 2d1637d8136b..b06880625961 100644
--- a/lib/test_ida.c
+++ b/lib/test_ida.c
@@ -150,10 +150,10 @@ static void ida_check_conv(struct ida *ida)
IDA_BUG_ON(ida, !ida_is_empty(ida));
}
+static DEFINE_IDA(ida);
+
static int ida_checks(void)
{
- DEFINE_IDA(ida);
-
IDA_BUG_ON(&ida, !ida_is_empty(&ida));
ida_check_alloc(&ida);
ida_check_destroy(&ida);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index d5b3a3f95c01..812e59e13fe6 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -2794,7 +2794,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
copy = end - str;
memcpy(str, args, copy);
str += len;
- args += len;
+ args += len + 1;
}
}
if (process)
diff --git a/lib/xz/xz_crc32.c b/lib/xz/xz_crc32.c
index 25a5d87e2e4c..912aae5fa09e 100644
--- a/lib/xz/xz_crc32.c
+++ b/lib/xz/xz_crc32.c
@@ -15,7 +15,6 @@
* but they are bigger and use more memory for the lookup table.
*/
-#include <linux/crc32poly.h>
#include "xz_private.h"
/*
diff --git a/lib/xz/xz_private.h b/lib/xz/xz_private.h
index 482b90f363fe..09360ebb510e 100644
--- a/lib/xz/xz_private.h
+++ b/lib/xz/xz_private.h
@@ -102,6 +102,10 @@
# endif
#endif
+#ifndef CRC32_POLY_LE
+#define CRC32_POLY_LE 0xedb88320
+#endif
+
/*
* Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used
* before calling xz_dec_lzma2_run().
diff --git a/mm/Kconfig b/mm/Kconfig
index a550635ea5c3..de64ea658716 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -637,6 +637,7 @@ config DEFERRED_STRUCT_PAGE_INIT
depends on NO_BOOTMEM
depends on SPARSEMEM
depends on !NEED_PER_CPU_KM
+ depends on 64BIT
help
Ordinarily all struct pages are initialised during early boot in a
single thread. On very large machines this can take a considerable
diff --git a/mm/Makefile b/mm/Makefile
index 8716bdabe1e6..6485d5745dd7 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -23,16 +23,16 @@ KCOV_INSTRUMENT_vmstat.o := n
mmu-y := nommu.o
mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \
- mlock.o mmap.o mprotect.o mremap.o msync.o \
- page_vma_mapped.o pagewalk.o pgtable-generic.o \
- rmap.o vmalloc.o
+ mlock.o mmap.o mmu_gather.o mprotect.o mremap.o \
+ msync.o page_vma_mapped.o pagewalk.o \
+ pgtable-generic.o rmap.o vmalloc.o
ifdef CONFIG_CROSS_MEMORY_ATTACH
mmu-$(CONFIG_MMU) += process_vm_access.o
endif
-obj-y := filemap.o mempool.o oom_kill.o \
+obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
maccess.o page_alloc.o page-writeback.o \
readahead.o swap.o truncate.o vmscan.o shmem.o \
util.o mmzone.o vmstat.o backing-dev.o \
@@ -49,7 +49,6 @@ else
obj-y += bootmem.o
endif
-obj-$(CONFIG_ADVISE_SYSCALLS) += fadvise.o
ifdef CONFIG_MMU
obj-$(CONFIG_ADVISE_SYSCALLS) += madvise.o
endif
diff --git a/mm/debug.c b/mm/debug.c
index 38c926520c97..bd10aad8539a 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -114,7 +114,7 @@ EXPORT_SYMBOL(dump_vma);
void dump_mm(const struct mm_struct *mm)
{
- pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n"
+ pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
"get_unmapped_area %px\n"
#endif
@@ -142,7 +142,7 @@ void dump_mm(const struct mm_struct *mm)
"tlb_flush_pending %d\n"
"def_flags: %#lx(%pGv)\n",
- mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
+ mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
mm->get_unmapped_area,
#endif
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 2d8376e3c640..467bcd032037 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -27,9 +27,9 @@
* deactivate the pages and clear PG_Referenced.
*/
-int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
+static int generic_fadvise(struct file *file, loff_t offset, loff_t len,
+ int advice)
{
- struct fd f = fdget(fd);
struct inode *inode;
struct address_space *mapping;
struct backing_dev_info *bdi;
@@ -37,22 +37,14 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
pgoff_t start_index;
pgoff_t end_index;
unsigned long nrpages;
- int ret = 0;
-
- if (!f.file)
- return -EBADF;
- inode = file_inode(f.file);
- if (S_ISFIFO(inode->i_mode)) {
- ret = -ESPIPE;
- goto out;
- }
+ inode = file_inode(file);
+ if (S_ISFIFO(inode->i_mode))
+ return -ESPIPE;
- mapping = f.file->f_mapping;
- if (!mapping || len < 0) {
- ret = -EINVAL;
- goto out;
- }
+ mapping = file->f_mapping;
+ if (!mapping || len < 0)
+ return -EINVAL;
bdi = inode_to_bdi(mapping->host);
@@ -67,9 +59,9 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
/* no bad return value, but ignore advice */
break;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
- goto out;
+ return 0;
}
/*
@@ -85,21 +77,21 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
switch (advice) {
case POSIX_FADV_NORMAL:
- f.file->f_ra.ra_pages = bdi->ra_pages;
- spin_lock(&f.file->f_lock);
- f.file->f_mode &= ~FMODE_RANDOM;
- spin_unlock(&f.file->f_lock);
+ file->f_ra.ra_pages = bdi->ra_pages;
+ spin_lock(&file->f_lock);
+ file->f_mode &= ~FMODE_RANDOM;
+ spin_unlock(&file->f_lock);
break;
case POSIX_FADV_RANDOM:
- spin_lock(&f.file->f_lock);
- f.file->f_mode |= FMODE_RANDOM;
- spin_unlock(&f.file->f_lock);
+ spin_lock(&file->f_lock);
+ file->f_mode |= FMODE_RANDOM;
+ spin_unlock(&file->f_lock);
break;
case POSIX_FADV_SEQUENTIAL:
- f.file->f_ra.ra_pages = bdi->ra_pages * 2;
- spin_lock(&f.file->f_lock);
- f.file->f_mode &= ~FMODE_RANDOM;
- spin_unlock(&f.file->f_lock);
+ file->f_ra.ra_pages = bdi->ra_pages * 2;
+ spin_lock(&file->f_lock);
+ file->f_mode &= ~FMODE_RANDOM;
+ spin_unlock(&file->f_lock);
break;
case POSIX_FADV_WILLNEED:
/* First and last PARTIAL page! */
@@ -115,8 +107,7 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
* Ignore return value because fadvise() shall return
* success even if filesystem can't retrieve a hint,
*/
- force_page_cache_readahead(mapping, f.file, start_index,
- nrpages);
+ force_page_cache_readahead(mapping, file, start_index, nrpages);
break;
case POSIX_FADV_NOREUSE:
break;
@@ -183,9 +174,32 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
}
break;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
-out:
+ return 0;
+}
+
+int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
+{
+ if (file->f_op->fadvise)
+ return file->f_op->fadvise(file, offset, len, advice);
+
+ return generic_fadvise(file, offset, len, advice);
+}
+EXPORT_SYMBOL(vfs_fadvise);
+
+#ifdef CONFIG_ADVISE_SYSCALLS
+
+int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
+{
+ struct fd f = fdget(fd);
+ int ret;
+
+ if (!f.file)
+ return -EBADF;
+
+ ret = vfs_fadvise(f.file, offset, len, advice);
+
fdput(f);
return ret;
}
@@ -203,3 +217,4 @@ SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice)
}
#endif
+#endif
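
vfs_fadvise() introduces the usual VFS dispatch shape: call the filesystem's optional ->fadvise() hook when it exists, otherwise fall back to the generic page-cache implementation, with the syscall built on top of the vfs_ helper. A runnable C sketch of the hook-with-fallback pattern (names hypothetical):

#include <stdio.h>

struct file_ops {
	int (*advise)(int advice);	/* optional per-filesystem hook */
};

struct file {
	const struct file_ops *ops;
};

static int generic_advise(int advice)
{
	printf("generic handling of advice %d\n", advice);
	return 0;
}

static int vfs_advise(struct file *f, int advice)
{
	if (f->ops && f->ops->advise)
		return f->ops->advise(advice);
	return generic_advise(advice);
}

int main(void)
{
	static const struct file_ops no_ops;	/* no hook: use the fallback */
	struct file plain = { .ops = &no_ops };

	return vfs_advise(&plain, 1);
}
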
diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c
index 6a473709e9b6..7405c9d89d65 100644
--- a/mm/gup_benchmark.c
+++ b/mm/gup_benchmark.c
@@ -19,7 +19,8 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
struct gup_benchmark *gup)
{
ktime_t start_time, end_time;
- unsigned long i, nr, nr_pages, addr, next;
+ unsigned long i, nr_pages, addr, next;
+ int nr;
struct page **pages;
nr_pages = gup->size / PAGE_SIZE;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 533f9b00147d..deed97fba979 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1780,7 +1780,7 @@ static pmd_t move_soft_dirty_pmd(pmd_t pmd)
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, unsigned long old_end,
- pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
+ pmd_t *old_pmd, pmd_t *new_pmd)
{
spinlock_t *old_ptl, *new_ptl;
pmd_t pmd;
@@ -1811,7 +1811,7 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
- if (pmd_present(pmd) && pmd_dirty(pmd))
+ if (pmd_present(pmd))
force_flush = true;
VM_BUG_ON(!pmd_none(*new_pmd));
@@ -1822,12 +1822,10 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
}
pmd = move_soft_dirty_pmd(pmd);
set_pmd_at(mm, new_addr, new_pmd, pmd);
- if (new_ptl != old_ptl)
- spin_unlock(new_ptl);
if (force_flush)
flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
- else
- *need_flush = true;
+ if (new_ptl != old_ptl)
+ spin_unlock(new_ptl);
spin_unlock(old_ptl);
return true;
}
@@ -2885,9 +2883,6 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
if (!(pvmw->pmd && !pvmw->pte))
return;
- mmu_notifier_invalidate_range_start(mm, address,
- address + HPAGE_PMD_SIZE);
-
flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
pmdval = *pvmw->pmd;
pmdp_invalidate(vma, address, pvmw->pmd);
@@ -2900,9 +2895,6 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
set_pmd_at(mm, address, pvmw->pmd, pmdswp);
page_remove_rmap(page, true);
put_page(page);
-
- mmu_notifier_invalidate_range_end(mm, address,
- address + HPAGE_PMD_SIZE);
}
void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
@@ -2931,7 +2923,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
else
page_add_file_rmap(new, true);
set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
- if (vma->vm_flags & VM_LOCKED)
+ if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
mlock_vma_page(new);
update_mmu_cache_pmd(vma, address, pvmw->pmd);
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3c21775f196b..5c390f5a5207 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3326,8 +3326,8 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
struct page *page;
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
- const unsigned long mmun_start = start; /* For mmu_notifiers */
- const unsigned long mmun_end = end; /* For mmu_notifiers */
+ unsigned long mmun_start = start; /* For mmu_notifiers */
+ unsigned long mmun_end = end; /* For mmu_notifiers */
WARN_ON(!is_vm_hugetlb_page(vma));
BUG_ON(start & ~huge_page_mask(h));
@@ -3339,6 +3339,11 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
*/
tlb_remove_check_page_size_change(tlb, sz);
tlb_start_vma(tlb, vma);
+
+ /*
+ * If sharing possible, alert mmu notifiers of worst case.
+ */
+ adjust_range_if_pmd_sharing_possible(vma, &mmun_start, &mmun_end);
mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
address = start;
for (; address < end; address += sz) {
@@ -3349,6 +3354,10 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
ptl = huge_pte_lock(h, mm, ptep);
if (huge_pmd_unshare(mm, &address, ptep)) {
spin_unlock(ptl);
+ /*
+ * We just unmapped a page of PMDs by clearing a PUD.
+ * The caller's TLB flush range should cover this area.
+ */
continue;
}
@@ -3431,12 +3440,23 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
{
struct mm_struct *mm;
struct mmu_gather tlb;
+ unsigned long tlb_start = start;
+ unsigned long tlb_end = end;
+
+ /*
+ * If shared PMDs were possibly used within this vma range, adjust
+ * start/end for worst case tlb flushing.
+ * Note that we cannot be sure if PMDs are shared until we try to
+ * unmap pages. However, we want to make sure TLB flushing covers
+ * the largest possible range.
+ */
+ adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
mm = vma->vm_mm;
- tlb_gather_mmu(&tlb, mm, start, end);
+ tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
- tlb_finish_mmu(&tlb, start, end);
+ tlb_finish_mmu(&tlb, tlb_start, tlb_end);
}
/*
@@ -4298,11 +4318,21 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
pte_t pte;
struct hstate *h = hstate_vma(vma);
unsigned long pages = 0;
+ unsigned long f_start = start;
+ unsigned long f_end = end;
+ bool shared_pmd = false;
+
+ /*
+ * In the case of shared PMDs, the area to flush could be beyond
+ * start/end. Set f_start/f_end to cover the maximum possible
+ * range if PMD sharing is possible.
+ */
+ adjust_range_if_pmd_sharing_possible(vma, &f_start, &f_end);
BUG_ON(address >= end);
- flush_cache_range(vma, address, end);
+ flush_cache_range(vma, f_start, f_end);
- mmu_notifier_invalidate_range_start(mm, start, end);
+ mmu_notifier_invalidate_range_start(mm, f_start, f_end);
i_mmap_lock_write(vma->vm_file->f_mapping);
for (; address < end; address += huge_page_size(h)) {
spinlock_t *ptl;
@@ -4313,6 +4343,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
if (huge_pmd_unshare(mm, &address, ptep)) {
pages++;
spin_unlock(ptl);
+ shared_pmd = true;
continue;
}
pte = huge_ptep_get(ptep);
@@ -4348,9 +4379,13 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
* Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
* may have cleared our pud entry and done put_page on the page table:
* once we release i_mmap_rwsem, another task can do the final put_page
- * and that page table be reused and filled with junk.
+ * and that page table be reused and filled with junk. If we actually
+ * did unshare a page of pmds, flush the range corresponding to the pud.
*/
- flush_hugetlb_tlb_range(vma, start, end);
+ if (shared_pmd)
+ flush_hugetlb_tlb_range(vma, f_start, f_end);
+ else
+ flush_hugetlb_tlb_range(vma, start, end);
/*
* No need to call mmu_notifier_invalidate_range() we are downgrading
* page table protection not changing it to point to a new page.
@@ -4358,7 +4393,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
* See Documentation/vm/mmu_notifier.rst
*/
i_mmap_unlock_write(vma->vm_file->f_mapping);
- mmu_notifier_invalidate_range_end(mm, start, end);
+ mmu_notifier_invalidate_range_end(mm, f_start, f_end);
return pages << h->order;
}
@@ -4545,13 +4580,41 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
/*
* check on proper vm_flags and page table alignment
*/
- if (vma->vm_flags & VM_MAYSHARE &&
- vma->vm_start <= base && end <= vma->vm_end)
+ if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
return true;
return false;
}
/*
+ * Determine if the start,end range within vma could be mapped by a shared
+ * pmd. If so, adjust start and end to cover the range associated with
+ * possible shared pmd mappings.
+ */
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+ unsigned long check_addr = *start;
+
+ if (!(vma->vm_flags & VM_MAYSHARE))
+ return;
+
+ for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
+ unsigned long a_start = check_addr & PUD_MASK;
+ unsigned long a_end = a_start + PUD_SIZE;
+
+ /*
+ * If sharing is possible, adjust start/end if necessary.
+ */
+ if (range_in_vma(vma, a_start, a_end)) {
+ if (a_start < *start)
+ *start = a_start;
+ if (a_end > *end)
+ *end = a_end;
+ }
+ }
+}
+
+/*
* Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
* and returns the corresponding pte. While this is not necessary for the
* !shared pmd case because we can allocate the pmd later as well, it makes the
@@ -4648,6 +4711,11 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
return 0;
}
+
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+}
#define want_pmd_share() (0)
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
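
The range adjustment used throughout the hugetlb fixes above is plain PUD-granularity rounding, gated on the aligned range staying inside the VMA. A minimal user-space sketch of the arithmetic (a PUD_SHIFT of 30 is assumed, as on x86-64 with 4K pages; the in-VMA check is omitted here):

	#include <stdio.h>

	#define PUD_SHIFT 30			/* assumed x86-64/4K value */
	#define PUD_SIZE  (1UL << PUD_SHIFT)
	#define PUD_MASK  (~(PUD_SIZE - 1))

	/* Widen [start, end) to the PUD-aligned worst case, mirroring what
	 * adjust_range_if_pmd_sharing_possible() does per PUD_SIZE step. */
	static void widen_to_pud(unsigned long *start, unsigned long *end)
	{
		*start &= PUD_MASK;			 /* round down */
		*end = (*end + PUD_SIZE - 1) & PUD_MASK; /* round up */
	}

	int main(void)
	{
		unsigned long s = 0x40200000UL, e = 0x40600000UL;

		widen_to_pud(&s, &e);
		printf("flush 0x%lx-0x%lx\n", s, e);	/* 0x40000000-0x80000000 */
		return 0;
	}
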
diff --git a/mm/madvise.c b/mm/madvise.c
index 972a9eaa898b..71d21df2a3f3 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -96,7 +96,7 @@ static long madvise_behavior(struct vm_area_struct *vma,
new_flags |= VM_DONTDUMP;
break;
case MADV_DODUMP:
- if (new_flags & VM_SPECIAL) {
+ if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
error = -EINVAL;
goto out;
}
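
The one-line MADV_DODUMP change restores the DONTDUMP/DODUMP round trip on hugetlb mappings, whose vm_flags overlap VM_SPECIAL. A hedged user-space reproducer (assumes 2 MiB hugepages are configured on the system):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 2UL << 20;		/* one 2 MiB hugepage */
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		/* Exclude from core dumps, then include again; the second call
		 * used to fail with EINVAL on hugetlb VMAs before this fix. */
		if (madvise(p, len, MADV_DONTDUMP))
			perror("MADV_DONTDUMP");
		if (madvise(p, len, MADV_DODUMP))
			perror("MADV_DODUMP");
		munmap(p, len);
		return 0;
	}
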
diff --git a/mm/memory.c b/mm/memory.c
index c467102a5cbc..21a5e6e4758b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -186,253 +186,6 @@ static void check_sync_rss_stat(struct task_struct *task)
#endif /* SPLIT_RSS_COUNTING */
-#ifdef HAVE_GENERIC_MMU_GATHER
-
-static bool tlb_next_batch(struct mmu_gather *tlb)
-{
- struct mmu_gather_batch *batch;
-
- batch = tlb->active;
- if (batch->next) {
- tlb->active = batch->next;
- return true;
- }
-
- if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
- return false;
-
- batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
- if (!batch)
- return false;
-
- tlb->batch_count++;
- batch->next = NULL;
- batch->nr = 0;
- batch->max = MAX_GATHER_BATCH;
-
- tlb->active->next = batch;
- tlb->active = batch;
-
- return true;
-}
-
-void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- tlb->mm = mm;
-
- /* Is it from 0 to ~0? */
- tlb->fullmm = !(start | (end+1));
- tlb->need_flush_all = 0;
- tlb->local.next = NULL;
- tlb->local.nr = 0;
- tlb->local.max = ARRAY_SIZE(tlb->__pages);
- tlb->active = &tlb->local;
- tlb->batch_count = 0;
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
- tlb->batch = NULL;
-#endif
- tlb->page_size = 0;
-
- __tlb_reset_range(tlb);
-}
-
-static void tlb_flush_mmu_free(struct mmu_gather *tlb)
-{
- struct mmu_gather_batch *batch;
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
- tlb_table_flush(tlb);
-#endif
- for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
- free_pages_and_swap_cache(batch->pages, batch->nr);
- batch->nr = 0;
- }
- tlb->active = &tlb->local;
-}
-
-void tlb_flush_mmu(struct mmu_gather *tlb)
-{
- tlb_flush_mmu_tlbonly(tlb);
- tlb_flush_mmu_free(tlb);
-}
-
-/* tlb_finish_mmu
- * Called at the end of the shootdown operation to free up any resources
- * that were required.
- */
-void arch_tlb_finish_mmu(struct mmu_gather *tlb,
- unsigned long start, unsigned long end, bool force)
-{
- struct mmu_gather_batch *batch, *next;
-
- if (force)
- __tlb_adjust_range(tlb, start, end - start);
-
- tlb_flush_mmu(tlb);
-
- /* keep the page table cache within bounds */
- check_pgt_cache();
-
- for (batch = tlb->local.next; batch; batch = next) {
- next = batch->next;
- free_pages((unsigned long)batch, 0);
- }
- tlb->local.next = NULL;
-}
-
-/* __tlb_remove_page
- * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
- * handling the additional races in SMP caused by other CPUs caching valid
- * mappings in their TLBs. Returns the number of free page slots left.
- * When out of page slots we must call tlb_flush_mmu().
- *returns true if the caller should flush.
- */
-bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
-{
- struct mmu_gather_batch *batch;
-
- VM_BUG_ON(!tlb->end);
- VM_WARN_ON(tlb->page_size != page_size);
-
- batch = tlb->active;
- /*
- * Add the page and check if we are full. If so
- * force a flush.
- */
- batch->pages[batch->nr++] = page;
- if (batch->nr == batch->max) {
- if (!tlb_next_batch(tlb))
- return true;
- batch = tlb->active;
- }
- VM_BUG_ON_PAGE(batch->nr > batch->max, page);
-
- return false;
-}
-
-#endif /* HAVE_GENERIC_MMU_GATHER */
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-
-/*
- * See the comment near struct mmu_table_batch.
- */
-
-/*
- * If we want tlb_remove_table() to imply TLB invalidates.
- */
-static inline void tlb_table_invalidate(struct mmu_gather *tlb)
-{
-#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
- /*
- * Invalidate page-table caches used by hardware walkers. Then we still
- * need to RCU-sched wait while freeing the pages because software
- * walkers can still be in-flight.
- */
- tlb_flush_mmu_tlbonly(tlb);
-#endif
-}
-
-static void tlb_remove_table_smp_sync(void *arg)
-{
- /* Simply deliver the interrupt */
-}
-
-static void tlb_remove_table_one(void *table)
-{
- /*
- * This isn't an RCU grace period and hence the page-tables cannot be
- * assumed to be actually RCU-freed.
- *
- * It is however sufficient for software page-table walkers that rely on
- * IRQ disabling. See the comment near struct mmu_table_batch.
- */
- smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
- __tlb_remove_table(table);
-}
-
-static void tlb_remove_table_rcu(struct rcu_head *head)
-{
- struct mmu_table_batch *batch;
- int i;
-
- batch = container_of(head, struct mmu_table_batch, rcu);
-
- for (i = 0; i < batch->nr; i++)
- __tlb_remove_table(batch->tables[i]);
-
- free_page((unsigned long)batch);
-}
-
-void tlb_table_flush(struct mmu_gather *tlb)
-{
- struct mmu_table_batch **batch = &tlb->batch;
-
- if (*batch) {
- tlb_table_invalidate(tlb);
- call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
- *batch = NULL;
- }
-}
-
-void tlb_remove_table(struct mmu_gather *tlb, void *table)
-{
- struct mmu_table_batch **batch = &tlb->batch;
-
- if (*batch == NULL) {
- *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
- if (*batch == NULL) {
- tlb_table_invalidate(tlb);
- tlb_remove_table_one(table);
- return;
- }
- (*batch)->nr = 0;
- }
-
- (*batch)->tables[(*batch)->nr++] = table;
- if ((*batch)->nr == MAX_TABLE_BATCH)
- tlb_table_flush(tlb);
-}
-
-#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
-
-/**
- * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
- * @tlb: the mmu_gather structure to initialize
- * @mm: the mm_struct of the target address space
- * @start: start of the region that will be removed from the page-table
- * @end: end of the region that will be removed from the page-table
- *
- * Called to initialize an (on-stack) mmu_gather structure for page-table
- * tear-down from @mm. The @start and @end are set to 0 and -1
- * respectively when @mm is without users and we're going to destroy
- * the full address space (exit/execve).
- */
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- arch_tlb_gather_mmu(tlb, mm, start, end);
- inc_tlb_flush_pending(tlb->mm);
-}
-
-void tlb_finish_mmu(struct mmu_gather *tlb,
- unsigned long start, unsigned long end)
-{
- /*
- * If there are parallel threads are doing PTE changes on same range
- * under non-exclusive lock(e.g., mmap_sem read-side) but defer TLB
- * flush by batching, a thread has stable TLB entry can fail to flush
- * the TLB by observing pte_none|!pte_dirty, for example so flush TLB
- * forcefully if we detect parallel PTE batching threads.
- */
- bool force = mm_tlb_flush_nested(tlb->mm);
-
- arch_tlb_finish_mmu(tlb, start, end, force);
- dec_tlb_flush_pending(tlb->mm);
-}
-
/*
* Note: this doesn't free the actual pages themselves. That
* has been handled earlier when unmapping all the memory regions.
diff --git a/mm/migrate.c b/mm/migrate.c
index d6a2e89b086a..84381b55b2bd 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -275,6 +275,9 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
mlock_vma_page(new);
+ if (PageTransHuge(page) && PageMlocked(page))
+ clear_page_mlock(page);
+
/* No need to invalidate - it was non-present before */
update_mmu_cache(vma, pvmw.address, pvmw.pte);
}
@@ -1411,7 +1414,7 @@ retry:
* we encounter them after the rest of the list
* is processed.
*/
- if (PageTransHuge(page)) {
+ if (PageTransHuge(page) && !PageHuge(page)) {
lock_page(page);
rc = split_huge_page_to_list(page, from);
unlock_page(page);
@@ -1855,46 +1858,6 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
return newpage;
}
-/*
- * page migration rate limiting control.
- * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs
- * window of time. Default here says do not migrate more than 1280M per second.
- */
-static unsigned int migrate_interval_millisecs __read_mostly = 100;
-static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);
-
-/* Returns true if the node is migrate rate-limited after the update */
-static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
- unsigned long nr_pages)
-{
- /*
- * Rate-limit the amount of data that is being migrated to a node.
- * Optimal placement is no good if the memory bus is saturated and
- * all the time is being spent migrating!
- */
- if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
- spin_lock(&pgdat->numabalancing_migrate_lock);
- pgdat->numabalancing_migrate_nr_pages = 0;
- pgdat->numabalancing_migrate_next_window = jiffies +
- msecs_to_jiffies(migrate_interval_millisecs);
- spin_unlock(&pgdat->numabalancing_migrate_lock);
- }
- if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
- trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
- nr_pages);
- return true;
- }
-
- /*
- * This is an unlocked non-atomic update so errors are possible.
- * The consequences are failing to migrate when we potentiall should
- * have which is not severe enough to warrant locking. If it is ever
- * a problem, it can be converted to a per-cpu counter.
- */
- pgdat->numabalancing_migrate_nr_pages += nr_pages;
- return false;
-}
-
static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
int page_lru;
@@ -1967,14 +1930,6 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
if (page_is_file_cache(page) && PageDirty(page))
goto out;
- /*
- * Rate-limit the amount of data that is being migrated to a node.
- * Optimal placement is no good if the memory bus is saturated and
- * all the time is being spent migrating!
- */
- if (numamigrate_update_ratelimit(pgdat, 1))
- goto out;
-
isolated = numamigrate_isolate_page(pgdat, page);
if (!isolated)
goto out;
@@ -2021,14 +1976,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
unsigned long mmun_start = address & HPAGE_PMD_MASK;
unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
- /*
- * Rate-limit the amount of data that is being migrated to a node.
- * Optimal placement is no good if the memory bus is saturated and
- * all the time is being spent migrating!
- */
- if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
- goto out_dropref;
-
new_page = alloc_pages_node(node,
(GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
HPAGE_PMD_ORDER);
@@ -2125,7 +2072,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
out_fail:
count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
-out_dropref:
ptl = pmd_lock(mm, pmd);
if (pmd_same(*pmd, entry)) {
entry = pmd_modify(entry, vma->vm_page_prot);
diff --git a/mm/mmap.c b/mm/mmap.c
index 5f2b2b184c60..f7cd9cb966c0 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1410,7 +1410,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
if (flags & MAP_FIXED_NOREPLACE) {
struct vm_area_struct *vma = find_vma(mm, addr);
- if (vma && vma->vm_start <= addr)
+ if (vma && vma->vm_start < addr + len)
return -EEXIST;
}
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
new file mode 100644
index 000000000000..2a9fbc4a37d5
--- /dev/null
+++ b/mm/mmu_gather.c
@@ -0,0 +1,261 @@
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/mmdebug.h>
+#include <linux/mm_types.h>
+#include <linux/pagemap.h>
+#include <linux/rcupdate.h>
+#include <linux/smp.h>
+#include <linux/swap.h>
+
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+
+#ifdef HAVE_GENERIC_MMU_GATHER
+
+static bool tlb_next_batch(struct mmu_gather *tlb)
+{
+ struct mmu_gather_batch *batch;
+
+ batch = tlb->active;
+ if (batch->next) {
+ tlb->active = batch->next;
+ return true;
+ }
+
+ if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
+ return false;
+
+ batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
+ if (!batch)
+ return false;
+
+ tlb->batch_count++;
+ batch->next = NULL;
+ batch->nr = 0;
+ batch->max = MAX_GATHER_BATCH;
+
+ tlb->active->next = batch;
+ tlb->active = batch;
+
+ return true;
+}
+
+void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ tlb->mm = mm;
+
+ /* Is it from 0 to ~0? */
+ tlb->fullmm = !(start | (end+1));
+ tlb->need_flush_all = 0;
+ tlb->local.next = NULL;
+ tlb->local.nr = 0;
+ tlb->local.max = ARRAY_SIZE(tlb->__pages);
+ tlb->active = &tlb->local;
+ tlb->batch_count = 0;
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+ tlb->batch = NULL;
+#endif
+ tlb->page_size = 0;
+
+ __tlb_reset_range(tlb);
+}
+
+void tlb_flush_mmu_free(struct mmu_gather *tlb)
+{
+ struct mmu_gather_batch *batch;
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+ tlb_table_flush(tlb);
+#endif
+ for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
+ free_pages_and_swap_cache(batch->pages, batch->nr);
+ batch->nr = 0;
+ }
+ tlb->active = &tlb->local;
+}
+
+void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+ tlb_flush_mmu_tlbonly(tlb);
+ tlb_flush_mmu_free(tlb);
+}
+
+/* arch_tlb_finish_mmu
+ * Called at the end of the shootdown operation to free up any resources
+ * that were required.
+ */
+void arch_tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end, bool force)
+{
+ struct mmu_gather_batch *batch, *next;
+
+ if (force) {
+ __tlb_reset_range(tlb);
+ __tlb_adjust_range(tlb, start, end - start);
+ }
+
+ tlb_flush_mmu(tlb);
+
+ /* keep the page table cache within bounds */
+ check_pgt_cache();
+
+ for (batch = tlb->local.next; batch; batch = next) {
+ next = batch->next;
+ free_pages((unsigned long)batch, 0);
+ }
+ tlb->local.next = NULL;
+}
+
+/* __tlb_remove_page_size
+ *	Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while
+ *	handling the additional races in SMP caused by other CPUs caching valid
+ *	mappings in their TLBs. When out of page slots we must call
+ *	tlb_flush_mmu().
+ *
+ *	Returns true if the caller should flush.
+ */
+bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size)
+{
+ struct mmu_gather_batch *batch;
+
+ VM_BUG_ON(!tlb->end);
+ VM_WARN_ON(tlb->page_size != page_size);
+
+ batch = tlb->active;
+ /*
+	 * Add the page and check if we are full. If so,
+ * force a flush.
+ */
+ batch->pages[batch->nr++] = page;
+ if (batch->nr == batch->max) {
+ if (!tlb_next_batch(tlb))
+ return true;
+ batch = tlb->active;
+ }
+ VM_BUG_ON_PAGE(batch->nr > batch->max, page);
+
+ return false;
+}
+
+#endif /* HAVE_GENERIC_MMU_GATHER */
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+
+/*
+ * See the comment near struct mmu_table_batch.
+ */
+
+/*
+ * If we want tlb_remove_table() to imply TLB invalidates.
+ */
+static inline void tlb_table_invalidate(struct mmu_gather *tlb)
+{
+#ifdef CONFIG_HAVE_RCU_TABLE_INVALIDATE
+ /*
+ * Invalidate page-table caches used by hardware walkers. Then we still
+ * need to RCU-sched wait while freeing the pages because software
+ * walkers can still be in-flight.
+ */
+ tlb_flush_mmu_tlbonly(tlb);
+#endif
+}
+
+static void tlb_remove_table_smp_sync(void *arg)
+{
+ /* Simply deliver the interrupt */
+}
+
+static void tlb_remove_table_one(void *table)
+{
+ /*
+ * This isn't an RCU grace period and hence the page-tables cannot be
+ * assumed to be actually RCU-freed.
+ *
+ * It is however sufficient for software page-table walkers that rely on
+ * IRQ disabling. See the comment near struct mmu_table_batch.
+ */
+ smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+ __tlb_remove_table(table);
+}
+
+static void tlb_remove_table_rcu(struct rcu_head *head)
+{
+ struct mmu_table_batch *batch;
+ int i;
+
+ batch = container_of(head, struct mmu_table_batch, rcu);
+
+ for (i = 0; i < batch->nr; i++)
+ __tlb_remove_table(batch->tables[i]);
+
+ free_page((unsigned long)batch);
+}
+
+void tlb_table_flush(struct mmu_gather *tlb)
+{
+ struct mmu_table_batch **batch = &tlb->batch;
+
+ if (*batch) {
+ tlb_table_invalidate(tlb);
+ call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+ *batch = NULL;
+ }
+}
+
+void tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+ struct mmu_table_batch **batch = &tlb->batch;
+
+ if (*batch == NULL) {
+ *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+ if (*batch == NULL) {
+ tlb_table_invalidate(tlb);
+ tlb_remove_table_one(table);
+ return;
+ }
+ (*batch)->nr = 0;
+ }
+
+ (*batch)->tables[(*batch)->nr++] = table;
+ if ((*batch)->nr == MAX_TABLE_BATCH)
+ tlb_table_flush(tlb);
+}
+
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+
+/**
+ * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
+ * @tlb: the mmu_gather structure to initialize
+ * @mm: the mm_struct of the target address space
+ * @start: start of the region that will be removed from the page-table
+ * @end: end of the region that will be removed from the page-table
+ *
+ * Called to initialize an (on-stack) mmu_gather structure for page-table
+ * tear-down from @mm. The @start and @end are set to 0 and -1
+ * respectively when @mm is without users and we're going to destroy
+ * the full address space (exit/execve).
+ */
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ arch_tlb_gather_mmu(tlb, mm, start, end);
+ inc_tlb_flush_pending(tlb->mm);
+}
+
+void tlb_finish_mmu(struct mmu_gather *tlb,
+ unsigned long start, unsigned long end)
+{
+ /*
+	 * If parallel threads are doing PTE changes on the same range under a
+	 * non-exclusive lock (e.g., mmap_sem read-side) but defer the TLB
+	 * flush by batching, a thread with a stale TLB entry can fail to flush
+	 * it by observing pte_none or !pte_dirty, for example. So flush the
+	 * TLB forcefully if we detect parallel PTE batching threads.
+ */
+ bool force = mm_tlb_flush_nested(tlb->mm);
+
+ arch_tlb_finish_mmu(tlb, start, end, force);
+ dec_tlb_flush_pending(tlb->mm);
+}
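
The code movement into mm/mmu_gather.c changes no behavior; the calling convention stays as before. For orientation, the canonical shape of a caller (a schematic, not a literal call site — the real unmap loop lives in mm/memory.c):

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* marks a flush as pending */
	/* ... walk the page tables, handing each torn-down page to
	 * tlb_remove_page(), which batches and calls tlb_flush_mmu()
	 * when a batch fills up ... */
	tlb_finish_mmu(&tlb, start, end);	/* final flush, free batches */
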
diff --git a/mm/mremap.c b/mm/mremap.c
index 5c2e18505f75..a9617e72e6b7 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -115,7 +115,7 @@ static pte_t move_soft_dirty_pte(pte_t pte)
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
unsigned long old_addr, unsigned long old_end,
struct vm_area_struct *new_vma, pmd_t *new_pmd,
- unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
+ unsigned long new_addr, bool need_rmap_locks)
{
struct mm_struct *mm = vma->vm_mm;
pte_t *old_pte, *new_pte, pte;
@@ -163,15 +163,17 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
pte = ptep_get_and_clear(mm, old_addr, old_pte);
/*
- * If we are remapping a dirty PTE, make sure
+ * If we are remapping a valid PTE, make sure
* to flush TLB before we drop the PTL for the
- * old PTE or we may race with page_mkclean().
+ * PTE.
*
- * This check has to be done after we removed the
- * old PTE from page tables or another thread may
- * dirty it after the check and before the removal.
+ * NOTE! Both old and new PTL matter: the old one
+ * for racing with page_mkclean(), the new one to
+ * make sure the physical page stays valid until
+ * the TLB entry for the old mapping has been
+ * flushed.
*/
- if (pte_present(pte) && pte_dirty(pte))
+ if (pte_present(pte))
force_flush = true;
pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
pte = move_soft_dirty_pte(pte);
@@ -179,13 +181,11 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
}
arch_leave_lazy_mmu_mode();
+ if (force_flush)
+ flush_tlb_range(vma, old_end - len, old_end);
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
pte_unmap(new_pte - 1);
- if (force_flush)
- flush_tlb_range(vma, old_end - len, old_end);
- else
- *need_flush = true;
pte_unmap_unlock(old_pte - 1, old_ptl);
if (need_rmap_locks)
drop_rmap_locks(vma);
@@ -198,7 +198,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
{
unsigned long extent, next, old_end;
pmd_t *old_pmd, *new_pmd;
- bool need_flush = false;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
@@ -229,8 +228,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
if (need_rmap_locks)
take_rmap_locks(vma);
moved = move_huge_pmd(vma, old_addr, new_addr,
- old_end, old_pmd, new_pmd,
- &need_flush);
+ old_end, old_pmd, new_pmd);
if (need_rmap_locks)
drop_rmap_locks(vma);
if (moved)
@@ -246,10 +244,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
if (extent > next - new_addr)
extent = next - new_addr;
move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
- new_pmd, new_addr, need_rmap_locks, &need_flush);
+ new_pmd, new_addr, need_rmap_locks);
}
- if (need_flush)
- flush_tlb_range(vma, old_end-len, old_addr);
mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
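
The move_ptes() reordering is the heart of this mremap fix: the TLB flush must happen while the page-table locks are still held, or the physical page can be freed and reused while a stale TLB entry for the old address still exists. Schematically, the fixed ordering is:

	pte = ptep_get_and_clear(mm, old_addr, old_pte);
	if (pte_present(pte))		/* any valid PTE, not just a dirty one */
		force_flush = true;
	set_pte_at(mm, new_addr, new_pte, pte);
	/* ... end of the copy loop ... */
	if (force_flush)		/* flush before dropping the new PTL */
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
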
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 89d2a2ab3fe6..e2ef1c17942f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6193,17 +6193,6 @@ static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
}
-#ifdef CONFIG_NUMA_BALANCING
-static void pgdat_init_numabalancing(struct pglist_data *pgdat)
-{
- spin_lock_init(&pgdat->numabalancing_migrate_lock);
- pgdat->numabalancing_migrate_nr_pages = 0;
- pgdat->numabalancing_migrate_next_window = jiffies;
-}
-#else
-static void pgdat_init_numabalancing(struct pglist_data *pgdat) {}
-#endif
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void pgdat_init_split_queue(struct pglist_data *pgdat)
{
@@ -6228,7 +6217,6 @@ static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
{
pgdat_resize_init(pgdat);
- pgdat_init_numabalancing(pgdat);
pgdat_init_split_queue(pgdat);
pgdat_init_kcompactd(pgdat);
diff --git a/mm/page_io.c b/mm/page_io.c
index aafd19ec1db4..573d3663d846 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -339,7 +339,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
goto out;
}
bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
- bio_associate_blkcg_from_page(bio, page);
+ bio_associate_blkg_from_page(bio, page);
count_swpout_vm_event(page);
set_page_writeback(page);
unlock_page(page);
diff --git a/mm/percpu.c b/mm/percpu.c
index a749d4d96e3e..4b90682623e9 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1212,6 +1212,7 @@ static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
if (!chunk)
return;
+ pcpu_mem_free(chunk->md_blocks);
pcpu_mem_free(chunk->bound_map);
pcpu_mem_free(chunk->alloc_map);
pcpu_mem_free(chunk);
diff --git a/mm/readahead.c b/mm/readahead.c
index a59ea70527b9..4e630143a0ba 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -20,6 +20,7 @@
#include <linux/file.h>
#include <linux/mm_inline.h>
#include <linux/blk-cgroup.h>
+#include <linux/fadvise.h>
#include "internal.h"
@@ -575,24 +576,6 @@ page_cache_async_readahead(struct address_space *mapping,
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
-static ssize_t
-do_readahead(struct address_space *mapping, struct file *filp,
- pgoff_t index, unsigned long nr)
-{
- if (!mapping || !mapping->a_ops)
- return -EINVAL;
-
- /*
- * Readahead doesn't make sense for DAX inodes, but we don't want it
- * to report a failure either. Instead, we just return success and
- * don't do any work.
- */
- if (dax_mapping(mapping))
- return 0;
-
- return force_page_cache_readahead(mapping, filp, index, nr);
-}
-
ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
ssize_t ret;
@@ -600,16 +583,22 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
ret = -EBADF;
f = fdget(fd);
- if (f.file) {
- if (f.file->f_mode & FMODE_READ) {
- struct address_space *mapping = f.file->f_mapping;
- pgoff_t start = offset >> PAGE_SHIFT;
- pgoff_t end = (offset + count - 1) >> PAGE_SHIFT;
- unsigned long len = end - start + 1;
- ret = do_readahead(mapping, f.file, start, len);
- }
- fdput(f);
- }
+ if (!f.file || !(f.file->f_mode & FMODE_READ))
+ goto out;
+
+ /*
+ * The readahead() syscall is intended to run only on files
+	 * that support readahead. If readahead is not possible
+ * on this file, then we must return -EINVAL.
+ */
+ ret = -EINVAL;
+ if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
+ !S_ISREG(file_inode(f.file)->i_mode))
+ goto out;
+
+ ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
+out:
+ fdput(f);
return ret;
}
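
With the rework, readahead(2) is funneled through vfs_fadvise(POSIX_FADV_WILLNEED) and now returns EINVAL for files that cannot perform readahead instead of quietly succeeding. A small user-space check (the file path is arbitrary):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		int fd = open(argc > 1 ? argv[1] : "/etc/hostname", O_RDONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (readahead(fd, 0, 1 << 20))
			perror("readahead");	/* now EINVAL on unsupported files */
		close(fd);
		return 0;
	}
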
diff --git a/mm/rmap.c b/mm/rmap.c
index eb477809a5c0..1e79fac3186b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1362,11 +1362,21 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
}
/*
- * We have to assume the worse case ie pmd for invalidation. Note that
- * the page can not be free in this function as call of try_to_unmap()
- * must hold a reference on the page.
+	 * For THP, we have to assume the worst case, i.e. pmd, for invalidation.
+	 * For hugetlb, it could be much worse if we need to do pud
+	 * invalidation in the case of pmd sharing.
+	 *
+	 * Note that the page cannot be freed in this function, as the caller
+	 * of try_to_unmap() must hold a reference on the page.
*/
end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
+ if (PageHuge(page)) {
+ /*
+ * If sharing is possible, start and end will be adjusted
+ * accordingly.
+ */
+ adjust_range_if_pmd_sharing_possible(vma, &start, &end);
+ }
mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
while (page_vma_mapped_walk(&pvmw)) {
@@ -1409,6 +1419,32 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
address = pvmw.address;
+ if (PageHuge(page)) {
+ if (huge_pmd_unshare(mm, &address, pvmw.pte)) {
+ /*
+ * huge_pmd_unshare unmapped an entire PMD
+ * page. There is no way of knowing exactly
+ * which PMDs may be cached for this mm, so
+ * we must flush them all. start/end were
+ * already adjusted above to cover this range.
+ */
+ flush_cache_range(vma, start, end);
+ flush_tlb_range(vma, start, end);
+ mmu_notifier_invalidate_range(mm, start, end);
+
+ /*
+ * The ref count of the PMD page was dropped
+ * which is part of the way map counting
+ * is done for shared PMDs. Return 'true'
+ * here. When there is no other sharing,
+ * huge_pmd_unshare returns false and we will
+ * unmap the actual page and drop map count
+ * to zero.
+ */
+ page_vma_mapped_walk_done(&pvmw);
+ break;
+ }
+ }
if (IS_ENABLED(CONFIG_MIGRATION) &&
(flags & TTU_MIGRATION) &&
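
The hugetlb branch added to try_to_unmap_one() condenses to the control flow below, executed inside the page_vma_mapped_walk() loop (a restatement of the hunk above, not new logic):

	if (PageHuge(page) && huge_pmd_unshare(mm, &address, pvmw.pte)) {
		/* A whole page of PMDs went away: flush the worst-case
		 * PUD-sized range computed earlier, then stop the walk. */
		flush_cache_range(vma, start, end);
		flush_tlb_range(vma, start, end);
		mmu_notifier_invalidate_range(mm, start, end);
		page_vma_mapped_walk_done(&pvmw);
		break;
	}
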
diff --git a/mm/shmem.c b/mm/shmem.c
index 0376c124b043..446942677cd4 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2227,6 +2227,8 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
mpol_shared_policy_init(&info->policy, NULL);
break;
}
+
+ lockdep_annotate_inode_mutex_key(inode);
} else
shmem_free_inode(sb);
return inode;
diff --git a/mm/vmacache.c b/mm/vmacache.c
index ea517bef7dc5..cdc32a3b02fa 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -20,44 +20,6 @@
#define VMACACHE_HASH(addr) ((addr >> VMACACHE_SHIFT) & VMACACHE_MASK)
/*
- * Flush vma caches for threads that share a given mm.
- *
- * The operation is safe because the caller holds the mmap_sem
- * exclusively and other threads accessing the vma cache will
- * have mmap_sem held at least for read, so no extra locking
- * is required to maintain the vma cache.
- */
-void vmacache_flush_all(struct mm_struct *mm)
-{
- struct task_struct *g, *p;
-
- count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
-
- /*
- * Single threaded tasks need not iterate the entire
- * list of process. We can avoid the flushing as well
- * since the mm's seqnum was increased and don't have
- * to worry about other threads' seqnum. Current's
- * flush will occur upon the next lookup.
- */
- if (atomic_read(&mm->mm_users) == 1)
- return;
-
- rcu_read_lock();
- for_each_process_thread(g, p) {
- /*
- * Only flush the vmacache pointers as the
- * mm seqnum is already set and curr's will
- * be set upon invalidation when the next
- * lookup is done.
- */
- if (mm == p->mm)
- vmacache_flush(p);
- }
- rcu_read_unlock();
-}
-
-/*
* This task may be accessing a foreign mm via (for example)
* get_user_pages()->find_vma(). The vmacache is task-local and this
* task's vmacache pertains to a different mm (ie, its own). There is
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7e7d25504651..c5ef7240cbcb 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -476,6 +476,17 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
delta = freeable >> priority;
delta *= 4;
do_div(delta, shrinker->seeks);
+
+ /*
+	 * Make sure we apply some minimal pressure at the default priority
+ * even on small cgroups. Stale objects are not only consuming memory
+ * by themselves, but can also hold a reference to a dying cgroup,
+ * preventing it from being reclaimed. A dying cgroup with all
+ * corresponding structures like per-cpu stats and kmem caches
+ * can be really big, so it may lead to a significant waste of memory.
+ */
+ delta = max_t(unsigned long long, delta, min(freeable, batch_size));
+
total_scan += delta;
if (total_scan < 0) {
pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
@@ -569,8 +580,8 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
struct mem_cgroup *memcg, int priority)
{
struct memcg_shrinker_map *map;
- unsigned long freed = 0;
- int ret, i;
+ unsigned long ret, freed = 0;
+ int i;
if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
return 0;
@@ -666,9 +677,8 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
struct mem_cgroup *memcg,
int priority)
{
+ unsigned long ret, freed = 0;
struct shrinker *shrinker;
- unsigned long freed = 0;
- int ret;
if (!mem_cgroup_is_root(memcg))
return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
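
The clamp added to do_shrink_slab() matters because the shifted delta of a small cgroup rounds to zero, so its stale objects were never scanned. A worked instance of the new computation (seeks of 2 and a batch_size of 128 are assumed):

	unsigned long freeable = 100, batch_size = 128;
	unsigned long long delta;
	int priority = DEF_PRIORITY;	/* 12 */

	delta = freeable >> priority;	/* 100 >> 12 == 0 */
	delta *= 4;
	do_div(delta, 2 /* seeks */);	/* still 0: no pressure at all */
	delta = max_t(unsigned long long, delta,
		      min(freeable, batch_size));	/* clamped to 100 */
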
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 8ba0870ecddd..7878da76abf2 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1275,6 +1275,9 @@ const char * const vmstat_text[] = {
#ifdef CONFIG_SMP
"nr_tlb_remote_flush",
"nr_tlb_remote_flush_received",
+#else
+ "", /* nr_tlb_remote_flush */
+ "", /* nr_tlb_remote_flush_received */
#endif /* CONFIG_SMP */
"nr_tlb_local_flush_all",
"nr_tlb_local_flush_one",
@@ -1283,7 +1286,6 @@ const char * const vmstat_text[] = {
#ifdef CONFIG_DEBUG_VM_VMACACHE
"vmacache_find_calls",
"vmacache_find_hits",
- "vmacache_full_flushes",
#endif
#ifdef CONFIG_SWAP
"swap_ra",
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index 71c20c1d4002..9f481cfdf77d 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -241,7 +241,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
* the packet to be exactly of that size to make the link
* throughput estimation effective.
*/
- skb_put(skb, probe_len - hard_iface->bat_v.elp_skb->len);
+ skb_put_zero(skb, probe_len - hard_iface->bat_v.elp_skb->len);
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Sending unicast (probe) ELP packet on interface %s to %pM\n",
@@ -268,6 +268,7 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
struct batadv_priv *bat_priv;
struct sk_buff *skb;
u32 elp_interval;
+ bool ret;
bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);
hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v);
@@ -329,8 +330,11 @@ static void batadv_v_elp_periodic_work(struct work_struct *work)
* may sleep and that is not allowed in an rcu protected
* context. Therefore schedule a task for that.
*/
- queue_work(batadv_event_workqueue,
- &hardif_neigh->bat_v.metric_work);
+ ret = queue_work(batadv_event_workqueue,
+ &hardif_neigh->bat_v.metric_work);
+
+ if (!ret)
+ batadv_hardif_neigh_put(hardif_neigh);
}
rcu_read_unlock();
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index ff9659af6b91..5f1aeeded0e3 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1772,6 +1772,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
{
struct batadv_bla_backbone_gw *backbone_gw;
struct ethhdr *ethhdr;
+ bool ret;
ethhdr = eth_hdr(skb);
@@ -1795,8 +1796,13 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
if (unlikely(!backbone_gw))
return true;
- queue_work(batadv_event_workqueue, &backbone_gw->report_work);
- /* backbone_gw is unreferenced in the report work function function */
+ ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
+
+	/* backbone_gw is unreferenced in the report work function
+	 * if the queue_work() call was successful
+ */
+ if (!ret)
+ batadv_backbone_gw_put(backbone_gw);
return true;
}
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 8b198ee798c9..140c61a3f1ec 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -32,6 +32,7 @@
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/rculist.h>
@@ -348,6 +349,9 @@ out:
* @bat_priv: the bat priv with all the soft interface information
* @orig_node: originator announcing gateway capabilities
* @gateway: announced bandwidth information
+ *
+ * Has to be called with the appropriate locks held
+ * (gw.list_lock).
*/
static void batadv_gw_node_add(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
@@ -355,6 +359,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
{
struct batadv_gw_node *gw_node;
+ lockdep_assert_held(&bat_priv->gw.list_lock);
+
if (gateway->bandwidth_down == 0)
return;
@@ -369,10 +375,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
- spin_lock_bh(&bat_priv->gw.list_lock);
kref_get(&gw_node->refcount);
hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.gateway_list);
- spin_unlock_bh(&bat_priv->gw.list_lock);
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
@@ -428,11 +432,14 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
{
struct batadv_gw_node *gw_node, *curr_gw = NULL;
+ spin_lock_bh(&bat_priv->gw.list_lock);
gw_node = batadv_gw_node_get(bat_priv, orig_node);
if (!gw_node) {
batadv_gw_node_add(bat_priv, orig_node, gateway);
+ spin_unlock_bh(&bat_priv->gw.list_lock);
goto out;
}
+ spin_unlock_bh(&bat_priv->gw.list_lock);
if (gw_node->bandwidth_down == ntohl(gateway->bandwidth_down) &&
gw_node->bandwidth_up == ntohl(gateway->bandwidth_up))
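
The gateway fix moves the lookup-or-add sequence under gw.list_lock and documents the contract with lockdep_assert_held(), so a future unlocked caller trips a lockdep splat instead of a rare list race. In outline:

	spin_lock_bh(&bat_priv->gw.list_lock);
	gw_node = batadv_gw_node_get(bat_priv, orig_node);
	if (!gw_node)
		batadv_gw_node_add(bat_priv, orig_node, gateway);
	spin_unlock_bh(&bat_priv->gw.list_lock);

	/* ...while batadv_gw_node_add() itself begins with: */
	lockdep_assert_held(&bat_priv->gw.list_lock);
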
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 8da3c9336111..3ccc75ee719c 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -25,7 +25,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2018.2"
+#define BATADV_SOURCE_VERSION "2018.3"
#endif
/* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index c3578444f3cb..34caf129a9bf 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -854,16 +854,27 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
spinlock_t *lock; /* Used to lock list selected by "int in_coding" */
struct list_head *list;
+ /* Select ingoing or outgoing coding node */
+ if (in_coding) {
+ lock = &orig_neigh_node->in_coding_list_lock;
+ list = &orig_neigh_node->in_coding_list;
+ } else {
+ lock = &orig_neigh_node->out_coding_list_lock;
+ list = &orig_neigh_node->out_coding_list;
+ }
+
+ spin_lock_bh(lock);
+
/* Check if nc_node is already added */
nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
/* Node found */
if (nc_node)
- return nc_node;
+ goto unlock;
nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
if (!nc_node)
- return NULL;
+ goto unlock;
/* Initialize nc_node */
INIT_LIST_HEAD(&nc_node->list);
@@ -872,22 +883,14 @@ batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
kref_get(&orig_neigh_node->refcount);
nc_node->orig_node = orig_neigh_node;
- /* Select ingoing or outgoing coding node */
- if (in_coding) {
- lock = &orig_neigh_node->in_coding_list_lock;
- list = &orig_neigh_node->in_coding_list;
- } else {
- lock = &orig_neigh_node->out_coding_list_lock;
- list = &orig_neigh_node->out_coding_list;
- }
-
batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n",
nc_node->addr, nc_node->orig_node->orig);
/* Add nc_node to orig_node */
- spin_lock_bh(lock);
kref_get(&nc_node->refcount);
list_add_tail_rcu(&nc_node->list, list);
+
+unlock:
spin_unlock_bh(lock);
return nc_node;
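
Several batman-adv changes in this series close the same check-then-add race: lookup and insertion used to sit in separate locked sections, so two CPUs could both miss the lookup and both insert. The fix widens the critical section, which in turn is why the allocation must stay GFP_ATOMIC — it now runs under a BH-disabling spinlock. The recurring shape (find_node() stands in for the respective lookup helper):

	spin_lock_bh(lock);
	node = find_node(list, key);		/* check... */
	if (node)
		goto unlock;
	node = kzalloc(sizeof(*node), GFP_ATOMIC); /* must not sleep here */
	if (!node)
		goto unlock;
	list_add_tail_rcu(&node->list, list);	/* ...then add, atomically */
unlock:
	spin_unlock_bh(lock);
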
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 1485263a348b..626ddca332db 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -574,15 +574,20 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
struct batadv_softif_vlan *vlan;
int err;
+ spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+
vlan = batadv_softif_vlan_get(bat_priv, vid);
if (vlan) {
batadv_softif_vlan_put(vlan);
+ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
return -EEXIST;
}
vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
- if (!vlan)
+ if (!vlan) {
+ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
return -ENOMEM;
+ }
vlan->bat_priv = bat_priv;
vlan->vid = vid;
@@ -590,17 +595,23 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
atomic_set(&vlan->ap_isolation, 0);
+ kref_get(&vlan->refcount);
+ hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
+ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
+ /* batadv_sysfs_add_vlan cannot be in the spinlock section due to the
+ * sleeping behavior of the sysfs functions and the fs_reclaim lock
+ */
err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
if (err) {
- kfree(vlan);
+ /* ref for the function */
+ batadv_softif_vlan_put(vlan);
+
+ /* ref for the list */
+ batadv_softif_vlan_put(vlan);
return err;
}
- spin_lock_bh(&bat_priv->softif_vlan_list_lock);
- kref_get(&vlan->refcount);
- hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
- spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-
/* add a new TT local entry. This one will be marked with the NOPURGE
* flag
*/
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index f2eef43bd2ec..09427fc6494a 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -188,7 +188,8 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
\
return __batadv_store_uint_attr(buff, count, _min, _max, \
_post_func, attr, \
- &bat_priv->_var, net_dev); \
+ &bat_priv->_var, net_dev, \
+ NULL); \
}
#define BATADV_ATTR_SIF_SHOW_UINT(_name, _var) \
@@ -262,7 +263,9 @@ ssize_t batadv_store_##_name(struct kobject *kobj, \
\
length = __batadv_store_uint_attr(buff, count, _min, _max, \
_post_func, attr, \
- &hard_iface->_var, net_dev); \
+ &hard_iface->_var, \
+ hard_iface->soft_iface, \
+ net_dev); \
\
batadv_hardif_put(hard_iface); \
return length; \
@@ -356,10 +359,12 @@ __batadv_store_bool_attr(char *buff, size_t count,
static int batadv_store_uint_attr(const char *buff, size_t count,
struct net_device *net_dev,
+ struct net_device *slave_dev,
const char *attr_name,
unsigned int min, unsigned int max,
atomic_t *attr)
{
+ char ifname[IFNAMSIZ + 3] = "";
unsigned long uint_val;
int ret;
@@ -385,8 +390,11 @@ static int batadv_store_uint_attr(const char *buff, size_t count,
if (atomic_read(attr) == uint_val)
return count;
- batadv_info(net_dev, "%s: Changing from: %i to: %lu\n",
- attr_name, atomic_read(attr), uint_val);
+ if (slave_dev)
+ snprintf(ifname, sizeof(ifname), "%s: ", slave_dev->name);
+
+ batadv_info(net_dev, "%s: %sChanging from: %i to: %lu\n",
+ attr_name, ifname, atomic_read(attr), uint_val);
atomic_set(attr, uint_val);
return count;
@@ -397,12 +405,13 @@ static ssize_t __batadv_store_uint_attr(const char *buff, size_t count,
void (*post_func)(struct net_device *),
const struct attribute *attr,
atomic_t *attr_store,
- struct net_device *net_dev)
+ struct net_device *net_dev,
+ struct net_device *slave_dev)
{
int ret;
- ret = batadv_store_uint_attr(buff, count, net_dev, attr->name, min, max,
- attr_store);
+ ret = batadv_store_uint_attr(buff, count, net_dev, slave_dev,
+ attr->name, min, max, attr_store);
if (post_func && ret)
post_func(net_dev);
@@ -571,7 +580,7 @@ static ssize_t batadv_store_gw_sel_class(struct kobject *kobj,
return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE,
batadv_post_gw_reselect, attr,
&bat_priv->gw.sel_class,
- bat_priv->soft_iface);
+ bat_priv->soft_iface, NULL);
}
static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
@@ -1090,8 +1099,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
if (old_tp_override == tp_override)
goto out;
- batadv_info(net_dev, "%s: Changing from: %u.%u MBit to: %u.%u MBit\n",
- "throughput_override",
+ batadv_info(hard_iface->soft_iface,
+ "%s: %s: Changing from: %u.%u MBit to: %u.%u MBit\n",
+ "throughput_override", net_dev->name,
old_tp_override / 10, old_tp_override % 10,
tp_override / 10, tp_override % 10);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 12a2b7d21376..d21624c44665 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1613,6 +1613,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
{
struct batadv_tt_orig_list_entry *orig_entry;
+ spin_lock_bh(&tt_global->list_lock);
+
orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
if (orig_entry) {
/* refresh the ttvn: the current value could be a bogus one that
@@ -1635,11 +1637,9 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
orig_entry->flags = flags;
kref_init(&orig_entry->refcount);
- spin_lock_bh(&tt_global->list_lock);
kref_get(&orig_entry->refcount);
hlist_add_head_rcu(&orig_entry->list,
&tt_global->orig_list);
- spin_unlock_bh(&tt_global->list_lock);
atomic_inc(&tt_global->orig_list_count);
sync_flags:
@@ -1647,6 +1647,8 @@ sync_flags:
out:
if (orig_entry)
batadv_tt_orig_list_entry_put(orig_entry);
+
+ spin_unlock_bh(&tt_global->list_lock);
}
/**
diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c
index a637458205d1..40e69c9346d2 100644
--- a/net/batman-adv/tvlv.c
+++ b/net/batman-adv/tvlv.c
@@ -529,15 +529,20 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
{
struct batadv_tvlv_handler *tvlv_handler;
+ spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
+
tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
if (tvlv_handler) {
+ spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
batadv_tvlv_handler_put(tvlv_handler);
return;
}
tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
- if (!tvlv_handler)
+ if (!tvlv_handler) {
+ spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
return;
+ }
tvlv_handler->ogm_handler = optr;
tvlv_handler->unicast_handler = uptr;
@@ -547,7 +552,6 @@ void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
kref_init(&tvlv_handler->refcount);
INIT_HLIST_NODE(&tvlv_handler->list);
- spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
kref_get(&tvlv_handler->refcount);
hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 3bdc8f3ca259..ccce954f8146 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2434,9 +2434,8 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
/* LE address type */
addr_type = le_addr_type(cp->addr.type);
- hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
-
- err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
+ /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
+ err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
if (err < 0) {
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
MGMT_STATUS_NOT_PAIRED, &rp,
@@ -2450,8 +2449,6 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto done;
}
- /* Abort any ongoing SMP pairing */
- smp_cancel_pairing(conn);
/* Defer clearing up the connection parameters until closing to
* give a chance of keeping them if a repairing happens.
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index ae91e2d40056..73f7211d0431 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -83,6 +83,7 @@ enum {
struct smp_dev {
/* Secure Connections OOB data */
+ bool local_oob;
u8 local_pk[64];
u8 local_rand[16];
bool debug_key;
@@ -599,6 +600,8 @@ int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16])
memcpy(rand, smp->local_rand, 16);
+ smp->local_oob = true;
+
return 0;
}
@@ -1785,7 +1788,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
* successfully received our local OOB data - therefore set the
* flag to indicate that local OOB is in use.
*/
- if (req->oob_flag == SMP_OOB_PRESENT)
+ if (req->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob)
set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags);
/* SMP over BR/EDR requires special treatment */
@@ -1967,7 +1970,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
* successfully received our local OOB data - therefore set the
* flag to indicate that local OOB is in use.
*/
- if (rsp->oob_flag == SMP_OOB_PRESENT)
+ if (rsp->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob)
set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags);
smp->prsp[0] = SMP_CMD_PAIRING_RSP;
@@ -2419,30 +2422,51 @@ unlock:
return ret;
}
-void smp_cancel_pairing(struct hci_conn *hcon)
+int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type)
{
- struct l2cap_conn *conn = hcon->l2cap_data;
+ struct hci_conn *hcon;
+ struct l2cap_conn *conn;
struct l2cap_chan *chan;
struct smp_chan *smp;
+ int err;
+
+ err = hci_remove_ltk(hdev, bdaddr, addr_type);
+ hci_remove_irk(hdev, bdaddr, addr_type);
+
+ hcon = hci_conn_hash_lookup_le(hdev, bdaddr, addr_type);
+ if (!hcon)
+ goto done;
+ conn = hcon->l2cap_data;
if (!conn)
- return;
+ goto done;
chan = conn->smp;
if (!chan)
- return;
+ goto done;
l2cap_chan_lock(chan);
smp = chan->data;
if (smp) {
+ /* Set keys to NULL to make sure smp_failure() does not try to
+ * remove and free already invalidated rcu list entries. */
+ smp->ltk = NULL;
+ smp->slave_ltk = NULL;
+ smp->remote_irk = NULL;
+
if (test_bit(SMP_FLAG_COMPLETE, &smp->flags))
smp_failure(conn, 0);
else
smp_failure(conn, SMP_UNSPECIFIED);
+ err = 0;
}
l2cap_chan_unlock(chan);
+
+done:
+ return err;
}
static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
@@ -2697,7 +2721,13 @@ static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb)
* key was set/generated.
*/
if (test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) {
- struct smp_dev *smp_dev = chan->data;
+ struct l2cap_chan *hchan = hdev->smp_data;
+ struct smp_dev *smp_dev;
+
+ if (!hchan || !hchan->data)
+ return SMP_UNSPECIFIED;
+
+ smp_dev = hchan->data;
tfm_ecdh = smp_dev->tfm_ecdh;
} else {
@@ -3230,6 +3260,7 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
return ERR_CAST(tfm_ecdh);
}
+ smp->local_oob = false;
smp->tfm_aes = tfm_aes;
smp->tfm_cmac = tfm_cmac;
smp->tfm_ecdh = tfm_ecdh;
diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
index 0ff6247eaa6c..121edadd5f8d 100644
--- a/net/bluetooth/smp.h
+++ b/net/bluetooth/smp.h
@@ -181,7 +181,8 @@ enum smp_key_pref {
};
/* SMP Commands */
-void smp_cancel_pairing(struct hci_conn *hcon);
+int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type);
bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
enum smp_key_pref key_pref);
int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c
index f0fc182d3db7..94e88f510c5b 100644
--- a/net/bpfilter/bpfilter_kern.c
+++ b/net/bpfilter/bpfilter_kern.c
@@ -23,9 +23,11 @@ static void shutdown_umh(struct umh_info *info)
if (!info->pid)
return;
- tsk = pid_task(find_vpid(info->pid), PIDTYPE_PID);
- if (tsk)
+ tsk = get_pid_task(find_vpid(info->pid), PIDTYPE_PID);
+ if (tsk) {
force_sig(SIGKILL, tsk);
+ put_task_struct(tsk);
+ }
fput(info->pipe_to_umh);
fput(info->pipe_from_umh);
info->pid = 0;
@@ -59,7 +61,7 @@ static int __bpfilter_process_sockopt(struct sock *sk, int optname,
req.is_set = is_set;
req.pid = current->pid;
req.cmd = optname;
- req.addr = (long)optval;
+ req.addr = (long __force __user)optval;
req.len = optlen;
mutex_lock(&bpfilter_lock);
if (!info.pid)
@@ -98,7 +100,7 @@ static int __init load_umh(void)
pr_info("Loaded bpfilter_umh pid %d\n", info.pid);
/* health check that usermode process started correctly */
- if (__bpfilter_process_sockopt(NULL, 0, 0, 0, 0) != 0) {
+ if (__bpfilter_process_sockopt(NULL, 0, NULL, 0, 0) != 0) {
stop_umh();
return -EFAULT;
}
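
The bpfilter change swaps pid_task() for get_pid_task(): the former returns a task pointer without taking a reference, so the usermode helper could exit and be freed between the lookup and force_sig(). The safe pairing is:

	struct task_struct *tsk;

	tsk = get_pid_task(find_vpid(info->pid), PIDTYPE_PID);	/* +1 ref */
	if (tsk) {
		force_sig(SIGKILL, tsk);
		put_task_struct(tsk);				/* -1 ref */
	}
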
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 6e0dc6bcd32a..37278dc280eb 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -835,7 +835,8 @@ static unsigned int ip_sabotage_in(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
- if (skb->nf_bridge && !skb->nf_bridge->in_prerouting) {
+ if (skb->nf_bridge && !skb->nf_bridge->in_prerouting &&
+ !netif_is_l3_master(skb->dev)) {
state->okfn(state->net, state->sk, skb);
return NF_STOLEN;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 82114e1111e6..93243479085f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1752,6 +1752,28 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
}
EXPORT_SYMBOL(call_netdevice_notifiers);
+/**
+ * call_netdevice_notifiers_mtu - call all network notifier blocks
+ * @val: value passed unmodified to notifier function
+ * @dev: net_device pointer passed unmodified to notifier function
+ * @arg: additional u32 argument passed to the notifier function
+ *
+ * Call all network notifier blocks. Parameters and return value
+ * are as for raw_notifier_call_chain().
+ */
+static int call_netdevice_notifiers_mtu(unsigned long val,
+ struct net_device *dev, u32 arg)
+{
+ struct netdev_notifier_info_ext info = {
+ .info.dev = dev,
+ .ext.mtu = arg,
+ };
+
+ BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
+
+ return call_netdevice_notifiers_info(val, &info.info);
+}
+
#ifdef CONFIG_NET_INGRESS
static DEFINE_STATIC_KEY_FALSE(ingress_needed_key);
@@ -7574,14 +7596,16 @@ int dev_set_mtu_ext(struct net_device *dev, int new_mtu,
err = __dev_set_mtu(dev, new_mtu);
if (!err) {
- err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+ err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
+ orig_mtu);
err = notifier_to_errno(err);
if (err) {
/* setting mtu back and notifying everyone again,
* so that they have a chance to revert changes.
*/
__dev_set_mtu(dev, orig_mtu);
- call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+ call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
+ new_mtu);
}
}
return err;
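
On the consumer side, a NETDEV_CHANGEMTU listener can recover the extra u32 from the extended info struct; the BUILD_BUG_ON above guarantees the plain info is its first member, so existing notifiers keep working. A hedged sketch of such a listener (my_netdev_event is a hypothetical callback):

	static int my_netdev_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
	{
		struct net_device *dev = netdev_notifier_info_to_dev(ptr);

		if (event == NETDEV_CHANGEMTU) {
			struct netdev_notifier_info_ext *info_ext = ptr;

			/* ext.mtu carries the pre-change MTU on the success
			 * path, the attempted MTU on the revert path. */
			pr_info("%s: mtu %u -> %u\n", dev->name,
				info_ext->ext.mtu, dev->mtu);
		}
		return NOTIFY_DONE;
	}
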
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 65fc366a78a4..6bc42933be4a 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -2592,7 +2592,7 @@ send_done:
if (!nlh) {
err = devlink_dpipe_send_and_alloc_skb(&skb, info);
if (err)
- goto err_skb_send_alloc;
+ return err;
goto send_done;
}
return genlmsg_reply(skb, info);
@@ -2600,7 +2600,6 @@ send_done:
nla_put_failure:
err = -EMSGSIZE;
err_resource_put:
-err_skb_send_alloc:
nlmsg_free(skb);
return err;
}
@@ -2996,6 +2995,8 @@ devlink_param_value_get_from_info(const struct devlink_param *param,
struct genl_info *info,
union devlink_param_value *value)
{
+ int len;
+
if (param->type != DEVLINK_PARAM_TYPE_BOOL &&
!info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])
return -EINVAL;
@@ -3011,10 +3012,13 @@ devlink_param_value_get_from_info(const struct devlink_param *param,
value->vu32 = nla_get_u32(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
break;
case DEVLINK_PARAM_TYPE_STRING:
- if (nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) >
- DEVLINK_PARAM_MAX_STRING_VALUE)
+ len = strnlen(nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]),
+ nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]));
+ if (len == nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) ||
+ len >= __DEVLINK_PARAM_MAX_STRING_VALUE)
return -EINVAL;
- value->vstr = nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+ strcpy(value->vstr,
+ nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]));
break;
case DEVLINK_PARAM_TYPE_BOOL:
value->vbool = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA] ?
@@ -3101,7 +3105,10 @@ static int devlink_nl_cmd_param_set_doit(struct sk_buff *skb,
return -EOPNOTSUPP;
if (cmode == DEVLINK_PARAM_CMODE_DRIVERINIT) {
- param_item->driverinit_value = value;
+ if (param->type == DEVLINK_PARAM_TYPE_STRING)
+ strcpy(param_item->driverinit_value.vstr, value.vstr);
+ else
+ param_item->driverinit_value = value;
param_item->driverinit_value_valid = true;
} else {
if (!param->set)
@@ -4541,7 +4548,10 @@ int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
DEVLINK_PARAM_CMODE_DRIVERINIT))
return -EOPNOTSUPP;
- *init_val = param_item->driverinit_value;
+ if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING)
+ strcpy(init_val->vstr, param_item->driverinit_value.vstr);
+ else
+ *init_val = param_item->driverinit_value;
return 0;
}
@@ -4572,7 +4582,10 @@ int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
DEVLINK_PARAM_CMODE_DRIVERINIT))
return -EOPNOTSUPP;
- param_item->driverinit_value = init_val;
+ if (param_item->param->type == DEVLINK_PARAM_TYPE_STRING)
+ strcpy(param_item->driverinit_value.vstr, init_val.vstr);
+ else
+ param_item->driverinit_value = init_val;
param_item->driverinit_value_valid = true;
devlink_param_notify(devlink, param_item, DEVLINK_CMD_PARAM_NEW);
@@ -4605,6 +4618,23 @@ void devlink_param_value_changed(struct devlink *devlink, u32 param_id)
EXPORT_SYMBOL_GPL(devlink_param_value_changed);
/**
+ * devlink_param_value_str_fill - Safely fill the string, preventing
+ *	overflow of the preallocated buffer
+ *
+ * @dst_val: destination devlink_param_value
+ * @src: source buffer
+ */
+void devlink_param_value_str_fill(union devlink_param_value *dst_val,
+ const char *src)
+{
+ size_t len;
+
+ len = strlcpy(dst_val->vstr, src, __DEVLINK_PARAM_MAX_STRING_VALUE);
+ WARN_ON(len >= __DEVLINK_PARAM_MAX_STRING_VALUE);
+}
+EXPORT_SYMBOL_GPL(devlink_param_value_str_fill);
+
+/**
* devlink_region_create - create a new address region
*
* @devlink: devlink
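
The string-parameter fixes all flow from one rule: devlink must copy the netlink payload into its preallocated vstr buffer rather than alias attribute memory, and must reject both unterminated and over-long input first. With attr standing in for info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA], the validation is:

	len = strnlen(nla_data(attr), nla_len(attr));
	if (len == nla_len(attr) ||			/* no NUL found */
	    len >= __DEVLINK_PARAM_MAX_STRING_VALUE)	/* too long */
		return -EINVAL;
	strcpy(value->vstr, nla_data(attr));		/* bounded by the check */
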
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index c9993c6c2fd4..aeabc4831fca 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1015,6 +1015,9 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
return -EINVAL;
}
+ if (info.cmd != cmd)
+ return -EINVAL;
+
if (info.cmd == ETHTOOL_GRXCLSRLALL) {
if (info.rule_cnt > 0) {
if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
@@ -1483,6 +1486,7 @@ static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
{
struct ethtool_wolinfo wol;
+ int ret;
if (!dev->ethtool_ops->set_wol)
return -EOPNOTSUPP;
@@ -1490,7 +1494,13 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
if (copy_from_user(&wol, useraddr, sizeof(wol)))
return -EFAULT;
- return dev->ethtool_ops->set_wol(dev, &wol);
+ ret = dev->ethtool_ops->set_wol(dev, &wol);
+ if (ret)
+ return ret;
+
+ dev->wol_enabled = !!wol.wolopts;
+
+ return 0;
}
static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
@@ -2462,13 +2472,17 @@ roll_back:
return ret;
}
-static int ethtool_set_per_queue(struct net_device *dev, void __user *useraddr)
+static int ethtool_set_per_queue(struct net_device *dev,
+ void __user *useraddr, u32 sub_cmd)
{
struct ethtool_per_queue_op per_queue_opt;
if (copy_from_user(&per_queue_opt, useraddr, sizeof(per_queue_opt)))
return -EFAULT;
+ if (per_queue_opt.sub_command != sub_cmd)
+ return -EINVAL;
+
switch (per_queue_opt.sub_command) {
case ETHTOOL_GCOALESCE:
return ethtool_get_per_queue_coalesce(dev, useraddr, &per_queue_opt);
@@ -2624,6 +2638,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GPHYSTATS:
case ETHTOOL_GTSO:
case ETHTOOL_GPERMADDR:
+ case ETHTOOL_GUFO:
case ETHTOOL_GGSO:
case ETHTOOL_GGRO:
case ETHTOOL_GFLAGS:
@@ -2838,7 +2853,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
rc = ethtool_get_phy_stats(dev, useraddr);
break;
case ETHTOOL_PERQUEUE:
- rc = ethtool_set_per_queue(dev, useraddr);
+ rc = ethtool_set_per_queue(dev, useraddr, sub_cmd);
break;
case ETHTOOL_GLINKSETTINGS:
rc = ethtool_get_link_ksettings(dev, useraddr);
diff --git a/net/core/filter.c b/net/core/filter.c
index aecdeba052d3..5e00f2b85a56 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2344,7 +2344,8 @@ BPF_CALL_4(bpf_msg_pull_data,
if (unlikely(bytes_sg_total > copy))
return -EINVAL;
- page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy));
+ page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
+ get_order(copy));
if (unlikely(!page))
return -ENOMEM;
p = page_address(page);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index aa19d86937af..4e07824eec5e 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1148,8 +1148,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
neigh->nud_state = new;
err = 0;
notify = old & NUD_VALID;
- if (((old & (NUD_INCOMPLETE | NUD_PROBE)) ||
- (flags & NEIGH_UPDATE_F_ADMIN)) &&
+ if ((old & (NUD_INCOMPLETE | NUD_PROBE)) &&
(new & NUD_FAILED)) {
neigh_invalidate(neigh);
notify = 1;
@@ -1180,6 +1179,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
lladdr = neigh->ha;
}
+ /* Update the confirmed timestamp for the neighbour entry after we
+ * receive an ARP packet, even if it doesn't change the IP-to-MAC binding.
+ */
+ if (new & NUD_CONNECTED)
+ neigh->confirmed = jiffies;
+
/* If entry was valid and address is not changed,
do not change entry state, if new one is STALE.
*/
@@ -1201,15 +1206,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
}
}
- /* Update timestamps only once we know we will make a change to the
+ /* Update timestamp only once we know we will make a change to the
* neighbour entry. Otherwise we risk to move the locktime window with
* noop updates and ignore relevant ARP updates.
*/
- if (new != old || lladdr != neigh->ha) {
- if (new & NUD_CONNECTED)
- neigh->confirmed = jiffies;
+ if (new != old || lladdr != neigh->ha)
neigh->updated = jiffies;
- }
if (new != old) {
neigh_del_timer(neigh);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 57557a6a950c..3ae899805f8b 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -135,27 +135,9 @@ static void queue_process(struct work_struct *work)
}
}
-/*
- * Check whether delayed processing was scheduled for our NIC. If so,
- * we attempt to grab the poll lock and use ->poll() to pump the card.
- * If this fails, either we've recursed in ->poll() or it's already
- * running on another CPU.
- *
- * Note: we don't mask interrupts with this lock because we're using
- * trylock here and interrupts are already disabled in the softirq
- * case. Further, we test the poll_owner to avoid recursion on UP
- * systems where the lock doesn't exist.
- */
static void poll_one_napi(struct napi_struct *napi)
{
- int work = 0;
-
- /* net_rx_action's ->poll() invocations and our's are
- * synchronized by this test which is only made while
- * holding the napi->poll_lock.
- */
- if (!test_bit(NAPI_STATE_SCHED, &napi->state))
- return;
+ int work;
/* If we set this bit but see that it has already been set,
* that indicates that napi has been disabled and we need
@@ -187,16 +169,16 @@ static void poll_napi(struct net_device *dev)
}
}
-static void netpoll_poll_dev(struct net_device *dev)
+void netpoll_poll_dev(struct net_device *dev)
{
- const struct net_device_ops *ops;
struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
+ const struct net_device_ops *ops;
/* Don't do any rx activity if the dev_lock mutex is held
* the dev_open/close paths use this to block netpoll activity
* while changing device state
*/
- if (down_trylock(&ni->dev_lock))
+ if (!ni || down_trylock(&ni->dev_lock))
return;
if (!netif_running(dev)) {
@@ -205,13 +187,8 @@ static void netpoll_poll_dev(struct net_device *dev)
}
ops = dev->netdev_ops;
- if (!ops->ndo_poll_controller) {
- up(&ni->dev_lock);
- return;
- }
-
- /* Process pending work on NIC */
- ops->ndo_poll_controller(dev);
+ if (ops->ndo_poll_controller)
+ ops->ndo_poll_controller(dev);
poll_napi(dev);
@@ -219,6 +196,7 @@ static void netpoll_poll_dev(struct net_device *dev)
zap_completion_queue();
}
+EXPORT_SYMBOL(netpoll_poll_dev);
void netpoll_poll_disable(struct net_device *dev)
{
@@ -613,8 +591,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
- if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
- !ndev->netdev_ops->ndo_poll_controller) {
+ if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
np_err(np, "%s doesn't support polling, aborting\n",
np->dev_name);
err = -ENOTSUPP;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 60c928894a78..37c7936124e6 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1898,10 +1898,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
if (tb[IFLA_IF_NETNSID]) {
netnsid = nla_get_s32(tb[IFLA_IF_NETNSID]);
tgt_net = get_target_net(skb->sk, netnsid);
- if (IS_ERR(tgt_net)) {
- tgt_net = net;
- netnsid = -1;
- }
+ if (IS_ERR(tgt_net))
+ return PTR_ERR(tgt_net);
}
if (tb[IFLA_EXT_MASK])
@@ -2810,7 +2808,7 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
}
if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
- __dev_notify_flags(dev, old_flags, 0U);
+ __dev_notify_flags(dev, old_flags, (old_flags ^ dev->flags));
} else {
dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
__dev_notify_flags(dev, old_flags, ~0U);
@@ -2837,6 +2835,12 @@ struct net_device *rtnl_create_link(struct net *net,
else if (ops->get_num_rx_queues)
num_rx_queues = ops->get_num_rx_queues();
+ if (num_tx_queues < 1 || num_tx_queues > 4096)
+ return ERR_PTR(-EINVAL);
+
+ if (num_rx_queues < 1 || num_rx_queues > 4096)
+ return ERR_PTR(-EINVAL);
+
dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
ops->setup, num_tx_queues, num_rx_queues);
if (!dev)
@@ -3744,16 +3748,27 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
int err = 0;
int fidx = 0;
- err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
- IFLA_MAX, ifla_policy, NULL);
- if (err < 0) {
- return -EINVAL;
- } else if (err == 0) {
- if (tb[IFLA_MASTER])
- br_idx = nla_get_u32(tb[IFLA_MASTER]);
- }
+ /* A hack to preserve kernel<->userspace interface.
+ * Before Linux v4.12 this code accepted ndmsg since iproute2 v3.3.0.
+ * However, ndmsg is shorter than ifinfomsg thus nlmsg_parse() bails.
+ * So, check for ndmsg with an optional u32 attribute (not used here).
+ * Fortunately these sizes don't conflict with the size of ifinfomsg
+ * with an optional attribute.
+ */
+ if (nlmsg_len(cb->nlh) != sizeof(struct ndmsg) &&
+ (nlmsg_len(cb->nlh) != sizeof(struct ndmsg) +
+ nla_attr_size(sizeof(u32)))) {
+ err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
+ IFLA_MAX, ifla_policy, NULL);
+ if (err < 0) {
+ return -EINVAL;
+ } else if (err == 0) {
+ if (tb[IFLA_MASTER])
+ br_idx = nla_get_u32(tb[IFLA_MASTER]);
+ }
- brport_idx = ifm->ifi_index;
+ brport_idx = ifm->ifi_index;
+ }
if (br_idx) {
br_dev = __dev_get_by_index(net, br_idx);
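
The compat branch keys purely off nlmsg_len(): a legacy ndmsg header, with or without one u32 attribute, is passed through untouched, and anything else is parsed as ifinfomsg. A userspace sketch of the size-based dispatch, with illustrative stand-in sizes:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct ndmsg_hdr     { uint8_t b[12]; };  /* stand-in sizes, illustrative */
    struct ifinfomsg_hdr { uint8_t b[16]; };
    #define U32_ATTR_SIZE 8                   /* models nla_attr_size(sizeof(u32)) */

    static bool is_legacy_ndmsg(size_t payload_len)
    {
        return payload_len == sizeof(struct ndmsg_hdr) ||
               payload_len == sizeof(struct ndmsg_hdr) + U32_ATTR_SIZE;
    }

    /* callers parse IFLA attributes only when !is_legacy_ndmsg(len) */
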
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c996c09d095f..f817f336595d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -939,9 +939,6 @@ struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
WARN_ON_ONCE(!in_task());
- if (!sock_flag(sk, SOCK_ZEROCOPY))
- return NULL;
-
skb = sock_omalloc(sk, 0, GFP_KERNEL);
if (!skb)
return NULL;
@@ -1849,8 +1846,9 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
if (skb->ip_summed == CHECKSUM_COMPLETE) {
int delta = skb->len - len;
- skb->csum = csum_sub(skb->csum,
- skb_checksum(skb, len, delta, 0));
+ skb->csum = csum_block_sub(skb->csum,
+ skb_checksum(skb, len, delta, 0),
+ len);
}
return __pskb_trim(skb, len);
}
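
The bug being fixed: csum_sub() assumes both operands were computed from the same starting offset, but here the subtracted sum covers the bytes from offset len onward. The Internet checksum is byte-order sensitive in 16-bit units, so a sum taken from an odd offset must be rotated before it can be combined, which is what csum_block_sub() adds. A self-contained model of the arithmetic, assuming 32-bit unfolded sums as in the kernel:

    #include <stdint.h>
    #include <stddef.h>

    static uint32_t csum_add(uint32_t a, uint32_t b)
    {
        a += b;
        return a + (a < b);          /* end-around carry */
    }

    static uint32_t csum_sub(uint32_t a, uint32_t b)
    {
        return csum_add(a, ~b);      /* ones-complement subtraction */
    }

    static uint32_t ror32(uint32_t v, unsigned n)
    {
        return (v >> n) | (v << (32 - n));
    }

    /* subtract a partial sum that was computed starting at byte 'offset':
     * for odd offsets its bytes are swapped relative to the packet start */
    static uint32_t csum_block_sub(uint32_t a, uint32_t b, size_t offset)
    {
        if (offset & 1)
            b = ror32(b, 8);
        return csum_sub(a, b);
    }
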
@@ -4455,14 +4453,16 @@ EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
*/
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
- if (unlikely(start > skb_headlen(skb)) ||
- unlikely((int)start + off > skb_headlen(skb) - 2)) {
- net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
- start, off, skb_headlen(skb));
+ u32 csum_end = (u32)start + (u32)off + sizeof(__sum16);
+ u32 csum_start = skb_headroom(skb) + (u32)start;
+
+ if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) {
+ net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
+ start, off, skb_headroom(skb), skb_headlen(skb));
return false;
}
skb->ip_summed = CHECKSUM_PARTIAL;
- skb->csum_start = skb_headroom(skb) + start;
+ skb->csum_start = csum_start;
skb->csum_offset = off;
skb_set_transport_header(skb, start);
return true;
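
Both the overflow and the truncation guarded against here come from doing the arithmetic in the width of the fields: start and off are u16, csum_start is stored in a u16, and start + off + 2 can wrap. Promoting to u32 first makes both checks exact. A compact model of the validation, with a plain struct standing in for the skb geometry:

    #include <stdbool.h>
    #include <stdint.h>

    struct buf_geom { uint32_t headroom, headlen; };  /* models the skb fields */

    static bool partial_csum_set_ok(const struct buf_geom *g,
                                    uint16_t start, uint16_t off)
    {
        /* widen first so start + off + 2 cannot wrap in 16 bits */
        uint32_t csum_end   = (uint32_t)start + (uint32_t)off + sizeof(uint16_t);
        uint32_t csum_start = g->headroom + (uint32_t)start;

        /* csum_start must fit the u16 field; the checksum itself
         * must lie entirely inside the linear head */
        return csum_start <= UINT16_MAX && csum_end <= g->headlen;
    }
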
diff --git a/net/dccp/input.c b/net/dccp/input.c
index d28d46bff6ab..85d6c879383d 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -606,11 +606,13 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
if (sk->sk_state == DCCP_LISTEN) {
if (dh->dccph_type == DCCP_PKT_REQUEST) {
/* It is possible that we process SYN packets from backlog,
- * so we need to make sure to disable BH right there.
+ * so we need to make sure to disable BH and RCU right there.
*/
+ rcu_read_lock();
local_bh_disable();
acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
local_bh_enable();
+ rcu_read_unlock();
if (!acceptable)
return 1;
consume_skb(skb);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index b08feb219b44..8e08cea6f178 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -493,9 +493,11 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->ir_loc_addr,
ireq->ir_rmt_addr);
+ rcu_read_lock();
err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
ireq->ir_rmt_addr,
- ireq_opt_deref(ireq));
+ rcu_dereference(ireq->ireq_opt));
+ rcu_read_unlock();
err = net_xmit_eval(err);
}
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 20fda8fb8ffd..1fbe2f815474 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1377,6 +1377,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
if (encap)
skb_reset_inner_headers(skb);
skb->network_header = (u8 *)iph - skb->head;
+ skb_reset_mac_len(skb);
} while ((skb = skb->next));
out:
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 2998b0e47d4b..0113993e9b2c 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1243,7 +1243,8 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *upper_info = ptr;
+ struct netdev_notifier_info_ext *info_ext = ptr;
struct in_device *in_dev;
struct net *net = dev_net(dev);
unsigned int flags;
@@ -1278,16 +1279,19 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
fib_sync_up(dev, RTNH_F_LINKDOWN);
else
fib_sync_down_dev(dev, event, false);
- /* fall through */
+ rt_cache_flush(net);
+ break;
case NETDEV_CHANGEMTU:
+ fib_sync_mtu(dev, info_ext->ext.mtu);
rt_cache_flush(net);
break;
case NETDEV_CHANGEUPPER:
- info = ptr;
+ upper_info = ptr;
/* flush all routes if dev is linked to or unlinked from
* an L3 master device (e.g., VRF)
*/
- if (info->upper_dev && netif_is_l3_master(info->upper_dev))
+ if (upper_info->upper_dev &&
+ netif_is_l3_master(upper_info->upper_dev))
fib_disable_ip(dev, NETDEV_DOWN, true);
break;
}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index f3c89ccf14c5..446204ca7406 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1470,6 +1470,56 @@ static int call_fib_nh_notifiers(struct fib_nh *fib_nh,
return NOTIFY_DONE;
}
+/* Update the PMTU of exceptions when:
+ * - the new MTU of the first hop becomes smaller than the PMTU
+ * - the old MTU was the same as the PMTU, and it limited discovery of
+ * larger MTUs on the path. With that limit raised, we can now
+ * discover larger MTUs
+ * A special case is locked exceptions, for which the PMTU is smaller
+ * than the minimal accepted PMTU:
+ * - if the new MTU is greater than the PMTU, don't make any change
+ * - otherwise, unlock and set PMTU
+ */
+static void nh_update_mtu(struct fib_nh *nh, u32 new, u32 orig)
+{
+ struct fnhe_hash_bucket *bucket;
+ int i;
+
+ bucket = rcu_dereference_protected(nh->nh_exceptions, 1);
+ if (!bucket)
+ return;
+
+ for (i = 0; i < FNHE_HASH_SIZE; i++) {
+ struct fib_nh_exception *fnhe;
+
+ for (fnhe = rcu_dereference_protected(bucket[i].chain, 1);
+ fnhe;
+ fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1)) {
+ if (fnhe->fnhe_mtu_locked) {
+ if (new <= fnhe->fnhe_pmtu) {
+ fnhe->fnhe_pmtu = new;
+ fnhe->fnhe_mtu_locked = false;
+ }
+ } else if (new < fnhe->fnhe_pmtu ||
+ orig == fnhe->fnhe_pmtu) {
+ fnhe->fnhe_pmtu = new;
+ }
+ }
+ }
+}
+
+void fib_sync_mtu(struct net_device *dev, u32 orig_mtu)
+{
+ unsigned int hash = fib_devindex_hashfn(dev->ifindex);
+ struct hlist_head *head = &fib_info_devhash[hash];
+ struct fib_nh *nh;
+
+ hlist_for_each_entry(nh, head, nh_hash) {
+ if (nh->nh_dev == dev)
+ nh_update_mtu(nh, dev->mtu, orig_mtu);
+ }
+}
+
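
fib_sync_mtu() walks every nexthop on the changed device and applies the rules spelled out in the comment to each cached exception. Distilled into a plain function over a minimal exception struct (illustrative types, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>

    struct pmtu_exception { uint32_t pmtu; bool locked; };

    /* - locked entries: only adopt the new MTU if it is not larger,
     *   and unlock once it fits
     * - unlocked entries: adopt the new MTU if it shrinks the path,
     *   or if the old first-hop MTU was what capped discovery */
    static void exception_update_mtu(struct pmtu_exception *e,
                                     uint32_t new_mtu, uint32_t old_mtu)
    {
        if (e->locked) {
            if (new_mtu <= e->pmtu) {
                e->pmtu = new_mtu;
                e->locked = false;
            }
        } else if (new_mtu < e->pmtu || old_mtu == e->pmtu) {
            e->pmtu = new_mtu;
        }
    }
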
/* Event force Flags Description
* NETDEV_CHANGE 0 LINKDOWN Carrier OFF, not for scope host
* NETDEV_DOWN 0 LINKDOWN|DEAD Link down, not for scope host
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index dfd5009f96ef..15e7f7915a21 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -544,7 +544,8 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
struct ip_options_rcu *opt;
struct rtable *rt;
- opt = ireq_opt_deref(ireq);
+ rcu_read_lock();
+ opt = rcu_dereference(ireq->ireq_opt);
flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
@@ -558,11 +559,13 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
goto no_route;
if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
goto route_err;
+ rcu_read_unlock();
return &rt->dst;
route_err:
ip_rt_put(rt);
no_route:
+ rcu_read_unlock();
__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
return NULL;
}
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 88281fbce88c..e7227128df2c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -599,6 +599,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
nextp = &fp->next;
fp->prev = NULL;
memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+ fp->sk = NULL;
head->data_len += fp->len;
head->len += fp->len;
if (head->ip_summed != fp->ip_summed)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index ae714aecc31c..8cce0e9ea08c 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -178,6 +178,9 @@ static void ipgre_err(struct sk_buff *skb, u32 info,
if (tpi->proto == htons(ETH_P_TEB))
itn = net_generic(net, gre_tap_net_id);
+ else if (tpi->proto == htons(ETH_P_ERSPAN) ||
+ tpi->proto == htons(ETH_P_ERSPAN2))
+ itn = net_generic(net, erspan_net_id);
else
itn = net_generic(net, ipgre_net_id);
@@ -328,6 +331,8 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
return PACKET_RCVD;
}
+ return PACKET_REJECT;
+
drop:
kfree_skb(skb);
return PACKET_RCVD;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index c0fe5ad996f2..26c36cccabdc 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -149,7 +149,6 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
struct sockaddr_in sin;
- const struct iphdr *iph = ip_hdr(skb);
__be16 *ports;
int end;
@@ -164,7 +163,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
ports = (__be16 *)skb_transport_header(skb);
sin.sin_family = AF_INET;
- sin.sin_addr.s_addr = iph->daddr;
+ sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
sin.sin_port = ports[1];
memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index c4f5602308ed..284a22154b4e 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -627,6 +627,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, u8 protocol)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
+ unsigned int inner_nhdr_len = 0;
const struct iphdr *inner_iph;
struct flowi4 fl4;
u8 tos, ttl;
@@ -636,6 +637,14 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
__be32 dst;
bool connected;
+ /* ensure we can access the inner net header, for several users below */
+ if (skb->protocol == htons(ETH_P_IP))
+ inner_nhdr_len = sizeof(struct iphdr);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ inner_nhdr_len = sizeof(struct ipv6hdr);
+ if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
+ goto tx_error;
+
inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
connected = (tunnel->parms.iph.daddr != 0);
diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c
index 1ad9aa62a97b..eab8cd5ec2f5 100644
--- a/net/ipv4/ipmr_base.c
+++ b/net/ipv4/ipmr_base.c
@@ -296,8 +296,6 @@ int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
next_entry:
e++;
}
- e = 0;
- s_e = 0;
spin_lock_bh(lock);
list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index d9504adc47b3..184bf2e0a1ed 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -106,6 +106,10 @@ config NF_NAT_IPV4
if NF_NAT_IPV4
+config NF_NAT_MASQUERADE_IPV4
+ bool
+
+if NF_TABLES
config NFT_CHAIN_NAT_IPV4
depends on NF_TABLES_IPV4
tristate "IPv4 nf_tables nat chain support"
@@ -115,9 +119,6 @@ config NFT_CHAIN_NAT_IPV4
packet transformations such as the source, destination address and
source and destination ports.
-config NF_NAT_MASQUERADE_IPV4
- bool
-
config NFT_MASQ_IPV4
tristate "IPv4 masquerading support for nf_tables"
depends on NF_TABLES_IPV4
@@ -135,6 +136,7 @@ config NFT_REDIR_IPV4
help
This is the expression that provides IPv4 redirect support for
nf_tables.
+endif # NF_TABLES
config NF_NAT_SNMP_BASIC
tristate "Basic SNMP-ALG support"
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index b678466da451..8501554e96a4 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1001,21 +1001,22 @@ out: kfree_skb(skb);
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
struct dst_entry *dst = &rt->dst;
+ u32 old_mtu = ipv4_mtu(dst);
struct fib_result res;
bool lock = false;
if (ip_mtu_locked(dst))
return;
- if (ipv4_mtu(dst) < mtu)
+ if (old_mtu < mtu)
return;
if (mtu < ip_rt_min_pmtu) {
lock = true;
- mtu = ip_rt_min_pmtu;
+ mtu = min(old_mtu, ip_rt_min_pmtu);
}
- if (rt->rt_pmtu == mtu &&
+ if (rt->rt_pmtu == mtu && !lock &&
time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
return;
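
The subtle part of this hunk is the min(): an ICMP-reported MTU below ip_rt_min_pmtu used to be rounded up to the minimum, which could silently raise a PMTU that was already smaller. Clamping against the old value keeps the update monotonic while still locking the route. A sketch of the clamp, under illustrative names:

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t clamp_reported_pmtu(uint32_t reported, uint32_t old_mtu,
                                        uint32_t min_pmtu, bool *lock)
    {
        *lock = false;
        if (reported < min_pmtu) {
            *lock = true;                      /* too small: lock the route */
            return old_mtu < min_pmtu ? old_mtu : min_pmtu;  /* never raise */
        }
        return reported;
    }
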
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index b92f422f2fa8..891ed2f91467 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -48,6 +48,7 @@ static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
static int comp_sack_nr_max = 255;
+static u32 u32_max_div_HZ = UINT_MAX / HZ;
/* obsolete */
static int sysctl_tcp_low_latency __read_mostly;
@@ -745,9 +746,10 @@ static struct ctl_table ipv4_net_table[] = {
{
.procname = "tcp_probe_interval",
.data = &init_net.ipv4.sysctl_tcp_probe_interval,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(u32),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_douintvec_minmax,
+ .extra2 = &u32_max_div_HZ,
},
{
.procname = "igmp_link_local_mcast_reports",
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b8af2fec5ad5..10c6246396cc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1185,7 +1185,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
flags = msg->msg_flags;
- if (flags & MSG_ZEROCOPY && size) {
+ if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
if (sk->sk_state != TCP_ESTABLISHED) {
err = -EINVAL;
goto out_err;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4c2dd9f863f7..47e08c1b5bc3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6009,11 +6009,13 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
if (th->fin)
goto discard;
/* It is possible that we process SYN packets from backlog,
- * so we need to make sure to disable BH right there.
+ * so we need to make sure to disable BH and RCU right there.
*/
+ rcu_read_lock();
local_bh_disable();
acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
local_bh_enable();
+ rcu_read_unlock();
if (!acceptable)
return 1;
@@ -6367,8 +6369,8 @@ static bool tcp_syn_flood_action(const struct sock *sk,
if (!queue->synflood_warned &&
net->ipv4.sysctl_tcp_syncookies != 2 &&
xchg(&queue->synflood_warned, 1) == 0)
- pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
- proto, ntohs(tcp_hdr(skb)->dest), msg);
+ net_info_ratelimited("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
+ proto, ntohs(tcp_hdr(skb)->dest), msg);
return want_cookie;
}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 44c09eddbb78..cd426313a298 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -943,9 +943,11 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
if (skb) {
__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
+ rcu_read_lock();
err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
ireq->ir_rmt_addr,
- ireq_opt_deref(ireq));
+ rcu_dereference(ireq->ireq_opt));
+ rcu_read_unlock();
err = net_xmit_eval(err);
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f4e35b2ff8b8..c32a4c16b7ff 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1627,7 +1627,7 @@ busy_check:
*err = error;
return NULL;
}
-EXPORT_SYMBOL_GPL(__skb_recv_udp);
+EXPORT_SYMBOL(__skb_recv_udp);
/*
* This should be easy, if there is something there we
@@ -2124,6 +2124,28 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
inet_compute_pseudo);
}
+/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
+ * return code conversion for IP layer consumption
+ */
+static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
+ struct udphdr *uh)
+{
+ int ret;
+
+ if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
+ skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
+ inet_compute_pseudo);
+
+ ret = udp_queue_rcv_skb(sk, skb);
+
+ /* a return value > 0 means to resubmit the input, but
+ * it wants the return to be -protocol, or 0
+ */
+ if (ret > 0)
+ return -ret;
+ return 0;
+}
+
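
The wrapper exists to deduplicate the two receive paths below; the only non-obvious piece is the sign convention, where a positive return from the queueing function asks the IP layer to resubmit as protocol -ret. In isolation:

    /* the IP layer's convention: 0 = consumed, negative = resubmit with
     * protocol -ret; the queueing function instead returns the positive
     * protocol number, so flip the sign here */
    static int rcv_to_ip_layer(int queue_ret)
    {
        return queue_ret > 0 ? -queue_ret : 0;
    }
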
/*
* All we need to do is get the socket, and then do a checksum.
*/
@@ -2170,14 +2192,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (unlikely(sk->sk_rx_dst != dst))
udp_sk_rx_dst_set(sk, dst);
- ret = udp_queue_rcv_skb(sk, skb);
+ ret = udp_unicast_rcv_skb(sk, skb, uh);
sock_put(sk);
- /* a return value > 0 means to resubmit the input, but
- * it wants the return to be -protocol, or 0
- */
- if (ret > 0)
- return -ret;
- return 0;
+ return ret;
}
if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
@@ -2185,22 +2202,8 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
saddr, daddr, udptable, proto);
sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
- if (sk) {
- int ret;
-
- if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
- skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
- inet_compute_pseudo);
-
- ret = udp_queue_rcv_skb(sk, skb);
-
- /* a return value > 0 means to resubmit the input, but
- * it wants the return to be -protocol, or 0
- */
- if (ret > 0)
- return -ret;
- return 0;
- }
+ if (sk)
+ return udp_unicast_rcv_skb(sk, skb, uh);
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto drop;
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index bcfc00e88756..f8de2482a529 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -67,6 +67,7 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async)
if (xo && (xo->flags & XFRM_GRO)) {
skb_mac_header_rebuild(skb);
+ skb_reset_transport_header(skb);
return 0;
}
diff --git a/net/ipv4/xfrm4_mode_transport.c b/net/ipv4/xfrm4_mode_transport.c
index 3d36644890bb..1ad2c2c4e250 100644
--- a/net/ipv4/xfrm4_mode_transport.c
+++ b/net/ipv4/xfrm4_mode_transport.c
@@ -46,7 +46,6 @@ static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
int ihl = skb->data - skb_transport_header(skb);
- struct xfrm_offload *xo = xfrm_offload(skb);
if (skb->transport_header != skb->network_header) {
memmove(skb_transport_header(skb),
@@ -54,8 +53,7 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
skb->network_header = skb->transport_header;
}
ip_hdr(skb)->tot_len = htons(skb->len + ihl);
- if (!xo || !(xo->flags & XFRM_GRO))
- skb_reset_transport_header(skb);
+ skb_reset_transport_header(skb);
return 0;
}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index d51a8c0b3372..4e81ff2f4588 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4201,7 +4201,6 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
p++;
continue;
}
- state->offset++;
return ifa;
}
@@ -4225,13 +4224,12 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
return ifa;
}
+ state->offset = 0;
while (++state->bucket < IN6_ADDR_HSIZE) {
- state->offset = 0;
hlist_for_each_entry_rcu(ifa,
&inet6_addr_lst[state->bucket], addr_lst) {
if (!net_eq(dev_net(ifa->idev->dev), net))
continue;
- state->offset++;
return ifa;
}
}
@@ -4930,8 +4928,8 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
/* unicast address incl. temp addr */
list_for_each_entry(ifa, &idev->addr_list, if_list) {
- if (++ip_idx < s_ip_idx)
- continue;
+ if (ip_idx < s_ip_idx)
+ goto next;
err = inet6_fill_ifaddr(skb, ifa,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
@@ -4940,6 +4938,8 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
if (err < 0)
break;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+next:
+ ip_idx++;
}
break;
}
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 5516f55e214b..cbe46175bb59 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -196,6 +196,8 @@ void fib6_info_destroy_rcu(struct rcu_head *head)
*ppcpu_rt = NULL;
}
}
+
+ free_percpu(f6i->rt6i_pcpu);
}
lwtstate_put(f6i->fib6_nh.nh_lwtstate);
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 37ff4805b20c..c7e495f12011 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -115,6 +115,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
payload_len = skb->len - nhoff - sizeof(*ipv6h);
ipv6h->payload_len = htons(payload_len);
skb->network_header = (u8 *)ipv6h - skb->head;
+ skb_reset_mac_len(skb);
if (udpfrag) {
int err = ip6_find_1stfragopt(skb, &prevhdr);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 16f200f06500..f9f8f554d141 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -219,12 +219,10 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
kfree_skb(skb);
return -ENOBUFS;
}
+ if (skb->sk)
+ skb_set_owner_w(skb2, skb->sk);
consume_skb(skb);
skb = skb2;
- /* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
- * it is safe to call in our context (socket lock not held)
- */
- skb_set_owner_w(skb, (struct sock *)sk);
}
if (opt->opt_flen)
ipv6_push_frag_opts(skb, opt, &proto);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 419960b0ba16..a9d06d4dd057 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1184,11 +1184,6 @@ route_lookup:
}
skb_dst_set(skb, dst);
- if (encap_limit >= 0) {
- init_tel_txopt(&opt, encap_limit);
- ipv6_push_frag_opts(skb, &opt.ops, &proto);
- }
-
if (hop_limit == 0) {
if (skb->protocol == htons(ETH_P_IP))
hop_limit = ip_hdr(skb)->ttl;
@@ -1210,6 +1205,11 @@ route_lookup:
if (err)
return err;
+ if (encap_limit >= 0) {
+ init_tel_txopt(&opt, encap_limit);
+ ipv6_push_frag_opts(skb, &opt.ops, &proto);
+ }
+
skb_push(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
ipv6h = ipv6_hdr(skb);
@@ -1234,7 +1234,7 @@ static inline int
ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- const struct iphdr *iph = ip_hdr(skb);
+ const struct iphdr *iph;
int encap_limit = -1;
struct flowi6 fl6;
__u8 dsfield;
@@ -1242,6 +1242,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
u8 tproto;
int err;
+ /* ensure we can access the full inner ip header */
+ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ return -1;
+
+ iph = ip_hdr(skb);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
tproto = READ_ONCE(t->parms.proto);
@@ -1306,7 +1311,7 @@ static inline int
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct ipv6hdr *ipv6h;
int encap_limit = -1;
__u16 offset;
struct flowi6 fl6;
@@ -1315,6 +1320,10 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
u8 tproto;
int err;
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
+ return -1;
+
+ ipv6h = ipv6_hdr(skb);
tproto = READ_ONCE(t->parms.proto);
if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
ip6_tnl_addr_conflict(t, ipv6h))
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 4ae54aaca373..dbab62e3f0d7 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2436,17 +2436,17 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
{
int err;
- /* callers have the socket lock and rtnl lock
- * so no other readers or writers of iml or its sflist
- */
+ write_lock_bh(&iml->sflock);
if (!iml->sflist) {
/* any-source empty exclude case */
- return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
+ err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
+ } else {
+ err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
+ iml->sflist->sl_count, iml->sflist->sl_addr, 0);
+ sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
+ iml->sflist = NULL;
}
- err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
- iml->sflist->sl_count, iml->sflist->sl_addr, 0);
- sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
- iml->sflist = NULL;
+ write_unlock_bh(&iml->sflock);
return err;
}
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 2a14d8b65924..8f68a518d9db 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -445,6 +445,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
else if (head->ip_summed == CHECKSUM_COMPLETE)
head->csum = csum_add(head->csum, fp->csum);
head->truesize += fp->truesize;
+ fp->sk = NULL;
}
sub_frag_mem_limit(fq->q.net, head->truesize);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 413d98bf24f4..5e0efd3954e9 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -651,8 +651,6 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
skb->tstamp = sockc->transmit_time;
- skb_dst_set(skb, &rt->dst);
- *dstp = NULL;
skb_put(skb, length);
skb_reset_network_header(skb);
@@ -665,8 +663,14 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
skb->transport_header = skb->network_header;
err = memcpy_from_msg(iph, msg, length);
- if (err)
- goto error_fault;
+ if (err) {
+ err = -EFAULT;
+ kfree_skb(skb);
+ goto error;
+ }
+
+ skb_dst_set(skb, &rt->dst);
+ *dstp = NULL;
/* if egress device is enslaved to an L3 master device pass the
* skb to its handler for processing
@@ -675,21 +679,28 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
if (unlikely(!skb))
return 0;
+ /* Acquire rcu_read_lock() in case we need to use rt->rt6i_idev
+ * in the error path. Since skb has been freed, the dst could
+ * have been queued for deletion.
+ */
+ rcu_read_lock();
IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
NULL, rt->dst.dev, dst_output);
if (err > 0)
err = net_xmit_errno(err);
- if (err)
- goto error;
+ if (err) {
+ IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
+ rcu_read_unlock();
+ goto error_check;
+ }
+ rcu_read_unlock();
out:
return 0;
-error_fault:
- err = -EFAULT;
- kfree_skb(skb);
error:
IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
+error_check:
if (err == -ENOBUFS && !np->recverr)
err = 0;
return err;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 18e00ce1719a..abcb5ae77319 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -364,11 +364,14 @@ EXPORT_SYMBOL(ip6_dst_alloc);
static void ip6_dst_destroy(struct dst_entry *dst)
{
+ struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
struct rt6_info *rt = (struct rt6_info *)dst;
struct fib6_info *from;
struct inet6_dev *idev;
- dst_destroy_metrics_generic(dst);
+ if (p != &dst_default_metrics && refcount_dec_and_test(&p->refcnt))
+ kfree(p);
+
rt6_uncached_list_del(rt);
idev = rt->rt6i_idev;
@@ -517,10 +520,11 @@ static void rt6_probe_deferred(struct work_struct *w)
static void rt6_probe(struct fib6_info *rt)
{
- struct __rt6_probe_work *work;
+ struct __rt6_probe_work *work = NULL;
const struct in6_addr *nh_gw;
struct neighbour *neigh;
struct net_device *dev;
+ struct inet6_dev *idev;
/*
* Okay, this does not seem to be appropriate
@@ -536,15 +540,12 @@ static void rt6_probe(struct fib6_info *rt)
nh_gw = &rt->fib6_nh.nh_gw;
dev = rt->fib6_nh.nh_dev;
rcu_read_lock_bh();
+ idev = __in6_dev_get(dev);
neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
if (neigh) {
- struct inet6_dev *idev;
-
if (neigh->nud_state & NUD_VALID)
goto out;
- idev = __in6_dev_get(dev);
- work = NULL;
write_lock(&neigh->lock);
if (!(neigh->nud_state & NUD_VALID) &&
time_after(jiffies,
@@ -554,11 +555,13 @@ static void rt6_probe(struct fib6_info *rt)
__neigh_set_probe_once(neigh);
}
write_unlock(&neigh->lock);
- } else {
+ } else if (time_after(jiffies, rt->last_probe +
+ idev->cnf.rtr_probe_interval)) {
work = kmalloc(sizeof(*work), GFP_ATOMIC);
}
if (work) {
+ rt->last_probe = jiffies;
INIT_WORK(&work->work, rt6_probe_deferred);
work->target = *nh_gw;
dev_hold(dev);
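
Before this change, a missing neighbour entry allocated probe work on every lookup; the new last_probe timestamp rate-limits that path against rtr_probe_interval. The comparison idiom is the kernel's wrap-safe time_after(), modeled here on a 32-bit tick counter:

    #include <stdbool.h>
    #include <stdint.h>

    /* signed-difference "time_after": robust against counter wrap */
    static bool time_after_u32(uint32_t a, uint32_t b)
    {
        return (int32_t)(b - a) < 0;
    }

    /* schedule a probe at most once per interval, even when no
     * neighbour entry exists yet (the case the fix rate-limits) */
    static bool should_probe(uint32_t now, uint32_t *last_probe,
                             uint32_t interval)
    {
        if (!time_after_u32(now, *last_probe + interval))
            return false;
        *last_probe = now;
        return true;
    }
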
@@ -946,8 +949,6 @@ static void ip6_rt_init_dst_reject(struct rt6_info *rt, struct fib6_info *ort)
static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
{
- rt->dst.flags |= fib6_info_dst_flags(ort);
-
if (ort->fib6_flags & RTF_REJECT) {
ip6_rt_init_dst_reject(rt, ort);
return;
@@ -978,6 +979,10 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
rt->rt6i_flags &= ~RTF_EXPIRES;
rcu_assign_pointer(rt->from, from);
dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
+ if (from->fib6_metrics != &dst_default_metrics) {
+ rt->dst._metrics |= DST_METRICS_REFCOUNTED;
+ refcount_inc(&from->fib6_metrics->refcnt);
+ }
}
/* Caller must already hold reference to @ort */
@@ -4316,11 +4321,6 @@ static int ip6_route_info_append(struct net *net,
if (!nh)
return -ENOMEM;
nh->fib6_info = rt;
- err = ip6_convert_metrics(net, rt, r_cfg);
- if (err) {
- kfree(nh);
- return err;
- }
memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
list_add_tail(&nh->next, rt6_nh_list);
@@ -4670,20 +4670,31 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
int iif, int type, u32 portid, u32 seq,
unsigned int flags)
{
- struct rtmsg *rtm;
+ struct rt6_info *rt6 = (struct rt6_info *)dst;
+ struct rt6key *rt6_dst, *rt6_src;
+ u32 *pmetrics, table, rt6_flags;
struct nlmsghdr *nlh;
+ struct rtmsg *rtm;
long expires = 0;
- u32 *pmetrics;
- u32 table;
nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
if (!nlh)
return -EMSGSIZE;
+ if (rt6) {
+ rt6_dst = &rt6->rt6i_dst;
+ rt6_src = &rt6->rt6i_src;
+ rt6_flags = rt6->rt6i_flags;
+ } else {
+ rt6_dst = &rt->fib6_dst;
+ rt6_src = &rt->fib6_src;
+ rt6_flags = rt->fib6_flags;
+ }
+
rtm = nlmsg_data(nlh);
rtm->rtm_family = AF_INET6;
- rtm->rtm_dst_len = rt->fib6_dst.plen;
- rtm->rtm_src_len = rt->fib6_src.plen;
+ rtm->rtm_dst_len = rt6_dst->plen;
+ rtm->rtm_src_len = rt6_src->plen;
rtm->rtm_tos = 0;
if (rt->fib6_table)
table = rt->fib6_table->tb6_id;
@@ -4698,7 +4709,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
rtm->rtm_scope = RT_SCOPE_UNIVERSE;
rtm->rtm_protocol = rt->fib6_protocol;
- if (rt->fib6_flags & RTF_CACHE)
+ if (rt6_flags & RTF_CACHE)
rtm->rtm_flags |= RTM_F_CLONED;
if (dest) {
@@ -4706,7 +4717,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
goto nla_put_failure;
rtm->rtm_dst_len = 128;
} else if (rtm->rtm_dst_len)
- if (nla_put_in6_addr(skb, RTA_DST, &rt->fib6_dst.addr))
+ if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
goto nla_put_failure;
#ifdef CONFIG_IPV6_SUBTREES
if (src) {
@@ -4714,12 +4725,12 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
goto nla_put_failure;
rtm->rtm_src_len = 128;
} else if (rtm->rtm_src_len &&
- nla_put_in6_addr(skb, RTA_SRC, &rt->fib6_src.addr))
+ nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
goto nla_put_failure;
#endif
if (iif) {
#ifdef CONFIG_IPV6_MROUTE
- if (ipv6_addr_is_multicast(&rt->fib6_dst.addr)) {
+ if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
int err = ip6mr_get_route(net, skb, rtm, portid);
if (err == 0)
@@ -4754,7 +4765,14 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
/* For multipath routes, walk the siblings list and add
* each as a nexthop within RTA_MULTIPATH.
*/
- if (rt->fib6_nsiblings) {
+ if (rt6) {
+ if (rt6_flags & RTF_GATEWAY &&
+ nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
+ goto nla_put_failure;
+
+ if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
+ goto nla_put_failure;
+ } else if (rt->fib6_nsiblings) {
struct fib6_info *sibling, *next_sibling;
struct nlattr *mp;
@@ -4777,7 +4795,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
goto nla_put_failure;
}
- if (rt->fib6_flags & RTF_EXPIRES) {
+ if (rt6_flags & RTF_EXPIRES) {
expires = dst ? dst->expires : rt->expires;
expires -= jiffies;
}
@@ -4785,7 +4803,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
goto nla_put_failure;
- if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->fib6_flags)))
+ if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
goto nla_put_failure;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 83f4c77c79d8..b36694b6716e 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -752,6 +752,26 @@ static void udp6_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
}
}
+/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
+ * return code conversion for ip layer consumption
+ */
+static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
+ struct udphdr *uh)
+{
+ int ret;
+
+ if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
+ skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
+ ip6_compute_pseudo);
+
+ ret = udpv6_queue_rcv_skb(sk, skb);
+
+ /* a return value > 0 means to resubmit the input */
+ if (ret > 0)
+ return ret;
+ return 0;
+}
+
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
@@ -803,13 +823,14 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (unlikely(sk->sk_rx_dst != dst))
udp6_sk_rx_dst_set(sk, dst);
- ret = udpv6_queue_rcv_skb(sk, skb);
- sock_put(sk);
+ if (!uh->check && !udp_sk(sk)->no_check6_rx) {
+ sock_put(sk);
+ goto report_csum_error;
+ }
- /* a return value > 0 means to resubmit the input */
- if (ret > 0)
- return ret;
- return 0;
+ ret = udp6_unicast_rcv_skb(sk, skb, uh);
+ sock_put(sk);
+ return ret;
}
/*
@@ -822,30 +843,13 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
/* Unicast */
sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
if (sk) {
- int ret;
-
- if (!uh->check && !udp_sk(sk)->no_check6_rx) {
- udp6_csum_zero_error(skb);
- goto csum_error;
- }
-
- if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
- skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
- ip6_compute_pseudo);
-
- ret = udpv6_queue_rcv_skb(sk, skb);
-
- /* a return value > 0 means to resubmit the input */
- if (ret > 0)
- return ret;
-
- return 0;
+ if (!uh->check && !udp_sk(sk)->no_check6_rx)
+ goto report_csum_error;
+ return udp6_unicast_rcv_skb(sk, skb, uh);
}
- if (!uh->check) {
- udp6_csum_zero_error(skb);
- goto csum_error;
- }
+ if (!uh->check)
+ goto report_csum_error;
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard;
@@ -866,6 +870,9 @@ short_packet:
ulen, skb->len,
daddr, ntohs(uh->dest));
goto discard;
+
+report_csum_error:
+ udp6_csum_zero_error(skb);
csum_error:
__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c
index 841f4a07438e..9ef490dddcea 100644
--- a/net/ipv6/xfrm6_input.c
+++ b/net/ipv6/xfrm6_input.c
@@ -59,6 +59,7 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async)
if (xo && (xo->flags & XFRM_GRO)) {
skb_mac_header_rebuild(skb);
+ skb_reset_transport_header(skb);
return -1;
}
diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c
index 9ad07a91708e..3c29da5defe6 100644
--- a/net/ipv6/xfrm6_mode_transport.c
+++ b/net/ipv6/xfrm6_mode_transport.c
@@ -51,7 +51,6 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
int ihl = skb->data - skb_transport_header(skb);
- struct xfrm_offload *xo = xfrm_offload(skb);
if (skb->transport_header != skb->network_header) {
memmove(skb_transport_header(skb),
@@ -60,8 +59,7 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
}
ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
sizeof(struct ipv6hdr));
- if (!xo || !(xo->flags & XFRM_GRO))
- skb_reset_transport_header(skb);
+ skb_reset_transport_header(skb);
return 0;
}
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 5959ce9620eb..6a74080005cf 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -170,9 +170,11 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
if (toobig && xfrm6_local_dontfrag(skb)) {
xfrm6_local_rxpmtu(skb, mtu);
+ kfree_skb(skb);
return -EMSGSIZE;
} else if (!skb->ignore_df && toobig && skb->sk) {
xfrm_local_error(skb, mtu);
+ kfree_skb(skb);
return -EMSGSIZE;
}
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index ef3defaf43b9..d35bcf92969c 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -146,8 +146,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
- while (nh + offset + 1 < skb->data ||
- pskb_may_pull(skb, nh + offset + 1 - skb->data)) {
+ while (nh + offset + sizeof(*exthdr) < skb->data ||
+ pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
nh = skb_network_header(skb);
exthdr = (struct ipv6_opt_hdr *)(nh + offset);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index a21d8ed0a325..e2f16a0173a9 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -351,20 +351,28 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
skb->dev = iucv->hs_dev;
- if (!skb->dev)
- return -ENODEV;
- if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
- return -ENETDOWN;
+ if (!skb->dev) {
+ err = -ENODEV;
+ goto err_free;
+ }
+ if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
+ err = -ENETDOWN;
+ goto err_free;
+ }
if (skb->len > skb->dev->mtu) {
- if (sock->sk_type == SOCK_SEQPACKET)
- return -EMSGSIZE;
- else
- skb_trim(skb, skb->dev->mtu);
+ if (sock->sk_type == SOCK_SEQPACKET) {
+ err = -EMSGSIZE;
+ goto err_free;
+ }
+ skb_trim(skb, skb->dev->mtu);
}
skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
nskb = skb_clone(skb, GFP_ATOMIC);
- if (!nskb)
- return -ENOMEM;
+ if (!nskb) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
skb_queue_tail(&iucv->send_skb_q, nskb);
err = dev_queue_xmit(skb);
if (net_xmit_eval(err)) {
@@ -375,6 +383,10 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
WARN_ON(atomic_read(&iucv->msg_recv) < 0);
}
return net_xmit_eval(err);
+
+err_free:
+ kfree_skb(skb);
+ return err;
}
static struct sock *__iucv_get_sock_by_name(char *nm)
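
All of the converted early returns above leaked the skb; routing every failure through one err_free label is the standard C unwind idiom. Reduced to its shape with plain malloc/free:

    #include <stdlib.h>

    /* every failure after allocation funnels through one label that
     * releases the buffer, so no early return can leak it */
    static int send_shape(size_t len)
    {
        int err;
        char *buf = malloc(len ? len : 1);  /* stands in for the skb */

        if (!buf)
            return -1;
        if (len == 0) {                     /* models the -ENODEV/-EMSGSIZE exits */
            err = -2;
            goto err_free;
        }

        /* ... queue buf for transmission; on success it is consumed ... */
        free(buf);                          /* sketch: consume it here */
        return 0;

    err_free:
        free(buf);
        return err;
    }
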
@@ -1167,7 +1179,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
err = afiucv_hs_send(&txmsg, sk, skb, 0);
if (err) {
atomic_dec(&iucv->msg_sent);
- goto fail;
+ goto out;
}
} else { /* Classic VM IUCV transport */
skb_queue_tail(&iucv->send_skb_q, skb);
@@ -2155,8 +2167,8 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
struct sock *sk;
struct iucv_sock *iucv;
struct af_iucv_trans_hdr *trans_hdr;
+ int err = NET_RX_SUCCESS;
char nullstring[8];
- int err = 0;
if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) {
WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d",
@@ -2254,7 +2266,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
err = afiucv_hs_callback_rx(sk, skb);
break;
default:
- ;
+ kfree_skb(skb);
}
return err;
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 8f7ef167c45a..eb502c6290c2 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1874,7 +1874,7 @@ static void iucv_pm_complete(struct device *dev)
* Returns 0 if there are still IUCV paths defined
* 1 if there are no IUCV paths defined
*/
-int iucv_path_table_empty(void)
+static int iucv_path_table_empty(void)
{
int i;
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index c0ac522b48a1..4ff89cb7c86f 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -734,6 +734,7 @@ void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
llc_sk(sk)->sap = sap;
spin_lock_bh(&sap->sk_lock);
+ sock_set_flag(sk, SOCK_RCU_FREE);
sap->sk_count++;
sk_nulls_add_node_rcu(sk, laddr_hb);
hlist_add_head(&llc->dev_hash_node, dev_hb);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d25da0e66da1..5d22eda8a6b1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -427,7 +427,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
/* Keys without a station are used for TX only */
- if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP))
+ if (sta && test_sta_flag(sta, WLAN_STA_MFP))
key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
break;
case NL80211_IFTYPE_ADHOC:
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 5e6cf2cee965..5836ddeac9e3 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1756,7 +1756,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
if (local->ops->wake_tx_queue &&
type != NL80211_IFTYPE_AP_VLAN &&
- type != NL80211_IFTYPE_MONITOR)
+ (type != NL80211_IFTYPE_MONITOR ||
+ (params->flags & MONITOR_FLAG_ACTIVE)))
txq_size += sizeof(struct txq_info) +
local->hw.txq_data_size;
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index ee56f18cad3f..21526630bf65 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -217,7 +217,8 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
void ieee80211s_init(void);
void ieee80211s_update_metric(struct ieee80211_local *local,
- struct sta_info *sta, struct sk_buff *skb);
+ struct sta_info *sta,
+ struct ieee80211_tx_status *st);
void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
void ieee80211_mesh_teardown_sdata(struct ieee80211_sub_if_data *sdata);
int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index daf9db3c8f24..6950cd0bf594 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -295,15 +295,12 @@ int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
}
void ieee80211s_update_metric(struct ieee80211_local *local,
- struct sta_info *sta, struct sk_buff *skb)
+ struct sta_info *sta,
+ struct ieee80211_tx_status *st)
{
- struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_tx_info *txinfo = st->info;
int failed;
- if (!ieee80211_is_data(hdr->frame_control))
- return;
-
failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
/* moving average, scaled to 100.
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 9a6d7208bf4f..91d7c0cd1882 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -479,11 +479,6 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
if (!skb)
return;
- if (dropped) {
- dev_kfree_skb_any(skb);
- return;
- }
-
if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
struct ieee80211_sub_if_data *sdata;
@@ -507,6 +502,8 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
rcu_read_unlock();
dev_kfree_skb_any(skb);
+ } else if (dropped) {
+ dev_kfree_skb_any(skb);
} else {
/* consumes skb */
skb_complete_wifi_ack(skb, acked);
@@ -811,7 +808,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
rate_control_tx_status(local, sband, status);
if (ieee80211_vif_is_mesh(&sta->sdata->vif))
- ieee80211s_update_metric(local, sta, skb);
+ ieee80211s_update_metric(local, sta, status);
if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
ieee80211_frame_acked(sta, skb);
@@ -972,6 +969,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
}
rate_control_tx_status(local, sband, status);
+ if (ieee80211_vif_is_mesh(&sta->sdata->vif))
+ ieee80211s_update_metric(local, sta, status);
}
if (acked || noack_success) {
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index 5cd5e6e5834e..6c647f425e05 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -16,6 +16,7 @@
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
+#include "wme.h"
/* give usermode some time for retries in setting up the TDLS session */
#define TDLS_PEER_SETUP_TIMEOUT (15 * HZ)
@@ -1010,14 +1011,13 @@ ieee80211_tdls_prep_mgmt_packet(struct wiphy *wiphy, struct net_device *dev,
switch (action_code) {
case WLAN_TDLS_SETUP_REQUEST:
case WLAN_TDLS_SETUP_RESPONSE:
- skb_set_queue_mapping(skb, IEEE80211_AC_BK);
- skb->priority = 2;
+ skb->priority = 256 + 2;
break;
default:
- skb_set_queue_mapping(skb, IEEE80211_AC_VI);
- skb->priority = 5;
+ skb->priority = 256 + 5;
break;
}
+ skb_set_queue_mapping(skb, ieee80211_select_queue(sdata, skb));
/*
* Set the WLAN_TDLS_TEARDOWN flag to indicate a teardown in progress.
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index f353d9db54bc..25ba24bef8f5 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -214,6 +214,7 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
{
struct ieee80211_local *local = tx->local;
struct ieee80211_if_managed *ifmgd;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
/* driver doesn't support power save */
if (!ieee80211_hw_check(&local->hw, SUPPORTS_PS))
@@ -242,6 +243,9 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
return TX_CONTINUE;
+ if (unlikely(info->flags & IEEE80211_TX_INTFL_OFFCHAN_TX_OK))
+ return TX_CONTINUE;
+
ifmgd = &tx->sdata->u.mgd;
/*
@@ -1890,7 +1894,7 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
if (invoke_tx_handlers_early(&tx))
- return false;
+ return true;
if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
return true;
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 7a4de6d618b1..8fbe6cdbe255 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1533,10 +1533,14 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
unsigned int flags;
if (event == NETDEV_REGISTER) {
- /* For now just support Ethernet, IPGRE, SIT and IPIP devices */
+
+ /* For now just support Ethernet, IPGRE, IP6GRE, SIT and
+ * IPIP devices
+ */
if (dev->type == ARPHRD_ETHER ||
dev->type == ARPHRD_LOOPBACK ||
dev->type == ARPHRD_IPGRE ||
+ dev->type == ARPHRD_IP6GRE ||
dev->type == ARPHRD_SIT ||
dev->type == ARPHRD_TUNNEL) {
mdev = mpls_add_dev(dev);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 71709c104081..f61c306de1d0 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -771,13 +771,13 @@ config NETFILTER_XT_TARGET_CHECKSUM
depends on NETFILTER_ADVANCED
---help---
This option adds a `CHECKSUM' target, which can be used in the iptables mangle
- table.
+ table to work around buggy DHCP clients in virtualized environments.
- You can use this target to compute and fill in the checksum in
- a packet that lacks a checksum. This is particularly useful,
- if you need to work around old applications such as dhcp clients,
- that do not work well with checksum offloads, but don't want to disable
- checksum offload in your device.
+ Some old DHCP clients drop packets because they are not aware
+ that the checksum would normally be offloaded to hardware and
+ thus should be considered valid.
+ This target can be used to fill in the checksum using iptables
+ when such packets are sent via a virtual network device.
To compile it as a module, choose M here. If unsure, say N.
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 9f14b0df6960..51c5d7eec0a3 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -776,9 +776,26 @@ static const struct nf_hook_ops ipv6_conntrack_ops[] = {
};
#endif
+static int nf_ct_tcp_fixup(struct nf_conn *ct, void *_nfproto)
+{
+ u8 nfproto = (unsigned long)_nfproto;
+
+ if (nf_ct_l3num(ct) != nfproto)
+ return 0;
+
+ if (nf_ct_protonum(ct) == IPPROTO_TCP &&
+ ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED) {
+ ct->proto.tcp.seen[0].td_maxwin = 0;
+ ct->proto.tcp.seen[1].td_maxwin = 0;
+ }
+
+ return 0;
+}
+
static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
{
struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
+ bool fixup_needed = false;
int err = 0;
mutex_lock(&nf_ct_proto_mutex);
@@ -798,6 +815,8 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
ARRAY_SIZE(ipv4_conntrack_ops));
if (err)
cnet->users4 = 0;
+ else
+ fixup_needed = true;
break;
#if IS_ENABLED(CONFIG_IPV6)
case NFPROTO_IPV6:
@@ -814,6 +833,8 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
ARRAY_SIZE(ipv6_conntrack_ops));
if (err)
cnet->users6 = 0;
+ else
+ fixup_needed = true;
break;
#endif
default:
@@ -822,6 +843,11 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
}
out_unlock:
mutex_unlock(&nf_ct_proto_mutex);
+
+ if (fixup_needed)
+ nf_ct_iterate_cleanup_net(net, nf_ct_tcp_fixup,
+ (void *)(unsigned long)nfproto, 0, 0);
+
return err;
}
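
These nf_conntrack_proto.c hunks run nf_ct_tcp_fixup() once per-netns hooks are (re)registered: zeroing td_maxwin in both directions of ESTABLISHED flows makes TCP window tracking re-initialize from the next packets, so connections created while conntrack was unregistered are not dropped as invalid. A hedged sketch of the iterator pattern (callback shape as used above; the trailing 0, 0 arguments are the report portid and flags):

    /* The iterator visits every conntrack entry in the netns; a
     * non-zero return would delete the entry, so the fixup always
     * returns 0 and only mutates tracking state.
     */
    static int fixup_cb(struct nf_conn *ct, void *data)
    {
            if (nf_ct_protonum(ct) == IPPROTO_TCP)
                    ct->proto.tcp.seen[0].td_maxwin = 0; /* re-learn */
            return 0;                        /* 0 == keep the entry */
    }

    /* nf_ct_iterate_cleanup_net(net, fixup_cb, NULL, 0, 0); */
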
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 8c58f96b59e7..f3f91ed2c21a 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -675,7 +675,7 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
}
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -697,6 +697,8 @@ static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
}
}
+
+ timeouts[CTA_TIMEOUT_DCCP_UNSPEC] = timeouts[CTA_TIMEOUT_DCCP_REQUEST];
return 0;
}
@@ -726,7 +728,7 @@ dccp_timeout_nla_policy[CTA_TIMEOUT_DCCP_MAX+1] = {
[CTA_TIMEOUT_DCCP_CLOSING] = { .type = NLA_U32 },
[CTA_TIMEOUT_DCCP_TIMEWAIT] = { .type = NLA_U32 },
};
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_SYSCTL
/* template, data assigned later */
@@ -827,6 +829,11 @@ static int dccp_init_net(struct net *net, u_int16_t proto)
dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ;
dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ;
dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL;
+
+ /* timeouts[0] is unused, make it the same as REQUEST so
+ * ->timeouts[0] contains the 'new' timeout, like udp or icmp.
+ */
+ dn->dccp_timeout[CT_DCCP_NONE] = dn->dccp_timeout[CT_DCCP_REQUEST];
}
return dccp_kmemdup_sysctl_table(net, pn, dn);
@@ -856,7 +863,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = {
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = dccp_timeout_nlattr_to_obj,
.obj_to_nlattr = dccp_timeout_obj_to_nlattr,
@@ -864,7 +871,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = {
.obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
.nla_policy = dccp_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = dccp_init_net,
.get_net_proto = dccp_get_net_proto,
};
@@ -889,7 +896,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = {
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = dccp_timeout_nlattr_to_obj,
.obj_to_nlattr = dccp_timeout_obj_to_nlattr,
@@ -897,7 +904,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = {
.obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
.nla_policy = dccp_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = dccp_init_net,
.get_net_proto = dccp_get_net_proto,
};
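
Besides converting from CONFIG_NF_CT_NETLINK_TIMEOUT to CONFIG_NF_CONNTRACK_TIMEOUT, the dccp hunks seed slot 0 of the per-state timeout array; the same aliasing appears in the sctp and tcp hunks below. Shared code reads ->timeouts[0] as the "new" timeout (as udp and icmp do), but these protocols' state enums reserve index 0, so it is aliased to the initial state. An illustrative sketch (values hypothetical):

    unsigned int timeouts[CT_DCCP_MAX + 1];

    timeouts[CT_DCCP_REQUEST] = 2 * 61 * HZ;              /* illustrative */
    /* slot 0 is never a real state; alias it to the initial state so
     * generic users of timeouts[0] get a sane 'new' timeout */
    timeouts[CT_DCCP_NONE] = timeouts[CT_DCCP_REQUEST];
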
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index ac4a0b296dcd..1df3244ecd07 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -70,7 +70,7 @@ static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
return ret;
}
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -113,7 +113,7 @@ static const struct nla_policy
generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = {
[CTA_TIMEOUT_GENERIC_TIMEOUT] = { .type = NLA_U32 },
};
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static struct ctl_table generic_sysctl_table[] = {
@@ -164,7 +164,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
.pkt_to_tuple = generic_pkt_to_tuple,
.packet = generic_packet,
.new = generic_new,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = generic_timeout_nlattr_to_obj,
.obj_to_nlattr = generic_timeout_obj_to_nlattr,
@@ -172,7 +172,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
.obj_size = sizeof(unsigned int),
.nla_policy = generic_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = generic_init_net,
.get_net_proto = generic_get_net_proto,
};
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index d1632252bf5b..650eb4fba2c5 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -285,7 +285,7 @@ static void gre_destroy(struct nf_conn *ct)
nf_ct_gre_keymap_destroy(master);
}
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -334,7 +334,7 @@ gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = {
[CTA_TIMEOUT_GRE_UNREPLIED] = { .type = NLA_U32 },
[CTA_TIMEOUT_GRE_REPLIED] = { .type = NLA_U32 },
};
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
static int gre_init_net(struct net *net, u_int16_t proto)
{
@@ -367,7 +367,7 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = gre_timeout_nlattr_to_obj,
.obj_to_nlattr = gre_timeout_obj_to_nlattr,
@@ -375,7 +375,7 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
.obj_size = sizeof(unsigned int) * GRE_CT_MAX,
.nla_policy = gre_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.net_id = &proto_gre_net_id,
.init_net = gre_init_net,
};
diff --git a/net/netfilter/nf_conntrack_proto_icmp.c b/net/netfilter/nf_conntrack_proto_icmp.c
index 036670b38282..43c7e1a217b9 100644
--- a/net/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/netfilter/nf_conntrack_proto_icmp.c
@@ -273,7 +273,7 @@ static unsigned int icmp_nlattr_tuple_size(void)
}
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -313,7 +313,7 @@ static const struct nla_policy
icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = {
[CTA_TIMEOUT_ICMP_TIMEOUT] = { .type = NLA_U32 },
};
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static struct ctl_table icmp_sysctl_table[] = {
@@ -374,7 +374,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
.nlattr_to_tuple = icmp_nlattr_to_tuple,
.nla_policy = icmp_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = icmp_timeout_nlattr_to_obj,
.obj_to_nlattr = icmp_timeout_obj_to_nlattr,
@@ -382,7 +382,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
.obj_size = sizeof(unsigned int),
.nla_policy = icmp_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = icmp_init_net,
.get_net_proto = icmp_get_net_proto,
};
diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c
index bed07b998a10..97e40f77d678 100644
--- a/net/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/netfilter/nf_conntrack_proto_icmpv6.c
@@ -274,7 +274,7 @@ static unsigned int icmpv6_nlattr_tuple_size(void)
}
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -314,7 +314,7 @@ static const struct nla_policy
icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = {
[CTA_TIMEOUT_ICMPV6_TIMEOUT] = { .type = NLA_U32 },
};
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static struct ctl_table icmpv6_sysctl_table[] = {
@@ -373,7 +373,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
.nlattr_to_tuple = icmpv6_nlattr_to_tuple,
.nla_policy = icmpv6_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = icmpv6_timeout_nlattr_to_obj,
.obj_to_nlattr = icmpv6_timeout_obj_to_nlattr,
@@ -381,7 +381,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
.obj_size = sizeof(unsigned int),
.nla_policy = icmpv6_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = icmpv6_init_net,
.get_net_proto = icmpv6_get_net_proto,
};
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 8d1e085fc14a..e4d738d34cd0 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -591,7 +591,7 @@ static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct)
}
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -613,6 +613,8 @@ static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],
timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
}
}
+
+ timeouts[CTA_TIMEOUT_SCTP_UNSPEC] = timeouts[CTA_TIMEOUT_SCTP_CLOSED];
return 0;
}
@@ -644,7 +646,7 @@ sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
[CTA_TIMEOUT_SCTP_HEARTBEAT_SENT] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED] = { .type = NLA_U32 },
};
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_SYSCTL
@@ -743,6 +745,11 @@ static int sctp_init_net(struct net *net, u_int16_t proto)
for (i = 0; i < SCTP_CONNTRACK_MAX; i++)
sn->timeouts[i] = sctp_timeouts[i];
+
+ /* timeouts[0] is unused, init it so ->timeouts[0] contains the
+ * 'new' timeout, like udp or icmp.
+ */
+ sn->timeouts[0] = sctp_timeouts[SCTP_CONNTRACK_CLOSED];
}
return sctp_kmemdup_sysctl_table(pn, sn);
@@ -773,7 +780,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = {
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = sctp_timeout_nlattr_to_obj,
.obj_to_nlattr = sctp_timeout_obj_to_nlattr,
@@ -781,7 +788,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = {
.obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
.nla_policy = sctp_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = sctp_init_net,
.get_net_proto = sctp_get_net_proto,
};
@@ -806,7 +813,8 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = {
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#endif
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = sctp_timeout_nlattr_to_obj,
.obj_to_nlattr = sctp_timeout_obj_to_nlattr,
@@ -814,8 +822,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = {
.obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
.nla_policy = sctp_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#endif
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = sctp_init_net,
.get_net_proto = sctp_get_net_proto,
};
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index d80d322b9d8b..247b89784a6f 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -1213,8 +1213,8 @@ static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
#define TCP_NLATTR_SIZE ( \
NLA_ALIGN(NLA_HDRLEN + 1) + \
NLA_ALIGN(NLA_HDRLEN + 1) + \
- NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))) + \
- NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))))
+ NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
+ NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
{
@@ -1279,7 +1279,7 @@ static unsigned int tcp_nlattr_tuple_size(void)
}
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -1301,6 +1301,7 @@ static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
timeouts[TCP_CONNTRACK_SYN_SENT] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
}
+
if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
timeouts[TCP_CONNTRACK_SYN_RECV] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
@@ -1341,6 +1342,8 @@ static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
timeouts[TCP_CONNTRACK_UNACK] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
}
+
+ timeouts[CTA_TIMEOUT_TCP_UNSPEC] = timeouts[CTA_TIMEOUT_TCP_SYN_SENT];
return 0;
}
@@ -1391,7 +1394,7 @@ static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
[CTA_TIMEOUT_TCP_RETRANS] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_UNACK] = { .type = NLA_U32 },
};
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static struct ctl_table tcp_sysctl_table[] = {
@@ -1518,6 +1521,10 @@ static int tcp_init_net(struct net *net, u_int16_t proto)
for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
tn->timeouts[i] = tcp_timeouts[i];
+ /* timeouts[0] is unused, make it the same as SYN_SENT so
+ * ->timeouts[0] contains the 'new' timeout, like udp or icmp.
+ */
+ tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT];
tn->tcp_loose = nf_ct_tcp_loose;
tn->tcp_be_liberal = nf_ct_tcp_be_liberal;
tn->tcp_max_retrans = nf_ct_tcp_max_retrans;
@@ -1551,7 +1558,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 =
.nlattr_size = TCP_NLATTR_SIZE,
.nla_policy = nf_ct_port_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = tcp_timeout_nlattr_to_obj,
.obj_to_nlattr = tcp_timeout_obj_to_nlattr,
@@ -1560,7 +1567,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 =
TCP_CONNTRACK_TIMEOUT_MAX,
.nla_policy = tcp_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = tcp_init_net,
.get_net_proto = tcp_get_net_proto,
};
@@ -1586,7 +1593,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 =
.nlattr_tuple_size = tcp_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = tcp_timeout_nlattr_to_obj,
.obj_to_nlattr = tcp_timeout_obj_to_nlattr,
@@ -1595,7 +1602,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 =
TCP_CONNTRACK_TIMEOUT_MAX,
.nla_policy = tcp_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = tcp_init_net,
.get_net_proto = tcp_get_net_proto,
};
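
The first tcp hunk fixes a classic pitfall: sizeof(sizeof(struct nf_ct_tcp_flags)) measures the size_t produced by the inner sizeof (8 bytes on LP64), not the two-byte structure itself, so TCP_NLATTR_SIZE over-reserved netlink attribute space. A standalone demonstration:

    #include <stdio.h>

    struct nf_ct_tcp_flags { unsigned char flags, mask; }; /* 2 bytes */

    int main(void)
    {
            printf("%zu\n", sizeof(struct nf_ct_tcp_flags));         /* 2 */
            printf("%zu\n", sizeof(sizeof(struct nf_ct_tcp_flags))); /* 8 on LP64 */
            return 0;
    }
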
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 7a1b8988a931..3065fb8ef91b 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -171,7 +171,7 @@ static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
return NF_ACCEPT;
}
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
@@ -221,7 +221,7 @@ udp_timeout_nla_policy[CTA_TIMEOUT_UDP_MAX+1] = {
[CTA_TIMEOUT_UDP_UNREPLIED] = { .type = NLA_U32 },
[CTA_TIMEOUT_UDP_REPLIED] = { .type = NLA_U32 },
};
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static struct ctl_table udp_sysctl_table[] = {
@@ -292,7 +292,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 =
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = udp_timeout_nlattr_to_obj,
.obj_to_nlattr = udp_timeout_obj_to_nlattr,
@@ -300,7 +300,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 =
.obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
.nla_policy = udp_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = udp_init_net,
.get_net_proto = udp_get_net_proto,
};
@@ -321,7 +321,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 =
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = udp_timeout_nlattr_to_obj,
.obj_to_nlattr = udp_timeout_obj_to_nlattr,
@@ -329,7 +329,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 =
.obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
.nla_policy = udp_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = udp_init_net,
.get_net_proto = udp_get_net_proto,
};
@@ -350,7 +350,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = udp_timeout_nlattr_to_obj,
.obj_to_nlattr = udp_timeout_obj_to_nlattr,
@@ -358,7 +358,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
.obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
.nla_policy = udp_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = udp_init_net,
.get_net_proto = udp_get_net_proto,
};
@@ -379,7 +379,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 =
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = udp_timeout_nlattr_to_obj,
.obj_to_nlattr = udp_timeout_obj_to_nlattr,
@@ -387,10 +387,9 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 =
.obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
.nla_policy = udp_timeout_nla_policy,
},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = udp_init_net,
.get_net_proto = udp_get_net_proto,
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6);
#endif
-#include <net/netfilter/nf_conntrack_timeout.h>
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 1dca5683f59f..2cfb173cd0b2 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -4637,6 +4637,7 @@ static int nft_flush_set(const struct nft_ctx *ctx,
}
set->ndeact++;
+ nft_set_elem_deactivate(ctx->net, set, elem);
nft_trans_elem_set(trans) = set;
nft_trans_elem(trans) = *elem;
list_add_tail(&trans->list, &ctx->net->nft.commit_list);
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index d46a236cdf31..a30f8ba4b89a 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -489,8 +489,8 @@ err:
return err;
}
-static struct ctnl_timeout *
-ctnl_timeout_find_get(struct net *net, const char *name)
+static struct nf_ct_timeout *ctnl_timeout_find_get(struct net *net,
+ const char *name)
{
struct ctnl_timeout *timeout, *matching = NULL;
@@ -509,7 +509,7 @@ ctnl_timeout_find_get(struct net *net, const char *name)
break;
}
err:
- return matching;
+ return matching ? &matching->timeout : NULL;
}
static void ctnl_timeout_put(struct nf_ct_timeout *t)
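
ctnl_timeout_find_get() now hands back the generic struct nf_ct_timeout embedded inside the nfnetlink-private struct ctnl_timeout, so users outside this file no longer need the private definition. A hedged sketch of the embed/recover pattern (field layout illustrative, not verbatim):

    struct nf_ct_timeout {
            u16 l3num;                      /* generic, widely visible */
            /* ... */
    };

    struct ctnl_timeout {
            struct list_head     head;      /* private bookkeeping */
            struct nf_ct_timeout timeout;   /* embedded generic part */
    };

    /* hand out the generic member... */
    static struct nf_ct_timeout *to_generic(struct ctnl_timeout *t)
    {
            return t ? &t->timeout : NULL;
    }

    /* ...and recover the container when needed */
    #define to_ctnl(t) container_of(t, struct ctnl_timeout, timeout)
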
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index ea4ba551abb2..d33094f4ec41 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -233,6 +233,7 @@ static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
int err;
if (verdict == NF_ACCEPT ||
+ verdict == NF_REPEAT ||
verdict == NF_STOP) {
rcu_read_lock();
ct_hook = rcu_dereference(nf_ct_hook);
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 26a8baebd072..5dd87748afa8 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -799,7 +799,7 @@ err:
}
struct nft_ct_timeout_obj {
- struct nf_conn *tmpl;
+ struct nf_ct_timeout *timeout;
u8 l4proto;
};
@@ -809,26 +809,42 @@ static void nft_ct_timeout_obj_eval(struct nft_object *obj,
{
const struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb);
- struct sk_buff *skb = pkt->skb;
+ struct nf_conn_timeout *timeout;
+ const unsigned int *values;
+
+ if (priv->l4proto != pkt->tprot)
+ return;
- if (ct ||
- priv->l4proto != pkt->tprot)
+ if (!ct || nf_ct_is_template(ct) || nf_ct_is_confirmed(ct))
return;
- nf_ct_set(skb, priv->tmpl, IP_CT_NEW);
+ timeout = nf_ct_timeout_find(ct);
+ if (!timeout) {
+ timeout = nf_ct_timeout_ext_add(ct, priv->timeout, GFP_ATOMIC);
+ if (!timeout) {
+ regs->verdict.code = NF_DROP;
+ return;
+ }
+ }
+
+ rcu_assign_pointer(timeout->timeout, priv->timeout);
+
+ /* adjust the timeout as per the 'new' state. ct is unconfirmed,
+ * so the current timestamp must not be added.
+ */
+ values = nf_ct_timeout_data(timeout);
+ if (values)
+ nf_ct_refresh(ct, pkt->skb, values[0]);
}
static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[],
struct nft_object *obj)
{
- const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
const struct nf_conntrack_l4proto *l4proto;
- struct nf_conn_timeout *timeout_ext;
struct nf_ct_timeout *timeout;
int l3num = ctx->family;
- struct nf_conn *tmpl;
__u8 l4num;
int ret;
@@ -863,28 +879,14 @@ static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
timeout->l3num = l3num;
timeout->l4proto = l4proto;
- tmpl = nf_ct_tmpl_alloc(ctx->net, zone, GFP_ATOMIC);
- if (!tmpl) {
- ret = -ENOMEM;
- goto err_free_timeout;
- }
-
- timeout_ext = nf_ct_timeout_ext_add(tmpl, timeout, GFP_ATOMIC);
- if (!timeout_ext) {
- ret = -ENOMEM;
- goto err_free_tmpl;
- }
ret = nf_ct_netns_get(ctx->net, ctx->family);
if (ret < 0)
- goto err_free_tmpl;
-
- priv->tmpl = tmpl;
+ goto err_free_timeout;
+ priv->timeout = timeout;
return 0;
-err_free_tmpl:
- nf_ct_tmpl_free(tmpl);
err_free_timeout:
kfree(timeout);
err_proto_put:
@@ -896,22 +898,19 @@ static void nft_ct_timeout_obj_destroy(const struct nft_ctx *ctx,
struct nft_object *obj)
{
struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
- struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl);
- struct nf_ct_timeout *timeout;
+ struct nf_ct_timeout *timeout = priv->timeout;
- timeout = rcu_dereference_raw(t->timeout);
nf_ct_untimeout(ctx->net, timeout);
nf_ct_l4proto_put(timeout->l4proto);
nf_ct_netns_put(ctx->net, ctx->family);
- nf_ct_tmpl_free(priv->tmpl);
+ kfree(priv->timeout);
}
static int nft_ct_timeout_obj_dump(struct sk_buff *skb,
struct nft_object *obj, bool reset)
{
const struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
- const struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl);
- const struct nf_ct_timeout *timeout = rcu_dereference_raw(t->timeout);
+ const struct nf_ct_timeout *timeout = priv->timeout;
struct nlattr *nest_params;
int ret;
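
The nft_ct.c rewrite drops the per-object conntrack template: rather than stamping packets with a template ct at eval time, the object keeps only the struct nf_ct_timeout and attaches it to the real (still unconfirmed) conntrack through the timeout extension, then refreshes the entry with the 'new' timeout. Condensed sketch of the eval path, restating the calls shown above with comments:

    timeout = nf_ct_timeout_find(ct);            /* extension present? */
    if (!timeout) {
            timeout = nf_ct_timeout_ext_add(ct, priv->timeout,
                                            GFP_ATOMIC);
            if (!timeout)
                    return;                      /* real code: NF_DROP */
    }
    rcu_assign_pointer(timeout->timeout, priv->timeout);

    values = nf_ct_timeout_data(timeout);        /* per-state table */
    if (values)
            nf_ct_refresh(ct, pkt->skb, values[0]); /* 'new' timeout */
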
diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
index 5af74b37f423..a35fb59ace73 100644
--- a/net/netfilter/nft_osf.c
+++ b/net/netfilter/nft_osf.c
@@ -49,7 +49,7 @@ static int nft_osf_init(const struct nft_ctx *ctx,
priv->dreg = nft_parse_register(tb[NFTA_OSF_DREG]);
err = nft_validate_register_store(ctx, priv->dreg, NULL,
- NFTA_DATA_VALUE, NFT_OSF_MAXGENRELEN);
+ NFT_DATA_VALUE, NFT_OSF_MAXGENRELEN);
if (err < 0)
return err;
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 55e2d9215c0d..0e5ec126f6ad 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -355,12 +355,11 @@ cont:
static void nft_rbtree_gc(struct work_struct *work)
{
+ struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
struct nft_set_gc_batch *gcb = NULL;
- struct rb_node *node, *prev = NULL;
- struct nft_rbtree_elem *rbe;
struct nft_rbtree *priv;
+ struct rb_node *node;
struct nft_set *set;
- int i;
priv = container_of(work, struct nft_rbtree, gc_work.work);
set = nft_set_container_of(priv);
@@ -371,7 +370,7 @@ static void nft_rbtree_gc(struct work_struct *work)
rbe = rb_entry(node, struct nft_rbtree_elem, node);
if (nft_rbtree_interval_end(rbe)) {
- prev = node;
+ rbe_end = rbe;
continue;
}
if (!nft_set_elem_expired(&rbe->ext))
@@ -379,29 +378,30 @@ static void nft_rbtree_gc(struct work_struct *work)
if (nft_set_elem_mark_busy(&rbe->ext))
continue;
+ if (rbe_prev) {
+ rb_erase(&rbe_prev->node, &priv->root);
+ rbe_prev = NULL;
+ }
gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
if (!gcb)
break;
atomic_dec(&set->nelems);
nft_set_gc_batch_add(gcb, rbe);
+ rbe_prev = rbe;
- if (prev) {
- rbe = rb_entry(prev, struct nft_rbtree_elem, node);
+ if (rbe_end) {
atomic_dec(&set->nelems);
- nft_set_gc_batch_add(gcb, rbe);
- prev = NULL;
+ nft_set_gc_batch_add(gcb, rbe_end);
+ rb_erase(&rbe_end->node, &priv->root);
+ rbe_end = NULL;
}
node = rb_next(node);
if (!node)
break;
}
- if (gcb) {
- for (i = 0; i < gcb->head.cnt; i++) {
- rbe = gcb->elems[i];
- rb_erase(&rbe->node, &priv->root);
- }
- }
+ if (rbe_prev)
+ rb_erase(&rbe_prev->node, &priv->root);
write_seqcount_end(&priv->count);
write_unlock_bh(&priv->lock);
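
The nft_set_rbtree GC is restructured so a node is never erased while the iterator still stands on it: the current victim is remembered in rbe_prev and removed only on the next pass (or after the loop), and interval-end nodes are erased as soon as their start node is collected. The general defer-erase pattern, sketched with hypothetical expired()/reclaim() helpers:

    struct rb_node *node, *victim = NULL;

    for (node = rb_first(root); node; node = rb_next(node)) {
            if (victim) {                /* iterator has moved on */
                    rb_erase(victim, root);
                    victim = NULL;
            }
            if (expired(node)) {
                    reclaim(node);       /* queue for freeing */
                    victim = node;       /* erase on the next pass */
            }
    }
    if (victim)
            rb_erase(victim, root);
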
diff --git a/net/netfilter/xt_CHECKSUM.c b/net/netfilter/xt_CHECKSUM.c
index 9f4151ec3e06..6c7aa6a0a0d2 100644
--- a/net/netfilter/xt_CHECKSUM.c
+++ b/net/netfilter/xt_CHECKSUM.c
@@ -16,6 +16,9 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_CHECKSUM.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael S. Tsirkin <mst@redhat.com>");
MODULE_DESCRIPTION("Xtables: checksum modification");
@@ -25,7 +28,7 @@ MODULE_ALIAS("ip6t_CHECKSUM");
static unsigned int
checksum_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
- if (skb->ip_summed == CHECKSUM_PARTIAL)
+ if (skb->ip_summed == CHECKSUM_PARTIAL && !skb_is_gso(skb))
skb_checksum_help(skb);
return XT_CONTINUE;
@@ -34,6 +37,8 @@ checksum_tg(struct sk_buff *skb, const struct xt_action_param *par)
static int checksum_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_CHECKSUM_info *einfo = par->targinfo;
+ const struct ip6t_ip6 *i6 = par->entryinfo;
+ const struct ipt_ip *i4 = par->entryinfo;
if (einfo->operation & ~XT_CHECKSUM_OP_FILL) {
pr_info_ratelimited("unsupported CHECKSUM operation %x\n",
@@ -43,6 +48,21 @@ static int checksum_tg_check(const struct xt_tgchk_param *par)
if (!einfo->operation)
return -EINVAL;
+ switch (par->family) {
+ case NFPROTO_IPV4:
+ if (i4->proto == IPPROTO_UDP &&
+ (i4->invflags & XT_INV_PROTO) == 0)
+ return 0;
+ break;
+ case NFPROTO_IPV6:
+ if ((i6->flags & IP6T_F_PROTO) &&
+ i6->proto == IPPROTO_UDP &&
+ (i6->invflags & XT_INV_PROTO) == 0)
+ return 0;
+ break;
+ }
+
+ pr_warn_once("CHECKSUM should be avoided. If really needed, restrict with \"-p udp\" and only use in OUTPUT\n");
return 0;
}
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
index dfbdbb2fc0ed..51d0c257e7a5 100644
--- a/net/netfilter/xt_cluster.c
+++ b/net/netfilter/xt_cluster.c
@@ -125,6 +125,7 @@ xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par)
static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
{
struct xt_cluster_match_info *info = par->matchinfo;
+ int ret;
if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
pr_info_ratelimited("you have exceeded the maximum number of cluster nodes (%u > %u)\n",
@@ -135,7 +136,17 @@ static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
pr_info_ratelimited("node mask cannot exceed total number of nodes\n");
return -EDOM;
}
- return 0;
+
+ ret = nf_ct_netns_get(par->net, par->family);
+ if (ret < 0)
+ pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
+ par->family);
+ return ret;
+}
+
+static void xt_cluster_mt_destroy(const struct xt_mtdtor_param *par)
+{
+ nf_ct_netns_put(par->net, par->family);
}
static struct xt_match xt_cluster_match __read_mostly = {
@@ -144,6 +155,7 @@ static struct xt_match xt_cluster_match __read_mostly = {
.match = xt_cluster_mt,
.checkentry = xt_cluster_mt_checkentry,
.matchsize = sizeof(struct xt_cluster_match_info),
+ .destroy = xt_cluster_mt_destroy,
.me = THIS_MODULE,
};
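
The xt_cluster hunks pin conntrack support for the rule's netns. Every successful nf_ct_netns_get() in ->checkentry must be matched by an nf_ct_netns_put() in ->destroy, which is why the new destroy hook is registered alongside it. Sketch of the pairing:

    static int example_checkentry(const struct xt_mtchk_param *par)
    {
            return nf_ct_netns_get(par->net, par->family);  /* take ref */
    }

    static void example_destroy(const struct xt_mtdtor_param *par)
    {
            nf_ct_netns_put(par->net, par->family);         /* drop ref */
    }
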
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 9b16402f29af..3e7d259e5d8d 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -1057,7 +1057,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
static void *dl_seq_start(struct seq_file *s, loff_t *pos)
__acquires(htable->lock)
{
- struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+ struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
unsigned int *bucket;
spin_lock_bh(&htable->lock);
@@ -1074,7 +1074,7 @@ static void *dl_seq_start(struct seq_file *s, loff_t *pos)
static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
- struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+ struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
unsigned int *bucket = v;
*pos = ++(*bucket);
@@ -1088,7 +1088,7 @@ static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
static void dl_seq_stop(struct seq_file *s, void *v)
__releases(htable->lock)
{
- struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+ struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
unsigned int *bucket = v;
if (!IS_ERR(bucket))
@@ -1130,7 +1130,7 @@ static void dl_seq_print(struct dsthash_ent *ent, u_int8_t family,
static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family,
struct seq_file *s)
{
- struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private));
+ struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file));
spin_lock(&ent->lock);
/* recalculate to show accurate numbers */
@@ -1145,7 +1145,7 @@ static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family,
static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family,
struct seq_file *s)
{
- struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private));
+ struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file));
spin_lock(&ent->lock);
/* recalculate to show accurate numbers */
@@ -1160,7 +1160,7 @@ static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family,
static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
struct seq_file *s)
{
- struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private));
+ struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file));
spin_lock(&ent->lock);
/* recalculate to show accurate numbers */
@@ -1174,7 +1174,7 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
static int dl_seq_show_v2(struct seq_file *s, void *v)
{
- struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+ struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
unsigned int *bucket = (unsigned int *)v;
struct dsthash_ent *ent;
@@ -1188,7 +1188,7 @@ static int dl_seq_show_v2(struct seq_file *s, void *v)
static int dl_seq_show_v1(struct seq_file *s, void *v)
{
- struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+ struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
unsigned int *bucket = v;
struct dsthash_ent *ent;
@@ -1202,7 +1202,7 @@ static int dl_seq_show_v1(struct seq_file *s, void *v)
static int dl_seq_show(struct seq_file *s, void *v)
{
- struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
+ struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
unsigned int *bucket = v;
struct dsthash_ent *ent;
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 0472f3472842..ada144e5645b 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -56,7 +56,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
struct sk_buff *pskb = (struct sk_buff *)skb;
struct sock *sk = skb->sk;
- if (!net_eq(xt_net(par), sock_net(sk)))
+ if (sk && !net_eq(xt_net(par), sock_net(sk)))
sk = NULL;
if (!sk)
@@ -117,7 +117,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
struct sk_buff *pskb = (struct sk_buff *)skb;
struct sock *sk = skb->sk;
- if (!net_eq(xt_net(par), sock_net(sk)))
+ if (sk && !net_eq(xt_net(par), sock_net(sk)))
sk = NULL;
if (!sk)
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index c070dfc0190a..c92894c3e40a 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -781,7 +781,8 @@ static int netlbl_unlabel_addrinfo_get(struct genl_info *info,
{
u32 addr_len;
- if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR]) {
+ if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR] &&
+ info->attrs[NLBL_UNLABEL_A_IPV4MASK]) {
addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]);
if (addr_len != sizeof(struct in_addr) &&
addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK]))
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index ac8030c4bcf8..19cb2e473ea6 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -209,6 +209,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
}
create_info = (struct hci_create_pipe_resp *)skb->data;
+ if (create_info->pipe >= NFC_HCI_MAX_PIPES) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+
/* Save the new created pipe and bind with local gate,
* the description for skb->data[3] is destination gate id
* but since we received this cmd from host controller, we
@@ -232,6 +237,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
}
delete_info = (struct hci_delete_pipe_noti *)skb->data;
+ if (delete_info->pipe >= NFC_HCI_MAX_PIPES) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+
hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE;
hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST;
break;
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 86a75105af1a..35ae64cbef33 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -1312,6 +1312,10 @@ static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
rcu_assign_pointer(help->helper, helper);
info->helper = helper;
+
+ if (info->nat)
+ request_module("ip_nat_%s", name);
+
return 0;
}
@@ -1624,10 +1628,6 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
OVS_NLERR(log, "Failed to allocate conntrack template");
return -ENOMEM;
}
-
- __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
- nf_conntrack_get(&ct_info.ct->ct_general);
-
if (helper) {
err = ovs_ct_add_helper(&ct_info, helper, key, log);
if (err)
@@ -1639,6 +1639,8 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
if (err)
goto err_free_ct;
+ __set_bit(IPS_CONFIRMED_BIT, &ct_info.ct->status);
+ nf_conntrack_get(&ct_info.ct->ct_general);
return 0;
err_free_ct:
__ovs_ct_free_action(&ct_info);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 75c92a87e7b2..d6e94dc7e290 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2715,10 +2715,12 @@ tpacket_error:
}
}
- if (po->has_vnet_hdr && virtio_net_hdr_to_skb(skb, vnet_hdr,
- vio_le())) {
- tp_len = -EINVAL;
- goto tpacket_error;
+ if (po->has_vnet_hdr) {
+ if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
+ tp_len = -EINVAL;
+ goto tpacket_error;
+ }
+ virtio_net_hdr_set_proto(skb, vnet_hdr);
}
skb->destructor = tpacket_destruct_skb;
@@ -2915,6 +2917,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (err)
goto out_free;
len += sizeof(vnet_hdr);
+ virtio_net_hdr_set_proto(skb, &vnet_hdr);
}
skb_probe_transport_header(skb, reserve);
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 3ab55784b637..762d2c6788a3 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -76,11 +76,13 @@ struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
struct rds_sock *rs;
__rds_create_bind_key(key, addr, port, scope_id);
- rs = rhashtable_lookup_fast(&bind_hash_table, key, ht_parms);
+ rcu_read_lock();
+ rs = rhashtable_lookup(&bind_hash_table, key, ht_parms);
if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
rds_sock_addref(rs);
else
rs = NULL;
+ rcu_read_unlock();
rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr,
ntohs(port));
@@ -235,6 +237,7 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
}
+ sock_set_flag(sk, SOCK_RCU_FREE);
ret = rds_add_bound(rs, binding_addr, &port, scope_id);
if (ret)
goto out;
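
The rds/bind.c hunks pair SOCK_RCU_FREE with an explicit RCU read-side section around the lookup: the socket may now be freed only after a grace period, so the liveness test and refcount bump must happen before rcu_read_unlock(), and rhashtable_lookup() (unlike the _fast variant) expects the caller to hold the lock. Generic shape of the fix, with hypothetical obj/table names:

    struct obj {
            struct rhash_head node;
            refcount_t        ref;
    };

    static struct obj *obj_get(struct rhashtable *tbl, const void *key,
                               const struct rhashtable_params params)
    {
            struct obj *o;

            rcu_read_lock();
            o = rhashtable_lookup(tbl, key, params);
            if (o && !refcount_inc_not_zero(&o->ref))
                    o = NULL;            /* found but already dying */
            rcu_read_unlock();
            return o;                    /* pinned, safe past RCU */
    }
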
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 73427ff439f9..71ff356ee702 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -443,7 +443,7 @@ int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);
/* ib_stats.c */
-DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
+DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
#define rds_ib_stats_add(member, count) \
rds_stats_add_which(rds_ib_stats, member, count)
diff --git a/net/rds/send.c b/net/rds/send.c
index 57b3d5a8b2db..fe785ee819dd 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -1007,7 +1007,8 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
return ret;
}
-static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
+static int rds_send_mprds_hash(struct rds_sock *rs,
+ struct rds_connection *conn, int nonblock)
{
int hash;
@@ -1023,10 +1024,16 @@ static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
* used. But if we are interrupted, we have to use the zero
* c_path in case the connection ends up being non-MP capable.
*/
- if (conn->c_npaths == 0)
+ if (conn->c_npaths == 0) {
+ /* Cannot wait for the connection to be made, so just use
+ * the base c_path.
+ */
+ if (nonblock)
+ return 0;
if (wait_event_interruptible(conn->c_hs_waitq,
conn->c_npaths != 0))
hash = 0;
+ }
if (conn->c_npaths == 1)
hash = 0;
}
@@ -1256,7 +1263,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
}
if (conn->c_trans->t_mp_capable)
- cpath = &conn->c_path[rds_send_mprds_hash(rs, conn)];
+ cpath = &conn->c_path[rds_send_mprds_hash(rs, conn, nonblock)];
else
cpath = &conn->c_path[0];
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index c97558710421..a6e6cae82c30 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -40,17 +40,12 @@ struct rxrpc_crypt {
struct rxrpc_connection;
/*
- * Mark applied to socket buffers.
+ * Mark applied to socket buffers in skb->mark. skb->priority is used
+ * to pass supplementary information.
*/
enum rxrpc_skb_mark {
- RXRPC_SKB_MARK_DATA, /* data message */
- RXRPC_SKB_MARK_FINAL_ACK, /* final ACK received message */
- RXRPC_SKB_MARK_BUSY, /* server busy message */
- RXRPC_SKB_MARK_REMOTE_ABORT, /* remote abort message */
- RXRPC_SKB_MARK_LOCAL_ABORT, /* local abort message */
- RXRPC_SKB_MARK_NET_ERROR, /* network error message */
- RXRPC_SKB_MARK_LOCAL_ERROR, /* local error message */
- RXRPC_SKB_MARK_NEW_CALL, /* local error message */
+ RXRPC_SKB_MARK_REJECT_BUSY, /* Reject with BUSY */
+ RXRPC_SKB_MARK_REJECT_ABORT, /* Reject with ABORT (code in skb->priority) */
};
/*
@@ -293,7 +288,6 @@ struct rxrpc_peer {
struct hlist_node hash_link;
struct rxrpc_local *local;
struct hlist_head error_targets; /* targets for net error distribution */
- struct work_struct error_distributor;
struct rb_root service_conns; /* Service connections */
struct list_head keepalive_link; /* Link in net->peer_keepalive[] */
time64_t last_tx_at; /* Last time packet sent here */
@@ -304,12 +298,11 @@ struct rxrpc_peer {
unsigned int maxdata; /* data size (MTU - hdrsize) */
unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */
int debug_id; /* debug ID for printks */
- int error_report; /* Net (+0) or local (+1000000) to distribute */
-#define RXRPC_LOCAL_ERROR_OFFSET 1000000
struct sockaddr_rxrpc srx; /* remote address */
/* calculated RTT cache */
#define RXRPC_RTT_CACHE_SIZE 32
+ spinlock_t rtt_input_lock; /* RTT lock for input routine */
ktime_t rtt_last_req; /* Time of last RTT request */
u64 rtt; /* Current RTT estimate (in nS) */
u64 rtt_sum; /* Sum of cache contents */
@@ -450,19 +443,29 @@ struct rxrpc_connection {
spinlock_t state_lock; /* state-change lock */
enum rxrpc_conn_cache_state cache_state;
enum rxrpc_conn_proto_state state; /* current state of connection */
- u32 local_abort; /* local abort code */
- u32 remote_abort; /* remote abort code */
+ u32 abort_code; /* Abort code of connection abort */
int debug_id; /* debug ID for printks */
atomic_t serial; /* packet serial number counter */
unsigned int hi_serial; /* highest serial number received */
u32 security_nonce; /* response re-use preventer */
- u16 service_id; /* Service ID, possibly upgraded */
+ u32 service_id; /* Service ID, possibly upgraded */
u8 size_align; /* data size alignment (for security) */
u8 security_size; /* security header size */
u8 security_ix; /* security type */
u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
+ short error; /* Local error code */
};
+static inline bool rxrpc_to_server(const struct rxrpc_skb_priv *sp)
+{
+ return sp->hdr.flags & RXRPC_CLIENT_INITIATED;
+}
+
+static inline bool rxrpc_to_client(const struct rxrpc_skb_priv *sp)
+{
+ return !rxrpc_to_server(sp);
+}
+
/*
* Flags in call->flags.
*/
@@ -633,6 +636,8 @@ struct rxrpc_call {
bool tx_phase; /* T if transmission phase, F if receive phase */
u8 nr_jumbo_bad; /* Number of jumbo dups/exceeds-windows */
+ spinlock_t input_lock; /* Lock for packet input to this call */
+
/* receive-phase ACK management */
u8 ackr_reason; /* reason to ACK */
u16 ackr_skew; /* skew on packet being ACK'd */
@@ -717,7 +722,7 @@ extern struct workqueue_struct *rxrpc_workqueue;
int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
void rxrpc_discard_prealloc(struct rxrpc_sock *);
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *,
- struct rxrpc_connection *,
+ struct rxrpc_sock *,
struct sk_buff *);
void rxrpc_accept_incoming_calls(struct rxrpc_local *);
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long,
@@ -887,8 +892,9 @@ extern unsigned long rxrpc_conn_idle_client_fast_expiry;
extern struct idr rxrpc_client_conn_ids;
void rxrpc_destroy_client_conn_ids(void);
-int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *,
- struct sockaddr_rxrpc *, gfp_t);
+int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *,
+ struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *,
+ gfp_t);
void rxrpc_expose_client_call(struct rxrpc_call *);
void rxrpc_disconnect_client_call(struct rxrpc_call *);
void rxrpc_put_client_conn(struct rxrpc_connection *);
@@ -908,7 +914,8 @@ extern unsigned int rxrpc_closed_conn_expiry;
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
- struct sk_buff *);
+ struct sk_buff *,
+ struct rxrpc_peer **);
void __rxrpc_disconnect_call(struct rxrpc_connection *, struct rxrpc_call *);
void rxrpc_disconnect_call(struct rxrpc_call *);
void rxrpc_kill_connection(struct rxrpc_connection *);
@@ -960,7 +967,7 @@ void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
/*
* input.c
*/
-void rxrpc_data_ready(struct sock *);
+int rxrpc_input_packet(struct sock *, struct sk_buff *);
/*
* insecure.c
@@ -1031,7 +1038,6 @@ void rxrpc_send_keepalive(struct rxrpc_peer *);
* peer_event.c
*/
void rxrpc_error_report(struct sock *);
-void rxrpc_peer_error_distributor(struct work_struct *);
void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
void rxrpc_peer_keepalive_worker(struct work_struct *);
@@ -1041,16 +1047,15 @@ void rxrpc_peer_keepalive_worker(struct work_struct *);
*/
struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
const struct sockaddr_rxrpc *);
-struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *,
+struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *, struct rxrpc_local *,
struct sockaddr_rxrpc *, gfp_t);
struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t);
-struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *,
- struct rxrpc_peer *);
+void rxrpc_new_incoming_peer(struct rxrpc_sock *, struct rxrpc_local *,
+ struct rxrpc_peer *);
void rxrpc_destroy_all_peers(struct rxrpc_net *);
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
void rxrpc_put_peer(struct rxrpc_peer *);
-void __rxrpc_queue_peer_error(struct rxrpc_peer *);
/*
* proc.c
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 9d1e298b784c..8079aacaecac 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -249,11 +249,11 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
*/
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
struct rxrpc_local *local,
+ struct rxrpc_peer *peer,
struct rxrpc_connection *conn,
struct sk_buff *skb)
{
struct rxrpc_backlog *b = rx->backlog;
- struct rxrpc_peer *peer, *xpeer;
struct rxrpc_call *call;
unsigned short call_head, conn_head, peer_head;
unsigned short call_tail, conn_tail, peer_tail;
@@ -276,21 +276,18 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
return NULL;
if (!conn) {
- /* No connection. We're going to need a peer to start off
- * with. If one doesn't yet exist, use a spare from the
- * preallocation set. We dump the address into the spare in
- * anticipation - and to save on stack space.
- */
- xpeer = b->peer_backlog[peer_tail];
- if (rxrpc_extract_addr_from_skb(local, &xpeer->srx, skb) < 0)
- return NULL;
-
- peer = rxrpc_lookup_incoming_peer(local, xpeer);
- if (peer == xpeer) {
+ if (peer && !rxrpc_get_peer_maybe(peer))
+ peer = NULL;
+ if (!peer) {
+ peer = b->peer_backlog[peer_tail];
+ if (rxrpc_extract_addr_from_skb(local, &peer->srx, skb) < 0)
+ return NULL;
b->peer_backlog[peer_tail] = NULL;
smp_store_release(&b->peer_backlog_tail,
(peer_tail + 1) &
(RXRPC_BACKLOG_MAX - 1));
+
+ rxrpc_new_incoming_peer(rx, local, peer);
}
/* Now allocate and set up the connection */
@@ -335,45 +332,38 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
* The call is returned with the user access mutex held.
*/
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
- struct rxrpc_connection *conn,
+ struct rxrpc_sock *rx,
struct sk_buff *skb)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
- struct rxrpc_sock *rx;
+ struct rxrpc_connection *conn;
+ struct rxrpc_peer *peer = NULL;
struct rxrpc_call *call;
- u16 service_id = sp->hdr.serviceId;
_enter("");
- /* Get the socket providing the service */
- rx = rcu_dereference(local->service);
- if (rx && (service_id == rx->srx.srx_service ||
- service_id == rx->second_service))
- goto found_service;
-
- trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
- RX_INVALID_OPERATION, EOPNOTSUPP);
- skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
- skb->priority = RX_INVALID_OPERATION;
- _leave(" = NULL [service]");
- return NULL;
-
-found_service:
spin_lock(&rx->incoming_lock);
if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
rx->sk.sk_state == RXRPC_CLOSE) {
trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
- skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
+ skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
skb->priority = RX_INVALID_OPERATION;
_leave(" = NULL [close]");
call = NULL;
goto out;
}
- call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
+ /* The peer, connection and call may all have sprung into existence due
+ * to a duplicate packet being handled on another CPU in parallel, so
+ * we have to recheck the routing. However, we're now holding
+ * rx->incoming_lock, so the values should remain stable.
+ */
+ conn = rxrpc_find_connection_rcu(local, skb, &peer);
+
+ call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
if (!call) {
- skb->mark = RXRPC_SKB_MARK_BUSY;
+ skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
_leave(" = NULL [busy]");
call = NULL;
goto out;
@@ -413,20 +403,22 @@ found_service:
case RXRPC_CONN_SERVICE:
write_lock(&call->state_lock);
- if (rx->discard_new_call)
- call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
- else
- call->state = RXRPC_CALL_SERVER_ACCEPTING;
+ if (call->state < RXRPC_CALL_COMPLETE) {
+ if (rx->discard_new_call)
+ call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
+ else
+ call->state = RXRPC_CALL_SERVER_ACCEPTING;
+ }
write_unlock(&call->state_lock);
break;
case RXRPC_CONN_REMOTELY_ABORTED:
rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
- conn->remote_abort, -ECONNABORTED);
+ conn->abort_code, conn->error);
break;
case RXRPC_CONN_LOCALLY_ABORTED:
rxrpc_abort_call("CON", call, sp->hdr.seq,
- conn->local_abort, -ECONNABORTED);
+ conn->abort_code, conn->error);
break;
default:
BUG();
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 9486293fef5c..8f1a8f85b1f9 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -138,6 +138,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
init_waitqueue_head(&call->waitq);
spin_lock_init(&call->lock);
spin_lock_init(&call->notify_lock);
+ spin_lock_init(&call->input_lock);
rwlock_init(&call->state_lock);
atomic_set(&call->usage, 1);
call->debug_id = debug_id;
@@ -287,7 +288,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
/* Set up or get a connection record and set the protocol parameters,
* including channel number and call ID.
*/
- ret = rxrpc_connect_call(call, cp, srx, gfp);
+ ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
if (ret < 0)
goto error;
@@ -339,7 +340,7 @@ int rxrpc_retry_client_call(struct rxrpc_sock *rx,
/* Set up or get a connection record and set the protocol parameters,
* including channel number and call ID.
*/
- ret = rxrpc_connect_call(call, cp, srx, gfp);
+ ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
if (ret < 0)
goto error;
@@ -400,7 +401,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
rcu_assign_pointer(conn->channels[chan].call, call);
spin_lock(&conn->params.peer->lock);
- hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
+ hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
spin_unlock(&conn->params.peer->lock);
_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index f8f37188a932..521189f4b666 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -276,7 +276,8 @@ dont_reuse:
* If we return with a connection, the call will be on its waiting list. It's
* left to the caller to assign a channel and wake up the call.
*/
-static int rxrpc_get_client_conn(struct rxrpc_call *call,
+static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
+ struct rxrpc_call *call,
struct rxrpc_conn_parameters *cp,
struct sockaddr_rxrpc *srx,
gfp_t gfp)
@@ -289,7 +290,7 @@ static int rxrpc_get_client_conn(struct rxrpc_call *call,
_enter("{%d,%lx},", call->debug_id, call->user_call_ID);
- cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp);
+ cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
if (!cp->peer)
goto error;
@@ -683,7 +684,8 @@ out:
* find a connection for a call
* - called in process context with IRQs enabled
*/
-int rxrpc_connect_call(struct rxrpc_call *call,
+int rxrpc_connect_call(struct rxrpc_sock *rx,
+ struct rxrpc_call *call,
struct rxrpc_conn_parameters *cp,
struct sockaddr_rxrpc *srx,
gfp_t gfp)
@@ -696,7 +698,7 @@ int rxrpc_connect_call(struct rxrpc_call *call,
rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
rxrpc_cull_active_client_conns(rxnet);
- ret = rxrpc_get_client_conn(call, cp, srx, gfp);
+ ret = rxrpc_get_client_conn(rx, call, cp, srx, gfp);
if (ret < 0)
goto out;
@@ -710,8 +712,8 @@ int rxrpc_connect_call(struct rxrpc_call *call,
}
spin_lock_bh(&call->conn->params.peer->lock);
- hlist_add_head(&call->error_link,
- &call->conn->params.peer->error_targets);
+ hlist_add_head_rcu(&call->error_link,
+ &call->conn->params.peer->error_targets);
spin_unlock_bh(&call->conn->params.peer->lock);
out:
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 6df56ce68861..b6fca8ebb117 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -126,7 +126,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
switch (chan->last_type) {
case RXRPC_PACKET_TYPE_ABORT:
- _proto("Tx ABORT %%%u { %d } [re]", serial, conn->local_abort);
+ _proto("Tx ABORT %%%u { %d } [re]", serial, conn->abort_code);
break;
case RXRPC_PACKET_TYPE_ACK:
trace_rxrpc_tx_ack(chan->call_debug_id, serial,
@@ -153,13 +153,12 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
* pass a connection-level abort onto all calls on that connection
*/
static void rxrpc_abort_calls(struct rxrpc_connection *conn,
- enum rxrpc_call_completion compl,
- u32 abort_code, int error)
+ enum rxrpc_call_completion compl)
{
struct rxrpc_call *call;
int i;
- _enter("{%d},%x", conn->debug_id, abort_code);
+ _enter("{%d},%x", conn->debug_id, conn->abort_code);
spin_lock(&conn->channel_lock);
@@ -172,9 +171,11 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
trace_rxrpc_abort(call->debug_id,
"CON", call->cid,
call->call_id, 0,
- abort_code, error);
+ conn->abort_code,
+ conn->error);
if (rxrpc_set_call_completion(call, compl,
- abort_code, error))
+ conn->abort_code,
+ conn->error))
rxrpc_notify_socket(call);
}
}
@@ -207,10 +208,12 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
return 0;
}
+ conn->error = error;
+ conn->abort_code = abort_code;
conn->state = RXRPC_CONN_LOCALLY_ABORTED;
spin_unlock_bh(&conn->state_lock);
- rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, abort_code, error);
+ rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED);
msg.msg_name = &conn->params.peer->srx.transport;
msg.msg_namelen = conn->params.peer->srx.transport_len;
@@ -229,7 +232,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
whdr._rsvd = 0;
whdr.serviceId = htons(conn->service_id);
- word = htonl(conn->local_abort);
+ word = htonl(conn->abort_code);
iov[0].iov_base = &whdr;
iov[0].iov_len = sizeof(whdr);
@@ -240,7 +243,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
serial = atomic_inc_return(&conn->serial);
whdr.serial = htonl(serial);
- _proto("Tx CONN ABORT %%%u { %d }", serial, conn->local_abort);
+ _proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code);
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
if (ret < 0) {
@@ -315,9 +318,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
abort_code = ntohl(wtmp);
_proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code);
+ conn->error = -ECONNABORTED;
+ conn->abort_code = abort_code;
conn->state = RXRPC_CONN_REMOTELY_ABORTED;
- rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED,
- abort_code, -ECONNABORTED);
+ rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED);
return -ECONNABORTED;
case RXRPC_PACKET_TYPE_CHALLENGE:
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 77440a356b14..885dae829f4a 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -69,10 +69,14 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
* If successful, a pointer to the connection is returned, but no ref is taken.
* NULL is returned if there is no match.
*
+ * When searching for a service call, if we find a peer but no connection, we
+ * return that through *_peer in case we need to create a new service call.
+ *
* The caller must be holding the RCU read lock.
*/
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct rxrpc_peer **_peer)
{
struct rxrpc_connection *conn;
struct rxrpc_conn_proto k;
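The out-parameter described in the comment above lets the caller salvage the peer lookup even when the connection search misses. A minimal sketch of the intended caller pattern (hedged; the real consumer is rxrpc_input_packet() in the input.c hunks below):

	struct rxrpc_peer *peer = NULL;
	struct rxrpc_connection *conn;

	/* Caller holds rcu_read_lock(); *_peer may be set even when
	 * NULL is returned, and is only an RCU-protected reference.
	 */
	conn = rxrpc_find_connection_rcu(local, skb, &peer);
	if (!conn && peer) {
		/* No connection yet, but the peer is known: a new
		 * incoming service call can reuse this peer record
		 * rather than repeating the hash lookup.
		 */
	}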
@@ -85,9 +89,6 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
if (rxrpc_extract_addr_from_skb(local, &srx, skb) < 0)
goto not_found;
- k.epoch = sp->hdr.epoch;
- k.cid = sp->hdr.cid & RXRPC_CIDMASK;
-
/* We may have to handle mixing IPv4 and IPv6 */
if (srx.transport.family != local->srx.transport.family) {
pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
@@ -99,7 +100,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
k.epoch = sp->hdr.epoch;
k.cid = sp->hdr.cid & RXRPC_CIDMASK;
- if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
+ if (rxrpc_to_server(sp)) {
/* We need to look up service connections by the full protocol
* parameter set. We look up the peer first as an intermediate
* step and then the connection from the peer's tree.
@@ -107,6 +108,7 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
peer = rxrpc_lookup_peer_rcu(local, &srx);
if (!peer)
goto not_found;
+ *_peer = peer;
conn = rxrpc_find_service_conn_rcu(peer, skb);
if (!conn || atomic_read(&conn->usage) == 0)
goto not_found;
@@ -214,7 +216,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
call->peer->cong_cwnd = call->cong_cwnd;
spin_lock_bh(&conn->params.peer->lock);
- hlist_del_init(&call->error_link);
+ hlist_del_rcu(&call->error_link);
spin_unlock_bh(&conn->params.peer->lock);
if (rxrpc_is_client_call(call))
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index cfdc199c6351..570b49d2da42 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -216,10 +216,11 @@ static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
/*
* Apply a hard ACK by advancing the Tx window.
*/
-static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
+static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
struct rxrpc_ack_summary *summary)
{
struct sk_buff *skb, *list = NULL;
+ bool rot_last = false;
int ix;
u8 annotation;
@@ -243,15 +244,17 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
skb->next = list;
list = skb;
- if (annotation & RXRPC_TX_ANNO_LAST)
+ if (annotation & RXRPC_TX_ANNO_LAST) {
set_bit(RXRPC_CALL_TX_LAST, &call->flags);
+ rot_last = true;
+ }
if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK)
summary->nr_rot_new_acks++;
}
spin_unlock(&call->lock);
- trace_rxrpc_transmit(call, (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ?
+ trace_rxrpc_transmit(call, (rot_last ?
rxrpc_transmit_rotate_last :
rxrpc_transmit_rotate));
wake_up(&call->waitq);
@@ -262,6 +265,8 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
skb->next = NULL;
rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
}
+
+ return rot_last;
}
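Returning rot_last instead of leaving callers to re-test RXRPC_CALL_TX_LAST means each caller acts on the state it observed while call->lock was held, rather than on a shared flag that another ACK may have set in the meantime. The caller-side shape this enables (taken from the rxrpc_input_ackall() hunk below):

	if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
		rxrpc_end_tx_phase(call, false, "ETL");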
/*
@@ -273,23 +278,26 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
const char *abort_why)
{
+ unsigned int state;
ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
write_lock(&call->state_lock);
- switch (call->state) {
+ state = call->state;
+ switch (state) {
case RXRPC_CALL_CLIENT_SEND_REQUEST:
case RXRPC_CALL_CLIENT_AWAIT_REPLY:
if (reply_begun)
- call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
+ call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY;
else
- call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
+ call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
break;
case RXRPC_CALL_SERVER_AWAIT_ACK:
__rxrpc_call_completed(call);
rxrpc_notify_socket(call);
+ state = call->state;
break;
default:
@@ -297,11 +305,10 @@ static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
}
write_unlock(&call->state_lock);
- if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) {
+ if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
- } else {
+ else
trace_rxrpc_transmit(call, rxrpc_transmit_end);
- }
_leave(" = ok");
return true;
@@ -332,11 +339,11 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
}
- if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
- rxrpc_rotate_tx_window(call, top, &summary);
if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
- rxrpc_proto_abort("TXL", call, top);
- return false;
+ if (!rxrpc_rotate_tx_window(call, top, &summary)) {
+ rxrpc_proto_abort("TXL", call, top);
+ return false;
+ }
}
if (!rxrpc_end_tx_phase(call, true, "ETD"))
return false;
@@ -452,13 +459,15 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
}
}
+ spin_lock(&call->input_lock);
+
/* Received data implicitly ACKs all of the request packets we sent
* when we're acting as a client.
*/
if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
!rxrpc_receiving_reply(call))
- return;
+ goto unlock;
call->ackr_prev_seq = seq;
@@ -488,12 +497,16 @@ next_subpacket:
if (flags & RXRPC_LAST_PACKET) {
if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
- seq != call->rx_top)
- return rxrpc_proto_abort("LSN", call, seq);
+ seq != call->rx_top) {
+ rxrpc_proto_abort("LSN", call, seq);
+ goto unlock;
+ }
} else {
if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
- after_eq(seq, call->rx_top))
- return rxrpc_proto_abort("LSA", call, seq);
+ after_eq(seq, call->rx_top)) {
+ rxrpc_proto_abort("LSA", call, seq);
+ goto unlock;
+ }
}
trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
@@ -560,8 +573,10 @@ next_subpacket:
skip:
offset += len;
if (flags & RXRPC_JUMBO_PACKET) {
- if (skb_copy_bits(skb, offset, &flags, 1) < 0)
- return rxrpc_proto_abort("XJF", call, seq);
+ if (skb_copy_bits(skb, offset, &flags, 1) < 0) {
+ rxrpc_proto_abort("XJF", call, seq);
+ goto unlock;
+ }
offset += sizeof(struct rxrpc_jumbo_header);
seq++;
serial++;
@@ -601,6 +616,9 @@ ack:
trace_rxrpc_notify_socket(call->debug_id, serial);
rxrpc_notify_socket(call);
}
+
+unlock:
+ spin_unlock(&call->input_lock);
_leave(" [queued]");
}
@@ -622,13 +640,14 @@ static void rxrpc_input_requested_ack(struct rxrpc_call *call,
if (!skb)
continue;
+ sent_at = skb->tstamp;
+ smp_rmb(); /* Read timestamp before serial. */
sp = rxrpc_skb(skb);
if (sp->hdr.serial != orig_serial)
continue;
- smp_rmb();
- sent_at = skb->tstamp;
goto found;
}
+
return;
found:
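The barrier moves so that the timestamp is read before the serial, pairing with the transmit side (see the output.c hunks below), which now writes the serial, issues smp_wmb(), and only then writes the timestamp. Schematically:

	/* Transmitter (output.c):              Receiver (above):
	 *   sp->hdr.serial = serial;            sent_at = skb->tstamp;
	 *   smp_wmb();                          smp_rmb();
	 *   skb->tstamp = ktime_get_real();     if (sp->hdr.serial != orig_serial)
	 *                                               continue;
	 *
	 * A retransmission rewrites the serial first and the timestamp
	 * second, so if the serial still matches the one being sought,
	 * the timestamp read before the barrier cannot have come from a
	 * later retransmission.
	 */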
@@ -686,15 +705,14 @@ static void rxrpc_input_ping_response(struct rxrpc_call *call,
ping_time = call->ping_time;
smp_rmb();
- ping_serial = call->ping_serial;
+ ping_serial = READ_ONCE(call->ping_serial);
if (orig_serial == call->acks_lost_ping)
rxrpc_input_check_for_lost_ack(call);
- if (!test_bit(RXRPC_CALL_PINGING, &call->flags) ||
- before(orig_serial, ping_serial))
+ if (before(orig_serial, ping_serial) ||
+ !test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags))
return;
- clear_bit(RXRPC_CALL_PINGING, &call->flags);
if (after(orig_serial, ping_serial))
return;
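Two changes close a race window here: ping_serial is loaded with READ_ONCE() against a concurrent writer, and the separate test_bit()/clear_bit() pair collapses into one atomic test_and_clear_bit(), so two ping responses racing through this path cannot both claim the same ping. Condensed:

	/* Before: both CPUs could pass the test, then both clear. */
	if (!test_bit(RXRPC_CALL_PINGING, &call->flags))
		return;
	clear_bit(RXRPC_CALL_PINGING, &call->flags);	/* racy window */

	/* After: exactly one caller wins the bit and proceeds. */
	if (before(orig_serial, ping_serial) ||
	    !test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags))
		return;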
@@ -860,15 +878,32 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
rxrpc_propose_ack_respond_to_ack);
}
+ /* Discard any out-of-order or duplicate ACKs. */
+ if (before_eq(sp->hdr.serial, call->acks_latest))
+ return;
+
+ buf.info.rxMTU = 0;
ioffset = offset + nr_acks + 3;
- if (skb->len >= ioffset + sizeof(buf.info)) {
- if (skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
- return rxrpc_proto_abort("XAI", call, 0);
+ if (skb->len >= ioffset + sizeof(buf.info) &&
+ skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
+ return rxrpc_proto_abort("XAI", call, 0);
+
+ spin_lock(&call->input_lock);
+
+ /* Discard any out-of-order or duplicate ACKs. */
+ if (before_eq(sp->hdr.serial, call->acks_latest))
+ goto out;
+ call->acks_latest_ts = skb->tstamp;
+ call->acks_latest = sp->hdr.serial;
+
+ /* Parse rwind and mtu sizes if provided. */
+ if (buf.info.rxMTU)
rxrpc_input_ackinfo(call, skb, &buf.info);
- }
- if (first_soft_ack == 0)
- return rxrpc_proto_abort("AK0", call, 0);
+ if (first_soft_ack == 0) {
+ rxrpc_proto_abort("AK0", call, 0);
+ goto out;
+ }
/* Ignore ACKs unless we are or have just been transmitting. */
switch (READ_ONCE(call->state)) {
@@ -878,39 +913,35 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
case RXRPC_CALL_SERVER_AWAIT_ACK:
break;
default:
- return;
- }
-
- /* Discard any out-of-order or duplicate ACKs. */
- if (before_eq(sp->hdr.serial, call->acks_latest)) {
- _debug("discard ACK %d <= %d",
- sp->hdr.serial, call->acks_latest);
- return;
+ goto out;
}
- call->acks_latest_ts = skb->tstamp;
- call->acks_latest = sp->hdr.serial;
if (before(hard_ack, call->tx_hard_ack) ||
- after(hard_ack, call->tx_top))
- return rxrpc_proto_abort("AKW", call, 0);
- if (nr_acks > call->tx_top - hard_ack)
- return rxrpc_proto_abort("AKN", call, 0);
+ after(hard_ack, call->tx_top)) {
+ rxrpc_proto_abort("AKW", call, 0);
+ goto out;
+ }
+ if (nr_acks > call->tx_top - hard_ack) {
+ rxrpc_proto_abort("AKN", call, 0);
+ goto out;
+ }
- if (after(hard_ack, call->tx_hard_ack))
- rxrpc_rotate_tx_window(call, hard_ack, &summary);
+ if (after(hard_ack, call->tx_hard_ack)) {
+ if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
+ rxrpc_end_tx_phase(call, false, "ETA");
+ goto out;
+ }
+ }
if (nr_acks > 0) {
- if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0)
- return rxrpc_proto_abort("XSA", call, 0);
+ if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0) {
+ rxrpc_proto_abort("XSA", call, 0);
+ goto out;
+ }
rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks,
&summary);
}
- if (test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
- rxrpc_end_tx_phase(call, false, "ETA");
- return;
- }
-
if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
RXRPC_TX_ANNO_LAST &&
summary.nr_acks == call->tx_top - hard_ack &&
@@ -919,7 +950,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
false, true,
rxrpc_propose_ack_ping_for_lost_reply);
- return rxrpc_congestion_management(call, skb, &summary, acked_serial);
+ rxrpc_congestion_management(call, skb, &summary, acked_serial);
+out:
+ spin_unlock(&call->input_lock);
}
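The duplicate-ACK test now deliberately appears twice: once unlocked as a cheap early-out, and again under the new call->input_lock, where acks_latest is actually advanced. A sketch of the double-checked pattern (names as above; ACK processing elided):

	if (before_eq(sp->hdr.serial, call->acks_latest))
		return;			/* unlocked fast path */

	spin_lock(&call->input_lock);
	if (before_eq(sp->hdr.serial, call->acks_latest))
		goto out;		/* recheck: lost a race to another CPU */
	call->acks_latest_ts = skb->tstamp;
	call->acks_latest = sp->hdr.serial;
	/* ... parse and apply the ACK ... */
out:
	spin_unlock(&call->input_lock);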
/*
@@ -932,9 +965,12 @@ static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
_proto("Rx ACKALL %%%u", sp->hdr.serial);
- rxrpc_rotate_tx_window(call, call->tx_top, &summary);
- if (test_bit(RXRPC_CALL_TX_LAST, &call->flags))
+ spin_lock(&call->input_lock);
+
+ if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
rxrpc_end_tx_phase(call, false, "ETL");
+
+ spin_unlock(&call->input_lock);
}
/*
@@ -1017,18 +1053,19 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
}
/*
- * Handle a new call on a channel implicitly completing the preceding call on
- * that channel.
+ * Handle a new service call on a channel implicitly completing the preceding
+ * call on that channel. This does not apply to client conns.
*
* TODO: If callNumber > call_id + 1, renegotiate security.
*/
-static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
+static void rxrpc_input_implicit_end_call(struct rxrpc_sock *rx,
+ struct rxrpc_connection *conn,
struct rxrpc_call *call)
{
switch (READ_ONCE(call->state)) {
case RXRPC_CALL_SERVER_AWAIT_ACK:
rxrpc_call_completed(call);
- break;
+ /* Fall through */
case RXRPC_CALL_COMPLETE:
break;
default:
@@ -1036,11 +1073,13 @@ static void rxrpc_input_implicit_end_call(struct rxrpc_connection *conn,
set_bit(RXRPC_CALL_EV_ABORT, &call->events);
rxrpc_queue_call(call);
}
+ trace_rxrpc_improper_term(call);
break;
}
- trace_rxrpc_improper_term(call);
+ spin_lock(&rx->incoming_lock);
__rxrpc_disconnect_call(conn, call);
+ spin_unlock(&rx->incoming_lock);
rxrpc_notify_socket(call);
}
@@ -1119,43 +1158,29 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
* The socket is locked by the caller and this prevents the socket from being
* shut down and the local endpoint from going away, thus sk_user_data will not
* be cleared until this function returns.
+ *
+ * Called with the RCU read lock held from the IP layer via UDP.
*/
-void rxrpc_data_ready(struct sock *udp_sk)
+int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
{
struct rxrpc_connection *conn;
struct rxrpc_channel *chan;
- struct rxrpc_call *call;
+ struct rxrpc_call *call = NULL;
struct rxrpc_skb_priv *sp;
struct rxrpc_local *local = udp_sk->sk_user_data;
- struct sk_buff *skb;
+ struct rxrpc_peer *peer = NULL;
+ struct rxrpc_sock *rx = NULL;
unsigned int channel;
- int ret, skew;
+ int skew = 0;
_enter("%p", udp_sk);
- ASSERT(!irqs_disabled());
-
- skb = skb_recv_udp(udp_sk, 0, 1, &ret);
- if (!skb) {
- if (ret == -EAGAIN)
- return;
- _debug("UDP socket error %d", ret);
- return;
- }
+ if (skb->tstamp == 0)
+ skb->tstamp = ktime_get_real();
rxrpc_new_skb(skb, rxrpc_skb_rx_received);
- _net("recv skb %p", skb);
-
- /* we'll probably need to checksum it (didn't call sock_recvmsg) */
- if (skb_checksum_complete(skb)) {
- rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
- __UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
- _leave(" [CSUM failed]");
- return;
- }
-
- __UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0);
+ skb_pull(skb, sizeof(struct udphdr));
/* The UDP protocol already released all skb resources;
* we are free to add our own data there.
@@ -1170,69 +1195,104 @@ void rxrpc_data_ready(struct sock *udp_sk)
static int lose;
if ((lose++ & 7) == 7) {
trace_rxrpc_rx_lose(sp);
- rxrpc_lose_skb(skb, rxrpc_skb_rx_lost);
- return;
+ rxrpc_free_skb(skb, rxrpc_skb_rx_lost);
+ return 0;
}
}
+ if (skb->tstamp == 0)
+ skb->tstamp = ktime_get_real();
trace_rxrpc_rx_packet(sp);
- _net("Rx RxRPC %s ep=%x call=%x:%x",
- sp->hdr.flags & RXRPC_CLIENT_INITIATED ? "ToServer" : "ToClient",
- sp->hdr.epoch, sp->hdr.cid, sp->hdr.callNumber);
-
- if (sp->hdr.type >= RXRPC_N_PACKET_TYPES ||
- !((RXRPC_SUPPORTED_PACKET_TYPES >> sp->hdr.type) & 1)) {
- _proto("Rx Bad Packet Type %u", sp->hdr.type);
- goto bad_message;
- }
-
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_VERSION:
- if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED))
+ if (rxrpc_to_client(sp))
goto discard;
rxrpc_post_packet_to_local(local, skb);
goto out;
case RXRPC_PACKET_TYPE_BUSY:
- if (sp->hdr.flags & RXRPC_CLIENT_INITIATED)
+ if (rxrpc_to_server(sp))
goto discard;
/* Fall through */
+ case RXRPC_PACKET_TYPE_ACK:
+ case RXRPC_PACKET_TYPE_ACKALL:
+ if (sp->hdr.callNumber == 0)
+ goto bad_message;
+ /* Fall through */
+ case RXRPC_PACKET_TYPE_ABORT:
+ break;
case RXRPC_PACKET_TYPE_DATA:
- if (sp->hdr.callNumber == 0)
+ if (sp->hdr.callNumber == 0 ||
+ sp->hdr.seq == 0)
goto bad_message;
if (sp->hdr.flags & RXRPC_JUMBO_PACKET &&
!rxrpc_validate_jumbo(skb))
goto bad_message;
break;
+ case RXRPC_PACKET_TYPE_CHALLENGE:
+ if (rxrpc_to_server(sp))
+ goto discard;
+ break;
+ case RXRPC_PACKET_TYPE_RESPONSE:
+ if (rxrpc_to_client(sp))
+ goto discard;
+ break;
+
/* Packet types 9-11 should just be ignored. */
case RXRPC_PACKET_TYPE_PARAMS:
case RXRPC_PACKET_TYPE_10:
case RXRPC_PACKET_TYPE_11:
goto discard;
+
+ default:
+ _proto("Rx Bad Packet Type %u", sp->hdr.type);
+ goto bad_message;
}
- rcu_read_lock();
+ if (sp->hdr.serviceId == 0)
+ goto bad_message;
+
+ if (rxrpc_to_server(sp)) {
+ /* Weed out packets to services we're not offering. Packets
+ * that would begin a call are explicitly rejected and the rest
+ * are just discarded.
+ */
+ rx = rcu_dereference(local->service);
+ if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
+ sp->hdr.serviceId != rx->second_service)) {
+ if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
+ sp->hdr.seq == 1)
+ goto unsupported_service;
+ goto discard;
+ }
+ }
- conn = rxrpc_find_connection_rcu(local, skb);
+ conn = rxrpc_find_connection_rcu(local, skb, &peer);
if (conn) {
if (sp->hdr.securityIndex != conn->security_ix)
goto wrong_security;
if (sp->hdr.serviceId != conn->service_id) {
- if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags) ||
- conn->service_id != conn->params.service_id)
+ int old_id;
+
+ if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags))
+ goto reupgrade;
+ old_id = cmpxchg(&conn->service_id, conn->params.service_id,
+ sp->hdr.serviceId);
+
+ if (old_id != conn->params.service_id &&
+ old_id != sp->hdr.serviceId)
goto reupgrade;
- conn->service_id = sp->hdr.serviceId;
}
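The plain store to conn->service_id becomes a cmpxchg() so that packets racing to commit a service upgrade can only move the ID from the original value to the upgraded one, exactly once; any other observed transition is treated as a protocol error. The semantics, restated:

	/* cmpxchg() returns the value that was actually in place. */
	old_id = cmpxchg(&conn->service_id, conn->params.service_id,
			 sp->hdr.serviceId);

	/* OK if we performed the upgrade (old_id was the original ID)
	 * or another CPU already upgraded to the same service; anything
	 * else is a conflicting re-upgrade, hence goto reupgrade.
	 */
	if (old_id != conn->params.service_id &&
	    old_id != sp->hdr.serviceId)
		goto reupgrade;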
if (sp->hdr.callNumber == 0) {
/* Connection-level packet */
_debug("CONN %p {%d}", conn, conn->debug_id);
rxrpc_post_packet_to_conn(conn, skb);
- goto out_unlock;
+ goto out;
}
/* Note the serial number skew here */
@@ -1251,19 +1311,19 @@ void rxrpc_data_ready(struct sock *udp_sk)
/* Ignore really old calls */
if (sp->hdr.callNumber < chan->last_call)
- goto discard_unlock;
+ goto discard;
if (sp->hdr.callNumber == chan->last_call) {
if (chan->call ||
sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
- goto discard_unlock;
+ goto discard;
/* For the previous service call, if completed
* successfully, we discard all further packets.
*/
if (rxrpc_conn_is_service(conn) &&
chan->last_type == RXRPC_PACKET_TYPE_ACK)
- goto discard_unlock;
+ goto discard;
/* But otherwise we need to retransmit the final packet
* from data cached in the connection record.
@@ -1274,18 +1334,16 @@ void rxrpc_data_ready(struct sock *udp_sk)
sp->hdr.serial,
sp->hdr.flags, 0);
rxrpc_post_packet_to_conn(conn, skb);
- goto out_unlock;
+ goto out;
}
call = rcu_dereference(chan->call);
if (sp->hdr.callNumber > chan->call_id) {
- if (!(sp->hdr.flags & RXRPC_CLIENT_INITIATED)) {
- rcu_read_unlock();
+ if (rxrpc_to_client(sp))
goto reject_packet;
- }
if (call)
- rxrpc_input_implicit_end_call(conn, call);
+ rxrpc_input_implicit_end_call(rx, conn, call);
call = NULL;
}
@@ -1297,66 +1355,57 @@ void rxrpc_data_ready(struct sock *udp_sk)
if (!test_bit(RXRPC_CALL_RX_HEARD, &call->flags))
set_bit(RXRPC_CALL_RX_HEARD, &call->flags);
}
- } else {
- skew = 0;
- call = NULL;
}
if (!call || atomic_read(&call->usage) == 0) {
- if (!(sp->hdr.type & RXRPC_CLIENT_INITIATED) ||
- sp->hdr.callNumber == 0 ||
+ if (rxrpc_to_client(sp) ||
sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
- goto bad_message_unlock;
+ goto bad_message;
if (sp->hdr.seq != 1)
- goto discard_unlock;
- call = rxrpc_new_incoming_call(local, conn, skb);
- if (!call) {
- rcu_read_unlock();
+ goto discard;
+ call = rxrpc_new_incoming_call(local, rx, skb);
+ if (!call)
goto reject_packet;
- }
rxrpc_send_ping(call, skb, skew);
mutex_unlock(&call->user_mutex);
}
rxrpc_input_call_packet(call, skb, skew);
- goto discard_unlock;
+ goto discard;
-discard_unlock:
- rcu_read_unlock();
discard:
rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
out:
trace_rxrpc_rx_done(0, 0);
- return;
-
-out_unlock:
- rcu_read_unlock();
- goto out;
+ return 0;
wrong_security:
- rcu_read_unlock();
trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RXKADINCONSISTENCY, EBADMSG);
skb->priority = RXKADINCONSISTENCY;
goto post_abort;
+unsupported_service:
+ trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+ RX_INVALID_OPERATION, EOPNOTSUPP);
+ skb->priority = RX_INVALID_OPERATION;
+ goto post_abort;
+
reupgrade:
- rcu_read_unlock();
trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RX_PROTOCOL_ERROR, EBADMSG);
goto protocol_error;
-bad_message_unlock:
- rcu_read_unlock();
bad_message:
trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
RX_PROTOCOL_ERROR, EBADMSG);
protocol_error:
skb->priority = RX_PROTOCOL_ERROR;
post_abort:
- skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
+ skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
reject_packet:
trace_rxrpc_rx_done(skb->mark, skb->priority);
rxrpc_reject_packet(local, skb);
_leave(" [badmsg]");
+ return 0;
}
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 777c3ed4cfc0..0906e51d3cfb 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -19,6 +19,7 @@
#include <linux/ip.h>
#include <linux/hashtable.h>
#include <net/sock.h>
+#include <net/udp.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
@@ -108,7 +109,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
*/
static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
{
- struct sock *sock;
+ struct sock *usk;
int ret, opt;
_enter("%p{%d,%d}",
@@ -122,6 +123,28 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
return ret;
}
+ /* set the socket up */
+ usk = local->socket->sk;
+ inet_sk(usk)->mc_loop = 0;
+
+ /* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
+ inet_inc_convert_csum(usk);
+
+ rcu_assign_sk_user_data(usk, local);
+
+ udp_sk(usk)->encap_type = UDP_ENCAP_RXRPC;
+ udp_sk(usk)->encap_rcv = rxrpc_input_packet;
+ udp_sk(usk)->encap_destroy = NULL;
+ udp_sk(usk)->gro_receive = NULL;
+ udp_sk(usk)->gro_complete = NULL;
+
+ udp_encap_enable();
+#if IS_ENABLED(CONFIG_AF_RXRPC_IPV6)
+ if (local->srx.transport.family == AF_INET6)
+ udpv6_encap_enable();
+#endif
+ usk->sk_error_report = rxrpc_error_report;
+
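This block installs rxrpc_input_packet() as the UDP encapsulation receive hook, replacing the old sk_data_ready + skb_recv_udp() arrangement removed at the bottom of this hunk; packets now arrive in softirq context with the RCU read lock already held, which is why input.c drops its own rcu_read_lock() calls. A hedged sketch of the encap_rcv contract as relied on here (returning 0 consumes the skb; a positive return would hand it back to normal UDP processing):

	int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
	{
		/* Endpoint state hangs off sk_user_data, set above. */
		struct rxrpc_local *local = udp_sk->sk_user_data;

		/* No skb_checksum_complete() is needed: conversion to
		 * CHECKSUM_COMPLETE was requested above via
		 * inet_inc_convert_csum().  The UDP header is still in
		 * place and must be pulled (see the input.c hunk).
		 */
		skb_pull(skb, sizeof(struct udphdr));
		/* ... classify by packet type and dispatch ... */
		return 0;
	}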
/* if a local address was supplied then bind it */
if (local->srx.transport_len > sizeof(sa_family_t)) {
_debug("bind");
@@ -135,10 +158,10 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
}
switch (local->srx.transport.family) {
- case AF_INET:
- /* we want to receive ICMP errors */
+ case AF_INET6:
+ /* we want to receive ICMPv6 errors */
opt = 1;
- ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
+ ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
(char *) &opt, sizeof(opt));
if (ret < 0) {
_debug("setsockopt failed");
@@ -146,19 +169,22 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
}
/* we want to set the don't fragment bit */
- opt = IP_PMTUDISC_DO;
- ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
+ opt = IPV6_PMTUDISC_DO;
+ ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
(char *) &opt, sizeof(opt));
if (ret < 0) {
_debug("setsockopt failed");
goto error;
}
- break;
- case AF_INET6:
+ /* Fall through and set IPv4 options too, otherwise we don't get
+ * errors from IPv4 packets sent through the IPv6 socket.
+ */
+
+ case AF_INET:
/* we want to receive ICMP errors */
opt = 1;
- ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
+ ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
(char *) &opt, sizeof(opt));
if (ret < 0) {
_debug("setsockopt failed");
@@ -166,24 +192,28 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
}
/* we want to set the don't fragment bit */
- opt = IPV6_PMTUDISC_DO;
- ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
+ opt = IP_PMTUDISC_DO;
+ ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
(char *) &opt, sizeof(opt));
if (ret < 0) {
_debug("setsockopt failed");
goto error;
}
+
+ /* We want receive timestamps. */
+ opt = 1;
+ ret = kernel_setsockopt(local->socket, SOL_SOCKET, SO_TIMESTAMPNS,
+ (char *)&opt, sizeof(opt));
+ if (ret < 0) {
+ _debug("setsockopt failed");
+ goto error;
+ }
break;
default:
BUG();
}
- /* set the socket up */
- sock = local->socket->sk;
- sock->sk_user_data = local;
- sock->sk_data_ready = rxrpc_data_ready;
- sock->sk_error_report = rxrpc_error_report;
_leave(" = 0");
return 0;
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index ccf5de160444..a141ee3ab812 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -124,7 +124,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
struct kvec iov[2];
rxrpc_serial_t serial;
rxrpc_seq_t hard_ack, top;
- ktime_t now;
size_t len, n;
int ret;
u8 reason;
@@ -196,9 +195,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
/* We need to stick a time in before we send the packet in case
* the reply gets back before kernel_sendmsg() completes - but
* asking UDP to send the packet can take a relatively long
- * time, so we update the time after, on the assumption that
- * the packet transmission is more likely to happen towards the
- * end of the kernel_sendmsg() call.
+ * time.
*/
call->ping_time = ktime_get_real();
set_bit(RXRPC_CALL_PINGING, &call->flags);
@@ -206,9 +203,6 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
}
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
- now = ktime_get_real();
- if (ping)
- call->ping_time = now;
conn->params.peer->last_tx_at = ktime_get_seconds();
if (ret < 0)
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
@@ -363,8 +357,14 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
/* If our RTT cache needs working on, request an ACK. Also request
* ACKs if a DATA packet appears to have been lost.
+ *
+ * However, we mustn't request an ACK on the last reply packet of a
+ * service call, lest OpenAFS incorrectly send us an ACK with some
+ * soft-ACKs in it and then never follow up with a proper hard ACK.
*/
- if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
+ if ((!(sp->hdr.flags & RXRPC_LAST_PACKET) ||
+ rxrpc_to_server(sp)
+ ) &&
(test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
retrans ||
call->cong_mode == RXRPC_CALL_SLOW_START ||
@@ -390,6 +390,11 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
goto send_fragmentable;
down_read(&conn->params.local->defrag_sem);
+
+ sp->hdr.serial = serial;
+ smp_wmb(); /* Set serial before timestamp */
+ skb->tstamp = ktime_get_real();
+
/* send the packet by UDP
* - returns -EMSGSIZE if UDP would have to fragment the packet
* to go out of the interface
@@ -413,12 +418,8 @@ done:
trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags,
retrans, lost);
if (ret >= 0) {
- ktime_t now = ktime_get_real();
- skb->tstamp = now;
- smp_wmb();
- sp->hdr.serial = serial;
if (whdr.flags & RXRPC_REQUEST_ACK) {
- call->peer->rtt_last_req = now;
+ call->peer->rtt_last_req = skb->tstamp;
trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
if (call->peer->rtt_usage > 1) {
unsigned long nowj = jiffies, ack_lost_at;
@@ -457,6 +458,10 @@ send_fragmentable:
down_write(&conn->params.local->defrag_sem);
+ sp->hdr.serial = serial;
+ smp_wmb(); /* Set serial before timestamp */
+ skb->tstamp = ktime_get_real();
+
switch (conn->params.local->srx.transport.family) {
case AF_INET:
opt = IP_PMTUDISC_DONT;
@@ -519,7 +524,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
struct kvec iov[2];
size_t size;
__be32 code;
- int ret;
+ int ret, ioc;
_enter("%d", local->debug_id);
@@ -527,7 +532,6 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
iov[0].iov_len = sizeof(whdr);
iov[1].iov_base = &code;
iov[1].iov_len = sizeof(code);
- size = sizeof(whdr) + sizeof(code);
msg.msg_name = &srx.transport;
msg.msg_control = NULL;
@@ -535,17 +539,31 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
msg.msg_flags = 0;
memset(&whdr, 0, sizeof(whdr));
- whdr.type = RXRPC_PACKET_TYPE_ABORT;
while ((skb = skb_dequeue(&local->reject_queue))) {
rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
sp = rxrpc_skb(skb);
+ switch (skb->mark) {
+ case RXRPC_SKB_MARK_REJECT_BUSY:
+ whdr.type = RXRPC_PACKET_TYPE_BUSY;
+ size = sizeof(whdr);
+ ioc = 1;
+ break;
+ case RXRPC_SKB_MARK_REJECT_ABORT:
+ whdr.type = RXRPC_PACKET_TYPE_ABORT;
+ code = htonl(skb->priority);
+ size = sizeof(whdr) + sizeof(code);
+ ioc = 2;
+ break;
+ default:
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ continue;
+ }
+
if (rxrpc_extract_addr_from_skb(local, &srx, skb) == 0) {
msg.msg_namelen = srx.transport_len;
- code = htonl(skb->priority);
-
whdr.epoch = htonl(sp->hdr.epoch);
whdr.cid = htonl(sp->hdr.cid);
whdr.callNumber = htonl(sp->hdr.callNumber);
@@ -554,7 +572,8 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
whdr.flags ^= RXRPC_CLIENT_INITIATED;
whdr.flags &= RXRPC_CLIENT_INITIATED;
- ret = kernel_sendmsg(local->socket, &msg, iov, 2, size);
+ ret = kernel_sendmsg(local->socket, &msg,
+ iov, ioc, size);
if (ret < 0)
trace_rxrpc_tx_fail(local->debug_id, 0, ret,
rxrpc_tx_point_reject);
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 4f9da2f51c69..bd2fa3b7caa7 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -23,6 +23,8 @@
#include "ar-internal.h"
static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
+static void rxrpc_distribute_error(struct rxrpc_peer *, int,
+ enum rxrpc_call_completion);
/*
* Find the peer associated with an ICMP packet.
@@ -193,9 +195,8 @@ void rxrpc_error_report(struct sock *sk)
rxrpc_store_error(peer, serr);
rcu_read_unlock();
rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ rxrpc_put_peer(peer);
- /* The ref we obtained is passed off to the work item */
- __rxrpc_queue_peer_error(peer);
_leave("");
}
@@ -205,6 +206,7 @@ void rxrpc_error_report(struct sock *sk)
static void rxrpc_store_error(struct rxrpc_peer *peer,
struct sock_exterr_skb *serr)
{
+ enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
struct sock_extended_err *ee;
int err;
@@ -255,7 +257,7 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
case SO_EE_ORIGIN_NONE:
case SO_EE_ORIGIN_LOCAL:
_proto("Rx Received local error { error=%d }", err);
- err += RXRPC_LOCAL_ERROR_OFFSET;
+ compl = RXRPC_CALL_LOCAL_ERROR;
break;
case SO_EE_ORIGIN_ICMP6:
@@ -264,48 +266,23 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
break;
}
- peer->error_report = err;
+ rxrpc_distribute_error(peer, err, compl);
}
/*
- * Distribute an error that occurred on a peer
+ * Distribute an error that occurred on a peer.
*/
-void rxrpc_peer_error_distributor(struct work_struct *work)
+static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
+ enum rxrpc_call_completion compl)
{
- struct rxrpc_peer *peer =
- container_of(work, struct rxrpc_peer, error_distributor);
struct rxrpc_call *call;
- enum rxrpc_call_completion compl;
- int error;
-
- _enter("");
-
- error = READ_ONCE(peer->error_report);
- if (error < RXRPC_LOCAL_ERROR_OFFSET) {
- compl = RXRPC_CALL_NETWORK_ERROR;
- } else {
- compl = RXRPC_CALL_LOCAL_ERROR;
- error -= RXRPC_LOCAL_ERROR_OFFSET;
- }
-
- _debug("ISSUE ERROR %s %d", rxrpc_call_completions[compl], error);
- spin_lock_bh(&peer->lock);
-
- while (!hlist_empty(&peer->error_targets)) {
- call = hlist_entry(peer->error_targets.first,
- struct rxrpc_call, error_link);
- hlist_del_init(&call->error_link);
+ hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
rxrpc_see_call(call);
-
- if (rxrpc_set_call_completion(call, compl, 0, -error))
+ if (call->state < RXRPC_CALL_COMPLETE &&
+ rxrpc_set_call_completion(call, compl, 0, -error))
rxrpc_notify_socket(call);
}
-
- spin_unlock_bh(&peer->lock);
-
- rxrpc_put_peer(peer);
- _leave("");
}
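Error distribution now walks peer->error_targets directly instead of detaching entries inside a work item; this pairs with the conn_object.c change above that switched hlist_del_init() to hlist_del_rcu() on disconnect, so concurrent removal is safe under RCU. The traversal shape (the ICMP path already runs under rcu_read_lock(); shown explicitly here for clarity):

	rcu_read_lock();
	hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
		/* Unlinking uses hlist_del_rcu(), so a call being
		 * disconnected concurrently remains valid to inspect
		 * until a grace period passes.
		 */
		if (call->state < RXRPC_CALL_COMPLETE &&
		    rxrpc_set_call_completion(call, compl, 0, -error))
			rxrpc_notify_socket(call);
	}
	rcu_read_unlock();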
/*
@@ -325,6 +302,8 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
if (rtt < 0)
return;
+ spin_lock(&peer->rtt_input_lock);
+
/* Replace the oldest datum in the RTT buffer */
sum -= peer->rtt_cache[cursor];
sum += rtt;
@@ -336,6 +315,8 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
peer->rtt_usage = usage;
}
+ spin_unlock(&peer->rtt_input_lock);
+
/* Now recalculate the average */
if (usage == RXRPC_RTT_CACHE_SIZE) {
avg = sum / RXRPC_RTT_CACHE_SIZE;
@@ -344,6 +325,7 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
do_div(avg, usage);
}
+ /* Don't need to update this under lock */
peer->rtt = avg;
trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
usage, avg);
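rtt_input_lock serializes only the ring-buffer update; publishing the recomputed average stays lockless because readers tolerate a slightly stale value. The arithmetic being protected is a rolling sum over a fixed-size ring (cursor advance elided; names from the hunk above):

	spin_lock(&peer->rtt_input_lock);
	sum -= peer->rtt_cache[cursor];	/* drop the sample being replaced */
	sum += rtt;			/* add the new sample */
	peer->rtt_cache[cursor] = rtt;
	spin_unlock(&peer->rtt_input_lock);

	/* Average over however many slots are in use so far. */
	avg = sum;
	do_div(avg, usage);
	peer->rtt = avg;		/* unlocked publish */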
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 1dc7648e3eff..5691b7d266ca 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -124,11 +124,9 @@ static struct rxrpc_peer *__rxrpc_lookup_peer_rcu(
struct rxrpc_net *rxnet = local->rxnet;
hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
- if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0) {
- if (atomic_read(&peer->usage) == 0)
- return NULL;
+ if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 &&
+ atomic_read(&peer->usage) > 0)
return peer;
- }
}
return NULL;
@@ -155,8 +153,10 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
* assess the MTU size for the network interface through which this peer is
* reached
*/
-static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
+static void rxrpc_assess_MTU_size(struct rxrpc_sock *rx,
+ struct rxrpc_peer *peer)
{
+ struct net *net = sock_net(&rx->sk);
struct dst_entry *dst;
struct rtable *rt;
struct flowi fl;
@@ -171,7 +171,7 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
switch (peer->srx.transport.family) {
case AF_INET:
rt = ip_route_output_ports(
- &init_net, fl4, NULL,
+ net, fl4, NULL,
peer->srx.transport.sin.sin_addr.s_addr, 0,
htons(7000), htons(7001), IPPROTO_UDP, 0, 0);
if (IS_ERR(rt)) {
@@ -190,7 +190,7 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
sizeof(struct in6_addr));
fl6->fl6_dport = htons(7001);
fl6->fl6_sport = htons(7000);
- dst = ip6_route_output(&init_net, NULL, fl6);
+ dst = ip6_route_output(net, NULL, fl6);
if (dst->error) {
_leave(" [route err %d]", dst->error);
return;
@@ -222,11 +222,10 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
atomic_set(&peer->usage, 1);
peer->local = local;
INIT_HLIST_HEAD(&peer->error_targets);
- INIT_WORK(&peer->error_distributor,
- &rxrpc_peer_error_distributor);
peer->service_conns = RB_ROOT;
seqlock_init(&peer->service_conn_lock);
spin_lock_init(&peer->lock);
+ spin_lock_init(&peer->rtt_input_lock);
peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
if (RXRPC_TX_SMSS > 2190)
@@ -244,10 +243,11 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
/*
* Initialise peer record.
*/
-static void rxrpc_init_peer(struct rxrpc_peer *peer, unsigned long hash_key)
+static void rxrpc_init_peer(struct rxrpc_sock *rx, struct rxrpc_peer *peer,
+ unsigned long hash_key)
{
peer->hash_key = hash_key;
- rxrpc_assess_MTU_size(peer);
+ rxrpc_assess_MTU_size(rx, peer);
peer->mtu = peer->if_mtu;
peer->rtt_last_req = ktime_get_real();
@@ -279,7 +279,8 @@ static void rxrpc_init_peer(struct rxrpc_peer *peer, unsigned long hash_key)
/*
* Set up a new peer.
*/
-static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
+static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
+ struct rxrpc_local *local,
struct sockaddr_rxrpc *srx,
unsigned long hash_key,
gfp_t gfp)
@@ -291,7 +292,7 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
peer = rxrpc_alloc_peer(local, gfp);
if (peer) {
memcpy(&peer->srx, srx, sizeof(*srx));
- rxrpc_init_peer(peer, hash_key);
+ rxrpc_init_peer(rx, peer, hash_key);
}
_leave(" = %p", peer);
@@ -299,40 +300,31 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
}
/*
- * Set up a new incoming peer. The address is prestored in the preallocated
- * peer.
+ * Set up a new incoming peer. There shouldn't be any other matching peers
+ * since we've already done a search in the list from the non-reentrant context
+ * (the data_ready handler), which is the only place we can add new peers.
*/
-struct rxrpc_peer *rxrpc_lookup_incoming_peer(struct rxrpc_local *local,
- struct rxrpc_peer *prealloc)
+void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local,
+ struct rxrpc_peer *peer)
{
- struct rxrpc_peer *peer;
struct rxrpc_net *rxnet = local->rxnet;
unsigned long hash_key;
- hash_key = rxrpc_peer_hash_key(local, &prealloc->srx);
- prealloc->local = local;
- rxrpc_init_peer(prealloc, hash_key);
+ hash_key = rxrpc_peer_hash_key(local, &peer->srx);
+ peer->local = local;
+ rxrpc_init_peer(rx, peer, hash_key);
spin_lock(&rxnet->peer_hash_lock);
-
- /* Need to check that we aren't racing with someone else */
- peer = __rxrpc_lookup_peer_rcu(local, &prealloc->srx, hash_key);
- if (peer && !rxrpc_get_peer_maybe(peer))
- peer = NULL;
- if (!peer) {
- peer = prealloc;
- hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
- list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
- }
-
+ hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
+ list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
spin_unlock(&rxnet->peer_hash_lock);
- return peer;
}
/*
* obtain a remote transport endpoint for the specified address
*/
-struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
+struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
+ struct rxrpc_local *local,
struct sockaddr_rxrpc *srx, gfp_t gfp)
{
struct rxrpc_peer *peer, *candidate;
@@ -352,7 +344,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
/* The peer is not yet present in hash - create a candidate
* for a new record and then redo the search.
*/
- candidate = rxrpc_create_peer(local, srx, hash_key, gfp);
+ candidate = rxrpc_create_peer(rx, local, srx, hash_key, gfp);
if (!candidate) {
_leave(" = NULL [nomem]");
return NULL;
@@ -416,21 +408,6 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
}
/*
- * Queue a peer record. This passes the caller's ref to the workqueue.
- */
-void __rxrpc_queue_peer_error(struct rxrpc_peer *peer)
-{
- const void *here = __builtin_return_address(0);
- int n;
-
- n = atomic_read(&peer->usage);
- if (rxrpc_queue_work(&peer->error_distributor))
- trace_rxrpc_peer(peer, rxrpc_peer_queued_error, n, here);
- else
- rxrpc_put_peer(peer);
-}
-
-/*
* Discard a peer record.
*/
static void __rxrpc_put_peer(struct rxrpc_peer *peer)
diff --git a/net/rxrpc/protocol.h b/net/rxrpc/protocol.h
index 93da73bf7098..f9cb83c938f3 100644
--- a/net/rxrpc/protocol.h
+++ b/net/rxrpc/protocol.h
@@ -50,7 +50,6 @@ struct rxrpc_wire_header {
#define RXRPC_PACKET_TYPE_10 10 /* Ignored */
#define RXRPC_PACKET_TYPE_11 11 /* Ignored */
#define RXRPC_PACKET_TYPE_VERSION 13 /* version string request */
-#define RXRPC_N_PACKET_TYPES 14 /* number of packet types (incl type 0) */
uint8_t flags; /* packet flags */
#define RXRPC_CLIENT_INITIATED 0x01 /* signifies a packet generated by a client */
@@ -72,20 +71,6 @@ struct rxrpc_wire_header {
} __packed;
-#define RXRPC_SUPPORTED_PACKET_TYPES ( \
- (1 << RXRPC_PACKET_TYPE_DATA) | \
- (1 << RXRPC_PACKET_TYPE_ACK) | \
- (1 << RXRPC_PACKET_TYPE_BUSY) | \
- (1 << RXRPC_PACKET_TYPE_ABORT) | \
- (1 << RXRPC_PACKET_TYPE_ACKALL) | \
- (1 << RXRPC_PACKET_TYPE_CHALLENGE) | \
- (1 << RXRPC_PACKET_TYPE_RESPONSE) | \
- /*(1 << RXRPC_PACKET_TYPE_DEBUG) | */ \
- (1 << RXRPC_PACKET_TYPE_PARAMS) | \
- (1 << RXRPC_PACKET_TYPE_10) | \
- (1 << RXRPC_PACKET_TYPE_11) | \
- (1 << RXRPC_PACKET_TYPE_VERSION))
-
/*****************************************************************************/
/*
* jumbo packet secondary header
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 23273b5303fd..8525de811616 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -135,7 +135,7 @@ static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
}
td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
- if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size) {
+ if (nla_len(tb[TCA_IPT_TARG]) != td->u.target_size) {
if (exists)
tcf_idr_release(*a, bind);
else
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 44e9c00657bc..6b67aa13d2dd 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -69,7 +69,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
if (!exists) {
ret = tcf_idr_create(tn, parm->index, est, a,
- &act_sample_ops, bind, false);
+ &act_sample_ops, bind, true);
if (ret) {
tcf_idr_cleanup(tn, parm->index);
return ret;
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 420759153d5f..681f6f04e7da 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -317,7 +317,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
&metadata->u.tun_info,
opts_len, extack);
if (ret < 0)
- goto err_out;
+ goto release_tun_meta;
}
metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
@@ -333,23 +333,24 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
&act_tunnel_key_ops, bind, true);
if (ret) {
NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
- goto err_out;
+ goto release_tun_meta;
}
ret = ACT_P_CREATED;
} else if (!ovr) {
- tcf_idr_release(*a, bind);
NL_SET_ERR_MSG(extack, "TC IDR already exists");
- return -EEXIST;
+ ret = -EEXIST;
+ goto release_tun_meta;
}
t = to_tunnel_key(*a);
params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
if (unlikely(!params_new)) {
- tcf_idr_release(*a, bind);
NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
- return -ENOMEM;
+ ret = -ENOMEM;
+ exists = true;
+ goto release_tun_meta;
}
params_new->tcft_action = parm->t_action;
params_new->tcft_enc_metadata = metadata;
@@ -367,6 +368,9 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
return ret;
+release_tun_meta:
+ dst_release(&metadata->dst);
+
err_out:
if (exists)
tcf_idr_release(*a, bind);
@@ -408,8 +412,10 @@ static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
opt->type) ||
nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
- opt->length * 4, opt + 1))
+ opt->length * 4, opt + 1)) {
+ nla_nest_cancel(skb, start);
return -EMSGSIZE;
+ }
len -= sizeof(struct geneve_opt) + opt->length * 4;
src += sizeof(struct geneve_opt) + opt->length * 4;
@@ -423,7 +429,7 @@ static int tunnel_key_opts_dump(struct sk_buff *skb,
const struct ip_tunnel_info *info)
{
struct nlattr *start;
- int err;
+ int err = -EINVAL;
if (!info->options_len)
return 0;
@@ -435,9 +441,11 @@ static int tunnel_key_opts_dump(struct sk_buff *skb,
if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
err = tunnel_key_geneve_opts_dump(skb, info);
if (err)
- return err;
+ goto err_out;
} else {
- return -EINVAL;
+err_out:
+ nla_nest_cancel(skb, start);
+ return err;
}
nla_nest_end(skb, start);
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 1a67af8a6e8c..70f144ac5e1d 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -31,6 +31,8 @@
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
+extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
+
/* The list of all installed classifier types */
static LIST_HEAD(tcf_proto_base);
@@ -1211,7 +1213,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
replay:
tp_created = 0;
- err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
+ err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
if (err < 0)
return err;
@@ -1360,7 +1362,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
+ err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
if (err < 0)
return err;
@@ -1475,7 +1477,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
void *fh = NULL;
int err;
- err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
+ err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
if (err < 0)
return err;
@@ -1838,7 +1840,7 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n,
return -EPERM;
replay:
- err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack);
+ err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack);
if (err < 0)
return err;
@@ -1902,6 +1904,8 @@ replay:
RTM_NEWCHAIN, false);
break;
case RTM_DELCHAIN:
+ tfilter_notify_chain(net, skb, block, q, parent, n,
+ chain, RTM_DELTFILTER);
/* Flush the chain first as the user requested chain removal. */
tcf_chain_flush(chain);
/* In case the chain was successfully deleted, put a reference
@@ -1947,7 +1951,8 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb)
if (nlmsg_len(cb->nlh) < sizeof(*tcm))
return skb->len;
- err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
+ err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+ NULL);
if (err)
return err;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index f218ccf1e2d9..b2c3406a2cf2 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -398,6 +398,7 @@ static int u32_init(struct tcf_proto *tp)
rcu_assign_pointer(tp_c->hlist, root_ht);
root_ht->tp_c = tp_c;
+ root_ht->refcnt++;
rcu_assign_pointer(tp->root, root_ht);
tp->data = tp_c;
return 0;
@@ -610,7 +611,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
struct tc_u_hnode __rcu **hn;
struct tc_u_hnode *phn;
- WARN_ON(ht->refcnt);
+ WARN_ON(--ht->refcnt);
u32_clear_hnode(tp, ht, extack);
@@ -649,7 +650,7 @@ static void u32_destroy(struct tcf_proto *tp, struct netlink_ext_ack *extack)
WARN_ON(root_ht == NULL);
- if (root_ht && --root_ht->refcnt == 0)
+ if (root_ht && --root_ht->refcnt == 1)
u32_destroy_hnode(tp, root_ht, extack);
if (--tp_c->refcnt == 0) {
@@ -698,7 +699,6 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
}
if (ht->refcnt == 1) {
- ht->refcnt--;
u32_destroy_hnode(tp, ht, extack);
} else {
NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
@@ -708,11 +708,11 @@ static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
out:
*last = true;
if (root_ht) {
- if (root_ht->refcnt > 1) {
+ if (root_ht->refcnt > 2) {
*last = false;
goto ret;
}
- if (root_ht->refcnt == 1) {
+ if (root_ht->refcnt == 2) {
if (!ht_empty(root_ht)) {
*last = false;
goto ret;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 98541c6399db..3dc0acf54245 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1307,6 +1307,18 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
return 0;
}
+const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
+ [TCA_KIND] = { .type = NLA_STRING },
+ [TCA_OPTIONS] = { .type = NLA_NESTED },
+ [TCA_RATE] = { .type = NLA_BINARY,
+ .len = sizeof(struct tc_estimator) },
+ [TCA_STAB] = { .type = NLA_NESTED },
+ [TCA_DUMP_INVISIBLE] = { .type = NLA_FLAG },
+ [TCA_CHAIN] = { .type = NLA_U32 },
+ [TCA_INGRESS_BLOCK] = { .type = NLA_U32 },
+ [TCA_EGRESS_BLOCK] = { .type = NLA_U32 },
+};
+
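Registering rtm_tca_policy with every nlmsg_parse() call here and in cls_api.c makes the attribute table enforced centrally at parse time, instead of each handler trusting attribute payloads blindly: TCA_RATE must now carry at least sizeof(struct tc_estimator) bytes, TCA_CHAIN at least a u32, and so on. What that buys a handler, sketched:

	struct nlattr *tca[TCA_MAX + 1];
	u32 chain_index;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX,
			  rtm_tca_policy, extack);
	if (err < 0)
		return err;	/* malformed attributes rejected here */

	if (tca[TCA_CHAIN])
		/* The NLA_U32 policy guarantees enough payload, so no
		 * length check is needed before nla_get_u32().
		 */
		chain_index = nla_get_u32(tca[TCA_CHAIN]);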
/*
* Delete/get qdisc.
*/
@@ -1327,7 +1339,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
+ err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+ extack);
if (err < 0)
return err;
@@ -1411,7 +1424,8 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
replay:
/* Reinit, just in case something touches this. */
- err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
+ err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+ extack);
if (err < 0)
return err;
@@ -1645,7 +1659,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
idx = 0;
ASSERT_RTNL();
- err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX, NULL, NULL);
+ err = nlmsg_parse(nlh, sizeof(struct tcmsg), tca, TCA_MAX,
+ rtm_tca_policy, NULL);
if (err < 0)
return err;
@@ -1864,7 +1879,8 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
+ err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy,
+ extack);
if (err < 0)
return err;
@@ -2043,7 +2059,8 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
if (tcm->tcm_parent) {
q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
- if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
+ if (q && q != root &&
+ tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
return -1;
return 0;
}
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index c07c30b916d5..793016d722ec 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -2644,7 +2644,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
for (i = 1; i <= CAKE_QUEUES; i++)
quantum_div[i] = 65535 / i;
- q->tins = kvzalloc(CAKE_MAX_TINS * sizeof(struct cake_tin_data),
+ q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data),
GFP_KERNEL);
if (!q->tins)
goto nomem;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 297d9cf960b9..a827a1f562bf 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1450,7 +1450,8 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
/* Get the lowest pmtu of all the transports. */
list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
if (t->pmtu_pending && t->dst) {
- sctp_transport_update_pmtu(t, sctp_dst_mtu(t->dst));
+ sctp_transport_update_pmtu(t,
+ atomic_read(&t->mtu_info));
t->pmtu_pending = 0;
}
if (!pmtu || (t->pathmtu < pmtu))
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 9bbc5f92c941..5c36a99882ed 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -395,6 +395,7 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
return;
if (sock_owned_by_user(sk)) {
+ atomic_set(&t->mtu_info, pmtu);
asoc->pmtu_pending = 1;
t->pmtu_pending = 1;
return;
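When the socket is owned by the user, the ICMP softirq cannot update the transport directly, so the new PMTU is parked in an atomic_t and flagged as pending; sctp_assoc_sync_pmtu() consumes it later from process context (see the associola.c hunk above). The two halves of the handoff:

	/* Softirq (ICMP) side: stash the value, mark it pending. */
	atomic_set(&t->mtu_info, pmtu);
	t->pmtu_pending = 1;

	/* Process-context side (sctp_assoc_sync_pmtu): consume it. */
	if (t->pmtu_pending && t->dst) {
		sctp_transport_update_pmtu(t, atomic_read(&t->mtu_info));
		t->pmtu_pending = 0;
	}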
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 7f849b01ec8e..67939ad99c01 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -120,6 +120,12 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
sctp_assoc_sync_pmtu(asoc);
}
+ if (asoc->pmtu_pending) {
+ if (asoc->param_flags & SPP_PMTUD_ENABLE)
+ sctp_assoc_sync_pmtu(asoc);
+ asoc->pmtu_pending = 0;
+ }
+
/* If there a is a prepend chunk stick it on the list before
* any other chunks get appended.
*/
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index d74d00b29942..42191ed9902b 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1048,7 +1048,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
if (!ctx->packet || !ctx->packet->has_cookie_echo)
return;
- /* fallthru */
+ /* fall through */
case SCTP_STATE_ESTABLISHED:
case SCTP_STATE_SHUTDOWN_PENDING:
case SCTP_STATE_SHUTDOWN_RECEIVED:
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index f73e9d38d5ba..c1c1bda334a4 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -271,11 +271,10 @@ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
spin_lock_bh(&sctp_assocs_id_lock);
asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
+ if (asoc && (asoc->base.sk != sk || asoc->base.dead))
+ asoc = NULL;
spin_unlock_bh(&sctp_assocs_id_lock);
- if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
- return NULL;
-
return asoc;
}
@@ -1946,8 +1945,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc,
if (sp->strm_interleave) {
timeo = sock_sndtimeo(sk, 0);
err = sctp_wait_for_connect(asoc, &timeo);
- if (err)
+ if (err) {
+ err = -ESRCH;
goto err;
+ }
} else {
wait_connect = true;
}
@@ -7100,14 +7101,14 @@ static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len,
}
policy = params.sprstat_policy;
- if (policy & ~SCTP_PR_SCTP_MASK)
+ if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)))
goto out;
asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
if (!asoc)
goto out;
- if (policy == SCTP_PR_SCTP_NONE) {
+ if (policy & SCTP_PR_SCTP_ALL) {
params.sprstat_abandoned_unsent = 0;
params.sprstat_abandoned_sent = 0;
for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
@@ -7159,7 +7160,7 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
}
policy = params.sprstat_policy;
- if (policy & ~SCTP_PR_SCTP_MASK)
+ if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)))
goto out;
asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
@@ -7175,7 +7176,7 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
goto out;
}
- if (policy == SCTP_PR_SCTP_NONE) {
+ if (policy == SCTP_PR_SCTP_ALL) {
params.sprstat_abandoned_unsent = 0;
params.sprstat_abandoned_sent = 0;
for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 12cac85da994..033696e6f74f 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -260,6 +260,7 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
{
struct dst_entry *dst = sctp_transport_dst_check(t);
+ struct sock *sk = t->asoc->base.sk;
bool change = true;
if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
@@ -271,12 +272,19 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
pmtu = SCTP_TRUNC4(pmtu);
if (dst) {
- dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
+ struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family);
+ union sctp_addr addr;
+
+ pf->af->from_sk(&addr, sk);
+ pf->to_sk_daddr(&t->ipaddr, sk);
+ dst->ops->update_pmtu(dst, sk, NULL, pmtu);
+ pf->to_sk_daddr(&addr, sk);
+
dst = sctp_transport_dst_check(t);
}
if (!dst) {
- t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
+ t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
dst = t->dst;
}
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 2d8a1e15e4f9..015231789ed2 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -742,7 +742,10 @@ static void smc_connect_work(struct work_struct *work)
smc->sk.sk_err = -rc;
out:
- smc->sk.sk_state_change(&smc->sk);
+ if (smc->sk.sk_err)
+ smc->sk.sk_state_change(&smc->sk);
+ else
+ smc->sk.sk_write_space(&smc->sk);
kfree(smc->connect_info);
smc->connect_info = NULL;
release_sock(&smc->sk);
@@ -1150,9 +1153,9 @@ static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
}
/* listen worker: finish RDMA setup */
-static void smc_listen_rdma_finish(struct smc_sock *new_smc,
- struct smc_clc_msg_accept_confirm *cclc,
- int local_contact)
+static int smc_listen_rdma_finish(struct smc_sock *new_smc,
+ struct smc_clc_msg_accept_confirm *cclc,
+ int local_contact)
{
struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
int reason_code = 0;
@@ -1175,11 +1178,12 @@ static void smc_listen_rdma_finish(struct smc_sock *new_smc,
if (reason_code)
goto decline;
}
- return;
+ return 0;
decline:
mutex_unlock(&smc_create_lgr_pending);
smc_listen_decline(new_smc, reason_code, local_contact);
+ return reason_code;
}
/* setup for RDMA connection of server */
@@ -1276,8 +1280,10 @@ static void smc_listen_work(struct work_struct *work)
}
/* finish worker */
- if (!ism_supported)
- smc_listen_rdma_finish(new_smc, &cclc, local_contact);
+ if (!ism_supported) {
+ if (smc_listen_rdma_finish(new_smc, &cclc, local_contact))
+ return;
+ }
smc_conn_save_peer_info(new_smc, &cclc);
mutex_unlock(&smc_create_lgr_pending);
smc_listen_out_connected(new_smc);
@@ -1529,7 +1535,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
return EPOLLNVAL;
smc = smc_sk(sock->sk);
- if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
+ if (smc->use_fallback) {
/* delegate to CLC child sock */
mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
sk->sk_err = smc->clcsock->sk->sk_err;
@@ -1560,9 +1566,9 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
if (sk->sk_state == SMC_APPCLOSEWAIT1)
mask |= EPOLLIN;
+ if (smc->conn.urg_state == SMC_URG_VALID)
+ mask |= EPOLLPRI;
}
- if (smc->conn.urg_state == SMC_URG_VALID)
- mask |= EPOLLPRI;
}
return mask;
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 83aba9ade060..52241d679cc9 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -446,14 +446,12 @@ int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
vec[i++].iov_len = sizeof(trl);
/* due to the few bytes needed for clc-handshake this cannot block */
len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen);
- if (len < sizeof(pclc)) {
- if (len >= 0) {
- reason_code = -ENETUNREACH;
- smc->sk.sk_err = -reason_code;
- } else {
- smc->sk.sk_err = smc->clcsock->sk->sk_err;
- reason_code = -smc->sk.sk_err;
- }
+ if (len < 0) {
+ smc->sk.sk_err = smc->clcsock->sk->sk_err;
+ reason_code = -smc->sk.sk_err;
+ } else if (len < (int)sizeof(pclc)) {
+ reason_code = -ENETUNREACH;
+ smc->sk.sk_err = -reason_code;
}
return reason_code;
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index ac961dfb1ea1..ea2b87f29469 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -100,15 +100,14 @@ static void smc_close_active_abort(struct smc_sock *smc)
struct smc_cdc_conn_state_flags *txflags =
&smc->conn.local_tx_ctrl.conn_state_flags;
- sk->sk_err = ECONNABORTED;
- if (smc->clcsock && smc->clcsock->sk) {
- smc->clcsock->sk->sk_err = ECONNABORTED;
- smc->clcsock->sk->sk_state_change(smc->clcsock->sk);
+ if (sk->sk_state != SMC_INIT && smc->clcsock && smc->clcsock->sk) {
+ sk->sk_err = ECONNABORTED;
+ if (smc->clcsock && smc->clcsock->sk) {
+ smc->clcsock->sk->sk_err = ECONNABORTED;
+ smc->clcsock->sk->sk_state_change(smc->clcsock->sk);
+ }
}
switch (sk->sk_state) {
- case SMC_INIT:
- sk->sk_state = SMC_PEERABORTWAIT;
- break;
case SMC_ACTIVE:
sk->sk_state = SMC_PEERABORTWAIT;
release_sock(sk);
@@ -143,6 +142,7 @@ static void smc_close_active_abort(struct smc_sock *smc)
case SMC_PEERFINCLOSEWAIT:
sock_put(sk); /* passive closing */
break;
+ case SMC_INIT:
case SMC_PEERABORTWAIT:
case SMC_CLOSED:
break;
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 01c6ce042a1c..7cb3e4f07c10 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -461,7 +461,7 @@ static const struct genl_ops smc_pnet_ops[] = {
};
/* SMC_PNETID family definition */
-static struct genl_family smc_pnet_nl_family = {
+static struct genl_family smc_pnet_nl_family __ro_after_init = {
.hdrsize = 0,
.name = SMCR_GENL_FAMILY_NAME,
.version = SMCR_GENL_FAMILY_VERSION,
diff --git a/net/socket.c b/net/socket.c
index e6945e318f02..390a8ecef4bf 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -941,7 +941,8 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
EXPORT_SYMBOL(dlci_ioctl_set);
static long sock_do_ioctl(struct net *net, struct socket *sock,
- unsigned int cmd, unsigned long arg)
+ unsigned int cmd, unsigned long arg,
+ unsigned int ifreq_size)
{
int err;
void __user *argp = (void __user *)arg;
@@ -967,11 +968,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
} else {
struct ifreq ifr;
bool need_copyout;
- if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
+ if (copy_from_user(&ifr, argp, ifreq_size))
return -EFAULT;
err = dev_ioctl(net, cmd, &ifr, &need_copyout);
if (!err && need_copyout)
- if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
+ if (copy_to_user(argp, &ifr, ifreq_size))
return -EFAULT;
}
return err;
@@ -1070,7 +1071,8 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
err = open_related_ns(&net->ns, get_net_ns);
break;
default:
- err = sock_do_ioctl(net, sock, cmd, arg);
+ err = sock_do_ioctl(net, sock, cmd, arg,
+ sizeof(struct ifreq));
break;
}
return err;
@@ -2750,7 +2752,8 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
int err;
set_fs(KERNEL_DS);
- err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
+ err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv,
+ sizeof(struct compat_ifreq));
set_fs(old_fs);
if (!err)
err = compat_put_timeval(&ktv, up);
@@ -2766,7 +2769,8 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
int err;
set_fs(KERNEL_DS);
- err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
+ err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts,
+ sizeof(struct compat_ifreq));
set_fs(old_fs);
if (!err)
err = compat_put_timespec(&kts, up);
@@ -2871,9 +2875,14 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
copy_in_user(&rxnfc->fs.ring_cookie,
&compat_rxnfc->fs.ring_cookie,
(void __user *)(&rxnfc->fs.location + 1) -
- (void __user *)&rxnfc->fs.ring_cookie) ||
- copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
- sizeof(rxnfc->rule_cnt)))
+ (void __user *)&rxnfc->fs.ring_cookie))
+ return -EFAULT;
+ if (ethcmd == ETHTOOL_GRXCLSRLALL) {
+ if (put_user(rule_cnt, &rxnfc->rule_cnt))
+ return -EFAULT;
+ } else if (copy_in_user(&rxnfc->rule_cnt,
+ &compat_rxnfc->rule_cnt,
+ sizeof(rxnfc->rule_cnt)))
return -EFAULT;
}
@@ -3072,7 +3081,8 @@ static int routing_ioctl(struct net *net, struct socket *sock,
}
set_fs(KERNEL_DS);
- ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
+ ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r,
+ sizeof(struct compat_ifreq));
set_fs(old_fs);
out:
@@ -3185,7 +3195,8 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
case SIOCBONDSETHWADDR:
case SIOCBONDCHANGEACTIVE:
case SIOCGIFNAME:
- return sock_do_ioctl(net, sock, cmd, arg);
+ return sock_do_ioctl(net, sock, cmd, arg,
+ sizeof(struct compat_ifreq));
}
return -ENOIOCTLCMD;
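
sock_do_ioctl() now takes an explicit ifreq_size so one helper serves
both native and compat callers; unconditionally copying
sizeof(struct ifreq) on behalf of a 32-bit caller could read or write
past the smaller compat buffer. A hedged kernel-style sketch of the
idea; the struct layouts and helper name are stand-ins:

    #include <linux/uaccess.h>

    /* Stand-in layouts: the compat struct is smaller than the native one. */
    struct my_ifreq        { char name[16]; unsigned long data; };
    struct my_compat_ifreq { char name[16]; unsigned int  data; };

    static int copy_ifreq_in(void __user *argp, struct my_ifreq *kreq,
                             unsigned int ifreq_size)
    {
            /* Copy only the bytes the caller's ABI provides; using
             * sizeof(*kreq) here would overrun a compat caller's buffer.
             */
            if (copy_from_user(kreq, argp, ifreq_size))
                    return -EFAULT;
            return 0;
    }

A native call site would pass sizeof(struct my_ifreq) and a compat one
sizeof(struct my_compat_ifreq), mirroring the call sites above.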
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 418f03d0be90..645c16052052 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -609,16 +609,18 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
switch (evt) {
case NETDEV_CHANGE:
- if (netif_carrier_ok(dev))
+ if (netif_carrier_ok(dev) && netif_oper_up(dev)) {
+ test_and_set_bit_lock(0, &b->up);
break;
- /* else: fall through */
- case NETDEV_UP:
- test_and_set_bit_lock(0, &b->up);
- break;
+ }
+ /* fall through */
case NETDEV_GOING_DOWN:
clear_bit_unlock(0, &b->up);
tipc_reset_bearer(net, b);
break;
+ case NETDEV_UP:
+ test_and_set_bit_lock(0, &b->up);
+ break;
case NETDEV_CHANGEMTU:
if (tipc_mtu_bad(dev, 0)) {
bearer_disable(net, b);
diff --git a/net/tipc/group.c b/net/tipc/group.c
index e82f13cb2dc5..06fee142f09f 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -666,6 +666,7 @@ static void tipc_group_create_event(struct tipc_group *grp,
struct sk_buff *skb;
struct tipc_msg *hdr;
+ memset(&evt, 0, sizeof(evt));
evt.event = event;
evt.found_lower = m->instance;
evt.found_upper = m->instance;
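
The memset() added to tipc_group_create_event() matters because a
struct copied onto the wire carries its padding bytes too; without
zeroing, those bytes hold stale kernel stack data. A small
illustrative sketch (struct and field names hypothetical):

    #include <string.h>

    struct wire_event {
            unsigned char event;      /* compiler inserts 3 padding bytes */
            unsigned int  instance;
    };

    static void build_event(struct wire_event *evt, unsigned int inst)
    {
            /* Zero the whole struct first so padding never leaks old
             * stack contents to the peer.
             */
            memset(evt, 0, sizeof(*evt));
            evt->event = 1;
            evt->instance = inst;
    }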
diff --git a/net/tipc/link.c b/net/tipc/link.c
index b1f0bee54eac..201c3b5bc96b 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -410,6 +410,11 @@ char *tipc_link_name(struct tipc_link *l)
return l->name;
}
+u32 tipc_link_state(struct tipc_link *l)
+{
+ return l->state;
+}
+
/**
* tipc_link_create - create a new link
* @n: pointer to associated node
@@ -472,6 +477,8 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
l->in_session = false;
l->bearer_id = bearer_id;
l->tolerance = tolerance;
+ if (bc_rcvlink)
+ bc_rcvlink->tolerance = tolerance;
l->net_plane = net_plane;
l->advertised_mtu = mtu;
l->mtu = mtu;
@@ -838,12 +845,24 @@ static void link_prepare_wakeup(struct tipc_link *l)
void tipc_link_reset(struct tipc_link *l)
{
+ struct sk_buff_head list;
+
+ __skb_queue_head_init(&list);
+
l->in_session = false;
l->session++;
l->mtu = l->advertised_mtu;
+
+ spin_lock_bh(&l->wakeupq.lock);
+ skb_queue_splice_init(&l->wakeupq, &list);
+ spin_unlock_bh(&l->wakeupq.lock);
+
+ spin_lock_bh(&l->inputq->lock);
+ skb_queue_splice_init(&list, l->inputq);
+ spin_unlock_bh(&l->inputq->lock);
+
__skb_queue_purge(&l->transmq);
__skb_queue_purge(&l->deferdq);
- skb_queue_splice_init(&l->wakeupq, l->inputq);
__skb_queue_purge(&l->backlogq);
l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
@@ -1021,7 +1040,8 @@ static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
/* Detect repeated retransmit failures on same packet */
if (r->last_retransm != buf_seqno(skb)) {
r->last_retransm = buf_seqno(skb);
- r->stale_limit = jiffies + msecs_to_jiffies(l->tolerance);
+ r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
+ r->stale_cnt = 0;
} else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
link_retransmit_failure(l, skb);
if (link_is_bc_sndlink(l))
@@ -1380,6 +1400,36 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
__skb_queue_tail(xmitq, skb);
}
+void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
+ struct sk_buff_head *xmitq)
+{
+ u32 onode = tipc_own_addr(l->net);
+ struct tipc_msg *hdr, *ihdr;
+ struct sk_buff_head tnlq;
+ struct sk_buff *skb;
+ u32 dnode = l->addr;
+
+ skb_queue_head_init(&tnlq);
+ skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
+ INT_H_SIZE, BASIC_H_SIZE,
+ dnode, onode, 0, 0, 0);
+ if (!skb) {
+ pr_warn("%sunable to create tunnel packet\n", link_co_err);
+ return;
+ }
+
+ hdr = buf_msg(skb);
+ msg_set_msgcnt(hdr, 1);
+ msg_set_bearer_id(hdr, l->peer_bearer_id);
+
+ ihdr = (struct tipc_msg *)msg_data(hdr);
+ tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
+ BASIC_H_SIZE, dnode);
+ msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
+ __skb_queue_tail(&tnlq, skb);
+ tipc_link_xmit(l, &tnlq, xmitq);
+}
+
/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
* with contents of the link's transmit and backlog queues.
*/
@@ -1476,6 +1526,9 @@ bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
return false;
if (session != curr_session)
return false;
+ /* Extra sanity check */
+ if (!link_is_up(l) && msg_ack(hdr))
+ return false;
if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
return true;
/* Accept only STATE with new sequence number */
@@ -1533,9 +1586,10 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
strncpy(if_name, data, TIPC_MAX_IF_NAME);
/* Update own tolerance if peer indicates a non-zero value */
- if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+ if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
l->tolerance = peers_tol;
-
+ l->bc_rcvlink->tolerance = peers_tol;
+ }
/* Update own priority if peer's priority is higher */
if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
l->priority = peers_prio;
@@ -1561,9 +1615,10 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
l->rcv_nxt_state = msg_seqno(hdr) + 1;
/* Update own tolerance if peer indicates a non-zero value */
- if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
+ if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
l->tolerance = peers_tol;
-
+ l->bc_rcvlink->tolerance = peers_tol;
+ }
/* Update own prio if peer indicates a different value */
if ((peers_prio != l->priority) &&
in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
@@ -2180,6 +2235,8 @@ void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
struct sk_buff_head *xmitq)
{
l->tolerance = tol;
+ if (l->bc_rcvlink)
+ l->bc_rcvlink->tolerance = tol;
if (link_is_up(l))
tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
}
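
tipc_link_reset() above drains the wakeup queue into a private list
under the source queue's lock, then appends that list to the input
queue under its own lock, instead of splicing directly between the two
queues. A sketch of the two-step pattern using the real skb list
primitives; move_queued_skbs() itself is illustrative:

    #include <linux/skbuff.h>

    static void move_queued_skbs(struct sk_buff_head *src,
                                 struct sk_buff_head *dst)
    {
            struct sk_buff_head tmp;

            __skb_queue_head_init(&tmp);

            spin_lock_bh(&src->lock);
            skb_queue_splice_init(src, &tmp);   /* src left empty */
            spin_unlock_bh(&src->lock);

            spin_lock_bh(&dst->lock);
            skb_queue_splice_init(&tmp, dst);   /* buffers now on dst */
            spin_unlock_bh(&dst->lock);
    }

Neither lock is ever held while the other is taken, which avoids
lock-ordering problems between the two queues.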
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 7bc494a33fdf..90488c538a4e 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -88,6 +88,8 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
struct tipc_link **link);
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
int mtyp, struct sk_buff_head *xmitq);
+void tipc_link_create_dummy_tnl_msg(struct tipc_link *tnl,
+ struct sk_buff_head *xmitq);
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq);
int tipc_link_fsm_evt(struct tipc_link *l, int evt);
bool tipc_link_is_up(struct tipc_link *l);
@@ -107,6 +109,7 @@ u16 tipc_link_rcv_nxt(struct tipc_link *l);
u16 tipc_link_acked(struct tipc_link *l);
u32 tipc_link_id(struct tipc_link *l);
char *tipc_link_name(struct tipc_link *l);
+u32 tipc_link_state(struct tipc_link *l);
char tipc_link_plane(struct tipc_link *l);
int tipc_link_prio(struct tipc_link *l);
int tipc_link_window(struct tipc_link *l);
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 51b4b96f89db..3cfeb9df64b0 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -115,7 +115,7 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
struct sk_buff *buf;
struct distr_item *item;
- list_del(&publ->binding_node);
+ list_del_rcu(&publ->binding_node);
if (publ->scope == TIPC_NODE_SCOPE)
return NULL;
@@ -147,7 +147,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
ITEM_SIZE) * ITEM_SIZE;
u32 msg_rem = msg_dsz;
- list_for_each_entry(publ, pls, binding_node) {
+ list_for_each_entry_rcu(publ, pls, binding_node) {
/* Prepare next buffer: */
if (!skb) {
skb = named_prepare_buf(net, PUBLICATION, msg_rem,
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index a2f76743c73a..6376467e78f8 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -185,6 +185,10 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
return -ENOMEM;
buf->sk = msg->dst_sk;
+ if (__tipc_dump_start(&cb, msg->net)) {
+ kfree_skb(buf);
+ return -ENOMEM;
+ }
do {
int rem;
@@ -216,6 +220,7 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
err = 0;
err_out:
+ tipc_dump_done(&cb);
kfree_skb(buf);
if (err == -EMSGSIZE) {
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 68014f1b6976..2afc4f8c37a7 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -111,6 +111,7 @@ struct tipc_node {
int action_flags;
struct list_head list;
int state;
+ bool failover_sent;
u16 sync_point;
int link_cnt;
u16 working_links;
@@ -680,6 +681,7 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
*slot0 = bearer_id;
*slot1 = bearer_id;
tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
+ n->failover_sent = false;
n->action_flags |= TIPC_NOTIFY_NODE_UP;
tipc_link_set_active(nl, true);
tipc_bcast_add_peer(n->net, nl, xmitq);
@@ -911,6 +913,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
bool reset = true;
char *if_name;
unsigned long intv;
+ u16 session;
*dupl_addr = false;
*respond = false;
@@ -997,9 +1000,10 @@ void tipc_node_check_dest(struct net *net, u32 addr,
goto exit;
if_name = strchr(b->name, ':') + 1;
+ get_random_bytes(&session, sizeof(u16));
if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
b->net_plane, b->mtu, b->priority,
- b->window, mod(tipc_net(net)->random),
+ b->window, session,
tipc_own_addr(net), addr, peer_id,
n->capabilities,
tipc_bc_sndlink(n->net), n->bc_entry.link,
@@ -1615,6 +1619,14 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
tipc_link_inputq(l));
}
+ /* If parallel link was already down, and this happened before
+ * the tunnel link came up, FAILOVER was never sent. Ensure that
+ * FAILOVER is sent to get peer out of NODE_FAILINGOVER state.
+ */
+ if (n->state != NODE_FAILINGOVER && !n->failover_sent) {
+ tipc_link_create_dummy_tnl_msg(l, xmitq);
+ n->failover_sent = true;
+ }
/* If pkts arrive out of order, use lowest calculated syncpt */
if (less(syncpt, n->sync_point))
n->sync_point = syncpt;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index ab7a2a7178f7..49810fdff4c5 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -576,6 +576,7 @@ static int tipc_release(struct socket *sock)
sk_stop_timer(sk, &sk->sk_timer);
tipc_sk_remove(tsk);
+ sock_orphan(sk);
/* Reject any messages that accumulated in backlog queue */
release_sock(sk);
tipc_dest_list_purge(&tsk->cong_links);
@@ -1195,6 +1196,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
* @skb: pointer to message buffer.
*/
static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
+ struct sk_buff_head *inputq,
struct sk_buff_head *xmitq)
{
struct tipc_msg *hdr = buf_msg(skb);
@@ -1212,7 +1214,16 @@ static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
tsk_peer_port(tsk));
sk->sk_state_change(sk);
- goto exit;
+
+ /* The state change is ignored if the socket is already awake;
+ * convert the msg to an abort msg and add it to the input queue.
+ */
+ msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
+ msg_set_type(hdr, TIPC_CONN_MSG);
+ msg_set_size(hdr, BASIC_H_SIZE);
+ msg_set_hdr_sz(hdr, BASIC_H_SIZE);
+ __skb_queue_tail(inputq, skb);
+ return;
}
tsk->probe_unacked = false;
@@ -1418,8 +1429,10 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
/* Handle implicit connection setup */
if (unlikely(dest)) {
rc = __tipc_sendmsg(sock, m, dlen);
- if (dlen && (dlen == rc))
+ if (dlen && dlen == rc) {
+ tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
+ }
return rc;
}
@@ -1933,7 +1946,7 @@ static void tipc_sk_proto_rcv(struct sock *sk,
switch (msg_user(hdr)) {
case CONN_MANAGER:
- tipc_sk_conn_proto_rcv(tsk, skb, xmitq);
+ tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
return;
case SOCK_WAKEUP:
tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
@@ -3229,7 +3242,7 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
struct netlink_callback *cb,
struct tipc_sock *tsk))
{
- struct rhashtable_iter *iter = (void *)cb->args[0];
+ struct rhashtable_iter *iter = (void *)cb->args[4];
struct tipc_sock *tsk;
int err;
@@ -3265,8 +3278,14 @@ EXPORT_SYMBOL(tipc_nl_sk_walk);
int tipc_dump_start(struct netlink_callback *cb)
{
- struct rhashtable_iter *iter = (void *)cb->args[0];
- struct net *net = sock_net(cb->skb->sk);
+ return __tipc_dump_start(cb, sock_net(cb->skb->sk));
+}
+EXPORT_SYMBOL(tipc_dump_start);
+
+int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
+{
+ /* tipc_nl_name_table_dump() uses cb->args[0...3]. */
+ struct rhashtable_iter *iter = (void *)cb->args[4];
struct tipc_net *tn = tipc_net(net);
if (!iter) {
@@ -3274,17 +3293,16 @@ int tipc_dump_start(struct netlink_callback *cb)
if (!iter)
return -ENOMEM;
- cb->args[0] = (long)iter;
+ cb->args[4] = (long)iter;
}
rhashtable_walk_enter(&tn->sk_rht, iter);
return 0;
}
-EXPORT_SYMBOL(tipc_dump_start);
int tipc_dump_done(struct netlink_callback *cb)
{
- struct rhashtable_iter *hti = (void *)cb->args[0];
+ struct rhashtable_iter *hti = (void *)cb->args[4];
rhashtable_walk_exit(hti);
kfree(hti);
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index d43032e26532..5e575f205afe 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -69,5 +69,6 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
struct netlink_callback *cb,
struct tipc_sock *tsk));
int tipc_dump_start(struct netlink_callback *cb);
+int __tipc_dump_start(struct netlink_callback *cb, struct net *net);
int tipc_dump_done(struct netlink_callback *cb);
#endif
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 292742e50bfa..961b07d4d41c 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -686,7 +686,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
goto free_marker_record;
}
- crypto_info = &ctx->crypto_send;
+ crypto_info = &ctx->crypto_send.info;
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128:
nonce_size = TLS_CIPHER_AES_GCM_128_IV_SIZE;
@@ -780,7 +780,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx)
ctx->priv_ctx_tx = offload_ctx;
rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_TX,
- &ctx->crypto_send,
+ &ctx->crypto_send.info,
tcp_sk(sk)->write_seq);
if (rc)
goto release_netdev;
@@ -862,7 +862,7 @@ int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx)
goto release_ctx;
rc = netdev->tlsdev_ops->tls_dev_add(netdev, sk, TLS_OFFLOAD_CTX_DIR_RX,
- &ctx->crypto_recv,
+ &ctx->crypto_recv.info,
tcp_sk(sk)->copied_seq);
if (rc) {
pr_err_ratelimited("%s: The netdev has refused to offload this socket\n",
diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
index 6102169239d1..450a6dbc5a88 100644
--- a/net/tls/tls_device_fallback.c
+++ b/net/tls/tls_device_fallback.c
@@ -320,7 +320,7 @@ static struct sk_buff *tls_enc_skb(struct tls_context *tls_ctx,
goto free_req;
iv = buf;
- memcpy(iv, tls_ctx->crypto_send_aes_gcm_128.salt,
+ memcpy(iv, tls_ctx->crypto_send.aes_gcm_128.salt,
TLS_CIPHER_AES_GCM_128_SALT_SIZE);
aad = buf + TLS_CIPHER_AES_GCM_128_SALT_SIZE +
TLS_CIPHER_AES_GCM_128_IV_SIZE;
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 180b6640e531..523622dc74f8 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -241,6 +241,16 @@ static void tls_write_space(struct sock *sk)
ctx->sk_write_space(sk);
}
+static void tls_ctx_free(struct tls_context *ctx)
+{
+ if (!ctx)
+ return;
+
+ memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
+ memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
+ kfree(ctx);
+}
+
static void tls_sk_proto_close(struct sock *sk, long timeout)
{
struct tls_context *ctx = tls_get_ctx(sk);
@@ -294,7 +304,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
#else
{
#endif
- kfree(ctx);
+ tls_ctx_free(ctx);
ctx = NULL;
}
@@ -305,7 +315,7 @@ skip_tx_cleanup:
* for sk->sk_prot->unhash [tls_hw_unhash]
*/
if (free_ctx)
- kfree(ctx);
+ tls_ctx_free(ctx);
}
static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
@@ -330,7 +340,7 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
}
/* get user crypto info */
- crypto_info = &ctx->crypto_send;
+ crypto_info = &ctx->crypto_send.info;
if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
rc = -EBUSY;
@@ -417,9 +427,9 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
}
if (tx)
- crypto_info = &ctx->crypto_send;
+ crypto_info = &ctx->crypto_send.info;
else
- crypto_info = &ctx->crypto_recv;
+ crypto_info = &ctx->crypto_recv.info;
/* Currently we don't support set crypto info more than one time */
if (TLS_CRYPTO_INFO_READY(crypto_info)) {
@@ -499,7 +509,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
goto out;
err_crypto_info:
- memset(crypto_info, 0, sizeof(*crypto_info));
+ memzero_explicit(crypto_info, sizeof(union tls_crypto_context));
out:
return rc;
}
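
tls_ctx_free() scrubs the crypto state with memzero_explicit() rather
than memset() because a plain memset() immediately before kfree() is a
dead store the compiler may elide. A minimal sketch under that
assumption; struct secret_ctx is hypothetical:

    #include <linux/slab.h>
    #include <linux/string.h>

    struct secret_ctx {
            u8 key[32];
    };

    static void secret_ctx_free(struct secret_ctx *ctx)
    {
            if (!ctx)
                    return;
            /* memzero_explicit() includes a compiler barrier, so the
             * wipe of key material survives dead-store elimination.
             */
            memzero_explicit(ctx->key, sizeof(ctx->key));
            kfree(ctx);
    }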
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 52fbe727d7c1..b9c6ecfbcfea 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -125,6 +125,9 @@ static int alloc_encrypted_sg(struct sock *sk, int len)
&ctx->sg_encrypted_num_elem,
&ctx->sg_encrypted_size, 0);
+ if (rc == -ENOSPC)
+ ctx->sg_encrypted_num_elem = ARRAY_SIZE(ctx->sg_encrypted_data);
+
return rc;
}
@@ -138,6 +141,9 @@ static int alloc_plaintext_sg(struct sock *sk, int len)
&ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
tls_ctx->pending_open_record_frags);
+ if (rc == -ENOSPC)
+ ctx->sg_plaintext_num_elem = ARRAY_SIZE(ctx->sg_plaintext_data);
+
return rc;
}
@@ -925,7 +931,15 @@ int tls_sw_recvmsg(struct sock *sk,
if (control != TLS_RECORD_TYPE_DATA)
goto recv_end;
}
+ } else {
+ /* MSG_PEEK right now cannot look beyond current skb
+ * from strparser, meaning we cannot advance skb here
+ * and thus unpause strparser since we'd lose the
+ * original one.
+ */
+ break;
}
+
/* If we have a new message from strparser, continue now. */
if (copied >= target && !ctx->recv_pkt)
break;
@@ -1049,8 +1063,8 @@ static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
goto read_failure;
}
- if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.version) ||
- header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.version)) {
+ if (header[1] != TLS_VERSION_MINOR(tls_ctx->crypto_recv.info.version) ||
+ header[2] != TLS_VERSION_MAJOR(tls_ctx->crypto_recv.info.version)) {
ret = -EINVAL;
goto read_failure;
}
@@ -1130,7 +1144,6 @@ void tls_sw_free_resources_rx(struct sock *sk)
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
- char keyval[TLS_CIPHER_AES_GCM_128_KEY_SIZE];
struct tls_crypto_info *crypto_info;
struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
struct tls_sw_context_tx *sw_ctx_tx = NULL;
@@ -1175,12 +1188,12 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
if (tx) {
crypto_init_wait(&sw_ctx_tx->async_wait);
- crypto_info = &ctx->crypto_send;
+ crypto_info = &ctx->crypto_send.info;
cctx = &ctx->tx;
aead = &sw_ctx_tx->aead_send;
} else {
crypto_init_wait(&sw_ctx_rx->async_wait);
- crypto_info = &ctx->crypto_recv;
+ crypto_info = &ctx->crypto_recv.info;
cctx = &ctx->rx;
aead = &sw_ctx_rx->aead_recv;
}
@@ -1259,9 +1272,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
ctx->push_pending_record = tls_sw_push_pending_record;
- memcpy(keyval, gcm_128_info->key, TLS_CIPHER_AES_GCM_128_KEY_SIZE);
-
- rc = crypto_aead_setkey(*aead, keyval,
+ rc = crypto_aead_setkey(*aead, gcm_128_info->key,
TLS_CIPHER_AES_GCM_128_KEY_SIZE);
if (rc)
goto free_aead;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 4b8ec659e797..176edfefcbaa 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3756,6 +3756,7 @@ static bool ht_rateset_to_mask(struct ieee80211_supported_band *sband,
return false;
/* check availability */
+ ridx = array_index_nospec(ridx, IEEE80211_HT_MCS_MASK_LEN);
if (sband->ht_cap.mcs.rx_mask[ridx] & rbit)
mcs[ridx] |= rbit;
else
@@ -10230,7 +10231,7 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev = dev->ieee80211_ptr;
s32 last, low, high;
u32 hyst;
- int i, n;
+ int i, n, low_index;
int err;
/* RSSI reporting disabled? */
@@ -10267,10 +10268,19 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev,
if (last < wdev->cqm_config->rssi_thresholds[i])
break;
- low = i > 0 ?
- (wdev->cqm_config->rssi_thresholds[i - 1] - hyst) : S32_MIN;
- high = i < n ?
- (wdev->cqm_config->rssi_thresholds[i] + hyst - 1) : S32_MAX;
+ low_index = i - 1;
+ if (low_index >= 0) {
+ low_index = array_index_nospec(low_index, n);
+ low = wdev->cqm_config->rssi_thresholds[low_index] - hyst;
+ } else {
+ low = S32_MIN;
+ }
+ if (i < n) {
+ i = array_index_nospec(i, n);
+ high = wdev->cqm_config->rssi_thresholds[i] + hyst - 1;
+ } else {
+ high = S32_MAX;
+ }
return rdev_set_cqm_rssi_range_config(rdev, dev, low, high);
}
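
Both cqm hunks apply the same hardening: after the architectural
bounds check, the index is clamped with array_index_nospec() so a
mispredicted branch cannot use an out-of-range value speculatively. A
minimal sketch of the pattern (read_threshold() is illustrative):

    #include <linux/nospec.h>

    static s32 read_threshold(const s32 *thresholds, int n, int i)
    {
            if (i < 0 || i >= n)
                    return 0;           /* architectural bounds check */
            /* Clamp i under speculation so the following load cannot
             * leak out-of-bounds memory through the cache.
             */
            i = array_index_nospec(i, n);
            return thresholds[i];
    }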
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2f702adf2912..24cfa2776f50 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2661,11 +2661,12 @@ static void reg_process_hint(struct regulatory_request *reg_request)
{
struct wiphy *wiphy = NULL;
enum reg_request_treatment treatment;
+ enum nl80211_reg_initiator initiator = reg_request->initiator;
if (reg_request->wiphy_idx != WIPHY_IDX_INVALID)
wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
- switch (reg_request->initiator) {
+ switch (initiator) {
case NL80211_REGDOM_SET_BY_CORE:
treatment = reg_process_hint_core(reg_request);
break;
@@ -2683,7 +2684,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
treatment = reg_process_hint_country_ie(wiphy, reg_request);
break;
default:
- WARN(1, "invalid initiator %d\n", reg_request->initiator);
+ WARN(1, "invalid initiator %d\n", initiator);
goto out_free;
}
@@ -2698,7 +2699,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
*/
if (treatment == REG_REQ_ALREADY_SET && wiphy &&
wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
- wiphy_update_regulatory(wiphy, reg_request->initiator);
+ wiphy_update_regulatory(wiphy, initiator);
wiphy_all_share_dfs_chan_state(wiphy);
reg_check_channels();
}
@@ -2867,6 +2868,7 @@ static int regulatory_hint_core(const char *alpha2)
request->alpha2[0] = alpha2[0];
request->alpha2[1] = alpha2[1];
request->initiator = NL80211_REGDOM_SET_BY_CORE;
+ request->wiphy_idx = WIPHY_IDX_INVALID;
queue_regulatory_request(request);
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index d36c3eb7b931..d0e7472dd9fd 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -1058,13 +1058,23 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
return NULL;
}
+/*
+ * Update RX channel information based on the available frame payload
+ * information. This is mainly for the 2.4 GHz band where frames can be received
+ * from neighboring channels and the Beacon frames use the DSSS Parameter Set
+ * element to indicate the current (transmitting) channel, but this might also
+ * be needed on other bands if RX frequency does not match with the actual
+ * operating channel of a BSS.
+ */
static struct ieee80211_channel *
cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
- struct ieee80211_channel *channel)
+ struct ieee80211_channel *channel,
+ enum nl80211_bss_scan_width scan_width)
{
const u8 *tmp;
u32 freq;
int channel_number = -1;
+ struct ieee80211_channel *alt_channel;
tmp = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ie, ielen);
if (tmp && tmp[1] == 1) {
@@ -1078,16 +1088,45 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
}
}
- if (channel_number < 0)
+ if (channel_number < 0) {
+ /* No channel information in frame payload */
return channel;
+ }
freq = ieee80211_channel_to_frequency(channel_number, channel->band);
- channel = ieee80211_get_channel(wiphy, freq);
- if (!channel)
- return NULL;
- if (channel->flags & IEEE80211_CHAN_DISABLED)
+ alt_channel = ieee80211_get_channel(wiphy, freq);
+ if (!alt_channel) {
+ if (channel->band == NL80211_BAND_2GHZ) {
+ /*
+ * Better not allow unexpected channels when that could
+ * be going beyond the 1-11 range (e.g., discovering
+ * BSS on channel 12 when radio is configured for
+ * channel 11).
+ */
+ return NULL;
+ }
+
+ /* No match for the payload channel number - ignore it */
+ return channel;
+ }
+
+ if (scan_width == NL80211_BSS_CHAN_WIDTH_10 ||
+ scan_width == NL80211_BSS_CHAN_WIDTH_5) {
+ /*
+ * Ignore channel number in 5 and 10 MHz channels where there
+ * may not be an n:1 or 1:n mapping between frequencies and
+ * channel numbers.
+ */
+ return channel;
+ }
+
+ /*
+ * Use the channel determined through the payload channel number
+ * instead of the RX channel reported by the driver.
+ */
+ if (alt_channel->flags & IEEE80211_CHAN_DISABLED)
return NULL;
- return channel;
+ return alt_channel;
}
/* Returned bss is reference counted and must be cleaned up appropriately. */
@@ -1112,7 +1151,8 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
(data->signal < 0 || data->signal > 100)))
return NULL;
- channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan);
+ channel = cfg80211_get_bss_channel(wiphy, ie, ielen, data->chan,
+ data->scan_width);
if (!channel)
return NULL;
@@ -1210,7 +1250,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
return NULL;
channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable,
- ielen, data->chan);
+ ielen, data->chan, data->scan_width);
if (!channel)
return NULL;
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 167f7025ac98..06943d9c9835 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -1278,12 +1278,16 @@ static int cfg80211_wext_giwrate(struct net_device *dev,
if (err)
return err;
- if (!(sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE)))
- return -EOPNOTSUPP;
+ if (!(sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_BITRATE))) {
+ err = -EOPNOTSUPP;
+ goto free;
+ }
rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate);
- return 0;
+free:
+ cfg80211_sinfo_release_content(&sinfo);
+ return err;
}
/* Get wireless statistics. Called by /proc/net/wireless and by SIOCGIWSTATS */
@@ -1293,7 +1297,7 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
/* we are under RTNL - globally locked - so can use static structs */
static struct iw_statistics wstats;
- static struct station_info sinfo;
+ static struct station_info sinfo = {};
u8 bssid[ETH_ALEN];
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION)
@@ -1352,6 +1356,8 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
if (sinfo.filled & BIT_ULL(NL80211_STA_INFO_TX_FAILED))
wstats.discard.retries = sinfo.tx_failed;
+ cfg80211_sinfo_release_content(&sinfo);
+
return &wstats;
}
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 4e937cd7c17d..661504042d30 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -744,6 +744,8 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
sk->sk_destruct = xsk_destruct;
sk_refcnt_debug_inc(sk);
+ sock_set_flag(sk, SOCK_RCU_FREE);
+
xs = xdp_sk(sk);
mutex_init(&xs->mutex);
spin_lock_init(&xs->tx_completion_lock);
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index b89c9c7f8c5c..be3520e429c9 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -458,6 +458,7 @@ resume:
XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
goto drop;
}
+ crypto_done = false;
} while (!err);
err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index 31acc6f33d98..6f05e831a73e 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -116,6 +116,9 @@ static void xfrmi_unlink(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
static void xfrmi_dev_free(struct net_device *dev)
{
+ struct xfrm_if *xi = netdev_priv(dev);
+
+ gro_cells_destroy(&xi->gro_cells);
free_percpu(dev->tstats);
}
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 45ba07ab3e4f..261995d37ced 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -100,6 +100,10 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
spin_unlock_bh(&x->lock);
skb_dst_force(skb);
+ if (!skb_dst(skb)) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
+ goto error_nolock;
+ }
if (xfrm_offload(skb)) {
x->type_offload->encap(x, skb);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 3110c3fbee20..119a427d9b2b 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -632,9 +632,9 @@ static void xfrm_hash_rebuild(struct work_struct *work)
break;
}
if (newpos)
- hlist_add_behind(&policy->bydst, newpos);
+ hlist_add_behind_rcu(&policy->bydst, newpos);
else
- hlist_add_head(&policy->bydst, chain);
+ hlist_add_head_rcu(&policy->bydst, chain);
}
spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
@@ -774,9 +774,9 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
break;
}
if (newpos)
- hlist_add_behind(&policy->bydst, newpos);
+ hlist_add_behind_rcu(&policy->bydst, newpos);
else
- hlist_add_head(&policy->bydst, chain);
+ hlist_add_head_rcu(&policy->bydst, chain);
__xfrm_policy_link(policy, dir);
/* After previous checking, family can either be AF_INET or AF_INET6 */
@@ -2491,6 +2491,10 @@ int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
}
skb_dst_force(skb);
+ if (!skb_dst(skb)) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
+ return 0;
+ }
dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
if (IS_ERR(dst)) {
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 4791aa8b8185..df7ca2dabc48 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -151,10 +151,16 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
err = -EINVAL;
switch (p->family) {
case AF_INET:
+ if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
+ goto out;
+
break;
case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
+ if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
+ goto out;
+
break;
#else
err = -EAFNOSUPPORT;
@@ -1396,10 +1402,16 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
switch (p->sel.family) {
case AF_INET:
+ if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
+ return -EINVAL;
+
break;
case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
+ if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
+ return -EINVAL;
+
break;
#else
return -EAFNOSUPPORT;
@@ -1480,6 +1492,9 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
(ut[i].family != prev_family))
return -EINVAL;
+ if (ut[i].mode >= XFRM_MODE_MAX)
+ return -EINVAL;
+
prev_family = ut[i].family;
switch (ut[i].family) {
diff --git a/samples/Kconfig b/samples/Kconfig
index bd133efc1a56..ad1ec7016d4c 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -1,5 +1,6 @@
menuconfig SAMPLES
bool "Sample kernel code"
+ depends on !UML
help
You can build and test sample kernel code here.
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 5a2d1c9578a0..54da4b070db3 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -219,7 +219,7 @@ else
sub_cmd_record_mcount = set -e ; perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
"$(if $(CONFIG_CPU_BIG_ENDIAN),big,little)" \
"$(if $(CONFIG_64BIT),64,32)" \
- "$(OBJDUMP)" "$(OBJCOPY)" "$(CC) $(KBUILD_CFLAGS)" \
+ "$(OBJDUMP)" "$(OBJCOPY)" "$(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS)" \
"$(LD) $(KBUILD_LDFLAGS)" "$(NM)" "$(RM)" "$(MV)" \
"$(if $(part-of-module),1,0)" "$(@)";
recordmcount_source := $(srctree)/scripts/recordmcount.pl
diff --git a/scripts/depmod.sh b/scripts/depmod.sh
index 999d585eaa73..e083bcae343f 100755
--- a/scripts/depmod.sh
+++ b/scripts/depmod.sh
@@ -11,13 +11,14 @@ DEPMOD=$1
KERNELRELEASE=$2
if ! test -r System.map ; then
+ echo "Warning: modules_install: missing 'System.map' file. Skipping depmod." >&2
exit 0
fi
if [ -z $(command -v $DEPMOD) ]; then
- echo "'make modules_install' requires $DEPMOD. Please install it." >&2
+ echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2
echo "This is probably in the kmod package." >&2
- exit 1
+ exit 0
fi
# older versions of depmod require the version string to start with three
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 4a7bd2192073..67ed9f6ccdf8 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -221,7 +221,6 @@ $(obj)/zconf.tab.o: $(obj)/zconf.lex.c
# check if necessary packages are available, and configure build flags
define filechk_conf_cfg
- $(CONFIG_SHELL) $(srctree)/scripts/kconfig/check-pkgconfig.sh; \
$(CONFIG_SHELL) $<
endef
diff --git a/scripts/kconfig/check-pkgconfig.sh b/scripts/kconfig/check-pkgconfig.sh
deleted file mode 100644
index 7a1c40bfb58c..000000000000
--- a/scripts/kconfig/check-pkgconfig.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-# Check for pkg-config presence
-
-if [ -z $(command -v pkg-config) ]; then
- echo "'make *config' requires 'pkg-config'. Please install it." 1>&2
- exit 1
-fi
diff --git a/scripts/kconfig/gconf-cfg.sh b/scripts/kconfig/gconf-cfg.sh
index 533b3d8f8f08..480ecd8b9f41 100755
--- a/scripts/kconfig/gconf-cfg.sh
+++ b/scripts/kconfig/gconf-cfg.sh
@@ -3,6 +3,13 @@
PKG="gtk+-2.0 gmodule-2.0 libglade-2.0"
+if [ -z "$(command -v pkg-config)" ]; then
+ echo >&2 "*"
+ echo >&2 "* 'make gconfig' requires 'pkg-config'. Please install it."
+ echo >&2 "*"
+ exit 1
+fi
+
if ! pkg-config --exists $PKG; then
echo >&2 "*"
echo >&2 "* Unable to find the GTK+ installation. Please make sure that"
diff --git a/scripts/kconfig/mconf-cfg.sh b/scripts/kconfig/mconf-cfg.sh
index e6f9facd0077..c812872d7f9d 100755
--- a/scripts/kconfig/mconf-cfg.sh
+++ b/scripts/kconfig/mconf-cfg.sh
@@ -4,20 +4,23 @@
PKG="ncursesw"
PKG2="ncurses"
-if pkg-config --exists $PKG; then
- echo cflags=\"$(pkg-config --cflags $PKG)\"
- echo libs=\"$(pkg-config --libs $PKG)\"
- exit 0
-fi
+if [ -n "$(command -v pkg-config)" ]; then
+ if pkg-config --exists $PKG; then
+ echo cflags=\"$(pkg-config --cflags $PKG)\"
+ echo libs=\"$(pkg-config --libs $PKG)\"
+ exit 0
+ fi
-if pkg-config --exists $PKG2; then
- echo cflags=\"$(pkg-config --cflags $PKG2)\"
- echo libs=\"$(pkg-config --libs $PKG2)\"
- exit 0
+ if pkg-config --exists $PKG2; then
+ echo cflags=\"$(pkg-config --cflags $PKG2)\"
+ echo libs=\"$(pkg-config --libs $PKG2)\"
+ exit 0
+ fi
fi
-# Unfortunately, some distributions (e.g. openSUSE) cannot find ncurses
-# by pkg-config.
+# Check the default paths in case pkg-config is not installed.
+# (Even if it is installed, some distributions such as openSUSE cannot
+# find ncurses by pkg-config.)
if [ -f /usr/include/ncursesw/ncurses.h ]; then
echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\"
echo libs=\"-lncursesw\"
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 83b5836615fb..143c05fec161 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -490,7 +490,6 @@ static void build_conf(struct menu *menu)
switch (prop->type) {
case P_MENU:
child_count++;
- prompt = prompt;
if (single_menu_mode) {
item_make("%s%*c%s",
menu->data ? "-->" : "++>",
diff --git a/scripts/kconfig/nconf-cfg.sh b/scripts/kconfig/nconf-cfg.sh
index 42f5ac73548e..001559ef0a60 100644
--- a/scripts/kconfig/nconf-cfg.sh
+++ b/scripts/kconfig/nconf-cfg.sh
@@ -4,20 +4,23 @@
PKG="ncursesw menuw panelw"
PKG2="ncurses menu panel"
-if pkg-config --exists $PKG; then
- echo cflags=\"$(pkg-config --cflags $PKG)\"
- echo libs=\"$(pkg-config --libs $PKG)\"
- exit 0
-fi
+if [ -n "$(command -v pkg-config)" ]; then
+ if pkg-config --exists $PKG; then
+ echo cflags=\"$(pkg-config --cflags $PKG)\"
+ echo libs=\"$(pkg-config --libs $PKG)\"
+ exit 0
+ fi
-if pkg-config --exists $PKG2; then
- echo cflags=\"$(pkg-config --cflags $PKG2)\"
- echo libs=\"$(pkg-config --libs $PKG2)\"
- exit 0
+ if pkg-config --exists $PKG2; then
+ echo cflags=\"$(pkg-config --cflags $PKG2)\"
+ echo libs=\"$(pkg-config --libs $PKG2)\"
+ exit 0
+ fi
fi
-# Unfortunately, some distributions (e.g. openSUSE) cannot find ncurses
-# by pkg-config.
+# Check the default paths in case pkg-config is not installed.
+# (Even if it is installed, some distributions such as openSUSE cannot
+# find ncurses by pkg-config.)
if [ -f /usr/include/ncursesw/ncurses.h ]; then
echo cflags=\"-D_GNU_SOURCE -I/usr/include/ncursesw\"
echo libs=\"-lncursesw -lmenuw -lpanelw\"
diff --git a/scripts/kconfig/qconf-cfg.sh b/scripts/kconfig/qconf-cfg.sh
index 0862e1562536..02ccc0ae1031 100755
--- a/scripts/kconfig/qconf-cfg.sh
+++ b/scripts/kconfig/qconf-cfg.sh
@@ -4,6 +4,13 @@
PKG="Qt5Core Qt5Gui Qt5Widgets"
PKG2="QtCore QtGui"
+if [ -z "$(command -v pkg-config)" ]; then
+ echo >&2 "*"
+ echo >&2 "* 'make xconfig' requires 'pkg-config'. Please install it."
+ echo >&2 "*"
+ exit 1
+fi
+
if pkg-config --exists $PKG; then
echo cflags=\"-std=c++11 -fPIC $(pkg-config --cflags Qt5Core Qt5Gui Qt5Widgets)\"
echo libs=\"$(pkg-config --libs $PKG)\"
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 71f39410691b..79f7dd57d571 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -74,7 +74,7 @@ scm_version()
fi
# Check for uncommitted changes
- if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then
+ if git status -uno --porcelain | grep -qv '^.. scripts/package'; then
printf '%s' -dirty
fi
diff --git a/scripts/subarch.include b/scripts/subarch.include
new file mode 100644
index 000000000000..650682821126
--- /dev/null
+++ b/scripts/subarch.include
@@ -0,0 +1,13 @@
+# SUBARCH tells the usermode build what the underlying arch is. That is set
+# first, and if a usermode build is happening, the "ARCH=um" on the command
+# line overrides the setting of ARCH below. If a native build is happening,
+# then ARCH is assigned, getting whatever value it gets normally, and
+# SUBARCH is subsequently ignored.
+
+SUBARCH := $(shell uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
+ -e s/sun4u/sparc64/ \
+ -e s/arm.*/arm/ -e s/sa110/arm/ \
+ -e s/s390x/s390/ -e s/parisc64/parisc/ \
+ -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
+ -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
+ -e s/riscv.*/riscv/)
diff --git a/security/Kconfig b/security/Kconfig
index 27d8b2688f75..d9aa521b5206 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -57,7 +57,7 @@ config SECURITY_NETWORK
config PAGE_TABLE_ISOLATION
bool "Remove the kernel mapping in user mode"
default y
- depends on X86 && !UML
+ depends on (X86_64 || X86_PAE) && !UML
help
This feature reduces the number of hardware side channels by
ensuring that the majority of kernel addresses are not mapped
diff --git a/security/keys/dh.c b/security/keys/dh.c
index 3b602a1e27fa..711e89d8c415 100644
--- a/security/keys/dh.c
+++ b/security/keys/dh.c
@@ -300,7 +300,7 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
}
dh_inputs.g_size = dlen;
- dlen = dh_data_from_key(pcopy.dh_private, &dh_inputs.key);
+ dlen = dh_data_from_key(pcopy.private, &dh_inputs.key);
if (dlen < 0) {
ret = dlen;
goto out2;
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index 730ea91d9be8..93676354f87f 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -263,6 +263,8 @@ do_registration(struct work_struct *work)
error:
mutex_unlock(&devices_mutex);
snd_bebob_stream_destroy_duplex(bebob);
+ kfree(bebob->maudio_special_quirk);
+ bebob->maudio_special_quirk = NULL;
snd_card_free(bebob->card);
dev_info(&bebob->unit->device,
"Sound card registration failed: %d\n", err);
diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
index bd55620c6a47..c266997ad299 100644
--- a/sound/firewire/bebob/bebob_maudio.c
+++ b/sound/firewire/bebob/bebob_maudio.c
@@ -96,17 +96,13 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
struct fw_device *device = fw_parent_device(unit);
int err, rcode;
u64 date;
- __le32 cues[3] = {
- cpu_to_le32(MAUDIO_BOOTLOADER_CUE1),
- cpu_to_le32(MAUDIO_BOOTLOADER_CUE2),
- cpu_to_le32(MAUDIO_BOOTLOADER_CUE3)
- };
+ __le32 *cues;
/* check date of software used to build */
err = snd_bebob_read_block(unit, INFO_OFFSET_SW_DATE,
&date, sizeof(u64));
if (err < 0)
- goto end;
+ return err;
/*
* firmware version 5058 or later has date later than "20070401", but
* 'date' is not null-terminated.
@@ -114,20 +110,28 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
if (date < 0x3230303730343031LL) {
dev_err(&unit->device,
"Use firmware version 5058 or later\n");
- err = -ENOSYS;
- goto end;
+ return -ENXIO;
}
+ cues = kmalloc_array(3, sizeof(*cues), GFP_KERNEL);
+ if (!cues)
+ return -ENOMEM;
+
+ cues[0] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE1);
+ cues[1] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE2);
+ cues[2] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE3);
+
rcode = fw_run_transaction(device->card, TCODE_WRITE_BLOCK_REQUEST,
device->node_id, device->generation,
device->max_speed, BEBOB_ADDR_REG_REQ,
- cues, sizeof(cues));
+ cues, 3 * sizeof(*cues));
+ kfree(cues);
if (rcode != RCODE_COMPLETE) {
dev_err(&unit->device,
"Failed to send a cue to load firmware\n");
err = -EIO;
}
-end:
+
return err;
}
@@ -290,10 +294,6 @@ snd_bebob_maudio_special_discover(struct snd_bebob *bebob, bool is1814)
bebob->midi_output_ports = 2;
}
end:
- if (err < 0) {
- kfree(params);
- bebob->maudio_special_quirk = NULL;
- }
mutex_unlock(&bebob->mutex);
return err;
}
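
The cues buffer above moves from the stack to kmalloc_array() because
fw_run_transaction() may hand the buffer to the controller for DMA,
and with CONFIG_VMAP_STACK stack memory is not guaranteed to be
DMA-able. A hedged sketch of the pattern; do_bus_write() is a
hypothetical stand-in for the transaction call:

    #include <linux/slab.h>

    static int send_three_cues(u32 a, u32 b, u32 c)
    {
            __le32 *buf;
            int err;

            /* Heap memory, unlike a vmapped stack, is safe to DMA from. */
            buf = kmalloc_array(3, sizeof(*buf), GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;
            buf[0] = cpu_to_le32(a);
            buf[1] = cpu_to_le32(b);
            buf[2] = cpu_to_le32(c);

            err = do_bus_write(buf, 3 * sizeof(*buf));  /* hypothetical */

            kfree(buf);     /* release on success and failure alike */
            return err;
    }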
diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c
index 1f5e1d23f31a..ef689997d6a5 100644
--- a/sound/firewire/digi00x/digi00x.c
+++ b/sound/firewire/digi00x/digi00x.c
@@ -49,6 +49,7 @@ static void dg00x_free(struct snd_dg00x *dg00x)
fw_unit_put(dg00x->unit);
mutex_destroy(&dg00x->mutex);
+ kfree(dg00x);
}
static void dg00x_card_free(struct snd_card *card)
diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c
index ad7a0a32557d..64c3cb0fb926 100644
--- a/sound/firewire/fireface/ff-protocol-ff400.c
+++ b/sound/firewire/fireface/ff-protocol-ff400.c
@@ -146,6 +146,7 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
{
__le32 *reg;
int i;
+ int err;
reg = kcalloc(18, sizeof(__le32), GFP_KERNEL);
if (reg == NULL)
@@ -163,9 +164,11 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
reg[i] = cpu_to_le32(0x00000001);
}
- return snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST,
- FF400_FETCH_PCM_FRAMES, reg,
- sizeof(__le32) * 18, 0);
+ err = snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST,
+ FF400_FETCH_PCM_FRAMES, reg,
+ sizeof(__le32) * 18, 0);
+ kfree(reg);
+ return err;
}
static void ff400_dump_sync_status(struct snd_ff *ff,
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index 71a0613d3da0..f2d073365cf6 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -301,6 +301,8 @@ error:
snd_efw_transaction_remove_instance(efw);
snd_efw_stream_destroy_duplex(efw);
snd_card_free(efw->card);
+ kfree(efw->resp_buf);
+ efw->resp_buf = NULL;
dev_info(&efw->unit->device,
"Sound card registration failed: %d\n", err);
}
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index 1e5b2c802635..2ea8be6c8584 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -130,6 +130,7 @@ static void oxfw_free(struct snd_oxfw *oxfw)
kfree(oxfw->spec);
mutex_destroy(&oxfw->mutex);
+ kfree(oxfw);
}
/*
@@ -207,6 +208,7 @@ static int detect_quirks(struct snd_oxfw *oxfw)
static void do_registration(struct work_struct *work)
{
struct snd_oxfw *oxfw = container_of(work, struct snd_oxfw, dwork.work);
+ int i;
int err;
if (oxfw->registered)
@@ -269,7 +271,15 @@ error:
snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
if (oxfw->has_output)
snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
+ for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; ++i) {
+ kfree(oxfw->tx_stream_formats[i]);
+ oxfw->tx_stream_formats[i] = NULL;
+ kfree(oxfw->rx_stream_formats[i]);
+ oxfw->rx_stream_formats[i] = NULL;
+ }
snd_card_free(oxfw->card);
+ kfree(oxfw->spec);
+ oxfw->spec = NULL;
dev_info(&oxfw->unit->device,
"Sound card registration failed: %d\n", err);
}
diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c
index 44ad41fb7374..d3fdc463a884 100644
--- a/sound/firewire/tascam/tascam.c
+++ b/sound/firewire/tascam/tascam.c
@@ -93,6 +93,7 @@ static void tscm_free(struct snd_tscm *tscm)
fw_unit_put(tscm->unit);
mutex_destroy(&tscm->mutex);
+ kfree(tscm);
}
static void tscm_card_free(struct snd_card *card)
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 560ec0986e1a..74244d8e2909 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -40,6 +40,8 @@ static void azx_clear_corbrp(struct hdac_bus *bus)
*/
void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus)
{
+ WARN_ON_ONCE(!bus->rb.area);
+
spin_lock_irq(&bus->reg_lock);
/* CORB set up */
bus->corb.addr = bus->rb.addr;
@@ -383,7 +385,7 @@ void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus)
EXPORT_SYMBOL_GPL(snd_hdac_bus_exit_link_reset);
/* reset codec link */
-static int azx_reset(struct hdac_bus *bus, bool full_reset)
+int snd_hdac_bus_reset_link(struct hdac_bus *bus, bool full_reset)
{
if (!full_reset)
goto skip_reset;
@@ -408,7 +410,7 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset)
skip_reset:
/* check to see if controller is ready */
if (!snd_hdac_chip_readb(bus, GCTL)) {
- dev_dbg(bus->dev, "azx_reset: controller not ready!\n");
+ dev_dbg(bus->dev, "controller not ready!\n");
return -EBUSY;
}
@@ -423,6 +425,7 @@ static int azx_reset(struct hdac_bus *bus, bool full_reset)
return 0;
}
+EXPORT_SYMBOL_GPL(snd_hdac_bus_reset_link);
/* enable interrupts */
static void azx_int_enable(struct hdac_bus *bus)
@@ -477,15 +480,17 @@ bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
return false;
/* reset controller */
- azx_reset(bus, full_reset);
+ snd_hdac_bus_reset_link(bus, full_reset);
- /* initialize interrupts */
+ /* clear interrupts */
azx_int_clear(bus);
- azx_int_enable(bus);
/* initialize the codec command I/O */
snd_hdac_bus_init_cmd_io(bus);
+ /* enable interrupts after CORB/RIRB buffers are initialized above */
+ azx_int_enable(bus);
+
/* program the position buffer */
if (bus->use_posbuf && bus->posbuf.addr) {
snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr);
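
The snd_hdac_bus_init_chip() reordering ensures no interrupt can fire
while the CORB/RIRB rings are half set up. The resulting sequence,
restated as a sketch (the helpers are the ones touched above, but this
wrapper function itself is illustrative):

    static void controller_bring_up(struct hdac_bus *bus, bool full_reset)
    {
            snd_hdac_bus_reset_link(bus, full_reset); /* 1. reset link  */
            azx_int_clear(bus);            /* 2. drop stale status bits */
            snd_hdac_bus_init_cmd_io(bus); /* 3. CORB/RIRB buffers ready */
            azx_int_enable(bus);           /* 4. only now unmask IRQs  */
    }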
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index b5282cbbe489..617ff1aa818f 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -145,9 +145,11 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
if (!acomp->ops) {
request_module("i915");
/* 10s timeout */
- wait_for_completion_timeout(&bind_complete, 10 * 1000);
+ wait_for_completion_timeout(&bind_complete,
+ msecs_to_jiffies(10 * 1000));
}
if (!acomp->ops) {
+ dev_info(bus->dev, "couldn't bind with audio component\n");
snd_hdac_acomp_exit(bus);
return -ENODEV;
}
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 90713741c2dc..6ebe817801ea 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -2540,7 +2540,7 @@ static int snd_emu10k1_fx8010_ioctl(struct snd_hwdep * hw, struct file *file, un
emu->support_tlv = 1;
return put_user(SNDRV_EMU10K1_VERSION, (int __user *)argp);
case SNDRV_EMU10K1_IOCTL_INFO:
- info = kmalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
snd_emu10k1_fx8010_info(emu, info);
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 1b2ce304152a..aa4c672dbaf7 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -365,8 +365,10 @@ enum {
*/
#ifdef SUPPORT_VGA_SWITCHEROO
#define use_vga_switcheroo(chip) ((chip)->use_vga_switcheroo)
+#define needs_eld_notify_link(chip) ((chip)->need_eld_notify_link)
#else
#define use_vga_switcheroo(chip) 0
+#define needs_eld_notify_link(chip) false
#endif
#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
@@ -453,6 +455,7 @@ static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev,
#endif
static int azx_acquire_irq(struct azx *chip, int do_disconnect);
+static void set_default_power_save(struct azx *chip);
/*
* initialize the PCI registers
@@ -1201,6 +1204,10 @@ static int azx_runtime_idle(struct device *dev)
azx_bus(chip)->codec_powered || !chip->running)
return -EBUSY;
+ /* ELD notification gets broken when HD-audio bus is off */
+ if (needs_eld_notify_link(hda))
+ return -EBUSY;
+
return 0;
}
@@ -1298,6 +1305,36 @@ static bool azx_vs_can_switch(struct pci_dev *pci)
return true;
}
+/*
+ * The discrete GPU cannot power down unless the HDA controller runtime
+ * suspends, so activate runtime PM on codecs even if power_save == 0.
+ */
+static void setup_vga_switcheroo_runtime_pm(struct azx *chip)
+{
+ struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
+ struct hda_codec *codec;
+
+ if (hda->use_vga_switcheroo && !hda->need_eld_notify_link) {
+ list_for_each_codec(codec, &chip->bus)
+ codec->auto_runtime_pm = 1;
+ /* reset the power save setup */
+ if (chip->running)
+ set_default_power_save(chip);
+ }
+}
+
+static void azx_vs_gpu_bound(struct pci_dev *pci,
+ enum vga_switcheroo_client_id client_id)
+{
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct azx *chip = card->private_data;
+ struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
+
+ if (client_id == VGA_SWITCHEROO_DIS)
+ hda->need_eld_notify_link = 0;
+ setup_vga_switcheroo_runtime_pm(chip);
+}
+
static void init_vga_switcheroo(struct azx *chip)
{
struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
@@ -1306,6 +1343,7 @@ static void init_vga_switcheroo(struct azx *chip)
dev_info(chip->card->dev,
"Handle vga_switcheroo audio client\n");
hda->use_vga_switcheroo = 1;
+ hda->need_eld_notify_link = 1; /* cleared in gpu_bound op */
chip->driver_caps |= AZX_DCAPS_PM_RUNTIME;
pci_dev_put(p);
}
@@ -1314,6 +1352,7 @@ static void init_vga_switcheroo(struct azx *chip)
static const struct vga_switcheroo_client_ops azx_vs_ops = {
.set_gpu_state = azx_vs_set_state,
.can_switch = azx_vs_can_switch,
+ .gpu_bound = azx_vs_gpu_bound,
};
static int register_vga_switcheroo(struct azx *chip)
@@ -1339,6 +1378,7 @@ static int register_vga_switcheroo(struct azx *chip)
#define init_vga_switcheroo(chip) /* NOP */
#define register_vga_switcheroo(chip) 0
#define check_hdmi_disabled(pci) false
+#define setup_vga_switcheroo_runtime_pm(chip) /* NOP */
#endif /* SUPPORT_VGA_SWITCHEROO */
/*
@@ -1352,6 +1392,7 @@ static int azx_free(struct azx *chip)
if (azx_has_pm_runtime(chip) && chip->running)
pm_runtime_get_noresume(&pci->dev);
+ chip->running = 0;
azx_del_card_list(chip);
@@ -2230,6 +2271,25 @@ static struct snd_pci_quirk power_save_blacklist[] = {
};
#endif /* CONFIG_PM */
+static void set_default_power_save(struct azx *chip)
+{
+ int val = power_save;
+
+#ifdef CONFIG_PM
+ if (pm_blacklist) {
+ const struct snd_pci_quirk *q;
+
+ q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
+ if (q && val) {
+ dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
+ q->subvendor, q->subdevice);
+ val = 0;
+ }
+ }
+#endif /* CONFIG_PM */
+ snd_hda_set_power_save(&chip->bus, val * 1000);
+}
+
/* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = {
[AZX_DRIVER_NVIDIA] = 8,
@@ -2241,9 +2301,7 @@ static int azx_probe_continue(struct azx *chip)
struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
struct hdac_bus *bus = azx_bus(chip);
struct pci_dev *pci = chip->pci;
- struct hda_codec *codec;
int dev = chip->dev_index;
- int val;
int err;
hda->probe_continued = 1;
@@ -2322,31 +2380,13 @@ static int azx_probe_continue(struct azx *chip)
if (err < 0)
goto out_free;
+ setup_vga_switcheroo_runtime_pm(chip);
+
chip->running = 1;
azx_add_card_list(chip);
- val = power_save;
-#ifdef CONFIG_PM
- if (pm_blacklist) {
- const struct snd_pci_quirk *q;
-
- q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
- if (q && val) {
- dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
- q->subvendor, q->subdevice);
- val = 0;
- }
- }
-#endif /* CONFIG_PM */
- /*
- * The discrete GPU cannot power down unless the HDA controller runtime
- * suspends, so activate runtime PM on codecs even if power_save == 0.
- */
- if (use_vga_switcheroo(hda))
- list_for_each_codec(codec, &chip->bus)
- codec->auto_runtime_pm = 1;
+ set_default_power_save(chip);
- snd_hda_set_power_save(&chip->bus, val * 1000);
if (azx_has_pm_runtime(chip))
pm_runtime_put_autosuspend(&pci->dev);
diff --git a/sound/pci/hda/hda_intel.h b/sound/pci/hda/hda_intel.h
index e3a3d318d2e5..f59719e06b91 100644
--- a/sound/pci/hda/hda_intel.h
+++ b/sound/pci/hda/hda_intel.h
@@ -37,6 +37,7 @@ struct hda_intel {
/* vga_switcheroo setup */
unsigned int use_vga_switcheroo:1;
+ unsigned int need_eld_notify_link:1;
unsigned int vga_switcheroo_registered:1;
unsigned int init_failed:1; /* delayed init failed */
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 1d117f00d04d..3ac7ba9b342d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6409,6 +6409,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index e359938e3d7e..77b265bd0505 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/sizes.h>
#include <linux/pm_runtime.h>
@@ -184,6 +185,24 @@ static void config_dma_descriptor_in_sram(void __iomem *acp_mmio,
acp_reg_write(descr_info->xfer_val, acp_mmio, mmACP_SRBM_Targ_Idx_Data);
}
+static void pre_config_reset(void __iomem *acp_mmio, u16 ch_num)
+{
+ u32 dma_ctrl;
+ int ret;
+
+ /* clear the reset bit */
+ dma_ctrl = acp_reg_read(acp_mmio, mmACP_DMA_CNTL_0 + ch_num);
+ dma_ctrl &= ~ACP_DMA_CNTL_0__DMAChRst_MASK;
+ acp_reg_write(dma_ctrl, acp_mmio, mmACP_DMA_CNTL_0 + ch_num);
+ /* check the reset bit before programming configuration registers */
+ ret = readl_poll_timeout(acp_mmio + ((mmACP_DMA_CNTL_0 + ch_num) * 4),
+ dma_ctrl,
+ !(dma_ctrl & ACP_DMA_CNTL_0__DMAChRst_MASK),
+ 100, ACP_DMA_RESET_TIME);
+ if (ret < 0)
+ pr_err("Failed to clear reset of channel : %d\n", ch_num);
+}
+
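For reference, readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) from <linux/iopoll.h> re-reads addr into val until cond holds or timeout_us microseconds elapse, returning 0 on success and -ETIMEDOUT otherwise. A rough open-coded equivalent of the call above (a sketch only; the real macro also performs one final read after the deadline before giving up):

	ktime_t deadline = ktime_add_us(ktime_get(), ACP_DMA_RESET_TIME);
	int ret = 0;

	for (;;) {
		dma_ctrl = acp_reg_read(acp_mmio, mmACP_DMA_CNTL_0 + ch_num);
		if (!(dma_ctrl & ACP_DMA_CNTL_0__DMAChRst_MASK))
			break;				/* reset bit cleared */
		if (ktime_compare(ktime_get(), deadline) > 0) {
			ret = -ETIMEDOUT;		/* poll timed out */
			break;
		}
		usleep_range(100, 200);			/* sleep between reads */
	}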
/*
* Initialize the DMA descriptor information for transfer between
* system memory <-> ACP SRAM
@@ -236,6 +255,7 @@ static void set_acp_sysmem_dma_descriptors(void __iomem *acp_mmio,
config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx,
&dmadscr[i]);
}
+ pre_config_reset(acp_mmio, ch);
config_acp_dma_channel(acp_mmio, ch,
dma_dscr_idx - 1,
NUM_DSCRS_PER_CHANNEL,
@@ -275,6 +295,7 @@ static void set_acp_to_i2s_dma_descriptors(void __iomem *acp_mmio, u32 size,
config_dma_descriptor_in_sram(acp_mmio, dma_dscr_idx,
&dmadscr[i]);
}
+ pre_config_reset(acp_mmio, ch);
+	/* Configure the DMA channel with the above descriptor */
config_acp_dma_channel(acp_mmio, ch, dma_dscr_idx - 1,
NUM_DSCRS_PER_CHANNEL,
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index 275677de669f..407554175282 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -157,8 +157,8 @@ static const struct snd_kcontrol_new cs4265_snd_controls[] = {
SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2,
3, 1, 0),
SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum),
- SOC_SINGLE("MMTLR Data Switch", 0,
- 1, 1, 0),
+ SOC_SINGLE("MMTLR Data Switch", CS4265_SPDIF_CTL2,
+ 0, 1, 0),
SOC_ENUM("Mono Channel Select", spdif_mono_select_enum),
SND_SOC_BYTES("C Data Buffer", CS4265_C_DATA_BUFF, 24),
};
diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
index 92b7125ea169..1093f766d0d2 100644
--- a/sound/soc/codecs/max98373.c
+++ b/sound/soc/codecs/max98373.c
@@ -520,6 +520,7 @@ static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
case MAX98373_R2000_SW_RESET ... MAX98373_R2009_INT_FLAG3:
+ case MAX98373_R203E_AMP_PATH_GAIN:
case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
@@ -729,6 +730,7 @@ static int max98373_probe(struct snd_soc_component *component)
/* Software Reset */
regmap_write(max98373->regmap,
MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
+ usleep_range(10000, 11000);
/* IV default slot configuration */
regmap_write(max98373->regmap,
@@ -817,6 +819,7 @@ static int max98373_resume(struct device *dev)
regmap_write(max98373->regmap,
MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
+ usleep_range(10000, 11000);
regcache_cache_only(max98373->regmap, false);
regcache_sync(max98373->regmap);
return 0;
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index dca82dd6e3bf..32fe76c3134a 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -64,8 +64,8 @@ static const struct reg_sequence rt5514_patch[] = {
{RT5514_ANA_CTRL_LDO10, 0x00028604},
{RT5514_ANA_CTRL_ADCFED, 0x00000800},
{RT5514_ASRC_IN_CTRL1, 0x00000003},
- {RT5514_DOWNFILTER0_CTRL3, 0x10000352},
- {RT5514_DOWNFILTER1_CTRL3, 0x10000352},
+ {RT5514_DOWNFILTER0_CTRL3, 0x10000342},
+ {RT5514_DOWNFILTER1_CTRL3, 0x10000342},
};
static const struct reg_default rt5514_reg[] = {
@@ -92,10 +92,10 @@ static const struct reg_default rt5514_reg[] = {
{RT5514_ASRC_IN_CTRL1, 0x00000003},
{RT5514_DOWNFILTER0_CTRL1, 0x00020c2f},
{RT5514_DOWNFILTER0_CTRL2, 0x00020c2f},
- {RT5514_DOWNFILTER0_CTRL3, 0x10000352},
+ {RT5514_DOWNFILTER0_CTRL3, 0x10000342},
{RT5514_DOWNFILTER1_CTRL1, 0x00020c2f},
{RT5514_DOWNFILTER1_CTRL2, 0x00020c2f},
- {RT5514_DOWNFILTER1_CTRL3, 0x10000352},
+ {RT5514_DOWNFILTER1_CTRL3, 0x10000342},
{RT5514_ANA_CTRL_LDO10, 0x00028604},
{RT5514_ANA_CTRL_LDO18_16, 0x02000345},
{RT5514_ANA_CTRL_ADC12, 0x0000a2a8},
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index 640d400ca013..afe7d5b19313 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -750,8 +750,8 @@ static bool rt5682_readable_register(struct device *dev, unsigned int reg)
}
static const DECLARE_TLV_DB_SCALE(hp_vol_tlv, -2250, 150, 0);
-static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
-static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
+static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -6525, 75, 0);
+static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -1725, 75, 0);
static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
/* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
@@ -1114,7 +1114,7 @@ static const struct snd_kcontrol_new rt5682_snd_controls[] = {
/* DAC Digital Volume */
SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5682_DAC1_DIG_VOL,
- RT5682_L_VOL_SFT, RT5682_R_VOL_SFT, 175, 0, dac_vol_tlv),
+ RT5682_L_VOL_SFT + 1, RT5682_R_VOL_SFT + 1, 86, 0, dac_vol_tlv),
/* IN Boost Volume */
SOC_SINGLE_TLV("CBJ Boost Volume", RT5682_CBJ_BST_CTRL,
@@ -1124,7 +1124,7 @@ static const struct snd_kcontrol_new rt5682_snd_controls[] = {
SOC_DOUBLE("STO1 ADC Capture Switch", RT5682_STO1_ADC_DIG_VOL,
RT5682_L_MUTE_SFT, RT5682_R_MUTE_SFT, 1, 1),
SOC_DOUBLE_TLV("STO1 ADC Capture Volume", RT5682_STO1_ADC_DIG_VOL,
- RT5682_L_VOL_SFT, RT5682_R_VOL_SFT, 127, 0, adc_vol_tlv),
+ RT5682_L_VOL_SFT + 1, RT5682_R_VOL_SFT + 1, 63, 0, adc_vol_tlv),
/* ADC Boost Volume Control */
SOC_DOUBLE_TLV("STO1 ADC Boost Gain Volume", RT5682_STO1_ADC_BOOST,
diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c
index d53680ac78e4..6df158669420 100644
--- a/sound/soc/codecs/sigmadsp.c
+++ b/sound/soc/codecs/sigmadsp.c
@@ -117,8 +117,7 @@ static int sigmadsp_ctrl_write(struct sigmadsp *sigmadsp,
struct sigmadsp_control *ctrl, void *data)
{
	/* safeload loads up to 20 bytes in an atomic operation */
- if (ctrl->num_bytes > 4 && ctrl->num_bytes <= 20 && sigmadsp->ops &&
- sigmadsp->ops->safeload)
+ if (ctrl->num_bytes <= 20 && sigmadsp->ops && sigmadsp->ops->safeload)
return sigmadsp->ops->safeload(sigmadsp, ctrl->addr, data,
ctrl->num_bytes);
else
diff --git a/sound/soc/codecs/tas6424.c b/sound/soc/codecs/tas6424.c
index 14999b999fd3..0d6145549a98 100644
--- a/sound/soc/codecs/tas6424.c
+++ b/sound/soc/codecs/tas6424.c
@@ -424,8 +424,10 @@ static void tas6424_fault_check_work(struct work_struct *work)
TAS6424_FAULT_PVDD_UV |
TAS6424_FAULT_VBAT_UV;
- if (reg)
+ if (!reg) {
+ tas6424->last_fault1 = reg;
goto check_global_fault2_reg;
+ }
/*
* Only flag errors once for a given occurrence. This is needed as
@@ -461,8 +463,10 @@ check_global_fault2_reg:
TAS6424_FAULT_OTSD_CH3 |
TAS6424_FAULT_OTSD_CH4;
- if (!reg)
+ if (!reg) {
+ tas6424->last_fault2 = reg;
goto check_warn_reg;
+ }
if ((reg & TAS6424_FAULT_OTSD) && !(tas6424->last_fault2 & TAS6424_FAULT_OTSD))
dev_crit(dev, "experienced a global overtemp shutdown\n");
@@ -497,8 +501,10 @@ check_warn_reg:
TAS6424_WARN_VDD_OTW_CH3 |
TAS6424_WARN_VDD_OTW_CH4;
- if (!reg)
+ if (!reg) {
+ tas6424->last_warn = reg;
goto out;
+ }
if ((reg & TAS6424_WARN_VDD_UV) && !(tas6424->last_warn & TAS6424_WARN_VDD_UV))
dev_warn(dev, "experienced a VDD under voltage condition\n");
diff --git a/sound/soc/codecs/wm8804-i2c.c b/sound/soc/codecs/wm8804-i2c.c
index f27464c2c5ba..79541960f45d 100644
--- a/sound/soc/codecs/wm8804-i2c.c
+++ b/sound/soc/codecs/wm8804-i2c.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/i2c.h>
+#include <linux/acpi.h>
#include "wm8804.h"
@@ -40,17 +41,29 @@ static const struct i2c_device_id wm8804_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wm8804_i2c_id);
+#if defined(CONFIG_OF)
static const struct of_device_id wm8804_of_match[] = {
{ .compatible = "wlf,wm8804", },
{ }
};
MODULE_DEVICE_TABLE(of, wm8804_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id wm8804_acpi_match[] = {
+ { "1AEC8804", 0 }, /* Wolfson PCI ID + part ID */
+ { "10138804", 0 }, /* Cirrus Logic PCI ID + part ID */
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, wm8804_acpi_match);
+#endif
static struct i2c_driver wm8804_i2c_driver = {
.driver = {
.name = "wm8804",
.pm = &wm8804_pm,
- .of_match_table = wm8804_of_match,
+ .of_match_table = of_match_ptr(wm8804_of_match),
+ .acpi_match_table = ACPI_PTR(wm8804_acpi_match),
},
.probe = wm8804_i2c_probe,
.remove = wm8804_i2c_remove,
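The of_match_ptr()/ACPI_PTR() wrappers let the same driver structure build with either firmware interface disabled: when the corresponding Kconfig option is off, the pointer collapses to NULL and the #ifdef'd match tables above compile away. Simplified forms of the two macros, as found in <linux/of.h> and <linux/acpi.h>:

	#ifdef CONFIG_OF
	#define of_match_ptr(_ptr)	(_ptr)
	#else
	#define of_match_ptr(_ptr)	NULL
	#endif

	#ifdef CONFIG_ACPI
	#define ACPI_PTR(_ptr)		(_ptr)
	#else
	#define ACPI_PTR(_ptr)		NULL
	#endif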
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index 953d94d50586..ade34c26ad2f 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -719,7 +719,7 @@ static int wm9712_probe(struct platform_device *pdev)
static struct platform_driver wm9712_component_driver = {
.driver = {
- .name = "wm9712-component",
+ .name = "wm9712-codec",
},
.probe = wm9712_probe,
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index d32844f94d74..b6dc524830b2 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -575,6 +575,17 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_MONO_SPEAKER |
BYT_RT5640_MCLK_EN),
},
+ { /* Linx Linx7 tablet */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LINX"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LINX7"),
+ },
+ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
+ BYT_RT5640_MONO_SPEAKER |
+ BYT_RT5640_JD_NOT_INV |
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
{ /* MSI S100 tablet */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Micro-Star International Co., Ltd."),
@@ -602,6 +613,21 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
BYT_RT5640_SSP0_AIF1 |
BYT_RT5640_MCLK_EN),
},
+ { /* Onda v975w */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ /* The above are too generic, also match BIOS info */
+ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "5.6.5"),
+ DMI_EXACT_MATCH(DMI_BIOS_DATE, "07/25/2014"),
+ },
+ .driver_data = (void *)(BYT_RT5640_IN1_MAP |
+ BYT_RT5640_JD_SRC_JD2_IN4N |
+ BYT_RT5640_OVCD_TH_2000UA |
+ BYT_RT5640_OVCD_SF_0P75 |
+ BYT_RT5640_DIFF_MIC |
+ BYT_RT5640_MCLK_EN),
+ },
{ /* Pipo W4 */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index dce649485649..1d17be0f78a0 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -834,7 +834,7 @@ static int skl_first_init(struct hdac_bus *bus)
return -ENXIO;
}
- skl_init_chip(bus, true);
+ snd_hdac_bus_reset_link(bus, true);
snd_hdac_bus_parse_capabilities(bus);
diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
index dc94c5c53788..c6b51571be94 100644
--- a/sound/soc/qcom/qdsp6/q6routing.c
+++ b/sound/soc/qcom/qdsp6/q6routing.c
@@ -960,8 +960,10 @@ static int msm_routing_probe(struct snd_soc_component *c)
{
int i;
- for (i = 0; i < MAX_SESSIONS; i++)
+ for (i = 0; i < MAX_SESSIONS; i++) {
routing_data->sessions[i].port_id = -1;
+ routing_data->sessions[i].fedai_id = -1;
+ }
return 0;
}
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 3a3064dda57f..051f96405346 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -462,6 +462,11 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
goto rsnd_adg_get_clkout_end;
req_size = prop->length / sizeof(u32);
+ if (req_size > REQ_SIZE) {
+ dev_err(dev,
+ "too many clock-frequency, use top %d\n", REQ_SIZE);
+ req_size = REQ_SIZE;
+ }
of_property_read_u32_array(np, "clock-frequency", req_rate, req_size);
req_48kHz_rate = 0;
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index f8425d8b44d2..d23c2bbff0cf 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -478,7 +478,7 @@ static int rsnd_status_update(u32 *status,
(func_call && (mod)->ops->fn) ? #fn : ""); \
if (func_call && (mod)->ops->fn) \
tmp = (mod)->ops->fn(mod, io, param); \
- if (tmp) \
+ if (tmp && (tmp != -EPROBE_DEFER)) \
dev_err(dev, "%s[%d] : %s error %d\n", \
rsnd_mod_name(mod), rsnd_mod_id(mod), \
#fn, tmp); \
@@ -958,12 +958,23 @@ static void rsnd_soc_dai_shutdown(struct snd_pcm_substream *substream,
rsnd_dai_stream_quit(io);
}
+static int rsnd_soc_dai_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct rsnd_priv *priv = rsnd_dai_to_priv(dai);
+ struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
+ struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
+
+ return rsnd_dai_call(prepare, io, priv);
+}
+
static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
.startup = rsnd_soc_dai_startup,
.shutdown = rsnd_soc_dai_shutdown,
.trigger = rsnd_soc_dai_trigger,
.set_fmt = rsnd_soc_dai_set_fmt,
.set_tdm_slot = rsnd_soc_set_dai_tdm_slot,
+ .prepare = rsnd_soc_dai_prepare,
};
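For context, the new .prepare hook slots into the standard ALSA call sequence between hw_params and the first trigger; unlike trigger it runs in non-atomic context and may be invoked more than once per stream, which is what makes it a safer place to start the master clock. A simplified sketch of the ordering (not taken from this patch):

	/* open        -> .startup
	 * hw_params   -> .set_fmt / .hw_params
	 * prepare     -> .prepare        (master clock now starts here)
	 * trigger     -> .trigger (START/STOP)
	 * close       -> .shutdown
	 */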
void rsnd_parse_connect_common(struct rsnd_dai *rdai,
@@ -1550,6 +1561,14 @@ exit_snd_probe:
rsnd_dai_call(remove, &rdai->capture, priv);
}
+ /*
+	 * adg is a very special mod which can't use rsnd_dai_call(remove),
+	 * and it registers the ADG clock on probe.
+	 * That clock should be unregistered if probe failed.
+	 * Mainly this is assuming the -EPROBE_DEFER case.
+ */
+ rsnd_adg_remove(priv);
+
return ret;
}
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index fe63ef8600d0..d65ea7bc4dac 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -241,6 +241,10 @@ static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
/* try to get DMAEngine channel */
chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
if (IS_ERR_OR_NULL(chan)) {
+		/* Propagate -EPROBE_DEFER so that probing can be retried */
+ if (PTR_ERR(chan) == -EPROBE_DEFER)
+ return PTR_ERR(chan);
+
/*
* DMA failed. try to PIO mode
* see
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 96d93330b1e1..8f7a0abfa751 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -280,6 +280,9 @@ struct rsnd_mod_ops {
int (*nolock_stop)(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv);
+ int (*prepare)(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct rsnd_priv *priv);
};
struct rsnd_dai_stream;
@@ -309,6 +312,7 @@ struct rsnd_mod {
* H 0: fallback
* H 0: hw_params
* H 0: pointer
+ * H 0: prepare
*/
#define __rsnd_mod_shift_nolock_start 0
#define __rsnd_mod_shift_nolock_stop 0
@@ -323,6 +327,7 @@ struct rsnd_mod {
#define __rsnd_mod_shift_fallback 28 /* always called */
#define __rsnd_mod_shift_hw_params 28 /* always called */
#define __rsnd_mod_shift_pointer 28 /* always called */
+#define __rsnd_mod_shift_prepare 28 /* always called */
#define __rsnd_mod_add_probe 0
#define __rsnd_mod_add_remove 0
@@ -337,6 +342,7 @@ struct rsnd_mod {
#define __rsnd_mod_add_fallback 0
#define __rsnd_mod_add_hw_params 0
#define __rsnd_mod_add_pointer 0
+#define __rsnd_mod_add_prepare 0
#define __rsnd_mod_call_probe 0
#define __rsnd_mod_call_remove 0
@@ -351,6 +357,7 @@ struct rsnd_mod {
#define __rsnd_mod_call_pointer 0
#define __rsnd_mod_call_nolock_start 0
#define __rsnd_mod_call_nolock_stop 1
+#define __rsnd_mod_call_prepare 0
#define rsnd_mod_to_priv(mod) ((mod)->priv)
#define rsnd_mod_name(mod) ((mod)->ops->name)
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 8304e4ec9242..3f880ec66459 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -283,7 +283,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
if (rsnd_ssi_is_multi_slave(mod, io))
return 0;
- if (ssi->usrcnt > 1) {
+ if (ssi->rate) {
if (ssi->rate != rate) {
dev_err(dev, "SSI parent/child should use same rate\n");
return -EINVAL;
@@ -434,7 +434,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
struct rsnd_priv *priv)
{
struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- int ret;
if (!rsnd_ssi_is_run_mods(mod, io))
return 0;
@@ -443,10 +442,6 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
rsnd_mod_power_on(mod);
- ret = rsnd_ssi_master_clk_start(mod, io);
- if (ret < 0)
- return ret;
-
rsnd_ssi_config_init(mod, io);
rsnd_ssi_register_setup(mod);
@@ -852,6 +847,13 @@ static int rsnd_ssi_pio_pointer(struct rsnd_mod *mod,
return 0;
}
+static int rsnd_ssi_prepare(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct rsnd_priv *priv)
+{
+ return rsnd_ssi_master_clk_start(mod, io);
+}
+
static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
.name = SSI_NAME,
.probe = rsnd_ssi_common_probe,
@@ -864,6 +866,7 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
.pointer = rsnd_ssi_pio_pointer,
.pcm_new = rsnd_ssi_pcm_new,
.hw_params = rsnd_ssi_hw_params,
+ .prepare = rsnd_ssi_prepare,
};
static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
@@ -940,6 +943,7 @@ static struct rsnd_mod_ops rsnd_ssi_dma_ops = {
.pcm_new = rsnd_ssi_pcm_new,
.fallback = rsnd_ssi_fallback,
.hw_params = rsnd_ssi_hw_params,
+ .prepare = rsnd_ssi_prepare,
};
int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod)
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 9cfe10d8040c..473eefe8658e 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1447,7 +1447,7 @@ static int soc_link_dai_widgets(struct snd_soc_card *card,
sink = codec_dai->playback_widget;
source = cpu_dai->capture_widget;
if (sink && source) {
- ret = snd_soc_dapm_new_pcm(card, dai_link->params,
+ ret = snd_soc_dapm_new_pcm(card, rtd, dai_link->params,
dai_link->num_params,
source, sink);
if (ret != 0) {
@@ -1460,7 +1460,7 @@ static int soc_link_dai_widgets(struct snd_soc_card *card,
sink = cpu_dai->playback_widget;
source = codec_dai->capture_widget;
if (sink && source) {
- ret = snd_soc_dapm_new_pcm(card, dai_link->params,
+ ret = snd_soc_dapm_new_pcm(card, rtd, dai_link->params,
dai_link->num_params,
source, sink);
if (ret != 0) {
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 7e96793050c9..461d951917c0 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -3652,6 +3652,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
{
struct snd_soc_dapm_path *source_p, *sink_p;
struct snd_soc_dai *source, *sink;
+ struct snd_soc_pcm_runtime *rtd = w->priv;
const struct snd_soc_pcm_stream *config = w->params + w->params_select;
struct snd_pcm_substream substream;
struct snd_pcm_hw_params *params = NULL;
@@ -3711,6 +3712,7 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
goto out;
}
substream.runtime = runtime;
+ substream.private_data = rtd;
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
@@ -3895,6 +3897,7 @@ outfree_w_param:
}
int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
+ struct snd_soc_pcm_runtime *rtd,
const struct snd_soc_pcm_stream *params,
unsigned int num_params,
struct snd_soc_dapm_widget *source,
@@ -3963,6 +3966,7 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
w->params = params;
w->num_params = num_params;
+ w->priv = rtd;
ret = snd_soc_dapm_add_path(&card->dapm, source, w, NULL, NULL);
if (ret)
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h
index 16e006f708ca..4602464ebdfb 100644
--- a/tools/arch/arm/include/uapi/asm/kvm.h
+++ b/tools/arch/arm/include/uapi/asm/kvm.h
@@ -27,6 +27,7 @@
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM
+#define __KVM_HAVE_VCPU_EVENTS
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -125,6 +126,18 @@ struct kvm_sync_regs {
struct kvm_arch_memory_slot {
};
+/* for KVM_GET/SET_VCPU_EVENTS */
+struct kvm_vcpu_events {
+ struct {
+ __u8 serror_pending;
+ __u8 serror_has_esr;
+ /* Align it to 8 bytes */
+ __u8 pad[6];
+ __u64 serror_esr;
+ } exception;
+ __u32 reserved[12];
+};
+
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
#define KVM_REG_ARM_COPROC_SHIFT 16
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index 4e76630dd655..97c3478ee6e7 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -39,6 +39,7 @@
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_READONLY_MEM
+#define __KVM_HAVE_VCPU_EVENTS
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
@@ -154,6 +155,18 @@ struct kvm_sync_regs {
struct kvm_arch_memory_slot {
};
+/* for KVM_GET/SET_VCPU_EVENTS */
+struct kvm_vcpu_events {
+ struct {
+ __u8 serror_pending;
+ __u8 serror_has_esr;
+ /* Align it to 8 bytes */
+ __u8 pad[6];
+ __u64 serror_esr;
+ } exception;
+ __u32 reserved[12];
+};
+
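A hedged userspace sketch of how this structure is used on arm/arm64 — injecting an SError with a caller-supplied ESR through the generic KVM_SET_VCPU_EVENTS ioctl. vcpu_fd and esr are assumed to exist, and the serror_has_esr form is gated on KVM_CAP_ARM_INJECT_SERROR_ESR (defined further below):

	struct kvm_vcpu_events events = {};

	events.exception.serror_pending = 1;
	events.exception.serror_has_esr = 1;	/* only with the cap above */
	events.exception.serror_esr = esr;
	if (ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events) < 0)
		perror("KVM_SET_VCPU_EVENTS");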
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
#define KVM_REG_ARM_COPROC_SHIFT 16
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
index 4cdaa55fabfe..9a50f02b9894 100644
--- a/tools/arch/s390/include/uapi/asm/kvm.h
+++ b/tools/arch/s390/include/uapi/asm/kvm.h
@@ -4,7 +4,7 @@
/*
* KVM s390 specific structures and definitions
*
- * Copyright IBM Corp. 2008
+ * Copyright IBM Corp. 2008, 2018
*
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
@@ -225,6 +225,7 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_FPRS (1UL << 8)
#define KVM_SYNC_GSCB (1UL << 9)
#define KVM_SYNC_BPBC (1UL << 10)
+#define KVM_SYNC_ETOKEN (1UL << 11)
/* length and alignment of the sdnx as a power of two */
#define SDNXC 8
#define SDNXL (1UL << SDNXC)
@@ -258,6 +259,8 @@ struct kvm_sync_regs {
struct {
__u64 reserved1[2];
__u64 gscb[4];
+ __u64 etoken;
+ __u64 etoken_extension;
};
};
};
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index c535c2fdea13..fd23d5778ea1 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -377,5 +377,43 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
+#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
+
+#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
+#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
+
+#define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001
+#define KVM_STATE_NESTED_SMM_VMXON 0x00000002
+
+struct kvm_vmx_nested_state {
+ __u64 vmxon_pa;
+ __u64 vmcs_pa;
+
+ struct {
+ __u16 flags;
+ } smm;
+};
+
+/* for KVM_CAP_NESTED_STATE */
+struct kvm_nested_state {
+ /* KVM_STATE_* flags */
+ __u16 flags;
+
+ /* 0 for VMX, 1 for SVM. */
+ __u16 format;
+
+ /* 128 for SVM, 128 + VMCS size for VMX. */
+ __u32 size;
+
+ union {
+ /* VMXON, VMCS */
+ struct kvm_vmx_nested_state vmx;
+
+ /* Pad the header to 128 bytes. */
+ __u8 pad[120];
+ };
+
+ __u8 data[0];
+};
#endif /* _ASM_X86_KVM_H */
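Because kvm_nested_state ends in a variable-length data[] tail, callers allocate header plus payload and advertise the allocation in .size before invoking the new ioctls defined further below. A hedged sketch — the 16 KiB bound is an assumption; a real caller would size the buffer from the KVM_CAP_NESTED_STATE extension check:

	size_t size = 16 * 1024;			/* assumed upper bound */
	struct kvm_nested_state *state = calloc(1, size);

	if (state) {
		state->size = size;
		if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0) {
			free(state);
			state = NULL;
		}
	}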
diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c
index d78aed86af09..8ff8cb1a11f4 100644
--- a/tools/hv/hv_fcopy_daemon.c
+++ b/tools/hv/hv_fcopy_daemon.c
@@ -234,6 +234,7 @@ int main(int argc, char *argv[])
break;
default:
+ error = HV_E_FAIL;
syslog(LOG_ERR, "Unknown operation: %d",
buffer.hdr.operation);
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index dbf6e8bd98ba..bbb2a8ef367c 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -286,7 +286,7 @@ static int kvp_key_delete(int pool, const __u8 *key, int key_size)
* Found a match; just move the remaining
* entries up.
*/
- if (i == num_records) {
+ if (i == (num_records - 1)) {
kvp_file_info[pool].num_records--;
kvp_update_file(pool);
return 0;
diff --git a/tools/include/linux/lockdep.h b/tools/include/linux/lockdep.h
index 6b0c36a58fcb..e56997288f2b 100644
--- a/tools/include/linux/lockdep.h
+++ b/tools/include/linux/lockdep.h
@@ -30,9 +30,12 @@ struct task_struct {
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
int pid;
+ int state;
char comm[17];
};
+#define TASK_RUNNING 0
+
extern struct task_struct *__curr(void);
#define current (__curr())
diff --git a/tools/include/linux/nmi.h b/tools/include/linux/nmi.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/tools/include/linux/nmi.h
diff --git a/tools/include/tools/libc_compat.h b/tools/include/tools/libc_compat.h
index 664ced8cb1b0..e907ba6f15e5 100644
--- a/tools/include/tools/libc_compat.h
+++ b/tools/include/tools/libc_compat.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+// SPDX-License-Identifier: (LGPL-2.0+ OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */
#ifndef __TOOLS_LIBC_COMPAT_H
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index 42990676a55e..df4bedb9b01c 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -734,9 +734,11 @@ __SYSCALL(__NR_pkey_free, sys_pkey_free)
__SYSCALL(__NR_statx, sys_statx)
#define __NR_io_pgetevents 292
__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
+#define __NR_rseq 293
+__SYSCALL(__NR_rseq, sys_rseq)
#undef __NR_syscalls
-#define __NR_syscalls 293
+#define __NR_syscalls 294
/*
* 32 bit systems traditionally used different
diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h
index 9c660e1688ab..300f336633f2 100644
--- a/tools/include/uapi/drm/drm.h
+++ b/tools/include/uapi/drm/drm.h
@@ -687,6 +687,15 @@ struct drm_get_cap {
*/
#define DRM_CLIENT_CAP_ASPECT_RATIO 4
+/**
+ * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
+ *
+ * If set to 1, the DRM core will expose special connectors to be used for
+ * writing back to memory the scene setup in the commit. Depends on client
+ * also supporting DRM_CLIENT_CAP_ATOMIC
+ */
+#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
+
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index cf01b6824244..43391e2d1153 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -164,6 +164,8 @@ enum {
IFLA_CARRIER_UP_COUNT,
IFLA_CARRIER_DOWN_COUNT,
IFLA_NEW_IFINDEX,
+ IFLA_MIN_MTU,
+ IFLA_MAX_MTU,
__IFLA_MAX
};
@@ -334,6 +336,7 @@ enum {
IFLA_BRPORT_GROUP_FWD_MASK,
IFLA_BRPORT_NEIGH_SUPPRESS,
IFLA_BRPORT_ISOLATED,
+ IFLA_BRPORT_BACKUP_PORT,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -459,6 +462,16 @@ enum {
#define IFLA_MACSEC_MAX (__IFLA_MACSEC_MAX - 1)
+/* XFRM section */
+enum {
+ IFLA_XFRM_UNSPEC,
+ IFLA_XFRM_LINK,
+ IFLA_XFRM_IF_ID,
+ __IFLA_XFRM_MAX
+};
+
+#define IFLA_XFRM_MAX (__IFLA_XFRM_MAX - 1)
+
enum macsec_validation_type {
MACSEC_VALIDATE_DISABLED = 0,
MACSEC_VALIDATE_CHECK = 1,
@@ -920,6 +933,7 @@ enum {
XDP_ATTACHED_DRV,
XDP_ATTACHED_SKB,
XDP_ATTACHED_HW,
+ XDP_ATTACHED_MULTI,
};
enum {
@@ -928,6 +942,9 @@ enum {
IFLA_XDP_ATTACHED,
IFLA_XDP_FLAGS,
IFLA_XDP_PROG_ID,
+ IFLA_XDP_DRV_PROG_ID,
+ IFLA_XDP_SKB_PROG_ID,
+ IFLA_XDP_HW_PROG_ID,
__IFLA_XDP_MAX,
};
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index b6270a3b38e9..251be353f950 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -949,6 +949,10 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_GET_MSR_FEATURES 153
#define KVM_CAP_HYPERV_EVENTFD 154
#define KVM_CAP_HYPERV_TLBFLUSH 155
+#define KVM_CAP_S390_HPAGE_1M 156
+#define KVM_CAP_NESTED_STATE 157
+#define KVM_CAP_ARM_INJECT_SERROR_ESR 158
+#define KVM_CAP_MSR_PLATFORM_INFO 159
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1391,6 +1395,9 @@ struct kvm_enc_region {
/* Available with KVM_CAP_HYPERV_EVENTFD */
#define KVM_HYPERV_EVENTFD _IOW(KVMIO, 0xbd, struct kvm_hyperv_eventfd)
+/* Available with KVM_CAP_NESTED_STATE */
+#define KVM_GET_NESTED_STATE _IOWR(KVMIO, 0xbe, struct kvm_nested_state)
+#define KVM_SET_NESTED_STATE _IOW(KVMIO, 0xbf, struct kvm_nested_state)
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index eeb787b1c53c..f35eb72739c0 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -144,7 +144,7 @@ enum perf_event_sample_format {
PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */
- __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63,
+ __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */
};
/*
diff --git a/tools/include/uapi/linux/vhost.h b/tools/include/uapi/linux/vhost.h
index c51f8e5cc608..84c3de89696a 100644
--- a/tools/include/uapi/linux/vhost.h
+++ b/tools/include/uapi/linux/vhost.h
@@ -65,6 +65,7 @@ struct vhost_iotlb_msg {
};
#define VHOST_IOTLB_MSG 0x1
+#define VHOST_IOTLB_MSG_V2 0x2
struct vhost_msg {
int type;
@@ -74,6 +75,15 @@ struct vhost_msg {
};
};
+struct vhost_msg_v2 {
+ __u32 type;
+ __u32 reserved;
+ union {
+ struct vhost_iotlb_msg iotlb;
+ __u8 padding[64];
+ };
+};
+
struct vhost_memory_region {
__u64 guest_phys_addr;
__u64 memory_size; /* bytes */
@@ -160,6 +170,14 @@ struct vhost_memory {
#define VHOST_GET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x24, \
struct vhost_vring_state)
+/* Set or get vhost backend capability */
+
+/* Use message type V2 */
+#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
+
+#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
+#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
+
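A hedged sketch of the intended negotiation: a device model probes for VHOST_BACKEND_F_IOTLB_MSG_V2 and acks it before sending any IOTLB traffic, after which struct vhost_msg_v2 replaces struct vhost_msg on the wire:

	__u64 features;

	if (!ioctl(vhost_fd, VHOST_GET_BACKEND_FEATURES, &features) &&
	    (features & VHOST_BACKEND_F_IOTLB_MSG_V2)) {
		features = VHOST_BACKEND_F_IOTLB_MSG_V2;
		ioctl(vhost_fd, VHOST_SET_BACKEND_FEATURES, &features);
	}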
/* VHOST_NET specific defines */
/* Attach virtio net ring to a raw socket, or tap device.
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index 56c4b3f8a01b..195ba486640f 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -759,12 +759,18 @@ class DebugfsProvider(Provider):
if len(vms) == 0:
self.do_read = False
- self.paths = filter(lambda x: "{}-".format(pid) in x, vms)
+ self.paths = list(filter(lambda x: "{}-".format(pid) in x, vms))
else:
self.paths = []
self.do_read = True
- self.reset()
+
+ def _verify_paths(self):
+ """Remove invalid paths"""
+ for path in self.paths:
+ if not os.path.exists(os.path.join(PATH_DEBUGFS_KVM, path)):
+ self.paths.remove(path)
+ continue
def read(self, reset=0, by_guest=0):
"""Returns a dict with format:'file name / field -> current value'.
@@ -780,6 +786,7 @@ class DebugfsProvider(Provider):
# If no debugfs filtering support is available, then don't read.
if not self.do_read:
return results
+ self._verify_paths()
paths = self.paths
if self._pid == 0:
@@ -1096,15 +1103,16 @@ class Tui(object):
pid = self.stats.pid_filter
self.screen.erase()
gname = self.get_gname_from_pid(pid)
+ self._gname = gname
if gname:
gname = ('({})'.format(gname[:MAX_GUEST_NAME_LEN] + '...'
if len(gname) > MAX_GUEST_NAME_LEN
else gname))
if pid > 0:
- self.screen.addstr(0, 0, 'kvm statistics - pid {0} {1}'
- .format(pid, gname), curses.A_BOLD)
+ self._headline = 'kvm statistics - pid {0} {1}'.format(pid, gname)
else:
- self.screen.addstr(0, 0, 'kvm statistics - summary', curses.A_BOLD)
+ self._headline = 'kvm statistics - summary'
+ self.screen.addstr(0, 0, self._headline, curses.A_BOLD)
if self.stats.fields_filter:
regex = self.stats.fields_filter
if len(regex) > MAX_REGEX_LEN:
@@ -1162,6 +1170,19 @@ class Tui(object):
return sorted_items
+ if not self._is_running_guest(self.stats.pid_filter):
+ if self._gname:
+ try: # ...to identify the guest by name in case it's back
+ pids = self.get_pid_from_gname(self._gname)
+ if len(pids) == 1:
+ self._refresh_header(pids[0])
+ self._update_pid(pids[0])
+ return
+ except:
+ pass
+ self._display_guest_dead()
+ # leave final data on screen
+ return
row = 3
self.screen.move(row, 0)
self.screen.clrtobot()
@@ -1184,6 +1205,7 @@ class Tui(object):
# print events
tavg = 0
tcur = 0
+ guest_removed = False
for key, values in get_sorted_events(self, stats):
if row >= self.screen.getmaxyx()[0] - 1 or values == (0, 0):
break
@@ -1191,7 +1213,10 @@ class Tui(object):
key = self.get_gname_from_pid(key)
if not key:
continue
- cur = int(round(values.delta / sleeptime)) if values.delta else ''
+ cur = int(round(values.delta / sleeptime)) if values.delta else 0
+ if cur < 0:
+ guest_removed = True
+ continue
if key[0] != ' ':
if values.delta:
tcur += values.delta
@@ -1204,13 +1229,21 @@ class Tui(object):
values.value * 100 / float(ltotal), cur))
row += 1
if row == 3:
- self.screen.addstr(4, 1, 'No matching events reported yet')
+ if guest_removed:
+ self.screen.addstr(4, 1, 'Guest removed, updating...')
+ else:
+ self.screen.addstr(4, 1, 'No matching events reported yet')
if row > 4:
tavg = int(round(tcur / sleeptime)) if tcur > 0 else ''
self.screen.addstr(row, 1, '%-40s %10d %8s' %
('Total', total, tavg), curses.A_BOLD)
self.screen.refresh()
+ def _display_guest_dead(self):
+ marker = ' Guest is DEAD '
+ y = min(len(self._headline), 80 - len(marker))
+ self.screen.addstr(0, y, marker, curses.A_BLINK | curses.A_STANDOUT)
+
def _show_msg(self, text):
"""Display message centered text and exit on key press"""
hint = 'Press any key to continue'
@@ -1219,10 +1252,10 @@ class Tui(object):
(x, term_width) = self.screen.getmaxyx()
row = 2
for line in text:
- start = (term_width - len(line)) / 2
+ start = (term_width - len(line)) // 2
self.screen.addstr(row, start, line)
row += 1
- self.screen.addstr(row + 1, (term_width - len(hint)) / 2, hint,
+ self.screen.addstr(row + 1, (term_width - len(hint)) // 2, hint,
curses.A_STANDOUT)
self.screen.getkey()
@@ -1292,7 +1325,7 @@ class Tui(object):
msg = ''
while True:
self.screen.erase()
- self.screen.addstr(0, 0, 'Set update interval (defaults to %fs).' %
+ self.screen.addstr(0, 0, 'Set update interval (defaults to %.1fs).' %
DELAY_DEFAULT, curses.A_BOLD)
self.screen.addstr(4, 0, msg)
self.screen.addstr(2, 0, 'Change delay from %.1fs to ' %
@@ -1319,6 +1352,12 @@ class Tui(object):
msg = '"' + str(val) + '": Invalid value'
self._refresh_header()
+ def _is_running_guest(self, pid):
+ """Check if pid is still a running process."""
+ if not pid:
+ return True
+ return os.path.isdir(os.path.join('/proc/', str(pid)))
+
def _show_vm_selection_by_guest(self):
"""Draws guest selection mask.
@@ -1346,7 +1385,7 @@ class Tui(object):
if not guest or guest == '0':
break
if guest.isdigit():
- if not os.path.isdir(os.path.join('/proc/', guest)):
+ if not self._is_running_guest(guest):
msg = '"' + guest + '": Not a running process'
continue
pid = int(guest)
diff --git a/tools/lib/api/fs/tracing_path.c b/tools/lib/api/fs/tracing_path.c
index 120037496f77..5afb11b30fca 100644
--- a/tools/lib/api/fs/tracing_path.c
+++ b/tools/lib/api/fs/tracing_path.c
@@ -36,7 +36,7 @@ static const char *tracing_path_tracefs_mount(void)
__tracing_path_set("", mnt);
- return mnt;
+ return tracing_path;
}
static const char *tracing_path_debugfs_mount(void)
@@ -49,7 +49,7 @@ static const char *tracing_path_debugfs_mount(void)
__tracing_path_set("tracing/", mnt);
- return mnt;
+ return tracing_path;
}
const char *tracing_path_mount(void)
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index 13a861135127..6eb9bacd1948 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1 +1 @@
-libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o
+libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 2abd0f112627..bdb94939fd60 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -50,6 +50,7 @@
#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
+#include "str_error.h"
#ifndef EM_BPF
#define EM_BPF 247
@@ -469,7 +470,7 @@ static int bpf_object__elf_init(struct bpf_object *obj)
obj->efile.fd = open(obj->path, O_RDONLY);
if (obj->efile.fd < 0) {
char errmsg[STRERR_BUFSIZE];
- char *cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ char *cp = str_error(errno, errmsg, sizeof(errmsg));
pr_warning("failed to open %s: %s\n", obj->path, cp);
return -errno;
@@ -810,8 +811,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
data->d_size, name, idx);
if (err) {
char errmsg[STRERR_BUFSIZE];
- char *cp = strerror_r(-err, errmsg,
- sizeof(errmsg));
+ char *cp = str_error(-err, errmsg, sizeof(errmsg));
pr_warning("failed to alloc program %s (%s): %s",
name, obj->path, cp);
@@ -1140,7 +1140,7 @@ bpf_object__create_maps(struct bpf_object *obj)
*pfd = bpf_create_map_xattr(&create_attr);
if (*pfd < 0 && create_attr.btf_key_type_id) {
- cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ cp = str_error(errno, errmsg, sizeof(errmsg));
pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
map->name, cp, errno);
create_attr.btf_fd = 0;
@@ -1155,7 +1155,7 @@ bpf_object__create_maps(struct bpf_object *obj)
size_t j;
err = *pfd;
- cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ cp = str_error(errno, errmsg, sizeof(errmsg));
pr_warning("failed to create map (name: '%s'): %s\n",
map->name, cp);
for (j = 0; j < i; j++)
@@ -1339,7 +1339,7 @@ load_program(enum bpf_prog_type type, enum bpf_attach_type expected_attach_type,
}
ret = -LIBBPF_ERRNO__LOAD;
- cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ cp = str_error(errno, errmsg, sizeof(errmsg));
pr_warning("load bpf program failed: %s\n", cp);
if (log_buf && log_buf[0] != '\0') {
@@ -1654,7 +1654,7 @@ static int check_path(const char *path)
dir = dirname(dname);
if (statfs(dir, &st_fs)) {
- cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ cp = str_error(errno, errmsg, sizeof(errmsg));
pr_warning("failed to statfs %s: %s\n", dir, cp);
err = -errno;
}
@@ -1690,7 +1690,7 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
}
if (bpf_obj_pin(prog->instances.fds[instance], path)) {
- cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ cp = str_error(errno, errmsg, sizeof(errmsg));
pr_warning("failed to pin program: %s\n", cp);
return -errno;
}
@@ -1708,7 +1708,7 @@ static int make_dir(const char *path)
err = -errno;
if (err) {
- cp = strerror_r(-err, errmsg, sizeof(errmsg));
+ cp = str_error(-err, errmsg, sizeof(errmsg));
pr_warning("failed to mkdir %s: %s\n", path, cp);
}
return err;
@@ -1770,7 +1770,7 @@ int bpf_map__pin(struct bpf_map *map, const char *path)
}
if (bpf_obj_pin(map->fd, path)) {
- cp = strerror_r(errno, errmsg, sizeof(errmsg));
+ cp = str_error(errno, errmsg, sizeof(errmsg));
pr_warning("failed to pin map: %s\n", cp);
return -errno;
}
diff --git a/tools/lib/bpf/str_error.c b/tools/lib/bpf/str_error.c
new file mode 100644
index 000000000000..b8798114a357
--- /dev/null
+++ b/tools/lib/bpf/str_error.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: LGPL-2.1
+#undef _GNU_SOURCE
+#include <string.h>
+#include <stdio.h>
+#include "str_error.h"
+
+/*
+ * Wrapper to allow for building on non-GNU systems such as Alpine Linux's musl
+ * libc, while checking strerror_r() return to avoid having to check this in
+ * all places calling it.
+ */
+char *str_error(int err, char *dst, int len)
+{
+ int ret = strerror_r(err, dst, len);
+ if (ret)
+ snprintf(dst, len, "ERROR: strerror_r(%d)=%d", err, ret);
+ return dst;
+}
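The #undef _GNU_SOURCE above is the crux: it selects the XSI-compliant, int-returning strerror_r() on glibc, matching what musl provides, so one wrapper body covers both C libraries. A usage sketch matching the libbpf call sites converted earlier in this patch:

	char errmsg[STRERR_BUFSIZE];

	pr_warning("failed to open %s: %s\n", obj->path,
		   str_error(errno, errmsg, sizeof(errmsg)));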
diff --git a/tools/lib/bpf/str_error.h b/tools/lib/bpf/str_error.h
new file mode 100644
index 000000000000..355b1db571d1
--- /dev/null
+++ b/tools/lib/bpf/str_error.h
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: LGPL-2.1
+#ifndef BPF_STR_ERROR
+#define BPF_STR_ERROR
+
+char *str_error(int err, char *dst, int len);
+#endif // BPF_STR_ERROR
diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile
index 42261a9b280e..ac841bc5c35b 100644
--- a/tools/perf/Documentation/Makefile
+++ b/tools/perf/Documentation/Makefile
@@ -280,7 +280,7 @@ $(MAN_HTML): $(OUTPUT)%.html : %.txt
mv $@+ $@
ifdef USE_ASCIIDOCTOR
-$(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : $(OUTPUT)%.txt
+$(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : %.txt
$(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
$(ASCIIDOC) -b manpage -d manpage \
$(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index f6d1a03c7523..e30d20fb482d 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -833,7 +833,7 @@ ifndef NO_JVMTI
JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | awk '{print $$3}')
else
ifneq (,$(wildcard /usr/sbin/alternatives))
- JDIR=$(shell alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
+ JDIR=$(shell /usr/sbin/alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
endif
endif
ifndef JDIR
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index b3d1b12a5081..0be411695379 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -635,7 +635,7 @@ $(LIBPERF_IN): prepare FORCE
$(LIB_FILE): $(LIBPERF_IN)
$(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIBPERF_IN) $(LIB_OBJS)
-LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ)
+LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ) 'EXTRA_CFLAGS=$(EXTRA_CFLAGS)' 'LDFLAGS=$(LDFLAGS)'
$(LIBTRACEEVENT): FORCE
$(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent.a
@@ -777,14 +777,12 @@ endif
$(call QUIET_INSTALL, libexec) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
ifndef NO_LIBBPF
- $(call QUIET_INSTALL, lib) \
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'
- $(call QUIET_INSTALL, include/bpf) \
- $(INSTALL) include/bpf/*.h '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'
- $(call QUIET_INSTALL, lib) \
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
- $(call QUIET_INSTALL, examples/bpf) \
- $(INSTALL) examples/bpf/*.c '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
+ $(call QUIET_INSTALL, bpf-headers) \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'; \
+ $(INSTALL) include/bpf/*.h -t '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'
+ $(call QUIET_INSTALL, bpf-examples) \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'; \
+ $(INSTALL) examples/bpf/*.c -t '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
endif
$(call QUIET_INSTALL, perf-archive) \
$(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
diff --git a/tools/perf/arch/arm64/Makefile b/tools/perf/arch/arm64/Makefile
index f013b115dc86..dbef716a1913 100644
--- a/tools/perf/arch/arm64/Makefile
+++ b/tools/perf/arch/arm64/Makefile
@@ -11,7 +11,8 @@ PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
out := $(OUTPUT)arch/arm64/include/generated/asm
header := $(out)/syscalls.c
-sysdef := $(srctree)/tools/include/uapi/asm-generic/unistd.h
+incpath := $(srctree)/tools
+sysdef := $(srctree)/tools/arch/arm64/include/uapi/asm/unistd.h
sysprf := $(srctree)/tools/perf/arch/arm64/entry/syscalls/
systbl := $(sysprf)/mksyscalltbl
@@ -19,7 +20,7 @@ systbl := $(sysprf)/mksyscalltbl
_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
$(header): $(sysdef) $(systbl)
- $(Q)$(SHELL) '$(systbl)' '$(CC)' '$(HOSTCC)' $(sysdef) > $@
+ $(Q)$(SHELL) '$(systbl)' '$(CC)' '$(HOSTCC)' $(incpath) $(sysdef) > $@
clean::
$(call QUIET_CLEAN, arm64) $(RM) $(header)
diff --git a/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl b/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
index 52e197317d3e..2dbb8cade048 100755
--- a/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
+++ b/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
@@ -11,7 +11,8 @@
gcc=$1
hostcc=$2
-input=$3
+incpath=$3
+input=$4
if ! test -r $input; then
echo "Could not read input file" >&2
@@ -28,7 +29,6 @@ create_table_from_c()
cat <<-_EoHEADER
#include <stdio.h>
- #define __ARCH_WANT_RENAMEAT
#include "$input"
int main(int argc, char *argv[])
{
@@ -42,7 +42,7 @@ create_table_from_c()
printf "%s\n" " printf(\"#define SYSCALLTBL_ARM64_MAX_ID %d\\n\", __NR_$last_sc);"
printf "}\n"
- } | $hostcc -o $create_table_exe -x c -
+ } | $hostcc -I $incpath/include/uapi -o $create_table_exe -x c -
$create_table_exe
diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
index 20e7d74d86cd..10a44e946f77 100644
--- a/tools/perf/arch/powerpc/util/sym-handling.c
+++ b/tools/perf/arch/powerpc/util/sym-handling.c
@@ -22,15 +22,16 @@ bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
#endif
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
int arch__choose_best_symbol(struct symbol *syma,
struct symbol *symb __maybe_unused)
{
char *sym = syma->name;
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
/* Skip over any initial dot */
if (*sym == '.')
sym++;
+#endif
/* Avoid "SyS" kernel syscall aliases */
if (strlen(sym) >= 3 && !strncmp(sym, "SyS", 3))
@@ -41,6 +42,7 @@ int arch__choose_best_symbol(struct symbol *syma,
return SYMBOL_A;
}
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
/* Allow matching against dot variants */
int arch__compare_symbol_names(const char *namea, const char *nameb)
{
diff --git a/tools/perf/arch/x86/include/arch-tests.h b/tools/perf/arch/x86/include/arch-tests.h
index c1bd979b957b..613709cfbbd0 100644
--- a/tools/perf/arch/x86/include/arch-tests.h
+++ b/tools/perf/arch/x86/include/arch-tests.h
@@ -9,6 +9,7 @@ struct test;
int test__rdpmc(struct test *test __maybe_unused, int subtest);
int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest);
int test__insn_x86(struct test *test __maybe_unused, int subtest);
+int test__bp_modify(struct test *test, int subtest);
#ifdef HAVE_DWARF_UNWIND_SUPPORT
struct thread;
diff --git a/tools/perf/arch/x86/tests/Build b/tools/perf/arch/x86/tests/Build
index 8e2c5a38c3b9..586849ff83a0 100644
--- a/tools/perf/arch/x86/tests/Build
+++ b/tools/perf/arch/x86/tests/Build
@@ -5,3 +5,4 @@ libperf-y += arch-tests.o
libperf-y += rdpmc.o
libperf-y += perf-time-to-tsc.o
libperf-$(CONFIG_AUXTRACE) += insn-x86.o
+libperf-$(CONFIG_X86_64) += bp-modify.o
diff --git a/tools/perf/arch/x86/tests/arch-tests.c b/tools/perf/arch/x86/tests/arch-tests.c
index cc1802ff5410..d47d3f8e3c8e 100644
--- a/tools/perf/arch/x86/tests/arch-tests.c
+++ b/tools/perf/arch/x86/tests/arch-tests.c
@@ -24,6 +24,12 @@ struct test arch_tests[] = {
.func = test__insn_x86,
},
#endif
+#if defined(__x86_64__)
+ {
+ .desc = "x86 bp modify",
+ .func = test__bp_modify,
+ },
+#endif
{
.func = NULL,
},
diff --git a/tools/perf/arch/x86/tests/bp-modify.c b/tools/perf/arch/x86/tests/bp-modify.c
new file mode 100644
index 000000000000..f53e4406709f
--- /dev/null
+++ b/tools/perf/arch/x86/tests/bp-modify.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/user.h>
+#include <syscall.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/ptrace.h>
+#include <asm/ptrace.h>
+#include <errno.h>
+#include "debug.h"
+#include "tests/tests.h"
+#include "arch-tests.h"
+
+static noinline int bp_1(void)
+{
+ pr_debug("in %s\n", __func__);
+ return 0;
+}
+
+static noinline int bp_2(void)
+{
+ pr_debug("in %s\n", __func__);
+ return 0;
+}
+
+static int spawn_child(void)
+{
+ int child = fork();
+
+ if (child == 0) {
+ /*
+	 * The child sets itself up as a tracee and
+	 * waits on a signal for the parent to trace it,
+	 * then it calls bp_1 and quits.
+ */
+ int err = ptrace(PTRACE_TRACEME, 0, NULL, NULL);
+
+ if (err) {
+ pr_debug("failed to PTRACE_TRACEME\n");
+ exit(1);
+ }
+
+ raise(SIGCONT);
+ bp_1();
+ exit(0);
+ }
+
+ return child;
+}
+
+/*
+ * This test creates a HW breakpoint, tries to
+ * change it, and checks it was properly changed.
+ */
+static int bp_modify1(void)
+{
+ pid_t child;
+ int status;
+ unsigned long rip = 0, dr7 = 1;
+
+ child = spawn_child();
+
+ waitpid(child, &status, 0);
+ if (WIFEXITED(status)) {
+ pr_debug("tracee exited prematurely 1\n");
+ return TEST_FAIL;
+ }
+
+ /*
+	 * The parent does the following steps:
+	 * - creates a new breakpoint (id 0) for the bp_2 function
+	 * - changes that breakpoint to the bp_1 function
+	 * - waits for the breakpoint to hit and checks
+	 *   it has the proper rip of the bp_1 function
+ * - detaches the child
+ */
+ if (ptrace(PTRACE_POKEUSER, child,
+ offsetof(struct user, u_debugreg[0]), bp_2)) {
+ pr_debug("failed to set breakpoint, 1st time: %s\n",
+ strerror(errno));
+ goto out;
+ }
+
+ if (ptrace(PTRACE_POKEUSER, child,
+ offsetof(struct user, u_debugreg[0]), bp_1)) {
+ pr_debug("failed to set breakpoint, 2nd time: %s\n",
+ strerror(errno));
+ goto out;
+ }
+
+ if (ptrace(PTRACE_POKEUSER, child,
+ offsetof(struct user, u_debugreg[7]), dr7)) {
+ pr_debug("failed to set dr7: %s\n", strerror(errno));
+ goto out;
+ }
+
+ if (ptrace(PTRACE_CONT, child, NULL, NULL)) {
+ pr_debug("failed to PTRACE_CONT: %s\n", strerror(errno));
+ goto out;
+ }
+
+ waitpid(child, &status, 0);
+ if (WIFEXITED(status)) {
+ pr_debug("tracee exited prematurely 2\n");
+ return TEST_FAIL;
+ }
+
+ rip = ptrace(PTRACE_PEEKUSER, child,
+ offsetof(struct user_regs_struct, rip), NULL);
+ if (rip == (unsigned long) -1) {
+ pr_debug("failed to PTRACE_PEEKUSER: %s\n",
+ strerror(errno));
+ goto out;
+ }
+
+ pr_debug("rip %lx, bp_1 %p\n", rip, bp_1);
+
+out:
+ if (ptrace(PTRACE_DETACH, child, NULL, NULL)) {
+ pr_debug("failed to PTRACE_DETACH: %s", strerror(errno));
+ return TEST_FAIL;
+ }
+
+ return rip == (unsigned long) bp_1 ? TEST_OK : TEST_FAIL;
+}
+
+/*
+ * This test creates a HW breakpoint, tries to
+ * change it to a bogus value, and checks the original
+ * breakpoint is hit.
+ */
+static int bp_modify2(void)
+{
+ pid_t child;
+ int status;
+ unsigned long rip = 0, dr7 = 1;
+
+ child = spawn_child();
+
+ waitpid(child, &status, 0);
+ if (WIFEXITED(status)) {
+ pr_debug("tracee exited prematurely 1\n");
+ return TEST_FAIL;
+ }
+
+ /*
+	 * The parent does the following steps:
+	 * - creates a new breakpoint (id 0) for the bp_1 function
+	 * - tries to change that breakpoint to the (-1) address
+	 * - waits for the breakpoint to hit and checks
+	 *   it has the proper rip of the bp_1 function
+ * - detaches the child
+ */
+ if (ptrace(PTRACE_POKEUSER, child,
+ offsetof(struct user, u_debugreg[0]), bp_1)) {
+ pr_debug("failed to set breakpoint: %s\n",
+ strerror(errno));
+ goto out;
+ }
+
+ if (ptrace(PTRACE_POKEUSER, child,
+ offsetof(struct user, u_debugreg[7]), dr7)) {
+ pr_debug("failed to set dr7: %s\n", strerror(errno));
+ goto out;
+ }
+
+ if (!ptrace(PTRACE_POKEUSER, child,
+ offsetof(struct user, u_debugreg[0]), (unsigned long) (-1))) {
+ pr_debug("failed, breakpoint set to bogus address\n");
+ goto out;
+ }
+
+ if (ptrace(PTRACE_CONT, child, NULL, NULL)) {
+ pr_debug("failed to PTRACE_CONT: %s\n", strerror(errno));
+ goto out;
+ }
+
+ waitpid(child, &status, 0);
+ if (WIFEXITED(status)) {
+ pr_debug("tracee exited prematurely 2\n");
+ return TEST_FAIL;
+ }
+
+ rip = ptrace(PTRACE_PEEKUSER, child,
+ offsetof(struct user_regs_struct, rip), NULL);
+ if (rip == (unsigned long) -1) {
+ pr_debug("failed to PTRACE_PEEKUSER: %s\n",
+ strerror(errno));
+ goto out;
+ }
+
+ pr_debug("rip %lx, bp_1 %p\n", rip, bp_1);
+
+out:
+ if (ptrace(PTRACE_DETACH, child, NULL, NULL)) {
+ pr_debug("failed to PTRACE_DETACH: %s", strerror(errno));
+ return TEST_FAIL;
+ }
+
+ return rip == (unsigned long) bp_1 ? TEST_OK : TEST_FAIL;
+}
+
+int test__bp_modify(struct test *test __maybe_unused,
+ int subtest __maybe_unused)
+{
+ TEST_ASSERT_VAL("modify test 1 failed\n", !bp_modify1());
+ TEST_ASSERT_VAL("modify test 2 failed\n", !bp_modify2());
+
+ return 0;
+}
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 76e12bcd1765..b2188e623e22 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -981,6 +981,7 @@ int cmd_report(int argc, const char **argv)
.id_index = perf_event__process_id_index,
.auxtrace_info = perf_event__process_auxtrace_info,
.auxtrace = perf_event__process_auxtrace,
+ .event_update = perf_event__process_event_update,
.feature = process_feature_event,
.ordered_events = true,
.ordering_requires_timestamps = true,
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
index d40498f2cb1e..635c09fda1d9 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
@@ -188,7 +188,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_P_FREQ_GE_1200MHZ_CYCLES",
- "Filter": "filter_band0=1200",
+ "Filter": "filter_band0=12",
"MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_1200mhz_cycles %",
"PerPkg": "1",
@@ -199,7 +199,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_P_FREQ_GE_2000MHZ_CYCLES",
- "Filter": "filter_band1=2000",
+ "Filter": "filter_band1=20",
"MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_2000mhz_cycles %",
"PerPkg": "1",
@@ -210,7 +210,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_P_FREQ_GE_3000MHZ_CYCLES",
- "Filter": "filter_band2=3000",
+ "Filter": "filter_band2=30",
"MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_3000mhz_cycles %",
"PerPkg": "1",
@@ -221,7 +221,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_P_FREQ_GE_4000MHZ_CYCLES",
- "Filter": "filter_band3=4000",
+ "Filter": "filter_band3=40",
"MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_4000mhz_cycles %",
"PerPkg": "1",
@@ -232,7 +232,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_P_FREQ_GE_1200MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band0=1200",
+ "Filter": "edge=1,filter_band0=12",
"MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_1200mhz_cycles %",
"PerPkg": "1",
@@ -243,7 +243,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_P_FREQ_GE_2000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band1=2000",
+ "Filter": "edge=1,filter_band1=20",
"MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_2000mhz_cycles %",
"PerPkg": "1",
@@ -254,7 +254,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_P_FREQ_GE_3000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band2=4000",
+ "Filter": "edge=1,filter_band2=30",
"MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_3000mhz_cycles %",
"PerPkg": "1",
@@ -265,7 +265,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_P_FREQ_GE_4000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band3=4000",
+ "Filter": "edge=1,filter_band3=40",
"MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_4000mhz_cycles %",
"PerPkg": "1",
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
index 16034bfd06dd..8755693d86c6 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
@@ -187,7 +187,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_P_FREQ_GE_1200MHZ_CYCLES",
- "Filter": "filter_band0=1200",
+ "Filter": "filter_band0=12",
"MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_1200mhz_cycles %",
"PerPkg": "1",
@@ -198,7 +198,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_P_FREQ_GE_2000MHZ_CYCLES",
- "Filter": "filter_band1=2000",
+ "Filter": "filter_band1=20",
"MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_2000mhz_cycles %",
"PerPkg": "1",
@@ -209,7 +209,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_P_FREQ_GE_3000MHZ_CYCLES",
- "Filter": "filter_band2=3000",
+ "Filter": "filter_band2=30",
"MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_3000mhz_cycles %",
"PerPkg": "1",
@@ -220,7 +220,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_P_FREQ_GE_4000MHZ_CYCLES",
- "Filter": "filter_band3=4000",
+ "Filter": "filter_band3=40",
"MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_4000mhz_cycles %",
"PerPkg": "1",
@@ -231,7 +231,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xb",
"EventName": "UNC_P_FREQ_GE_1200MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band0=1200",
+ "Filter": "edge=1,filter_band0=12",
"MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_1200mhz_cycles %",
"PerPkg": "1",
@@ -242,7 +242,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xc",
"EventName": "UNC_P_FREQ_GE_2000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band1=2000",
+ "Filter": "edge=1,filter_band1=20",
"MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_2000mhz_cycles %",
"PerPkg": "1",
@@ -253,7 +253,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xd",
"EventName": "UNC_P_FREQ_GE_3000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band2=4000",
+ "Filter": "edge=1,filter_band2=30",
"MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_3000mhz_cycles %",
"PerPkg": "1",
@@ -264,7 +264,7 @@
"Counter": "0,1,2,3",
"EventCode": "0xe",
"EventName": "UNC_P_FREQ_GE_4000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band3=4000",
+ "Filter": "edge=1,filter_band3=40",
"MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
"MetricName": "freq_ge_4000mhz_cycles %",
"PerPkg": "1",
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index efcaf6cac2eb..e46f51b17513 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -204,14 +204,23 @@ from ctypes import *
libpq = CDLL("libpq.so.5")
PQconnectdb = libpq.PQconnectdb
PQconnectdb.restype = c_void_p
+PQconnectdb.argtypes = [ c_char_p ]
PQfinish = libpq.PQfinish
+PQfinish.argtypes = [ c_void_p ]
PQstatus = libpq.PQstatus
+PQstatus.restype = c_int
+PQstatus.argtypes = [ c_void_p ]
PQexec = libpq.PQexec
PQexec.restype = c_void_p
+PQexec.argtypes = [ c_void_p, c_char_p ]
PQresultStatus = libpq.PQresultStatus
+PQresultStatus.restype = c_int
+PQresultStatus.argtypes = [ c_void_p ]
PQputCopyData = libpq.PQputCopyData
+PQputCopyData.restype = c_int
PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
PQputCopyEnd = libpq.PQputCopyEnd
+PQputCopyEnd.restype = c_int
PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
diff --git a/tools/perf/scripts/python/export-to-sqlite.py b/tools/perf/scripts/python/export-to-sqlite.py
index f827bf77e9d2..e4bb82c8aba9 100644
--- a/tools/perf/scripts/python/export-to-sqlite.py
+++ b/tools/perf/scripts/python/export-to-sqlite.py
@@ -440,7 +440,11 @@ def branch_type_table(*x):
def sample_table(*x):
if branches:
- bind_exec(sample_query, 18, x)
+ for xx in x[0:15]:
+ sample_query.addBindValue(str(xx))
+ for xx in x[19:22]:
+ sample_query.addBindValue(str(xx))
+ do_query_(sample_query)
else:
bind_exec(sample_query, 22, x)
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 20061cf42288..28cd6a17491b 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -246,8 +246,14 @@ find_target:
indirect_call:
tok = strchr(endptr, '*');
- if (tok != NULL)
- ops->target.addr = strtoull(tok + 1, NULL, 16);
+ if (tok != NULL) {
+ endptr++;
+
+ /* An indirect call can use a non-rip register and offset: callq *0x8(%rbx).
+ * Do not parse such instructions. */
+ if (strstr(endptr, "(%r") == NULL)
+ ops->target.addr = strtoull(endptr, NULL, 16);
+ }
goto find_target;
}
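
The check above skips register-relative operands, which carry no literal target address. A minimal standalone sketch of the idea (the helper name and parsing are illustrative, not the perf API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Return the literal target of "callq *0xADDR", or 0 when the operand
     * dereferences a register, as in "callq *0x8(%rbx)". */
    static unsigned long long indirect_call_target(const char *operand)
    {
        const char *tok = strchr(operand, '*');

        if (tok == NULL)
            return 0;
        if (strstr(tok + 1, "(%r") != NULL)  /* register-relative: no target */
            return 0;
        return strtoull(tok + 1, NULL, 16);
    }

    int main(void)
    {
        printf("%llx\n", indirect_call_target("callq *0xffff0000"));  /* ffff0000 */
        printf("%llx\n", indirect_call_target("callq *0x8(%rbx)"));   /* 0 */
        return 0;
    }
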
@@ -276,7 +282,19 @@ bool ins__is_call(const struct ins *ins)
return ins->ops == &call_ops || ins->ops == &s390_call_ops;
}
-static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *ops, struct map_symbol *ms)
+/*
+ * Prevents matching commas inside the comment section, e.g.:
+ * ffff200008446e70: b.cs ffff2000084470f4 <generic_exec_single+0x314> // b.hs, b.nlast
+ */
+static inline const char *validate_comma(const char *c, struct ins_operands *ops)
+{
+ if (ops->raw_comment && c > ops->raw_comment)
+ return NULL;
+
+ return c;
+}
+
+static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms)
{
struct map *map = ms->map;
struct symbol *sym = ms->sym;
@@ -285,6 +303,10 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
};
const char *c = strchr(ops->raw, ',');
u64 start, end;
+
+ ops->raw_comment = strchr(ops->raw, arch->objdump.comment_char);
+ c = validate_comma(c, ops);
+
/*
* Examples of lines to parse for the _cpp_lex_token@@Base
* function:
@@ -304,6 +326,7 @@ static int jump__parse(struct arch *arch __maybe_unused, struct ins_operands *op
ops->target.addr = strtoull(c, NULL, 16);
if (!ops->target.addr) {
c = strchr(c, ',');
+ c = validate_comma(c, ops);
if (c++ != NULL)
ops->target.addr = strtoull(c, NULL, 16);
}
@@ -361,9 +384,12 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size,
return scnprintf(bf, size, "%-6s %s", ins->name, ops->target.sym->name);
c = strchr(ops->raw, ',');
+ c = validate_comma(c, ops);
+
if (c != NULL) {
const char *c2 = strchr(c + 1, ',');
+ c2 = validate_comma(c2, ops);
/* check for 3-op insn */
if (c2 != NULL)
c = c2;
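
validate_comma() keeps jump parsing from latching onto a comma that belongs to the trailing objdump comment rather than to the operands. A standalone sketch of the same idea, assuming a single comment character ('/' stands in here for the arm64-style "//" comments; the character varies per arch):

    #include <stdio.h>
    #include <string.h>

    /* Return the first operand-separating comma in a disassembly line,
     * ignoring any comma that falls inside the trailing comment. */
    static const char *operand_comma(const char *raw, char comment_char)
    {
        const char *comment = strchr(raw, comment_char);
        const char *c = strchr(raw, ',');

        if (c && comment && c > comment)
            return NULL;  /* the only comma belongs to the comment */
        return c;
    }

    int main(void)
    {
        const char *line =
            "b.cs ffff2000084470f4 <generic_exec_single+0x314> // b.hs, b.nlast";

        puts(operand_comma(line, '/') ? "operand comma" : "comment comma only");
        return 0;
    }
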
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 005a5fe8a8c6..5399ba2321bb 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -22,6 +22,7 @@ struct ins {
struct ins_operands {
char *raw;
+ char *raw_comment;
struct {
char *raw;
char *name;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 0cd42150f712..bc646185f8d9 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -1081,6 +1081,7 @@ void *cpu_map_data__alloc(struct cpu_map *map, size_t *size, u16 *type, int *max
}
*size += sizeof(struct cpu_map_data);
+ *size = PERF_ALIGN(*size, sizeof(u64));
return zalloc(*size);
}
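
The added PERF_ALIGN() keeps the synthesized cpu_map event u64-aligned, as perf records require. A sketch of the usual power-of-two align-up idiom (the macro name here is assumed):

    #include <stdint.h>
    #include <stdio.h>

    /* Round x up to the next multiple of a, where a is a power of two. */
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

    int main(void)
    {
        uint64_t size = 42;  /* sizeof(struct cpu_map_data) + payload, say */

        size = ALIGN_UP(size, sizeof(uint64_t));
        printf("%llu\n", (unsigned long long)size);  /* 48 */
        return 0;
    }
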
@@ -1560,26 +1561,9 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
return NULL;
}
-try_again:
+
al->map = map_groups__find(mg, al->addr);
- if (al->map == NULL) {
- /*
- * If this is outside of all known maps, and is a negative
- * address, try to look it up in the kernel dso, as it might be
- * a vsyscall or vdso (which executes in user-mode).
- *
- * XXX This is nasty, we should have a symbol list in the
- * "[vdso]" dso, but for now lets use the old trick of looking
- * in the whole kernel symbol list.
- */
- if (cpumode == PERF_RECORD_MISC_USER && machine &&
- mg != &machine->kmaps &&
- machine__kernel_ip(machine, al->addr)) {
- mg = &machine->kmaps;
- load_map = true;
- goto try_again;
- }
- } else {
+ if (al->map != NULL) {
/*
* Kernel maps might be changed when loading symbols so loading
* must be done prior to using kernel maps.
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index c980bbff6353..e596ae358c4d 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -251,8 +251,9 @@ struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{
struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
- if (evsel != NULL)
- perf_evsel__init(evsel, attr, idx);
+ if (!evsel)
+ return NULL;
+ perf_evsel__init(evsel, attr, idx);
if (perf_evsel__is_bpf_output(evsel)) {
evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
@@ -1088,6 +1089,9 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
attr->exclude_user = 1;
}
+ if (evsel->own_cpus)
+ evsel->attr.read_format |= PERF_FORMAT_ID;
+
/*
* Apply event specific term settings,
* it overloads any global configuration.
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index c4acd2001db0..111ae858cbcb 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -2286,7 +2286,8 @@ static int append_inlines(struct callchain_cursor *cursor,
if (!symbol_conf.inline_name || !map || !sym)
return ret;
- addr = map__rip_2objdump(map, ip);
+ addr = map__map_ip(map, ip);
+ addr = map__rip_2objdump(map, addr);
inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
if (!inline_node) {
@@ -2312,7 +2313,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
{
struct callchain_cursor *cursor = arg;
const char *srcline = NULL;
- u64 addr;
+ u64 addr = entry->ip;
if (symbol_conf.hide_unresolved && entry->sym == NULL)
return 0;
@@ -2324,7 +2325,8 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
* Convert entry->ip from a virtual address to an offset in
* its corresponding binary.
*/
- addr = map__map_ip(entry->map, entry->ip);
+ if (entry->map)
+ addr = map__map_ip(entry->map, entry->ip);
srcline = callchain_srcline(entry->map, entry->sym, addr);
return callchain_cursor_append(cursor, entry->ip,
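
Both hunks harden callchain address translation: append_inlines() must map the virtual ip to a map-relative address before the objdump conversion, and unwind_entry() now tolerates a NULL map by falling back to entry->ip. A simplified sketch of the two-step conversion (formulas assumed; the real map__rip_2objdump() also handles adjusted and prelinked DSOs):

    #include <stdint.h>
    #include <stdio.h>

    struct map { uint64_t start, pgoff, reloc; };

    static uint64_t map_ip(const struct map *m, uint64_t ip)
    {
        return ip - m->start + m->pgoff;  /* virtual -> map-relative */
    }

    static uint64_t rip_2objdump(const struct map *m, uint64_t rip)
    {
        return rip + m->reloc;  /* map-relative -> objdump address */
    }

    int main(void)
    {
        struct map m = { .start = 0x555555554000ULL, .pgoff = 0, .reloc = 0 };
        uint64_t ip = 0x555555555130ULL;

        printf("%#llx\n",
               (unsigned long long)rip_2objdump(&m, map_ip(&m, ip)));  /* 0x1130 */
        return 0;
    }
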
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 36d0763311ef..6a6929f208b4 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -576,6 +576,13 @@ struct symbol *map_groups__find_symbol(struct map_groups *mg,
return NULL;
}
+static bool map__contains_symbol(struct map *map, struct symbol *sym)
+{
+ u64 ip = map->unmap_ip(map, sym->start);
+
+ return ip >= map->start && ip < map->end;
+}
+
struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
struct map **mapp)
{
@@ -591,6 +598,10 @@ struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
if (sym == NULL)
continue;
+ if (!map__contains_symbol(pos, sym)) {
+ sym = NULL;
+ continue;
+ }
if (mapp != NULL)
*mapp = pos;
goto out;
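
A symbol found by name in a map's dso may lie outside this particular mapping, so the new helper range-checks its unmapped address before accepting the match. A sketch, with the unmap_ip() formula assumed:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct map { uint64_t start, end, pgoff; };

    static bool map_contains_symbol(const struct map *m, uint64_t sym_start)
    {
        uint64_t ip = sym_start + m->start - m->pgoff;  /* assumed unmap_ip() */

        return ip >= m->start && ip < m->end;
    }

    int main(void)
    {
        struct map m = { .start = 0x400000, .end = 0x500000, .pgoff = 0 };

        printf("%d %d\n", map_contains_symbol(&m, 0x1000),      /* 1: inside */
               map_contains_symbol(&m, 0x200000));              /* 0: past end */
        return 0;
    }
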
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index afd68524ffa9..7799788f662f 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -930,13 +930,14 @@ static void pmu_format_value(unsigned long *format, __u64 value, __u64 *v,
static __u64 pmu_format_max_value(const unsigned long *format)
{
- __u64 w = 0;
- int fbit;
-
- for_each_set_bit(fbit, format, PERF_PMU_FORMAT_BITS)
- w |= (1ULL << fbit);
+ int w;
- return w;
+ w = bitmap_weight(format, PERF_PMU_FORMAT_BITS);
+ if (!w)
+ return 0;
+ if (w < 64)
+ return (1ULL << w) - 1;
+ return -1;
}
/*
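
Since pmu_format_value() spreads value bits across the format mask, the maximum encodable value depends only on the popcount of the mask, and the w < 64 guard avoids an undefined 1ULL << 64 for full-width fields. A sketch of the same computation:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t format_max_value(int nr_bits)  /* bits set in the mask */
    {
        if (nr_bits == 0)
            return 0;
        if (nr_bits < 64)
            return (UINT64_C(1) << nr_bits) - 1;
        return UINT64_MAX;  /* 1ULL << 64 would be undefined behaviour */
    }

    int main(void)
    {
        printf("%#llx\n", (unsigned long long)format_max_value(8));   /* 0xff */
        printf("%#llx\n", (unsigned long long)format_max_value(64));  /* all ones */
        return 0;
    }
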
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
index 97efbcad076e..1942f6dd24f6 100644
--- a/tools/perf/util/setup.py
+++ b/tools/perf/util/setup.py
@@ -35,7 +35,7 @@ class install_lib(_install_lib):
cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
-cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
+cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter', '-Wno-redundant-decls' ]
if cc != "clang":
cflags += ['-Wno-cast-function-type' ]
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index 09d6746e6ec8..e767c4a9d4d2 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -85,6 +85,9 @@ static struct symbol *new_inline_sym(struct dso *dso,
struct symbol *inline_sym;
char *demangled = NULL;
+ if (!funcname)
+ funcname = "??";
+
if (dso) {
demangled = dso__demangle_sym(dso, 0, funcname);
if (demangled)
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index c85d0d1a65ed..7b0ca7cbb7de 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -377,7 +377,7 @@ out:
static int record_saved_cmdline(void)
{
- unsigned int size;
+ unsigned long long size;
char *path;
struct stat st;
int ret, err = 0;
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 920b1d58a068..e76214f8d596 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -164,16 +164,15 @@ void parse_ftrace_printk(struct tep_handle *pevent,
void parse_saved_cmdline(struct tep_handle *pevent,
char *file, unsigned int size __maybe_unused)
{
- char *comm;
+ char comm[17]; /* Max comm length in the kernel is 16. */
char *line;
char *next = NULL;
int pid;
line = strtok_r(file, "\n", &next);
while (line) {
- sscanf(line, "%d %ms", &pid, &comm);
- tep_register_comm(pevent, comm, pid);
- free(comm);
+ if (sscanf(line, "%d %16s", &pid, comm) == 2)
+ tep_register_comm(pevent, comm, pid);
line = strtok_r(NULL, "\n", &next);
}
}
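
Replacing the glibc-specific %ms with a bounded %16s drops the per-line allocation and caps the read at 16 characters; sscanf() appends the terminating NUL itself. For example:

    #include <stdio.h>

    int main(void)
    {
        char comm[17];  /* max comm length in the kernel is 16, plus the NUL */
        int pid;

        if (sscanf("1234 averylongtaskname", "%d %16s", &pid, comm) == 2)
            printf("pid=%d comm=%s\n", pid, comm);  /* comm truncated safely */
        return 0;
    }
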
diff --git a/tools/testing/selftests/android/Makefile b/tools/testing/selftests/android/Makefile
index 72c25a3cb658..d9a725478375 100644
--- a/tools/testing/selftests/android/Makefile
+++ b/tools/testing/selftests/android/Makefile
@@ -6,7 +6,7 @@ TEST_PROGS := run.sh
include ../lib.mk
-all:
+all: khdr
@for DIR in $(SUBDIRS); do \
BUILD_TARGET=$(OUTPUT)/$$DIR; \
mkdir $$BUILD_TARGET -p; \
diff --git a/tools/testing/selftests/android/ion/config b/tools/testing/selftests/android/config
index b4ad748a9dd9..b4ad748a9dd9 100644
--- a/tools/testing/selftests/android/ion/config
+++ b/tools/testing/selftests/android/config
diff --git a/tools/testing/selftests/android/ion/Makefile b/tools/testing/selftests/android/ion/Makefile
index e03695287f76..88cfe88e466f 100644
--- a/tools/testing/selftests/android/ion/Makefile
+++ b/tools/testing/selftests/android/ion/Makefile
@@ -10,6 +10,8 @@ $(TEST_GEN_FILES): ipcsocket.c ionutils.c
TEST_PROGS := ion_test.sh
+KSFT_KHDR_INSTALL := 1
+top_srcdir = ../../../../..
include ../../lib.mk
$(OUTPUT)/ionapp_export: ionapp_export.c ipcsocket.c ionutils.c
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index 6f54f84144a0..9b552c0fc47d 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -580,7 +580,11 @@ static void test_sockmap(int tasks, void *data)
/* Test update without programs */
for (i = 0; i < 6; i++) {
err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY);
- if (err) {
+ if (i < 2 && !err) {
+ printf("Allowed update sockmap '%i:%i' not in ESTABLISHED\n",
+ i, sfd[i]);
+ goto out_sockmap;
+ } else if (i >= 2 && err) {
printf("Failed noprog update sockmap '%i:%i'\n",
i, sfd[i]);
goto out_sockmap;
@@ -741,7 +745,7 @@ static void test_sockmap(int tasks, void *data)
}
/* Test map update elem afterwards fd lives in fd and map_fd */
- for (i = 0; i < 6; i++) {
+ for (i = 2; i < 6; i++) {
err = bpf_map_update_elem(map_fd_rx, &i, &sfd[i], BPF_ANY);
if (err) {
printf("Failed map_fd_rx update sockmap %i '%i:%i'\n",
@@ -845,7 +849,7 @@ static void test_sockmap(int tasks, void *data)
}
/* Delete the elems without programs */
- for (i = 0; i < 6; i++) {
+ for (i = 2; i < 6; i++) {
err = bpf_map_delete_elem(fd, &i);
if (err) {
printf("Failed delete sockmap %i '%i:%i'\n",
diff --git a/tools/testing/selftests/cgroup/.gitignore b/tools/testing/selftests/cgroup/.gitignore
index 95eb3a53c381..adacda50a4b2 100644
--- a/tools/testing/selftests/cgroup/.gitignore
+++ b/tools/testing/selftests/cgroup/.gitignore
@@ -1 +1,2 @@
test_memcontrol
+test_core
diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
index 1c5d2b2a583b..14c9fe284806 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.c
+++ b/tools/testing/selftests/cgroup/cgroup_util.c
@@ -89,17 +89,28 @@ int cg_read(const char *cgroup, const char *control, char *buf, size_t len)
int cg_read_strcmp(const char *cgroup, const char *control,
const char *expected)
{
- size_t size = strlen(expected) + 1;
+ size_t size;
char *buf;
+ int ret;
+
+ /* Handle the case of comparing against empty string */
+ if (!expected)
+ size = 32;
+ else
+ size = strlen(expected) + 1;
buf = malloc(size);
if (!buf)
return -1;
- if (cg_read(cgroup, control, buf, size))
+ if (cg_read(cgroup, control, buf, size)) {
+ free(buf);
return -1;
+ }
- return strcmp(expected, buf);
+ ret = strcmp(expected, buf);
+ free(buf);
+ return ret;
}
int cg_read_strstr(const char *cgroup, const char *control, const char *needle)
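
The fix frees buf on the error path as well. The same function is often written with a single exit label so every path passes through free(); a sketch of that variant (the names are illustrative, not the selftest API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for cg_read(): fills buf with a fake control-file value. */
    static int read_fixed(char *buf, size_t size)
    {
        snprintf(buf, size, "42\n");
        return 0;
    }

    static int read_strcmp(const char *expected)
    {
        size_t size = expected[0] ? strlen(expected) + 1 : 32;
        char *buf = malloc(size);
        int ret = -1;

        if (!buf)
            return -1;
        if (read_fixed(buf, size))
            goto out;  /* buf is freed below on this path too */
        ret = strcmp(expected, buf);
    out:
        free(buf);
        return ret;
    }

    int main(void)
    {
        return read_strcmp("42\n") == 0 ? 0 : 1;
    }
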
@@ -337,3 +348,24 @@ int is_swap_enabled(void)
return cnt > 1;
}
+
+int set_oom_adj_score(int pid, int score)
+{
+ char path[PATH_MAX];
+ int fd, len;
+
+ sprintf(path, "/proc/%d/oom_score_adj", pid);
+
+ fd = open(path, O_WRONLY | O_APPEND);
+ if (fd < 0)
+ return fd;
+
+ len = dprintf(fd, "%d", score);
+ if (len < 0) {
+ close(fd);
+ return len;
+ }
+
+ close(fd);
+ return 0;
+}
diff --git a/tools/testing/selftests/cgroup/cgroup_util.h b/tools/testing/selftests/cgroup/cgroup_util.h
index 1ff6f9f1abdc..9ac8b7958f83 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.h
+++ b/tools/testing/selftests/cgroup/cgroup_util.h
@@ -40,3 +40,4 @@ extern int get_temp_fd(void);
extern int alloc_pagecache(int fd, size_t size);
extern int alloc_anon(const char *cgroup, void *arg);
extern int is_swap_enabled(void);
+extern int set_oom_adj_score(int pid, int score);
diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
index cf0bddc9d271..28d321ba311b 100644
--- a/tools/testing/selftests/cgroup/test_memcontrol.c
+++ b/tools/testing/selftests/cgroup/test_memcontrol.c
@@ -2,6 +2,7 @@
#define _GNU_SOURCE
#include <linux/limits.h>
+#include <linux/oom.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
@@ -202,6 +203,36 @@ static int alloc_pagecache_50M_noexit(const char *cgroup, void *arg)
return 0;
}
+static int alloc_anon_noexit(const char *cgroup, void *arg)
+{
+ int ppid = getppid();
+
+ if (alloc_anon(cgroup, arg))
+ return -1;
+
+ while (getppid() == ppid)
+ sleep(1);
+
+ return 0;
+}
+
+/*
+ * Wait until processes are killed asynchronously by the OOM killer.
+ * If we exceed the timeout, fail.
+ */
+static int cg_test_proc_killed(const char *cgroup)
+{
+ int limit;
+
+ for (limit = 10; limit > 0; limit--) {
+ if (cg_read_strcmp(cgroup, "cgroup.procs", "") == 0)
+ return 0;
+
+ usleep(100000);
+ }
+ return -1;
+}
+
/*
* First, this test creates the following hierarchy:
* A memory.min = 50M, memory.max = 200M
@@ -964,6 +995,177 @@ cleanup:
return ret;
}
+/*
+ * This test disables swapping and tries to allocate anonymous memory
+ * up to OOM with memory.oom.group set. Then it checks that all
+ * processes in the leaf (but not the parent) were killed.
+ */
+static int test_memcg_oom_group_leaf_events(const char *root)
+{
+ int ret = KSFT_FAIL;
+ char *parent, *child;
+
+ parent = cg_name(root, "memcg_test_0");
+ child = cg_name(root, "memcg_test_0/memcg_test_1");
+
+ if (!parent || !child)
+ goto cleanup;
+
+ if (cg_create(parent))
+ goto cleanup;
+
+ if (cg_create(child))
+ goto cleanup;
+
+ if (cg_write(parent, "cgroup.subtree_control", "+memory"))
+ goto cleanup;
+
+ if (cg_write(child, "memory.max", "50M"))
+ goto cleanup;
+
+ if (cg_write(child, "memory.swap.max", "0"))
+ goto cleanup;
+
+ if (cg_write(child, "memory.oom.group", "1"))
+ goto cleanup;
+
+ cg_run_nowait(parent, alloc_anon_noexit, (void *) MB(60));
+ cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
+ cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
+ if (!cg_run(child, alloc_anon, (void *)MB(100)))
+ goto cleanup;
+
+ if (cg_test_proc_killed(child))
+ goto cleanup;
+
+ if (cg_read_key_long(child, "memory.events", "oom_kill ") <= 0)
+ goto cleanup;
+
+ if (cg_read_key_long(parent, "memory.events", "oom_kill ") != 0)
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (child)
+ cg_destroy(child);
+ if (parent)
+ cg_destroy(parent);
+ free(child);
+ free(parent);
+
+ return ret;
+}
+
+/*
+ * This test disables swapping and tries to allocate anonymous memory
+ * up to OOM with memory.oom.group set. Then it checks that all
+ * processes in the parent and leaf were killed.
+ */
+static int test_memcg_oom_group_parent_events(const char *root)
+{
+ int ret = KSFT_FAIL;
+ char *parent, *child;
+
+ parent = cg_name(root, "memcg_test_0");
+ child = cg_name(root, "memcg_test_0/memcg_test_1");
+
+ if (!parent || !child)
+ goto cleanup;
+
+ if (cg_create(parent))
+ goto cleanup;
+
+ if (cg_create(child))
+ goto cleanup;
+
+ if (cg_write(parent, "memory.max", "80M"))
+ goto cleanup;
+
+ if (cg_write(parent, "memory.swap.max", "0"))
+ goto cleanup;
+
+ if (cg_write(parent, "memory.oom.group", "1"))
+ goto cleanup;
+
+ cg_run_nowait(parent, alloc_anon_noexit, (void *) MB(60));
+ cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
+ cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
+
+ if (!cg_run(child, alloc_anon, (void *)MB(100)))
+ goto cleanup;
+
+ if (cg_test_proc_killed(child))
+ goto cleanup;
+ if (cg_test_proc_killed(parent))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (child)
+ cg_destroy(child);
+ if (parent)
+ cg_destroy(parent);
+ free(child);
+ free(parent);
+
+ return ret;
+}
+
+/*
+ * This test disables swapping and tries to allocate anonymous memory
+ * up to OOM with memory.oom.group set. Then it checks that all
+ * processes were killed except those with oom_score_adj set to OOM_SCORE_ADJ_MIN.
+ */
+static int test_memcg_oom_group_score_events(const char *root)
+{
+ int ret = KSFT_FAIL;
+ char *memcg;
+ int safe_pid;
+
+ memcg = cg_name(root, "memcg_test_0");
+
+ if (!memcg)
+ goto cleanup;
+
+ if (cg_create(memcg))
+ goto cleanup;
+
+ if (cg_write(memcg, "memory.max", "50M"))
+ goto cleanup;
+
+ if (cg_write(memcg, "memory.swap.max", "0"))
+ goto cleanup;
+
+ if (cg_write(memcg, "memory.oom.group", "1"))
+ goto cleanup;
+
+ safe_pid = cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1));
+ if (set_oom_adj_score(safe_pid, OOM_SCORE_ADJ_MIN))
+ goto cleanup;
+
+ cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1));
+ if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
+ goto cleanup;
+
+ if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 3)
+ goto cleanup;
+
+ if (kill(safe_pid, SIGKILL))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ if (memcg)
+ cg_destroy(memcg);
+ free(memcg);
+
+ return ret;
+}
+
+
#define T(x) { x, #x }
struct memcg_test {
int (*fn)(const char *root);
@@ -978,6 +1180,9 @@ struct memcg_test {
T(test_memcg_oom_events),
T(test_memcg_swap_max),
T(test_memcg_sock),
+ T(test_memcg_oom_group_leaf_events),
+ T(test_memcg_oom_group_parent_events),
+ T(test_memcg_oom_group_score_events),
};
#undef T
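
The score test expects exactly three oom_kill events while the OOM_SCORE_ADJ_MIN process must still be alive afterwards (hence the kill() probe). A sketch of reading such a counter from flat "key value" text like memory.events, assuming cg_read_key_long() parses it this way:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static long read_key_long(const char *buf, const char *key)
    {
        const char *p = strstr(buf, key);

        return p ? strtol(p + strlen(key), NULL, 10) : -1;
    }

    int main(void)
    {
        const char *events = "low 0\nhigh 0\nmax 12\noom 4\noom_kill 3\n";

        /* trailing space in the key avoids matching the bare "oom" line */
        printf("oom_kill = %ld\n", read_key_long(events, "oom_kill "));
        return 0;
    }
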
diff --git a/tools/testing/selftests/drivers/usb/usbip/usbip_test.sh b/tools/testing/selftests/drivers/usb/usbip/usbip_test.sh
index a72df93cf1f8..128f0ab24307 100755
--- a/tools/testing/selftests/drivers/usb/usbip/usbip_test.sh
+++ b/tools/testing/selftests/drivers/usb/usbip/usbip_test.sh
@@ -141,6 +141,10 @@ echo "Import devices from localhost - should work"
src/usbip attach -r localhost -b $busid;
echo "=============================================================="
+# Wait for sysfs file to be updated. Without this sleep, usbip port
+# shows no imported devices.
+sleep 3;
+
echo "List imported devices - expect to see imported devices";
src/usbip port;
echo "=============================================================="
diff --git a/tools/testing/selftests/efivarfs/config b/tools/testing/selftests/efivarfs/config
new file mode 100644
index 000000000000..4e151f1005b2
--- /dev/null
+++ b/tools/testing/selftests/efivarfs/config
@@ -0,0 +1 @@
+CONFIG_EFIVAR_FS=y
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc
new file mode 100644
index 000000000000..88e6c3f43006
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc
@@ -0,0 +1,80 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: event trigger - test synthetic_events syntax parser
+
+do_reset() {
+ reset_trigger
+ echo > set_event
+ clear_trace
+}
+
+fail() { #msg
+ do_reset
+ echo $1
+ exit_fail
+}
+
+if [ ! -f set_event ]; then
+ echo "event tracing is not supported"
+ exit_unsupported
+fi
+
+if [ ! -f synthetic_events ]; then
+ echo "synthetic event is not supported"
+ exit_unsupported
+fi
+
+reset_tracer
+do_reset
+
+echo "Test synthetic_events syntax parser"
+
+echo > synthetic_events
+
+# synthetic event must have a field
+! echo "myevent" >> synthetic_events
+echo "myevent u64 var1" >> synthetic_events
+
+# synthetic event must be found in synthetic_events
+grep "myevent[[:space:]]u64 var1" synthetic_events
+
+# it is not possible to add an event with the same name
+! echo "myevent u64 var2" >> synthetic_events
+
+# A non-append open will clean up all events and add the new one
+echo "myevent u64 var2" > synthetic_events
+
+# multiple fields with different spaces
+echo "myevent u64 var1; u64 var2;" > synthetic_events
+grep "myevent[[:space:]]u64 var1; u64 var2" synthetic_events
+echo "myevent u64 var1 ; u64 var2 ;" > synthetic_events
+grep "myevent[[:space:]]u64 var1; u64 var2" synthetic_events
+echo "myevent u64 var1 ;u64 var2" > synthetic_events
+grep "myevent[[:space:]]u64 var1; u64 var2" synthetic_events
+
+# test field types
+echo "myevent u32 var" > synthetic_events
+echo "myevent u16 var" > synthetic_events
+echo "myevent u8 var" > synthetic_events
+echo "myevent s64 var" > synthetic_events
+echo "myevent s32 var" > synthetic_events
+echo "myevent s16 var" > synthetic_events
+echo "myevent s8 var" > synthetic_events
+
+echo "myevent char var" > synthetic_events
+echo "myevent int var" > synthetic_events
+echo "myevent long var" > synthetic_events
+echo "myevent pid_t var" > synthetic_events
+
+echo "myevent unsigned char var" > synthetic_events
+echo "myevent unsigned int var" > synthetic_events
+echo "myevent unsigned long var" > synthetic_events
+grep "myevent[[:space:]]unsigned long var" synthetic_events
+
+# test string type
+echo "myevent char var[10]" > synthetic_events
+grep "myevent[[:space:]]char\[10\] var" synthetic_events
+
+do_reset
+
+exit 0
diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
index ff8feca49746..ad1eeb14fda7 100644
--- a/tools/testing/selftests/futex/functional/Makefile
+++ b/tools/testing/selftests/futex/functional/Makefile
@@ -18,6 +18,7 @@ TEST_GEN_FILES := \
TEST_PROGS := run.sh
+top_srcdir = ../../../../..
include ../../lib.mk
$(TEST_GEN_FILES): $(HEADERS)
diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile
index 1bbb47565c55..4665cdbf1a8d 100644
--- a/tools/testing/selftests/gpio/Makefile
+++ b/tools/testing/selftests/gpio/Makefile
@@ -21,11 +21,8 @@ endef
CFLAGS += -O2 -g -std=gnu99 -Wall -I../../../../usr/include/
LDLIBS += -lmount -I/usr/include/libmount
-$(BINARIES): ../../../gpio/gpio-utils.o ../../../../usr/include/linux/gpio.h
+$(BINARIES):| khdr
+$(BINARIES): ../../../gpio/gpio-utils.o
../../../gpio/gpio-utils.o:
make ARCH=$(ARCH) CROSS_COMPILE=$(CROSS_COMPILE) -C ../../../gpio
-
-../../../../usr/include/linux/gpio.h:
- make -C ../../../.. headers_install INSTALL_HDR_PATH=$(shell pwd)/../../../../usr/
-
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
index 15e6b75fc3a5..a3edb2c8e43d 100644
--- a/tools/testing/selftests/kselftest.h
+++ b/tools/testing/selftests/kselftest.h
@@ -19,7 +19,6 @@
#define KSFT_FAIL 1
#define KSFT_XFAIL 2
#define KSFT_XPASS 3
-/* Treat skip as pass */
#define KSFT_SKIP 4
/* counters */
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 4202139d81d9..5c34752e1cff 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -1,4 +1,5 @@
cr4_cpuid_sync_test
+platform_info_test
set_sregs_test
sync_regs_test
vmx_tsc_adjust_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 03b0f551bedf..ec32dad3c3f0 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -6,7 +6,8 @@ UNAME_M := $(shell uname -m)
LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c
LIBKVM_x86_64 = lib/x86.c lib/vmx.c
-TEST_GEN_PROGS_x86_64 = set_sregs_test
+TEST_GEN_PROGS_x86_64 = platform_info_test
+TEST_GEN_PROGS_x86_64 += set_sregs_test
TEST_GEN_PROGS_x86_64 += sync_regs_test
TEST_GEN_PROGS_x86_64 += vmx_tsc_adjust_test
TEST_GEN_PROGS_x86_64 += cr4_cpuid_sync_test
@@ -20,7 +21,7 @@ INSTALL_HDR_PATH = $(top_srcdir)/usr
LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
LINUX_TOOL_INCLUDE = $(top_srcdir)tools/include
CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -I..
-LDFLAGS += -lpthread
+LDFLAGS += -pthread
# After inclusion, $(OUTPUT) is defined and
# $(TEST_GEN_PROGS) starts with $(OUTPUT)/
@@ -37,9 +38,6 @@ $(LIBKVM_OBJ): $(OUTPUT)/%.o: %.c
$(OUTPUT)/libkvm.a: $(LIBKVM_OBJ)
$(AR) crs $@ $^
-$(LINUX_HDR_PATH):
- make -C $(top_srcdir) headers_install
-
-all: $(STATIC_LIBS) $(LINUX_HDR_PATH)
+all: $(STATIC_LIBS)
$(TEST_GEN_PROGS): $(STATIC_LIBS)
-$(TEST_GEN_PROGS) $(LIBKVM_OBJ): | $(LINUX_HDR_PATH)
+$(STATIC_LIBS):| khdr
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index bb5a25fb82c6..3acf9a91704c 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -50,6 +50,7 @@ enum vm_mem_backing_src_type {
};
int kvm_check_cap(long cap);
+int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap);
struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm);
void kvm_vm_free(struct kvm_vm *vmp);
@@ -108,6 +109,9 @@ void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
struct kvm_vcpu_events *events);
void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
struct kvm_vcpu_events *events);
+uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index);
+void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
+ uint64_t msr_value);
const char *exit_reason_str(unsigned int exit_reason);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index e9ba389c48db..6fd8c089cafc 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -63,6 +63,29 @@ int kvm_check_cap(long cap)
return ret;
}
+/* VM Enable Capability
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * cap - Capability
+ *
+ * Output Args: None
+ *
+ * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
+ *
+ * Enables a capability (KVM_CAP_*) on the VM.
+ */
+int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
+{
+ int ret;
+
+ ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap);
+ TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n"
+ " rc: %i errno: %i", ret, errno);
+
+ return ret;
+}
+
static void vm_open(struct kvm_vm *vm, int perm)
{
vm->kvm_fd = open(KVM_DEV_PATH, perm);
@@ -1220,6 +1243,72 @@ void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
ret, errno);
}
+/* VCPU Get MSR
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * vcpuid - VCPU ID
+ * msr_index - Index of MSR
+ *
+ * Output Args: None
+ *
+ * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced.
+ *
+ * Get value of MSR for VCPU.
+ */
+uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index)
+{
+ struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+ struct {
+ struct kvm_msrs header;
+ struct kvm_msr_entry entry;
+ } buffer = {};
+ int r;
+
+ TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+ buffer.header.nmsrs = 1;
+ buffer.entry.index = msr_index;
+ r = ioctl(vcpu->fd, KVM_GET_MSRS, &buffer.header);
+ TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
+ " rc: %i errno: %i", r, errno);
+
+ return buffer.entry.data;
+}
+
+/* VCPU Set MSR
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * vcpuid - VCPU ID
+ * msr_index - Index of MSR
+ * msr_value - New value of MSR
+ *
+ * Output Args: None
+ *
+ * Return: On success, nothing. On failure a TEST_ASSERT is produced.
+ *
+ * Set value of MSR for VCPU.
+ */
+void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
+ uint64_t msr_value)
+{
+ struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+ struct {
+ struct kvm_msrs header;
+ struct kvm_msr_entry entry;
+ } buffer = {};
+ int r;
+
+ TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+ memset(&buffer, 0, sizeof(buffer));
+ buffer.header.nmsrs = 1;
+ buffer.entry.index = msr_index;
+ buffer.entry.data = msr_value;
+ r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header);
+ TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n"
+ " rc: %i errno: %i", r, errno);
+}
+
/* VM VCPU Args Set
*
* Input Args:
diff --git a/tools/testing/selftests/kvm/platform_info_test.c b/tools/testing/selftests/kvm/platform_info_test.c
new file mode 100644
index 000000000000..3764e7121265
--- /dev/null
+++ b/tools/testing/selftests/kvm/platform_info_test.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test for x86 KVM_CAP_MSR_PLATFORM_INFO
+ *
+ * Copyright (C) 2018, Google LLC.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Verifies expected behavior of controlling guest access to
+ * MSR_PLATFORM_INFO.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "x86.h"
+
+#define VCPU_ID 0
+#define MSR_PLATFORM_INFO_MAX_TURBO_RATIO 0xff00
+
+static void guest_code(void)
+{
+ uint64_t msr_platform_info;
+
+ for (;;) {
+ msr_platform_info = rdmsr(MSR_PLATFORM_INFO);
+ GUEST_SYNC(msr_platform_info);
+ asm volatile ("inc %r11");
+ }
+}
+
+static void set_msr_platform_info_enabled(struct kvm_vm *vm, bool enable)
+{
+ struct kvm_enable_cap cap = {};
+
+ cap.cap = KVM_CAP_MSR_PLATFORM_INFO;
+ cap.flags = 0;
+ cap.args[0] = (int)enable;
+ vm_enable_cap(vm, &cap);
+}
+
+static void test_msr_platform_info_enabled(struct kvm_vm *vm)
+{
+ struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct guest_args args;
+
+ set_msr_platform_info_enabled(vm, true);
+ vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Exit_reason other than KVM_EXIT_IO: %u (%s),\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+ guest_args_read(vm, VCPU_ID, &args);
+ TEST_ASSERT(args.port == GUEST_PORT_SYNC,
+ "Received IO from port other than PORT_HOST_SYNC: %u\n",
+ run->io.port);
+ TEST_ASSERT((args.arg1 & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) ==
+ MSR_PLATFORM_INFO_MAX_TURBO_RATIO,
+ "Expected MSR_PLATFORM_INFO to have max turbo ratio mask: %i.",
+ MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
+}
+
+static void test_msr_platform_info_disabled(struct kvm_vm *vm)
+{
+ struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+
+ set_msr_platform_info_enabled(vm, false);
+ vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
+ "Exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vm *vm;
+ struct kvm_run *state;
+ int rv;
+ uint64_t msr_platform_info;
+
+ /* Tell stdout not to buffer its content */
+ setbuf(stdout, NULL);
+
+ rv = kvm_check_cap(KVM_CAP_MSR_PLATFORM_INFO);
+ if (!rv) {
+ fprintf(stderr,
+ "KVM_CAP_MSR_PLATFORM_INFO not supported, skip test\n");
+ exit(KSFT_SKIP);
+ }
+
+ vm = vm_create_default(VCPU_ID, 0, guest_code);
+
+ msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO);
+ vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO,
+ msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
+ test_msr_platform_info_disabled(vm);
+ test_msr_platform_info_enabled(vm);
+ vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info);
+
+ kvm_vm_free(vm);
+
+ return 0;
+}
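
Bits 15:8 of MSR_PLATFORM_INFO hold the maximum non-turbo ratio, which is what the 0xff00 mask above checks for. Decoding it, with a made-up MSR value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t msr_platform_info = 0x80838f3012400ULL;  /* made-up value */
        unsigned int ratio = (msr_platform_info >> 8) & 0xff;

        printf("max non-turbo ratio %u => base clock %u MHz\n",
               ratio, ratio * 100);  /* the ratio scales the 100 MHz bus clock */
        return 0;
    }
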
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 17ab36605a8e..0a8e75886224 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -16,8 +16,20 @@ TEST_GEN_PROGS := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS))
TEST_GEN_PROGS_EXTENDED := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_PROGS_EXTENDED))
TEST_GEN_FILES := $(patsubst %,$(OUTPUT)/%,$(TEST_GEN_FILES))
+top_srcdir ?= ../../../..
+include $(top_srcdir)/scripts/subarch.include
+ARCH ?= $(SUBARCH)
+
all: $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES)
+.PHONY: khdr
+khdr:
+ make ARCH=$(ARCH) -C $(top_srcdir) headers_install
+
+ifdef KSFT_KHDR_INSTALL
+$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES):| khdr
+endif
+
.ONESHELL:
define RUN_TEST_PRINT_RESULT
TEST_HDR_MSG="selftests: "`basename $$PWD`:" $$BASENAME_TEST"; \
diff --git a/tools/testing/selftests/memory-hotplug/config b/tools/testing/selftests/memory-hotplug/config
index 2fde30191a47..a7e8cd5bb265 100644
--- a/tools/testing/selftests/memory-hotplug/config
+++ b/tools/testing/selftests/memory-hotplug/config
@@ -2,3 +2,4 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTPLUG_SPARSE=y
CONFIG_NOTIFIER_ERROR_INJECTION=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
+CONFIG_MEMORY_HOTREMOVE=y
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 9cca68e440a0..919aa2ac00af 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -15,6 +15,7 @@ TEST_GEN_FILES += udpgso udpgso_bench_tx udpgso_bench_rx
TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
+KSFT_KHDR_INSTALL := 1
include ../lib.mk
$(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma
diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh
index 32a194e3e07a..0ab9423d009f 100755
--- a/tools/testing/selftests/net/pmtu.sh
+++ b/tools/testing/selftests/net/pmtu.sh
@@ -178,8 +178,8 @@ setup() {
cleanup() {
[ ${cleanup_done} -eq 1 ] && return
- ip netns del ${NS_A} 2 > /dev/null
- ip netns del ${NS_B} 2 > /dev/null
+ ip netns del ${NS_A} 2> /dev/null
+ ip netns del ${NS_B} 2> /dev/null
cleanup_done=1
}
diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c
index cad14cd0ea92..b5277106df1f 100644
--- a/tools/testing/selftests/net/reuseport_bpf.c
+++ b/tools/testing/selftests/net/reuseport_bpf.c
@@ -437,14 +437,19 @@ void enable_fastopen(void)
}
}
-static struct rlimit rlim_old, rlim_new;
+static struct rlimit rlim_old;
static __attribute__((constructor)) void main_ctor(void)
{
getrlimit(RLIMIT_MEMLOCK, &rlim_old);
- rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
- rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
- setrlimit(RLIMIT_MEMLOCK, &rlim_new);
+
+ if (rlim_old.rlim_cur != RLIM_INFINITY) {
+ struct rlimit rlim_new;
+
+ rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
+ rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
+ setrlimit(RLIMIT_MEMLOCK, &rlim_new);
+ }
}
static __attribute__((destructor)) void main_dtor(void)
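
On Linux, RLIM_INFINITY is the all-ones rlim_t, so adding a megabyte to it wraps around to a tiny limit; the old code therefore lowered an unlimited RLIMIT_MEMLOCK instead of raising it. A demonstration of the wraparound:

    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
        rlim_t bumped = RLIM_INFINITY + (1UL << 20);  /* unsigned wraparound */

        printf("RLIM_INFINITY + 1MB = %llu\n", (unsigned long long)bumped);
        return 0;
    }
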
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 08c341b49760..e101af52d1d6 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
#
# This test is for checking rtnetlink callpaths, and get as much coverage as possible.
#
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index b3ebf2646e52..8fdfeafaf8c0 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -502,6 +502,55 @@ TEST_F(tls, recv_peek_multiple)
EXPECT_EQ(memcmp(test_str, buf, send_len), 0);
}
+TEST_F(tls, recv_peek_multiple_records)
+{
+ char const *test_str = "test_read_peek_mult_recs";
+ char const *test_str_first = "test_read_peek";
+ char const *test_str_second = "_mult_recs";
+ int len;
+ char buf[64];
+
+ len = strlen(test_str_first);
+ EXPECT_EQ(send(self->fd, test_str_first, len, 0), len);
+
+ len = strlen(test_str_second) + 1;
+ EXPECT_EQ(send(self->fd, test_str_second, len, 0), len);
+
+ len = sizeof(buf);
+ memset(buf, 0, len);
+ EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1);
+
+ /* MSG_PEEK can only peek into the current record. */
+ len = strlen(test_str_first) + 1;
+ EXPECT_EQ(memcmp(test_str_first, buf, len), 0);
+
+ len = sizeof(buf);
+ memset(buf, 0, len);
+ EXPECT_NE(recv(self->cfd, buf, len, 0), -1);
+
+ /* A non-MSG_PEEK recv(), however, advances the strparser (and
+ * therefore the record).
+ */
+ len = strlen(test_str) + 1;
+ EXPECT_EQ(memcmp(test_str, buf, len), 0);
+
+ /* MSG_MORE will hold the current record open, so a later MSG_PEEK
+ * will see everything.
+ */
+ len = strlen(test_str_first);
+ EXPECT_EQ(send(self->fd, test_str_first, len, MSG_MORE), len);
+
+ len = strlen(test_str_second) + 1;
+ EXPECT_EQ(send(self->fd, test_str_second, len, 0), len);
+
+ len = sizeof(buf);
+ memset(buf, 0, len);
+ EXPECT_NE(recv(self->cfd, buf, len, MSG_PEEK), -1);
+
+ len = strlen(test_str) + 1;
+ EXPECT_EQ(memcmp(test_str, buf, len), 0);
+}
+
TEST_F(tls, pollin)
{
char const *test_str = "test_poll";
diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh
index 850767befa47..99e537ab5ad9 100755
--- a/tools/testing/selftests/net/udpgso_bench.sh
+++ b/tools/testing/selftests/net/udpgso_bench.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
#
# Run a series of udpgso benchmarks
diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile
index a728040edbe1..14cfcf006936 100644
--- a/tools/testing/selftests/networking/timestamping/Makefile
+++ b/tools/testing/selftests/networking/timestamping/Makefile
@@ -5,6 +5,7 @@ TEST_PROGS := hwtstamp_config rxtimestamp timestamping txtimestamp
all: $(TEST_PROGS)
+top_srcdir = ../../../../..
include ../../lib.mk
clean:
diff --git a/tools/testing/selftests/powerpc/alignment/Makefile b/tools/testing/selftests/powerpc/alignment/Makefile
index 93baacab7693..d056486f49de 100644
--- a/tools/testing/selftests/powerpc/alignment/Makefile
+++ b/tools/testing/selftests/powerpc/alignment/Makefile
@@ -1,5 +1,6 @@
TEST_GEN_PROGS := copy_first_unaligned alignment_handler
+top_srcdir = ../../../../..
include ../../lib.mk
$(TEST_GEN_PROGS): ../harness.c ../utils.c
diff --git a/tools/testing/selftests/powerpc/benchmarks/Makefile b/tools/testing/selftests/powerpc/benchmarks/Makefile
index b4d7432a0ecd..d40300a65b42 100644
--- a/tools/testing/selftests/powerpc/benchmarks/Makefile
+++ b/tools/testing/selftests/powerpc/benchmarks/Makefile
@@ -4,6 +4,7 @@ TEST_GEN_FILES := exec_target
CFLAGS += -O2
+top_srcdir = ../../../../..
include ../../lib.mk
$(TEST_GEN_PROGS): ../harness.c
diff --git a/tools/testing/selftests/powerpc/cache_shape/Makefile b/tools/testing/selftests/powerpc/cache_shape/Makefile
index 1be547434a49..ede4d3dae750 100644
--- a/tools/testing/selftests/powerpc/cache_shape/Makefile
+++ b/tools/testing/selftests/powerpc/cache_shape/Makefile
@@ -5,6 +5,7 @@ all: $(TEST_PROGS)
$(TEST_PROGS): ../harness.c ../utils.c
+top_srcdir = ../../../../..
include ../../lib.mk
clean:
diff --git a/tools/testing/selftests/powerpc/copyloops/Makefile b/tools/testing/selftests/powerpc/copyloops/Makefile
index 1cf89a34d97c..44574f3818b3 100644
--- a/tools/testing/selftests/powerpc/copyloops/Makefile
+++ b/tools/testing/selftests/powerpc/copyloops/Makefile
@@ -17,6 +17,7 @@ TEST_GEN_PROGS := copyuser_64_t0 copyuser_64_t1 copyuser_64_t2 \
EXTRA_SOURCES := validate.c ../harness.c stubs.S
+top_srcdir = ../../../../..
include ../../lib.mk
$(OUTPUT)/copyuser_64_t%: copyuser_64.S $(EXTRA_SOURCES)
diff --git a/tools/testing/selftests/powerpc/dscr/Makefile b/tools/testing/selftests/powerpc/dscr/Makefile
index 55d7db7a616b..5df476364b4d 100644
--- a/tools/testing/selftests/powerpc/dscr/Makefile
+++ b/tools/testing/selftests/powerpc/dscr/Makefile
@@ -3,6 +3,7 @@ TEST_GEN_PROGS := dscr_default_test dscr_explicit_test dscr_user_test \
dscr_inherit_test dscr_inherit_exec_test dscr_sysfs_test \
dscr_sysfs_thread_test
+top_srcdir = ../../../../..
include ../../lib.mk
$(OUTPUT)/dscr_default_test: LDLIBS += -lpthread
diff --git a/tools/testing/selftests/powerpc/math/Makefile b/tools/testing/selftests/powerpc/math/Makefile
index 0dd3a01fdab9..11a10d7a2bbd 100644
--- a/tools/testing/selftests/powerpc/math/Makefile
+++ b/tools/testing/selftests/powerpc/math/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
TEST_GEN_PROGS := fpu_syscall fpu_preempt fpu_signal vmx_syscall vmx_preempt vmx_signal vsx_preempt
+top_srcdir = ../../../../..
include ../../lib.mk
$(TEST_GEN_PROGS): ../harness.c
diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile
index 8ebbe96d80a8..33ced6e0ad25 100644
--- a/tools/testing/selftests/powerpc/mm/Makefile
+++ b/tools/testing/selftests/powerpc/mm/Makefile
@@ -5,6 +5,7 @@ noarg:
TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors
TEST_GEN_FILES := tempfile
+top_srcdir = ../../../../..
include ../../lib.mk
$(TEST_GEN_PROGS): ../harness.c
diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile
index 6e1629bf5b09..19046db995fe 100644
--- a/tools/testing/selftests/powerpc/pmu/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/Makefile
@@ -5,6 +5,7 @@ noarg:
TEST_GEN_PROGS := count_instructions l3_bank_test per_event_excludes
EXTRA_SOURCES := ../harness.c event.c lib.c ../utils.c
+top_srcdir = ../../../../..
include ../../lib.mk
all: $(TEST_GEN_PROGS) ebb
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/Makefile b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
index c4e64bc2e265..bd5dfa509272 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/ebb/Makefile
@@ -17,6 +17,7 @@ TEST_GEN_PROGS := reg_access_test event_attributes_test cycles_test \
lost_exception_test no_handler_test \
cycles_with_mmcr2_test
+top_srcdir = ../../../../../..
include ../../../lib.mk
$(TEST_GEN_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c \
diff --git a/tools/testing/selftests/powerpc/primitives/Makefile b/tools/testing/selftests/powerpc/primitives/Makefile
index 175366db7be8..ea2b7bd09e36 100644
--- a/tools/testing/selftests/powerpc/primitives/Makefile
+++ b/tools/testing/selftests/powerpc/primitives/Makefile
@@ -2,6 +2,7 @@ CFLAGS += -I$(CURDIR)
TEST_GEN_PROGS := load_unaligned_zeropad
+top_srcdir = ../../../../..
include ../../lib.mk
$(TEST_GEN_PROGS): ../harness.c
diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
index 28f5b781a553..923d531265f8 100644
--- a/tools/testing/selftests/powerpc/ptrace/Makefile
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -4,6 +4,7 @@ TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \
perf-hwbreak
+top_srcdir = ../../../../..
include ../../lib.mk
all: $(TEST_PROGS)
diff --git a/tools/testing/selftests/powerpc/signal/Makefile b/tools/testing/selftests/powerpc/signal/Makefile
index a7cbd5082e27..1fca25c6ace0 100644
--- a/tools/testing/selftests/powerpc/signal/Makefile
+++ b/tools/testing/selftests/powerpc/signal/Makefile
@@ -8,6 +8,7 @@ $(TEST_PROGS): ../harness.c ../utils.c signal.S
CFLAGS += -maltivec
signal_tm: CFLAGS += -mhtm
+top_srcdir = ../../../../..
include ../../lib.mk
clean:
diff --git a/tools/testing/selftests/powerpc/stringloops/Makefile b/tools/testing/selftests/powerpc/stringloops/Makefile
index 10b35c87a4f4..7fc0623d85c3 100644
--- a/tools/testing/selftests/powerpc/stringloops/Makefile
+++ b/tools/testing/selftests/powerpc/stringloops/Makefile
@@ -29,6 +29,7 @@ endif
ASFLAGS = $(CFLAGS)
+top_srcdir = ../../../../..
include ../../lib.mk
$(TEST_GEN_PROGS): $(EXTRA_SOURCES)
diff --git a/tools/testing/selftests/powerpc/switch_endian/Makefile b/tools/testing/selftests/powerpc/switch_endian/Makefile
index 30b8ff8fb82e..fcd2dcb8972b 100644
--- a/tools/testing/selftests/powerpc/switch_endian/Makefile
+++ b/tools/testing/selftests/powerpc/switch_endian/Makefile
@@ -5,6 +5,7 @@ ASFLAGS += -O2 -Wall -g -nostdlib -m64
EXTRA_CLEAN = $(OUTPUT)/*.o $(OUTPUT)/check-reversed.S
+top_srcdir = ../../../../..
include ../../lib.mk
$(OUTPUT)/switch_endian_test: $(OUTPUT)/check-reversed.S
diff --git a/tools/testing/selftests/powerpc/syscalls/Makefile b/tools/testing/selftests/powerpc/syscalls/Makefile
index da22ca7c38c1..161b8846336f 100644
--- a/tools/testing/selftests/powerpc/syscalls/Makefile
+++ b/tools/testing/selftests/powerpc/syscalls/Makefile
@@ -2,6 +2,7 @@ TEST_GEN_PROGS := ipc_unmuxed
CFLAGS += -I../../../../../usr/include
+top_srcdir = ../../../../..
include ../../lib.mk
$(TEST_GEN_PROGS): ../harness.c
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index c0e45d2dde25..9fc2cf6fbc92 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -6,6 +6,7 @@ TEST_GEN_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack
tm-vmxcopy tm-fork tm-tar tm-tmspr tm-vmx-unavail tm-unavailable tm-trap \
$(SIGNAL_CONTEXT_CHK_TESTS) tm-sigreturn
+top_srcdir = ../../../../..
include ../../lib.mk
$(TEST_GEN_PROGS): ../harness.c ../utils.c
diff --git a/tools/testing/selftests/powerpc/vphn/Makefile b/tools/testing/selftests/powerpc/vphn/Makefile
index f8ced26748f8..fb82068c9fda 100644
--- a/tools/testing/selftests/powerpc/vphn/Makefile
+++ b/tools/testing/selftests/powerpc/vphn/Makefile
@@ -2,6 +2,7 @@ TEST_GEN_PROGS := test-vphn
CFLAGS += -m64
+top_srcdir = ../../../../..
include ../../lib.mk
$(TEST_GEN_PROGS): ../harness.c
diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c
index 642d4e12abea..eec2663261f2 100644
--- a/tools/testing/selftests/rseq/param_test.c
+++ b/tools/testing/selftests/rseq/param_test.c
@@ -56,15 +56,13 @@ unsigned int yield_mod_cnt, nr_abort;
printf(fmt, ## __VA_ARGS__); \
} while (0)
-#if defined(__x86_64__) || defined(__i386__)
+#ifdef __i386__
#define INJECT_ASM_REG "eax"
#define RSEQ_INJECT_CLOBBER \
, INJECT_ASM_REG
-#ifdef __i386__
-
#define RSEQ_INJECT_ASM(n) \
"mov asm_loop_cnt_" #n ", %%" INJECT_ASM_REG "\n\t" \
"test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \
@@ -76,9 +74,16 @@ unsigned int yield_mod_cnt, nr_abort;
#elif defined(__x86_64__)
+#define INJECT_ASM_REG_P "rax"
+#define INJECT_ASM_REG "eax"
+
+#define RSEQ_INJECT_CLOBBER \
+ , INJECT_ASM_REG_P \
+ , INJECT_ASM_REG
+
#define RSEQ_INJECT_ASM(n) \
- "lea asm_loop_cnt_" #n "(%%rip), %%" INJECT_ASM_REG "\n\t" \
- "mov (%%" INJECT_ASM_REG "), %%" INJECT_ASM_REG "\n\t" \
+ "lea asm_loop_cnt_" #n "(%%rip), %%" INJECT_ASM_REG_P "\n\t" \
+ "mov (%%" INJECT_ASM_REG_P "), %%" INJECT_ASM_REG "\n\t" \
"test %%" INJECT_ASM_REG ",%%" INJECT_ASM_REG "\n\t" \
"jz 333f\n\t" \
"222:\n\t" \
@@ -86,10 +91,6 @@ unsigned int yield_mod_cnt, nr_abort;
"jnz 222b\n\t" \
"333:\n\t"
-#else
-#error "Unsupported architecture"
-#endif
-
#elif defined(__s390__)
#define RSEQ_INJECT_INPUT \
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 9881876d2aa0..e94b7b14bcb2 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -26,10 +26,6 @@ TEST_PROGS := run_vmtests
include ../lib.mk
-$(OUTPUT)/userfaultfd: ../../../../usr/include/linux/kernel.h
$(OUTPUT)/userfaultfd: LDLIBS += -lpthread
$(OUTPUT)/mlock-random-test: LDLIBS += -lcap
-
-../../../../usr/include/linux/kernel.h:
- make -C ../../../.. headers_install
diff --git a/tools/testing/selftests/x86/test_vdso.c b/tools/testing/selftests/x86/test_vdso.c
index 235259011704..35edd61d1663 100644
--- a/tools/testing/selftests/x86/test_vdso.c
+++ b/tools/testing/selftests/x86/test_vdso.c
@@ -17,6 +17,7 @@
#include <errno.h>
#include <sched.h>
#include <stdbool.h>
+#include <limits.h>
#ifndef SYS_getcpu
# ifdef __x86_64__
@@ -31,6 +32,14 @@
int nerrs = 0;
+typedef int (*vgettime_t)(clockid_t, struct timespec *);
+
+vgettime_t vdso_clock_gettime;
+
+typedef long (*vgtod_t)(struct timeval *tv, struct timezone *tz);
+
+vgtod_t vdso_gettimeofday;
+
typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
getcpu_t vgetcpu;
@@ -95,6 +104,15 @@ static void fill_function_pointers()
printf("Warning: failed to find getcpu in vDSO\n");
vgetcpu = (getcpu_t) vsyscall_getcpu();
+
+ vdso_clock_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
+ if (!vdso_clock_gettime)
+ printf("Warning: failed to find clock_gettime in vDSO\n");
+
+ vdso_gettimeofday = (vgtod_t)dlsym(vdso, "__vdso_gettimeofday");
+ if (!vdso_gettimeofday)
+ printf("Warning: failed to find gettimeofday in vDSO\n");
+
}
static long sys_getcpu(unsigned * cpu, unsigned * node,
@@ -103,6 +121,16 @@ static long sys_getcpu(unsigned * cpu, unsigned * node,
return syscall(__NR_getcpu, cpu, node, cache);
}
+static inline int sys_clock_gettime(clockid_t id, struct timespec *ts)
+{
+ return syscall(__NR_clock_gettime, id, ts);
+}
+
+static inline int sys_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+ return syscall(__NR_gettimeofday, tv, tz);
+}
+
static void test_getcpu(void)
{
printf("[RUN]\tTesting getcpu...\n");
@@ -155,10 +183,154 @@ static void test_getcpu(void)
}
}
+static bool ts_leq(const struct timespec *a, const struct timespec *b)
+{
+ if (a->tv_sec != b->tv_sec)
+ return a->tv_sec < b->tv_sec;
+ else
+ return a->tv_nsec <= b->tv_nsec;
+}
+
+static bool tv_leq(const struct timeval *a, const struct timeval *b)
+{
+ if (a->tv_sec != b->tv_sec)
+ return a->tv_sec < b->tv_sec;
+ else
+ return a->tv_usec <= b->tv_usec;
+}
+
+static char const * const clocknames[] = {
+ [0] = "CLOCK_REALTIME",
+ [1] = "CLOCK_MONOTONIC",
+ [2] = "CLOCK_PROCESS_CPUTIME_ID",
+ [3] = "CLOCK_THREAD_CPUTIME_ID",
+ [4] = "CLOCK_MONOTONIC_RAW",
+ [5] = "CLOCK_REALTIME_COARSE",
+ [6] = "CLOCK_MONOTONIC_COARSE",
+ [7] = "CLOCK_BOOTTIME",
+ [8] = "CLOCK_REALTIME_ALARM",
+ [9] = "CLOCK_BOOTTIME_ALARM",
+ [10] = "CLOCK_SGI_CYCLE",
+ [11] = "CLOCK_TAI",
+};
+
+static void test_one_clock_gettime(int clock, const char *name)
+{
+ struct timespec start, vdso, end;
+ int vdso_ret, end_ret;
+
+ printf("[RUN]\tTesting clock_gettime for clock %s (%d)...\n", name, clock);
+
+ if (sys_clock_gettime(clock, &start) < 0) {
+ if (errno == EINVAL) {
+ vdso_ret = vdso_clock_gettime(clock, &vdso);
+ if (vdso_ret == -EINVAL) {
+ printf("[OK]\tNo such clock.\n");
+ } else {
+ printf("[FAIL]\tNo such clock, but __vdso_clock_gettime returned %d\n", vdso_ret);
+ nerrs++;
+ }
+ } else {
+ printf("[WARN]\t clock_gettime(%d) syscall returned error %d\n", clock, errno);
+ }
+ return;
+ }
+
+ vdso_ret = vdso_clock_gettime(clock, &vdso);
+ end_ret = sys_clock_gettime(clock, &end);
+
+ if (vdso_ret != 0 || end_ret != 0) {
+ printf("[FAIL]\tvDSO returned %d, syscall errno=%d\n",
+ vdso_ret, errno);
+ nerrs++;
+ return;
+ }
+
+ printf("\t%llu.%09ld %llu.%09ld %llu.%09ld\n",
+ (unsigned long long)start.tv_sec, start.tv_nsec,
+ (unsigned long long)vdso.tv_sec, vdso.tv_nsec,
+ (unsigned long long)end.tv_sec, end.tv_nsec);
+
+ if (!ts_leq(&start, &vdso) || !ts_leq(&vdso, &end)) {
+ printf("[FAIL]\tTimes are out of sequence\n");
+ nerrs++;
+ }
+}
+
+static void test_clock_gettime(void)
+{
+ for (int clock = 0; clock < sizeof(clocknames) / sizeof(clocknames[0]);
+ clock++) {
+ test_one_clock_gettime(clock, clocknames[clock]);
+ }
+
+ /* Also test some invalid clock ids */
+ test_one_clock_gettime(-1, "invalid");
+ test_one_clock_gettime(INT_MIN, "invalid");
+ test_one_clock_gettime(INT_MAX, "invalid");
+}
+
+static void test_gettimeofday(void)
+{
+ struct timeval start, vdso, end;
+ struct timezone sys_tz, vdso_tz;
+ int vdso_ret, end_ret;
+
+ if (!vdso_gettimeofday)
+ return;
+
+ printf("[RUN]\tTesting gettimeofday...\n");
+
+ if (sys_gettimeofday(&start, &sys_tz) < 0) {
+ printf("[FAIL]\tsys_gettimeofday failed (%d)\n", errno);
+ nerrs++;
+ return;
+ }
+
+ vdso_ret = vdso_gettimeofday(&vdso, &vdso_tz);
+ end_ret = sys_gettimeofday(&end, NULL);
+
+ if (vdso_ret != 0 || end_ret != 0) {
+ printf("[FAIL]\tvDSO returned %d, syscall errno=%d\n",
+ vdso_ret, errno);
+ nerrs++;
+ return;
+ }
+
+ printf("\t%llu.%06ld %llu.%06ld %llu.%06ld\n",
+ (unsigned long long)start.tv_sec, start.tv_usec,
+ (unsigned long long)vdso.tv_sec, vdso.tv_usec,
+ (unsigned long long)end.tv_sec, end.tv_usec);
+
+ if (!tv_leq(&start, &vdso) || !tv_leq(&vdso, &end)) {
+ printf("[FAIL]\tTimes are out of sequence\n");
+ nerrs++;
+ }
+
+ if (sys_tz.tz_minuteswest == vdso_tz.tz_minuteswest &&
+ sys_tz.tz_dsttime == vdso_tz.tz_dsttime) {
+ printf("[OK]\ttimezones match: minuteswest=%d, dsttime=%d\n",
+ sys_tz.tz_minuteswest, sys_tz.tz_dsttime);
+ } else {
+ printf("[FAIL]\ttimezones do not match\n");
+ nerrs++;
+ }
+
+ /* And make sure that passing NULL for tz doesn't crash. */
+ vdso_gettimeofday(&vdso, NULL);
+}
+
int main(int argc, char **argv)
{
fill_function_pointers();
+ test_clock_gettime();
+ test_gettimeofday();
+
+ /*
+ * Test getcpu() last so that, if something goes wrong setting affinity,
+ * we still run the other tests.
+ */
test_getcpu();
return nerrs ? 1 : 0;
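
Both new tests rely on the same sandwich check: a vDSO reading taken between two syscall readings of the same clock must not be out of order. Calling the vDSO directly looks like this (a sketch of the lookup the test performs; link with -ldl on older glibc):

    #define _GNU_SOURCE  /* for RTLD_NOLOAD */
    #include <dlfcn.h>
    #include <stdio.h>
    #include <time.h>

    typedef int (*vgettime_t)(clockid_t, struct timespec *);

    int main(void)
    {
        void *vdso = dlopen("linux-vdso.so.1",
                            RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
        vgettime_t vgettime;
        struct timespec ts;

        if (!vdso)  /* 32-bit soname */
            vdso = dlopen("linux-gate.so.1",
                          RTLD_LAZY | RTLD_LOCAL | RTLD_NOLOAD);
        if (!vdso)
            return 1;
        vgettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
        if (!vgettime || vgettime(CLOCK_MONOTONIC, &ts))
            return 1;
        printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
        return 0;
    }
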
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index c92053bc3f96..150c8a69cdaf 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -496,7 +496,7 @@ static bool need_new_vmid_gen(struct kvm *kvm)
static void update_vttbr(struct kvm *kvm)
{
phys_addr_t pgd_phys;
- u64 vmid;
+ u64 vmid, cnp = kvm_cpu_has_cnp() ? VTTBR_CNP_BIT : 0;
bool new_gen;
read_lock(&kvm_vmid_lock);
@@ -546,7 +546,7 @@ static void update_vttbr(struct kvm *kvm)
pgd_phys = virt_to_phys(kvm->arch.pgd);
BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
- kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
+ kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid | cnp;
write_unlock(&kvm_vmid_lock);
}
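
With ARMv8.2-TTCNP, setting the CnP bit (bit 0 of VTTBR_EL2) tells the MMU that all CPUs share the stage-2 tables for this VMID; the VMID itself lives in the top bits. A sketch of the composition, assuming the 16-bit-VMID layout and a made-up pgd address:

    #include <stdint.h>
    #include <stdio.h>

    #define VTTBR_CNP_BIT     UINT64_C(1)
    #define VTTBR_VMID_SHIFT  48

    int main(void)
    {
        uint64_t pgd_phys = 0x80001000ULL;  /* made-up stage-2 pgd address */
        uint64_t vmid = 5;
        int has_cnp = 1;                    /* from the CPU feature check */
        uint64_t vttbr = pgd_phys | (vmid << VTTBR_VMID_SHIFT) |
                         (has_cnp ? VTTBR_CNP_BIT : 0);

        printf("VTTBR = %#llx\n", (unsigned long long)vttbr);
        return 0;
    }
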
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 91aaf73b00df..ed162a6c57c5 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -1817,18 +1817,6 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *dat
return 0;
}
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
- unsigned long end = hva + PAGE_SIZE;
-
- if (!kvm->arch.pgd)
- return 0;
-
- trace_kvm_unmap_hva(hva);
- handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
- return 0;
-}
-
int kvm_unmap_hva_range(struct kvm *kvm,
unsigned long start, unsigned long end)
{
@@ -1860,13 +1848,20 @@ static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
unsigned long end = hva + PAGE_SIZE;
+ kvm_pfn_t pfn = pte_pfn(pte);
pte_t stage2_pte;
if (!kvm->arch.pgd)
return;
trace_kvm_set_spte_hva(hva);
- stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
+
+ /*
+ * We've moved a page around, probably through CoW, so let's treat it
+ * just like a translation fault and clean the cache to the PoC.
+ */
+ clean_dcache_guest_page(pfn, PAGE_SIZE);
+ stage2_pte = pfn_pte(pfn, PAGE_S2);
handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}
diff --git a/virt/kvm/arm/trace.h b/virt/kvm/arm/trace.h
index e53b596f483b..57b3edebbb40 100644
--- a/virt/kvm/arm/trace.h
+++ b/virt/kvm/arm/trace.h
@@ -134,21 +134,6 @@ TRACE_EVENT(kvm_mmio_emulate,
__entry->vcpu_pc, __entry->instr, __entry->cpsr)
);
-TRACE_EVENT(kvm_unmap_hva,
- TP_PROTO(unsigned long hva),
- TP_ARGS(hva),
-
- TP_STRUCT__entry(
- __field( unsigned long, hva )
- ),
-
- TP_fast_assign(
- __entry->hva = hva;
- ),
-
- TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva)
-);
-
TRACE_EVENT(kvm_unmap_hva_range,
TP_PROTO(unsigned long start, unsigned long end),
TP_ARGS(start, end),