Diffstat (limited to 'include')
-rw-r--r--include/acpi/acoutput.h3
-rw-r--r--include/acpi/acpi_bus.h8
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/acpi/actbl.h4
-rw-r--r--include/acpi/actbl1.h2
-rw-r--r--include/acpi/actypes.h12
-rw-r--r--include/acpi/platform/aclinux.h5
-rw-r--r--include/asm-generic/futex.h8
-rw-r--r--include/asm-generic/hugetlb.h7
-rw-r--r--include/asm-generic/io.h7
-rw-r--r--include/asm-generic/mmiowb.h63
-rw-r--r--include/asm-generic/mmiowb_types.h12
-rw-r--r--include/asm-generic/pgtable.h2
-rw-r--r--include/asm-generic/rwsem.h140
-rw-r--r--include/asm-generic/sections.h14
-rw-r--r--include/asm-generic/syscall.h26
-rw-r--r--include/asm-generic/tlb.h297
-rw-r--r--include/asm-generic/vmlinux.lds.h1
-rw-r--r--include/crypto/aes.h8
-rw-r--r--include/crypto/akcipher.h54
-rw-r--r--include/crypto/cryptd.h18
-rw-r--r--include/crypto/des.h43
-rw-r--r--include/crypto/hash.h10
-rw-r--r--include/crypto/internal/simd.h44
-rw-r--r--include/crypto/morus1280_glue.h79
-rw-r--r--include/crypto/morus640_glue.h79
-rw-r--r--include/crypto/public_key.h4
-rw-r--r--include/crypto/streebog.h5
-rw-r--r--include/drm/drm_atomic.h6
-rw-r--r--include/drm/drm_audio_component.h7
-rw-r--r--include/drm/drm_auth.h6
-rw-r--r--include/drm/drm_bridge.h11
-rw-r--r--include/drm/drm_cache.h2
-rw-r--r--include/drm/drm_client.h2
-rw-r--r--include/drm/drm_connector.h136
-rw-r--r--include/drm/drm_crtc.h4
-rw-r--r--include/drm/drm_device.h3
-rw-r--r--include/drm/drm_drv.h16
-rw-r--r--include/drm/drm_dsc.h9
-rw-r--r--include/drm/drm_edid.h6
-rw-r--r--include/drm/drm_fb_helper.h48
-rw-r--r--include/drm/drm_file.h2
-rw-r--r--include/drm/drm_format_helper.h35
-rw-r--r--include/drm/drm_framebuffer.h1
-rw-r--r--include/drm/drm_gem.h32
-rw-r--r--include/drm/drm_gem_shmem_helper.h159
-rw-r--r--include/drm/drm_hdcp.h7
-rw-r--r--include/drm/drm_legacy.h2
-rw-r--r--include/drm/drm_modes.h17
-rw-r--r--include/drm/drm_modeset_helper_vtables.h11
-rw-r--r--include/drm/drm_print.h2
-rw-r--r--include/drm/drm_syncobj.h5
-rw-r--r--include/drm/drm_utils.h4
-rw-r--r--include/drm/drm_vma_manager.h12
-rw-r--r--include/drm/drm_writeback.h30
-rw-r--r--include/drm/i915_pciids.h217
-rw-r--r--include/drm/tinydrm/mipi-dbi.h32
-rw-r--r--include/drm/tinydrm/tinydrm-helpers.h21
-rw-r--r--include/drm/tinydrm/tinydrm.h75
-rw-r--r--include/drm/ttm/ttm_bo_driver.h3
-rw-r--r--include/dt-bindings/clock/axg-audio-clkc.h30
-rw-r--r--include/dt-bindings/clock/exynos5410.h3
-rw-r--r--include/dt-bindings/clock/g12a-aoclkc.h2
-rw-r--r--include/dt-bindings/clock/g12a-clkc.h5
-rw-r--r--include/dt-bindings/clock/imx7ulp-clock.h1
-rw-r--r--include/dt-bindings/clock/jz4725b-cgu.h1
-rw-r--r--include/dt-bindings/clock/meson8b-clkc.h6
-rw-r--r--include/dt-bindings/clock/mt8183-clk.h422
-rw-r--r--include/dt-bindings/clock/mt8516-clk.h211
-rw-r--r--include/dt-bindings/clock/qcom,gcc-qcs404.h5
-rw-r--r--include/dt-bindings/clock/qcom,turingcc-qcs404.h15
-rw-r--r--include/dt-bindings/clock/sifive-fu540-prci.h18
-rw-r--r--include/dt-bindings/clock/stm32fx-clock.h7
-rw-r--r--include/dt-bindings/clock/sun5i-ccu.h2
-rw-r--r--include/dt-bindings/iio/temperature/thermocouple.h16
-rw-r--r--include/dt-bindings/phy/phy-am654-serdes.h13
-rw-r--r--include/dt-bindings/pinctrl/stm32-pinfunc.h6
-rw-r--r--include/dt-bindings/reset/amlogic,meson-g12a-reset.h5
-rw-r--r--include/keys/trusted.h2
-rw-r--r--include/linux/acpi.h15
-rw-r--r--include/linux/acpi_iort.h8
-rw-r--r--include/linux/alcor_pci.h2
-rw-r--r--include/linux/amba/clcd.h31
-rw-r--r--include/linux/armada-37xx-rwtm-mailbox.h23
-rw-r--r--include/linux/atalk.h2
-rw-r--r--include/linux/audit.h75
-rw-r--r--include/linux/balloon_compaction.h15
-rw-r--r--include/linux/bio.h34
-rw-r--r--include/linux/bitrev.h46
-rw-r--r--include/linux/blk-mq-rdma.h1
-rw-r--r--include/linux/blk-mq.h6
-rw-r--r--include/linux/blk_types.h28
-rw-r--r--include/linux/blkdev.h46
-rw-r--r--include/linux/bpf-cgroup.h21
-rw-r--r--include/linux/bpf.h87
-rw-r--r--include/linux/bpf_types.h3
-rw-r--r--include/linux/bpf_verifier.h72
-rw-r--r--include/linux/brcmphy.h16
-rw-r--r--include/linux/bsg-lib.h16
-rw-r--r--include/linux/btf.h1
-rw-r--r--include/linux/bvec.h51
-rw-r--r--include/linux/ceph/libceph.h2
-rw-r--r--include/linux/cgroup-defs.h33
-rw-r--r--include/linux/cgroup.h43
-rw-r--r--include/linux/clk-provider.h112
-rw-r--r--include/linux/clk.h16
-rw-r--r--include/linux/clk/analogbits-wrpll-cln28hpc.h79
-rw-r--r--include/linux/clk/at91_pmc.h12
-rw-r--r--include/linux/clk/ti.h2
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/coresight-pmu.h2
-rw-r--r--include/linux/coresight.h7
-rw-r--r--include/linux/counter.h510
-rw-r--r--include/linux/counter_enum.h45
-rw-r--r--include/linux/cper.h336
-rw-r--r--include/linux/cpu.h46
-rw-r--r--include/linux/cpufreq.h14
-rw-r--r--include/linux/cpuhotplug.h2
-rw-r--r--include/linux/cpuidle.h1
-rw-r--r--include/linux/cred.h2
-rw-r--r--include/linux/dcache.h7
-rw-r--r--include/linux/device.h20
-rw-r--r--include/linux/dma-fence-chain.h81
-rw-r--r--include/linux/dma-fence.h21
-rw-r--r--include/linux/dma-mapping.h6
-rw-r--r--include/linux/dma-noncoherent.h6
-rw-r--r--include/linux/dma/idma64.h14
-rw-r--r--include/linux/dmi.h8
-rw-r--r--include/linux/dsa/8021q.h76
-rw-r--r--include/linux/dsa/sja1105.h40
-rw-r--r--include/linux/dynamic_debug.h11
-rw-r--r--include/linux/efi.h7
-rw-r--r--include/linux/elevator.h1
-rw-r--r--include/linux/etherdevice.h14
-rw-r--r--include/linux/f2fs_fs.h11
-rw-r--r--include/linux/filter.h34
-rw-r--r--include/linux/firmware/intel/stratix10-smc.h19
-rw-r--r--include/linux/fs.h24
-rw-r--r--include/linux/fs_context.h38
-rw-r--r--include/linux/fscrypt.h85
-rw-r--r--include/linux/fsl/ftm.h88
-rw-r--r--include/linux/fsnotify.h41
-rw-r--r--include/linux/fsnotify_backend.h10
-rw-r--r--include/linux/ftrace.h18
-rw-r--r--include/linux/genhd.h20
-rw-r--r--include/linux/genl_magic_func.h4
-rw-r--r--include/linux/gfp.h4
-rw-r--r--include/linux/gpio/driver.h18
-rw-r--r--include/linux/gpio/machine.h26
-rw-r--r--include/linux/hid.h4
-rw-r--r--include/linux/hmm.h310
-rw-r--r--include/linux/huge_mm.h6
-rw-r--r--include/linux/hugetlb.h12
-rw-r--r--include/linux/hwmon.h18
-rw-r--r--include/linux/hyperv.h7
-rw-r--r--include/linux/i2c-algo-bit.h1
-rw-r--r--include/linux/i2c.h41
-rw-r--r--include/linux/ieee80211.h14
-rw-r--r--include/linux/if_bridge.h3
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h1
-rw-r--r--include/linux/iio/driver.h1
-rw-r--r--include/linux/iio/frequency/ad9523.h8
-rw-r--r--include/linux/iio/gyro/itg3200.h1
-rw-r--r--include/linux/iio/iio.h4
-rw-r--r--include/linux/iio/imu/adis.h14
-rw-r--r--include/linux/iio/timer/stm32-timer-trigger.h11
-rw-r--r--include/linux/ima.h2
-rw-r--r--include/linux/inetdevice.h14
-rw-r--r--include/linux/intel-iommu.h13
-rw-r--r--include/linux/intel-ish-client-if.h112
-rw-r--r--include/linux/interrupt.h25
-rw-r--r--include/linux/io-pgtable.h7
-rw-r--r--include/linux/iomap.h22
-rw-r--r--include/linux/iommu.h144
-rw-r--r--include/linux/iova.h16
-rw-r--r--include/linux/irq.h2
-rw-r--r--include/linux/irqchip/arm-gic.h3
-rw-r--r--include/linux/jbd2.h1
-rw-r--r--include/linux/jiffies.h1
-rw-r--r--include/linux/jump_label_ratelimit.h64
-rw-r--r--include/linux/kcore.h13
-rw-r--r--include/linux/kernel.h18
-rw-r--r--include/linux/kernfs.h15
-rw-r--r--include/linux/kobject.h3
-rw-r--r--include/linux/kprobes.h1
-rw-r--r--include/linux/kvm_host.h10
-rw-r--r--include/linux/list.h20
-rw-r--r--include/linux/livepatch.h3
-rw-r--r--include/linux/lockd/bind.h1
-rw-r--r--include/linux/lockd/lockd.h4
-rw-r--r--include/linux/lockdep.h11
-rw-r--r--include/linux/lsm_hooks.h189
-rw-r--r--include/linux/marvell_phy.h1
-rw-r--r--include/linux/math64.h13
-rw-r--r--include/linux/mdev.h35
-rw-r--r--include/linux/mdio.h3
-rw-r--r--include/linux/mei_cl_bus.h3
-rw-r--r--include/linux/memblock.h44
-rw-r--r--include/linux/memcontrol.h39
-rw-r--r--include/linux/memory.h2
-rw-r--r--include/linux/memory_hotplug.h42
-rw-r--r--include/linux/mfd/altera-sysmgr.h29
-rw-r--r--include/linux/mfd/cros_ec.h5
-rw-r--r--include/linux/mfd/cros_ec_commands.h91
-rw-r--r--include/linux/mfd/da9063/core.h7
-rw-r--r--include/linux/mfd/da9063/registers.h13
-rw-r--r--include/linux/mfd/max77620.h5
-rw-r--r--include/linux/mfd/max77650.h59
-rw-r--r--include/linux/mfd/palmas.h1
-rw-r--r--include/linux/mfd/stmfx.h123
-rw-r--r--include/linux/mfd/syscon/atmel-matrix.h6
-rw-r--r--include/linux/mfd/syscon/atmel-mc.h6
-rw-r--r--include/linux/mfd/syscon/atmel-smc.h5
-rw-r--r--include/linux/mfd/syscon/atmel-st.h6
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h9
-rw-r--r--include/linux/mfd/ti-lmu-register.h44
-rw-r--r--include/linux/mfd/ti-lmu.h1
-rw-r--r--include/linux/mfd/wm831x/regulator.h2
-rw-r--r--include/linux/mfd/wm8400-private.h8
-rw-r--r--include/linux/mii.h2
-rw-r--r--include/linux/mlx5/cq.h2
-rw-r--r--include/linux/mlx5/device.h11
-rw-r--r--include/linux/mlx5/doorbell.h39
-rw-r--r--include/linux/mlx5/driver.h16
-rw-r--r--include/linux/mlx5/eswitch.h2
-rw-r--r--include/linux/mlx5/fs.h7
-rw-r--r--include/linux/mlx5/mlx5_ifc.h183
-rw-r--r--include/linux/mlx5/port.h1
-rw-r--r--include/linux/mlx5/qp.h4
-rw-r--r--include/linux/mlx5/transobj.h3
-rw-r--r--include/linux/mlx5/vport.h4
-rw-r--r--include/linux/mm.h147
-rw-r--r--include/linux/mm_inline.h2
-rw-r--r--include/linux/mm_types.h4
-rw-r--r--include/linux/mmc/sdio_func.h12
-rw-r--r--include/linux/mmc/sdio_ids.h2
-rw-r--r--include/linux/mmu_notifier.h63
-rw-r--r--include/linux/mmzone.h5
-rw-r--r--include/linux/module.h12
-rw-r--r--include/linux/moduleparam.h12
-rw-r--r--include/linux/mount.h2
-rw-r--r--include/linux/msi.h18
-rw-r--r--include/linux/mtd/bbm.h14
-rw-r--r--include/linux/mtd/nand.h32
-rw-r--r--include/linux/mtd/nand_bch.h6
-rw-r--r--include/linux/mtd/onenand.h3
-rw-r--r--include/linux/mtd/rawnand.h122
-rw-r--r--include/linux/mtd/spinand.h7
-rw-r--r--include/linux/net.h8
-rw-r--r--include/linux/netdevice.h98
-rw-r--r--include/linux/netfilter.h18
-rw-r--r--include/linux/netfilter/ipset/ip_set.h11
-rw-r--r--include/linux/netfilter/nfnetlink_osf.h11
-rw-r--r--include/linux/netfilter/x_tables.h1
-rw-r--r--include/linux/netfilter_ipv6.h15
-rw-r--r--include/linux/nfs_fs.h1
-rw-r--r--include/linux/nfs_fs_sb.h13
-rw-r--r--include/linux/nfs_page.h12
-rw-r--r--include/linux/node.h71
-rw-r--r--include/linux/nvme-fc-driver.h6
-rw-r--r--include/linux/nvme-rdma.h2
-rw-r--r--include/linux/nvme.h9
-rw-r--r--include/linux/nvmem-consumer.h7
-rw-r--r--include/linux/of.h7
-rw-r--r--include/linux/oid_registry.h18
-rw-r--r--include/linux/overflow.h12
-rw-r--r--include/linux/packing.h49
-rw-r--r--include/linux/page-isolation.h10
-rw-r--r--include/linux/pagemap.h26
-rw-r--r--include/linux/parport.h13
-rw-r--r--include/linux/pci-ecam.h1
-rw-r--r--include/linux/pci-epc.h2
-rw-r--r--include/linux/pci-epf.h3
-rw-r--r--include/linux/pci.h43
-rw-r--r--include/linux/pci_hotplug.h66
-rw-r--r--include/linux/percpu.h12
-rw-r--r--include/linux/perf_event.h20
-rw-r--r--include/linux/phy.h14
-rw-r--r--include/linux/phy/phy.h2
-rw-r--r--include/linux/pid.h2
-rw-r--r--include/linux/pipe_fs_i.h11
-rw-r--r--include/linux/platform_data/ads7828.h2
-rw-r--r--include/linux/platform_data/ds620.h2
-rw-r--r--include/linux/platform_data/elm.h2
-rw-r--r--include/linux/platform_data/gpio-omap.h2
-rw-r--r--include/linux/platform_data/gpio/gpio-amd-fch.h2
-rw-r--r--include/linux/platform_data/ina2xx.h2
-rw-r--r--include/linux/platform_data/macb.h9
-rw-r--r--include/linux/platform_data/max197.h2
-rw-r--r--include/linux/platform_data/mv88e6xxx.h1
-rw-r--r--include/linux/platform_data/ntc_thermistor.h2
-rw-r--r--include/linux/platform_data/pca954x.h48
-rw-r--r--include/linux/platform_data/spi-ep93xx.h4
-rw-r--r--include/linux/platform_data/wilco-ec.h22
-rw-r--r--include/linux/platform_data/x86/asus-wmi.h1
-rw-r--r--include/linux/platform_data/x86/clk-pmc-atom.h3
-rw-r--r--include/linux/platform_data/xilinx-ll-temac.h32
-rw-r--r--include/linux/pm_domain.h22
-rw-r--r--include/linux/pm_opp.h8
-rw-r--r--include/linux/printk.h4
-rw-r--r--include/linux/property.h18
-rw-r--r--include/linux/psp-sev.h3
-rw-r--r--include/linux/ptrace.h11
-rw-r--r--include/linux/pwm.h5
-rw-r--r--include/linux/qed/qed_if.h2
-rw-r--r--include/linux/random.h1
-rw-r--r--include/linux/rcupdate.h6
-rw-r--r--include/linux/rcuwait.h2
-rw-r--r--include/linux/regulator/consumer.h5
-rw-r--r--include/linux/reservation.h3
-rw-r--r--include/linux/reset.h113
-rw-r--r--include/linux/rhashtable-types.h2
-rw-r--r--include/linux/rhashtable.h358
-rw-r--r--include/linux/rtc.h6
-rw-r--r--include/linux/rtc/ds1685.h2
-rw-r--r--include/linux/rwsem-spinlock.h47
-rw-r--r--include/linux/rwsem.h37
-rw-r--r--include/linux/sbitmap.h2
-rw-r--r--include/linux/scatterlist.h10
-rw-r--r--include/linux/sched.h6
-rw-r--r--include/linux/sched/jobctl.h2
-rw-r--r--include/linux/sched/mm.h21
-rw-r--r--include/linux/sched/signal.h18
-rw-r--r--include/linux/sched/task.h1
-rw-r--r--include/linux/sched/topology.h4
-rw-r--r--include/linux/sched/user.h7
-rw-r--r--include/linux/security.h16
-rw-r--r--include/linux/sed-opal.h10
-rw-r--r--include/linux/selection.h7
-rw-r--r--include/linux/serial_core.h2
-rw-r--r--include/linux/set_memory.h11
-rw-r--r--include/linux/shmem_fs.h1
-rw-r--r--include/linux/siphash.h5
-rw-r--r--include/linux/skbuff.h48
-rw-r--r--include/linux/slab.h2
-rw-r--r--include/linux/smpboot.h2
-rw-r--r--include/linux/socket.h12
-rw-r--r--include/linux/soundwire/sdw.h16
-rw-r--r--include/linux/soundwire/sdw_intel.h6
-rw-r--r--include/linux/soundwire/sdw_registers.h5
-rw-r--r--include/linux/soundwire/sdw_type.h6
-rw-r--r--include/linux/spi/pxa2xx_spi.h1
-rw-r--r--include/linux/spi/spi-mem.h12
-rw-r--r--include/linux/spi/spi.h24
-rw-r--r--include/linux/spi/spi_bitbang.h1
-rw-r--r--include/linux/spinlock.h11
-rw-r--r--include/linux/srcu.h36
-rw-r--r--include/linux/stackdepot.h8
-rw-r--r--include/linux/stacktrace.h81
-rw-r--r--include/linux/string.h7
-rw-r--r--include/linux/sunrpc/clnt.h4
-rw-r--r--include/linux/sunrpc/sched.h28
-rw-r--r--include/linux/sunrpc/xprt.h6
-rw-r--r--include/linux/suspend.h3
-rw-r--r--include/linux/switchtec.h2
-rw-r--r--include/linux/syscalls.h9
-rw-r--r--include/linux/thunderbolt.h8
-rw-r--r--include/linux/tick.h13
-rw-r--r--include/linux/time64.h21
-rw-r--r--include/linux/tracepoint-defs.h1
-rw-r--r--include/linux/types.h5
-rw-r--r--include/linux/uaccess.h2
-rw-r--r--include/linux/uio.h26
-rw-r--r--include/linux/unicode.h30
-rw-r--r--include/linux/uprobes.h5
-rw-r--r--include/linux/usb.h4
-rw-r--r--include/linux/usb/ch9.h8
-rw-r--r--include/linux/usb/hcd.h6
-rw-r--r--include/linux/usb/serial.h8
-rw-r--r--include/linux/usb/tcpm.h13
-rw-r--r--include/linux/usb/typec_dp.h5
-rw-r--r--include/linux/userfaultfd_k.h2
-rw-r--r--include/linux/vbox_utils.h12
-rw-r--r--include/linux/virtio_ring.h2
-rw-r--r--include/linux/vmalloc.h15
-rw-r--r--include/linux/vmstat.h2
-rw-r--r--include/linux/vmw_vmci_defs.h35
-rw-r--r--include/linux/wait.h2
-rw-r--r--include/media/cec-notifier.h19
-rw-r--r--include/media/dvb-usb-ids.h1
-rw-r--r--include/media/fwht-ctrls.h31
-rw-r--r--include/media/media-dev-allocator.h63
-rw-r--r--include/media/media-entity.h24
-rw-r--r--include/media/media-request.h4
-rw-r--r--include/media/rc-map.h4
-rw-r--r--include/media/v4l2-common.h33
-rw-r--r--include/media/v4l2-ctrls.h11
-rw-r--r--include/media/v4l2-subdev.h15
-rw-r--r--include/media/videobuf2-core.h25
-rw-r--r--include/media/vsp1.h19
-rw-r--r--include/misc/charlcd.h1
-rw-r--r--include/misc/ocxl.h359
-rw-r--r--include/net/act_api.h9
-rw-r--r--include/net/addrconf.h55
-rw-r--r--include/net/af_rxrpc.h4
-rw-r--r--include/net/arp.h8
-rw-r--r--include/net/bluetooth/hci.h1
-rw-r--r--include/net/bluetooth/hci_core.h3
-rw-r--r--include/net/bpf_sk_storage.h13
-rw-r--r--include/net/cfg80211.h109
-rw-r--r--include/net/compat.h3
-rw-r--r--include/net/devlink.h534
-rw-r--r--include/net/dsa.h208
-rw-r--r--include/net/dst.h11
-rw-r--r--include/net/fib_notifier.h3
-rw-r--r--include/net/flow_dissector.h7
-rw-r--r--include/net/flow_offload.h23
-rw-r--r--include/net/fq_impl.h18
-rw-r--r--include/net/genetlink.h34
-rw-r--r--include/net/geneve.h2
-rw-r--r--include/net/ife.h1
-rw-r--r--include/net/ip.h6
-rw-r--r--include/net/ip6_fib.h62
-rw-r--r--include/net/ip6_route.h20
-rw-r--r--include/net/ip_fib.h124
-rw-r--r--include/net/ip_vs.h5
-rw-r--r--include/net/ipv6_frag.h1
-rw-r--r--include/net/ipv6_stubs.h68
-rw-r--r--include/net/lwtunnel.h7
-rw-r--r--include/net/mac80211.h93
-rw-r--r--include/net/ndisc.h40
-rw-r--r--include/net/neighbour.h7
-rw-r--r--include/net/net_namespace.h1
-rw-r--r--include/net/netfilter/ipv4/nf_nat_masquerade.h15
-rw-r--r--include/net/netfilter/ipv6/nf_nat_masquerade.h11
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netfilter/nf_conntrack_expect.h2
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h24
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h6
-rw-r--r--include/net/netfilter/nf_conntrack_timeout.h15
-rw-r--r--include/net/netfilter/nf_nat.h7
-rw-r--r--include/net/netfilter/nf_nat_masquerade.h19
-rw-r--r--include/net/netfilter/nf_queue.h3
-rw-r--r--include/net/netfilter/nf_tables.h24
-rw-r--r--include/net/netlink.h372
-rw-r--r--include/net/netns/conntrack.h6
-rw-r--r--include/net/netns/hash.h10
-rw-r--r--include/net/netns/ipv4.h2
-rw-r--r--include/net/netns/ipv6.h5
-rw-r--r--include/net/netrom.h2
-rw-r--r--include/net/nfc/nci_core.h2
-rw-r--r--include/net/pkt_cls.h33
-rw-r--r--include/net/psample.h1
-rw-r--r--include/net/request_sock.h10
-rw-r--r--include/net/route.h43
-rw-r--r--include/net/rtnh.h (renamed from include/net/nexthop.h)4
-rw-r--r--include/net/sch_generic.h145
-rw-r--r--include/net/sctp/checksum.h2
-rw-r--r--include/net/sctp/command.h1
-rw-r--r--include/net/sctp/sctp.h2
-rw-r--r--include/net/sctp/ulpqueue.h2
-rw-r--r--include/net/sock.h39
-rw-r--r--include/net/tc_act/tc_gact.h2
-rw-r--r--include/net/tc_act/tc_ife.h3
-rw-r--r--include/net/tc_act/tc_police.h70
-rw-r--r--include/net/tcp.h4
-rw-r--r--include/net/tls.h40
-rw-r--r--include/net/udp.h6
-rw-r--r--include/net/udp_tunnel.h2
-rw-r--r--include/net/vxlan.h2
-rw-r--r--include/net/xdp_sock.h1
-rw-r--r--include/net/xfrm.h138
-rw-r--r--include/rdma/ib_cache.h4
-rw-r--r--include/rdma/ib_mad.h4
-rw-r--r--include/rdma/ib_smi.h2
-rw-r--r--include/rdma/ib_umem.h12
-rw-r--r--include/rdma/ib_umem_odp.h1
-rw-r--r--include/rdma/ib_verbs.h430
-rw-r--r--include/rdma/iw_cm.h25
-rw-r--r--include/rdma/opa_port_info.h2
-rw-r--r--include/rdma/opa_smi.h4
-rw-r--r--include/rdma/rdma_vt.h78
-rw-r--r--include/rdma/rdmavt_qp.h89
-rw-r--r--include/rdma/uverbs_std_types.h42
-rw-r--r--include/rdma/uverbs_types.h18
-rw-r--r--include/scsi/libsas.h13
-rw-r--r--include/scsi/osd_attributes.h398
-rw-r--r--include/scsi/osd_protocol.h676
-rw-r--r--include/scsi/osd_sec.h45
-rw-r--r--include/scsi/osd_sense.h263
-rw-r--r--include/scsi/osd_types.h45
-rw-r--r--include/scsi/scsi_host.h6
-rw-r--r--include/scsi/scsi_transport_fc.h18
-rw-r--r--include/soc/at91/atmel-sfr.h34
-rw-r--r--include/soc/rockchip/rk3399_grf.h21
-rw-r--r--include/soc/rockchip/rockchip_sip.h1
-rw-r--r--include/sound/core.h16
-rw-r--r--include/sound/da7219.h8
-rw-r--r--include/sound/hdaudio.h9
-rw-r--r--include/sound/memalloc.h4
-rw-r--r--include/sound/seq_kernel.h3
-rw-r--r--include/sound/simple_card_utils.h238
-rw-r--r--include/sound/soc.h11
-rw-r--r--include/sound/sof.h100
-rw-r--r--include/sound/sof/control.h158
-rw-r--r--include/sound/sof/dai-intel.h178
-rw-r--r--include/sound/sof/dai.h75
-rw-r--r--include/sound/sof/header.h158
-rw-r--r--include/sound/sof/info.h118
-rw-r--r--include/sound/sof/pm.h48
-rw-r--r--include/sound/sof/stream.h148
-rw-r--r--include/sound/sof/topology.h256
-rw-r--r--include/sound/sof/trace.h67
-rw-r--r--include/sound/sof/xtensa.h44
-rw-r--r--include/target/iscsi/iscsi_target_core.h1
-rw-r--r--include/target/target_core_base.h4
-rw-r--r--include/target/target_core_fabric.h1
-rw-r--r--include/trace/bpf_probe.h27
-rw-r--r--include/trace/events/afs.h385
-rw-r--r--include/trace/events/bpf_test_run.h50
-rw-r--r--include/trace/events/btrfs.h245
-rw-r--r--include/trace/events/cgroup.h55
-rw-r--r--include/trace/events/compaction.h10
-rw-r--r--include/trace/events/cpuhp.h4
-rw-r--r--include/trace/events/devfreq.h40
-rw-r--r--include/trace/events/f2fs.h57
-rw-r--r--include/trace/events/fib.h44
-rw-r--r--include/trace/events/fib6.h16
-rw-r--r--include/trace/events/gpio.h4
-rw-r--r--include/trace/events/ib_mad.h390
-rw-r--r--include/trace/events/ib_umad.h126
-rw-r--r--include/trace/events/mlxsw.h2
-rw-r--r--include/trace/events/nbd.h107
-rw-r--r--include/trace/events/net.h23
-rw-r--r--include/trace/events/preemptirq.h2
-rw-r--r--include/trace/events/random.h13
-rw-r--r--include/trace/events/rcu.h4
-rw-r--r--include/trace/events/rpcrdma.h27
-rw-r--r--include/trace/events/spi.h10
-rw-r--r--include/trace/events/sunrpc.h10
-rw-r--r--include/trace/events/syscalls.h2
-rw-r--r--include/trace/events/timer.h17
-rw-r--r--include/trace/events/vmscan.h102
-rw-r--r--include/trace/events/workqueue.h4
-rw-r--r--include/trace/events/writeback.h16
-rw-r--r--include/trace/events/xen.h2
-rw-r--r--include/uapi/asm-generic/sockios.h4
-rw-r--r--include/uapi/drm/amdgpu_drm.h43
-rw-r--r--include/uapi/drm/drm.h37
-rw-r--r--include/uapi/drm/drm_fourcc.h51
-rw-r--r--include/uapi/drm/drm_mode.h4
-rw-r--r--include/uapi/drm/i915_drm.h254
-rw-r--r--include/uapi/drm/lima_drm.h169
-rw-r--r--include/uapi/drm/msm_drm.h14
-rw-r--r--include/uapi/drm/panfrost_drm.h142
-rw-r--r--include/uapi/linux/Kbuild2
-rw-r--r--include/uapi/linux/aspeed-p2a-ctrl.h62
-rw-r--r--include/uapi/linux/audit.h14
-rw-r--r--include/uapi/linux/batadv_packet.h12
-rw-r--r--include/uapi/linux/batman_adv.h25
-rw-r--r--include/uapi/linux/bpf.h550
-rw-r--r--include/uapi/linux/btf.h32
-rw-r--r--include/uapi/linux/btrfs_tree.h2
-rw-r--r--include/uapi/linux/elf-em.h6
-rw-r--r--include/uapi/linux/ethtool.h13
-rw-r--r--include/uapi/linux/fcntl.h2
-rw-r--r--include/uapi/linux/fou.h6
-rw-r--r--include/uapi/linux/fs.h3
-rw-r--r--include/uapi/linux/fuse.h22
-rw-r--r--include/uapi/linux/icmpv6.h4
-rw-r--r--include/uapi/linux/if_ether.h1
-rw-r--r--include/uapi/linux/if_tun.h1
-rw-r--r--include/uapi/linux/if_vlan.h9
-rw-r--r--include/uapi/linux/input-event-codes.h7
-rw-r--r--include/uapi/linux/io_uring.h5
-rw-r--r--include/uapi/linux/ip_vs.h11
-rw-r--r--include/uapi/linux/kfd_ioctl.h12
-rw-r--r--include/uapi/linux/lirc.h2
-rw-r--r--include/uapi/linux/media-bus-format.h3
-rw-r--r--include/uapi/linux/media.h25
-rw-r--r--include/uapi/linux/mei.h67
-rw-r--r--include/uapi/linux/mount.h62
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h12
-rw-r--r--include/uapi/linux/nfs_mount.h9
-rw-r--r--include/uapi/linux/nl80211.h86
-rw-r--r--include/uapi/linux/openvswitch.h46
-rw-r--r--include/uapi/linux/pci_regs.h138
-rw-r--r--include/uapi/linux/pkt_sched.h13
-rw-r--r--include/uapi/linux/psci.h7
-rw-r--r--include/uapi/linux/psp-sev.h18
-rw-r--r--include/uapi/linux/sched.h1
-rw-r--r--include/uapi/linux/sed-opal.h11
-rw-r--r--include/uapi/linux/serial_core.h6
-rw-r--r--include/uapi/linux/sockios.h21
-rw-r--r--include/uapi/linux/spi/spidev.h6
-rw-r--r--include/uapi/linux/switchtec_ioctl.h13
-rw-r--r--include/uapi/linux/tcp.h27
-rw-r--r--include/uapi/linux/tipc.h1
-rw-r--r--include/uapi/linux/tipc_netlink.h2
-rw-r--r--include/uapi/linux/tls.h15
-rw-r--r--include/uapi/linux/v4l2-controls.h8
-rw-r--r--include/uapi/linux/vbox_vmmdev_types.h60
-rw-r--r--include/uapi/linux/vfio.h4
-rw-r--r--include/uapi/linux/vfio_ccw.h12
-rw-r--r--include/uapi/linux/videodev2.h17
-rw-r--r--include/uapi/linux/virtio_gpu.h12
-rw-r--r--include/uapi/misc/habanalabs.h161
-rw-r--r--include/uapi/rdma/efa-abi.h101
-rw-r--r--include/uapi/rdma/mlx5-abi.h3
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h2
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_verbs.h7
-rw-r--r--include/uapi/rdma/rdma_netlink.h31
-rw-r--r--include/uapi/rdma/rdma_user_ioctl_cmds.h1
-rw-r--r--include/uapi/scsi/fc/fc_els.h33
-rw-r--r--include/uapi/sound/asound.h1
-rw-r--r--include/uapi/sound/sof/abi.h62
-rw-r--r--include/uapi/sound/sof/eq.h172
-rw-r--r--include/uapi/sound/sof/fw.h78
-rw-r--r--include/uapi/sound/sof/header.h27
-rw-r--r--include/uapi/sound/sof/manifest.h188
-rw-r--r--include/uapi/sound/sof/tokens.h107
-rw-r--r--include/uapi/sound/sof/tone.h21
-rw-r--r--include/uapi/sound/sof/trace.h66
-rw-r--r--include/video/udlfb.h7
-rw-r--r--include/xen/xen.h4
615 files changed, 16034 insertions, 5936 deletions
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index 30b1ae53689f..c50542dc71e0 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -150,7 +150,10 @@
/* Defaults for debug_level, debug and normal */
+#ifndef ACPI_DEBUG_DEFAULT
#define ACPI_DEBUG_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_EVALUATION | ACPI_LV_REPAIR)
+#endif
+
#define ACPI_NORMAL_DEFAULT (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR)
#define ACPI_DEBUG_ALL (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL)
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 0300374101cd..2a462cf4eaa9 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -91,8 +91,8 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
bool acpi_dev_found(const char *hid);
bool acpi_dev_present(const char *hid, const char *uid, s64 hrv);
-const char *
-acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv);
+struct acpi_device *
+acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv);
#ifdef CONFIG_ACPI
@@ -687,6 +687,10 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
adev->power.states[ACPI_STATE_D3_HOT].flags.explicit_set);
}
+static inline void acpi_dev_put(struct acpi_device *adev)
+{
+ put_device(&adev->dev);
+}
#else /* CONFIG_ACPI */
static inline int register_acpi_bus_type(void *bus) { return 0; }
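A minimal caller-side sketch of the reworked lookup above (hypothetical driver code; the HID string is only an example value): acpi_dev_get_first_match_dev() now returns a referenced struct acpi_device instead of a name string, and the reference is dropped with the new acpi_dev_put() helper.

	#include <linux/acpi.h>

	static int example_lookup(void)
	{
		struct acpi_device *adev;

		/* NULL uid / -1 hrv: match on the HID only */
		adev = acpi_dev_get_first_match_dev("INT33FE", NULL, -1);
		if (!adev)
			return -ENODEV;

		/* ... use adev (properties, resources, ...) ... */

		acpi_dev_put(adev);	/* drop the reference taken by the lookup */
		return 0;
	}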
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 24dbb4e742a6..3b1b1d0e4c33 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20190215
+#define ACPI_CA_VERSION 0x20190405
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index 65cc9cbf1141..d568128025df 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -66,14 +66,14 @@
******************************************************************************/
struct acpi_table_header {
- char signature[ACPI_NAME_SIZE]; /* ASCII table signature */
+ char signature[ACPI_NAMESEG_SIZE]; /* ASCII table signature */
u32 length; /* Length of table in bytes, including this header */
u8 revision; /* ACPI Specification minor version number */
u8 checksum; /* To make sum of entire table == 0 */
char oem_id[ACPI_OEM_ID_SIZE]; /* ASCII OEM identification */
char oem_table_id[ACPI_OEM_TABLE_ID_SIZE]; /* ASCII OEM table identification */
u32 oem_revision; /* OEM revision number */
- char asl_compiler_id[ACPI_NAME_SIZE]; /* ASCII ASL compiler vendor ID */
+ char asl_compiler_id[ACPI_NAMESEG_SIZE]; /* ASCII ASL compiler vendor ID */
u32 asl_compiler_revision; /* ASL compiler version */
};
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index d14037ddf108..22c039ebc6c5 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -1395,7 +1395,7 @@ struct acpi_table_hmat {
/* Values for HMAT structure types */
enum acpi_hmat_type {
- ACPI_HMAT_TYPE_ADDRESS_RANGE = 0, /* Memory subsystem address range */
+ ACPI_HMAT_TYPE_PROXIMITY = 0, /* Memory proximity domain attributes */
ACPI_HMAT_TYPE_LOCALITY = 1, /* System locality latency and bandwidth information */
ACPI_HMAT_TYPE_CACHE = 2, /* Memory side cache information */
ACPI_HMAT_TYPE_RESERVED = 3 /* 3 and greater are reserved */
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index f73382e82c26..ad6892a24015 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -375,7 +375,7 @@ typedef u64 acpi_physical_address;
/* Names within the namespace are 4 bytes long */
-#define ACPI_NAME_SIZE 4
+#define ACPI_NAMESEG_SIZE 4 /* Fixed by ACPI spec */
#define ACPI_PATH_SEGMENT_LENGTH 5 /* 4 chars for name + 1 char for separator */
#define ACPI_PATH_SEPARATOR '.'
@@ -515,11 +515,11 @@ typedef u64 acpi_integer;
/* Optimizations for 4-character (32-bit) acpi_name manipulation */
#ifndef ACPI_MISALIGNMENT_NOT_SUPPORTED
-#define ACPI_COMPARE_NAME(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b)))
-#define ACPI_MOVE_NAME(dest,src) (*ACPI_CAST_PTR (u32, (dest)) = *ACPI_CAST_PTR (u32, (src)))
+#define ACPI_COMPARE_NAMESEG(a,b) (*ACPI_CAST_PTR (u32, (a)) == *ACPI_CAST_PTR (u32, (b)))
+#define ACPI_COPY_NAMESEG(dest,src) (*ACPI_CAST_PTR (u32, (dest)) = *ACPI_CAST_PTR (u32, (src)))
#else
-#define ACPI_COMPARE_NAME(a,b) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAME_SIZE))
-#define ACPI_MOVE_NAME(dest,src) (strncpy (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAME_SIZE))
+#define ACPI_COMPARE_NAMESEG(a,b) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_CAST_PTR (char, (b)), ACPI_NAMESEG_SIZE))
+#define ACPI_COPY_NAMESEG(dest,src) (strncpy (ACPI_CAST_PTR (char, (dest)), ACPI_CAST_PTR (char, (src)), ACPI_NAMESEG_SIZE))
#endif
/* Support for the special RSDP signature (8 characters) */
@@ -529,7 +529,7 @@ typedef u64 acpi_integer;
/* Support for OEMx signature (x can be any character) */
#define ACPI_IS_OEM_SIG(a) (!strncmp (ACPI_CAST_PTR (char, (a)), ACPI_OEM_NAME, 3) &&\
- strnlen (a, ACPI_NAME_SIZE) == ACPI_NAME_SIZE)
+ strnlen (a, ACPI_NAMESEG_SIZE) == ACPI_NAMESEG_SIZE)
/*
* Algorithm to obtain access bit width.
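Purely for illustration, a sketch of how the renamed 4-byte name-segment helpers are used (assumes the usual ACPICA headers; ACPI_SIG_DSDT is the "DSDT" signature from actbl.h):

	static int example_is_dsdt(struct acpi_table_header *table)
	{
		char seg[ACPI_NAMESEG_SIZE];

		/* copies exactly 4 bytes, no NUL terminator is added */
		ACPI_COPY_NAMESEG(seg, table->signature);

		return ACPI_COMPARE_NAMESEG(seg, ACPI_SIG_DSDT);
	}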
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 9ff328fd946a..624b90b34085 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -82,6 +82,11 @@
#define ACPI_NO_ERROR_MESSAGES
#undef ACPI_DEBUG_OUTPUT
+/* Use a specific debugging default separate from ACPICA */
+
+#undef ACPI_DEBUG_DEFAULT
+#define ACPI_DEBUG_DEFAULT (ACPI_LV_INFO | ACPI_LV_REPAIR)
+
/* External interface for __KERNEL__, stub is needed */
#define ACPI_EXTERNAL_RETURN_STATUS(prototype) \
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index fcb61b4659b3..8666fe7f35d7 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -23,7 +23,9 @@
*
* Return:
* 0 - On success
- * <0 - On error
+ * -EFAULT - User access resulted in a page fault
+ * -EAGAIN - Atomic operation was unable to complete due to contention
+ * -ENOSYS - Operation not supported
*/
static inline int
arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
@@ -85,7 +87,9 @@ out_pagefault_enable:
*
* Return:
* 0 - On success
- * <0 - On error
+ * -EFAULT - User access resulted in a page fault
+ * -EAGAIN - Atomic operation was unable to complete due to contention
+ * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
*/
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
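A hedged caller-side sketch of acting on the return codes documented above (the helper is hypothetical; the real handling lives in kernel/futex.c, which also faults the page in before retrying):

	#include <linux/futex.h>	/* FUTEX_OP_ADD */
	#include <asm/futex.h>

	static int example_futex_add_one(u32 __user *uaddr)
	{
		int oldval;
		int ret = arch_futex_atomic_op_inuser(FUTEX_OP_ADD, 1, &oldval, uaddr);

		switch (ret) {
		case 0:
			return oldval;	/* success: previous user-space value */
		case -EFAULT:		/* fault the page in, then retry */
		case -EAGAIN:		/* transient contention: retry */
		default:		/* e.g. -ENOSYS: not supported here */
			return ret;
		}
	}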
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index 71d7b77eea50..822f433ac95c 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -126,4 +126,11 @@ static inline pte_t huge_ptep_get(pte_t *ptep)
}
#endif
+#ifndef __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED
+static inline bool gigantic_page_runtime_supported(void)
+{
+ return IS_ENABLED(CONFIG_ARCH_HAS_GIGANTIC_PAGE);
+}
+#endif /* __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED */
+
#endif /* _ASM_GENERIC_HUGETLB_H */
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 303871651f8a..8f3bf95a36d1 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -19,12 +19,9 @@
#include <asm-generic/iomap.h>
#endif
+#include <asm/mmiowb.h>
#include <asm-generic/pci_iomap.h>
-#ifndef mmiowb
-#define mmiowb() do {} while (0)
-#endif
-
#ifndef __io_br
#define __io_br() barrier()
#endif
@@ -49,7 +46,7 @@
/* serialize device access against a spin_unlock, usually handled there. */
#ifndef __io_aw
-#define __io_aw() barrier()
+#define __io_aw() mmiowb_set_pending()
#endif
#ifndef __io_pbw
diff --git a/include/asm-generic/mmiowb.h b/include/asm-generic/mmiowb.h
new file mode 100644
index 000000000000..9439ff037b2d
--- /dev/null
+++ b/include/asm-generic/mmiowb.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_MMIOWB_H
+#define __ASM_GENERIC_MMIOWB_H
+
+/*
+ * Generic implementation of mmiowb() tracking for spinlocks.
+ *
+ * If your architecture doesn't ensure that writes to an I/O peripheral
+ * within two spinlocked sections on two different CPUs are seen by the
+ * peripheral in the order corresponding to the lock handover, then you
+ * need to follow these FIVE easy steps:
+ *
+ * 1. Implement mmiowb() (and arch_mmiowb_state() if you're fancy)
+ * in asm/mmiowb.h, then #include this file
+ * 2. Ensure your I/O write accessors call mmiowb_set_pending()
+ * 3. Select ARCH_HAS_MMIOWB
+ * 4. Untangle the resulting mess of header files
+ * 5. Complain to your architects
+ */
+#ifdef CONFIG_MMIOWB
+
+#include <linux/compiler.h>
+#include <asm-generic/mmiowb_types.h>
+
+#ifndef arch_mmiowb_state
+#include <asm/percpu.h>
+#include <asm/smp.h>
+
+DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
+#define __mmiowb_state() this_cpu_ptr(&__mmiowb_state)
+#else
+#define __mmiowb_state() arch_mmiowb_state()
+#endif /* arch_mmiowb_state */
+
+static inline void mmiowb_set_pending(void)
+{
+ struct mmiowb_state *ms = __mmiowb_state();
+ ms->mmiowb_pending = ms->nesting_count;
+}
+
+static inline void mmiowb_spin_lock(void)
+{
+ struct mmiowb_state *ms = __mmiowb_state();
+ ms->nesting_count++;
+}
+
+static inline void mmiowb_spin_unlock(void)
+{
+ struct mmiowb_state *ms = __mmiowb_state();
+
+ if (unlikely(ms->mmiowb_pending)) {
+ ms->mmiowb_pending = 0;
+ mmiowb();
+ }
+
+ ms->nesting_count--;
+}
+#else
+#define mmiowb_set_pending() do { } while (0)
+#define mmiowb_spin_lock() do { } while (0)
+#define mmiowb_spin_unlock() do { } while (0)
+#endif /* CONFIG_MMIOWB */
+#endif /* __ASM_GENERIC_MMIOWB_H */
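For illustration, a minimal asm/mmiowb.h that an architecture opting into this tracking might provide. Everything here is a sketch: the guard name and barrier choice are architecture specific, and the architecture must also select ARCH_HAS_MMIOWB so that CONFIG_MMIOWB gets enabled.

	/* arch/<arch>/include/asm/mmiowb.h -- hypothetical example */
	#ifndef _ASM_EXAMPLE_MMIOWB_H
	#define _ASM_EXAMPLE_MMIOWB_H

	#include <asm/barrier.h>

	/* Step 1: order MMIO writes against the lock handover; wmb() is a
	 * stand-in for whatever barrier this architecture really needs.
	 */
	#define mmiowb()	wmb()

	/* Pull in the generic pending/nesting tracking (steps 2-3 are the
	 * I/O accessor hook and the Kconfig select).
	 */
	#include <asm-generic/mmiowb.h>

	#endif /* _ASM_EXAMPLE_MMIOWB_H */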
diff --git a/include/asm-generic/mmiowb_types.h b/include/asm-generic/mmiowb_types.h
new file mode 100644
index 000000000000..8eb0095655e7
--- /dev/null
+++ b/include/asm-generic/mmiowb_types.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_MMIOWB_TYPES_H
+#define __ASM_GENERIC_MMIOWB_TYPES_H
+
+#include <linux/types.h>
+
+struct mmiowb_state {
+ u16 nesting_count;
+ u16 mmiowb_pending;
+};
+
+#endif /* __ASM_GENERIC_MMIOWB_TYPES_H */
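A worked trace of how the two counters above drive the helpers in asm-generic/mmiowb.h, assuming the spinlock core wraps the arch lock ops in mmiowb_spin_lock()/mmiowb_spin_unlock() and the I/O accessors call mmiowb_set_pending() via __io_aw(), as the io.h change earlier in this diff arranges:

	/*
	 * spin_lock(&l);    mmiowb_spin_lock():   nesting_count = 1
	 * writel(v, reg);   mmiowb_set_pending(): mmiowb_pending = 1
	 * spin_unlock(&l);  mmiowb_spin_unlock(): pending != 0 -> mmiowb(),
	 *                   then pending = 0, nesting_count = 0
	 *
	 * spin_lock(&l);    nesting_count = 1
	 * spin_unlock(&l);  pending == 0 -> no barrier, nesting_count = 0
	 */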
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index fa782fba51ee..75d9d68a6de7 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1126,6 +1126,8 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
static inline void init_espfix_bsp(void) { }
#endif
+extern void __init pgd_cache_init(void);
+
#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
{
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
deleted file mode 100644
index 93e67a055a4d..000000000000
--- a/include/asm-generic/rwsem.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_RWSEM_H
-#define _ASM_GENERIC_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
-#endif
-
-#ifdef __KERNEL__
-
-/*
- * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
- * Adapted largely from include/asm-i386/rwsem.h
- * by Paul Mackerras <paulus@samba.org>.
- */
-
-/*
- * the semaphore definition
- */
-#ifdef CONFIG_64BIT
-# define RWSEM_ACTIVE_MASK 0xffffffffL
-#else
-# define RWSEM_ACTIVE_MASK 0x0000ffffL
-#endif
-
-#define RWSEM_UNLOCKED_VALUE 0x00000000L
-#define RWSEM_ACTIVE_BIAS 0x00000001L
-#define RWSEM_WAITING_BIAS (-RWSEM_ACTIVE_MASK-1)
-#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
- if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
- rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_killable(struct rw_semaphore *sem)
-{
- if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
- if (IS_ERR(rwsem_down_read_failed_killable(sem)))
- return -EINTR;
- }
-
- return 0;
-}
-
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
- long tmp;
-
- while ((tmp = atomic_long_read(&sem->count)) >= 0) {
- if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
- tmp + RWSEM_ACTIVE_READ_BIAS)) {
- return 1;
- }
- }
- return 0;
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write(struct rw_semaphore *sem)
-{
- long tmp;
-
- tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
- &sem->count);
- if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
- rwsem_down_write_failed(sem);
-}
-
-static inline int __down_write_killable(struct rw_semaphore *sem)
-{
- long tmp;
-
- tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
- &sem->count);
- if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
- if (IS_ERR(rwsem_down_write_failed_killable(sem)))
- return -EINTR;
- return 0;
-}
-
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
- long tmp;
-
- tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
- RWSEM_ACTIVE_WRITE_BIAS);
- return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
- long tmp;
-
- tmp = atomic_long_dec_return_release(&sem->count);
- if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
- rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
- if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
- &sem->count) < 0))
- rwsem_wake(sem);
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
- long tmp;
-
- /*
- * When downgrading from exclusive to shared ownership,
- * anything inside the write-locked region cannot leak
- * into the read side. In contrast, anything in the
- * read-locked region is ok to be re-ordered into the
- * write side. As such, rely on RELEASE semantics.
- */
- tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
- if (tmp < 0)
- rwsem_downgrade_wake(sem);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_GENERIC_RWSEM_H */
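As a reading aid for the deletion, a brief worked note on the counter encoding the removed code implements, using the 32-bit constants above (a sketch, not new code):

	/*
	 * count == 0x00000000                     unlocked
	 * count == N, 1 <= N <= 0xffff            N active readers (ACTIVE_BIAS each)
	 * count == 0xffff0001                     write locked
	 *                                         (WAITING_BIAS + ACTIVE_BIAS)
	 * count negative, ACTIVE_MASK bits clear  no active holders, but waiters
	 *                                         queued -> rwsem_wake()
	 */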
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index d79abca81a52..d1779d442aa5 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -77,6 +77,20 @@ static inline int arch_is_kernel_data(unsigned long addr)
}
#endif
+/*
+ * Check if an address is part of freed initmem. This is needed on architectures
+ * with virt == phys kernel mapping, for code that wants to check if an address
+ * is part of a static object within [_stext, _end]. After initmem is freed,
+ * memory can be allocated from it, and such allocations would then have
+ * addresses within the range [_stext, _end].
+ */
+#ifndef arch_is_kernel_initmem_freed
+static inline int arch_is_kernel_initmem_freed(unsigned long addr)
+{
+ return 0;
+}
+#endif
+
/**
* memory_contains - checks if an object is contained within a memory region
* @begin: virtual address of the beginning of the memory region
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
index 0c938a4354f6..e06b468a0ebe 100644
--- a/include/asm-generic/syscall.h
+++ b/include/asm-generic/syscall.h
@@ -105,53 +105,43 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
* syscall_get_arguments - extract system call parameter values
* @task: task of interest, must be blocked
* @regs: task_pt_regs() of @task
- * @i: argument index [0,5]
- * @n: number of arguments; n+i must be [1,6].
* @args: array filled with argument values
*
- * Fetches @n arguments to the system call starting with the @i'th argument
- * (from 0 through 5). Argument @i is stored in @args[0], and so on.
- * An arch inline version is probably optimal when @i and @n are constants.
+ * Fetches 6 arguments to the system call. The first argument is stored in
+ * @args[0], and so on.
*
* It's only valid to call this when @task is stopped for tracing on
* entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- * It's invalid to call this with @i + @n > 6; we only support system calls
- * taking up to 6 arguments.
*/
void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
- unsigned int i, unsigned int n, unsigned long *args);
+ unsigned long *args);
/**
* syscall_set_arguments - change system call parameter value
* @task: task of interest, must be in system call entry tracing
* @regs: task_pt_regs() of @task
- * @i: argument index [0,5]
- * @n: number of arguments; n+i must be [1,6].
* @args: array of argument values to store
*
- * Changes @n arguments to the system call starting with the @i'th argument.
- * Argument @i gets value @args[0], and so on.
- * An arch inline version is probably optimal when @i and @n are constants.
+ * Changes 6 arguments to the system call.
+ * The first argument gets value @args[0], and so on.
*
* It's only valid to call this when @task is stopped for tracing on
* entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- * It's invalid to call this with @i + @n > 6; we only support system calls
- * taking up to 6 arguments.
*/
void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
- unsigned int i, unsigned int n,
const unsigned long *args);
/**
* syscall_get_arch - return the AUDIT_ARCH for the current system call
+ * @task: task of interest, must be blocked
*
* Returns the AUDIT_ARCH_* based on the system call convention in use.
*
- * It's only valid to call this when current is stopped on entry to a system
+ * It's only valid to call this when @task is stopped on entry to a system
* call, due to %TIF_SYSCALL_TRACE, %TIF_SYSCALL_AUDIT, or %TIF_SECCOMP.
*
* Architectures which permit CONFIG_HAVE_ARCH_SECCOMP_FILTER must
* provide an implementation of this.
*/
-int syscall_get_arch(void);
+int syscall_get_arch(struct task_struct *task);
#endif /* _ASM_SYSCALL_H */
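A caller-side sketch of the new prototypes (the tracing helper itself is hypothetical): the index/count pair is gone, callers always receive all six arguments, and syscall_get_arch() now takes the traced task explicitly.

	#include <linux/sched.h>
	#include <asm/syscall.h>

	static void example_trace_entry(struct task_struct *task, struct pt_regs *regs)
	{
		unsigned long args[6];

		syscall_get_arguments(task, regs, args);	/* fills args[0..5] */

		pr_debug("syscall arch %#x, arg0 %#lx\n",
			 syscall_get_arch(task), args[0]);
	}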
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 6be86c1c5c58..480e5b2a5748 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -19,9 +19,140 @@
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Blindly accessing user memory from NMI context can be dangerous
+ * if we're in the middle of switching the current user task or switching
+ * the loaded mm.
+ */
+#ifndef nmi_uaccess_okay
+# define nmi_uaccess_okay() true
+#endif
#ifdef CONFIG_MMU
+/*
+ * Generic MMU-gather implementation.
+ *
+ * The mmu_gather data structure is used by the mm code to implement the
+ * correct and efficient ordering of freeing pages and TLB invalidations.
+ *
+ * This correct ordering is:
+ *
+ * 1) unhook page
+ * 2) TLB invalidate page
+ * 3) free page
+ *
+ * That is, we must never free a page before we have ensured there are no live
+ * translations left to it. Otherwise it might be possible to observe (or
+ * worse, change) the page content after it has been reused.
+ *
+ * The mmu_gather API consists of:
+ *
+ * - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
+ *
+ * Finish in particular will issue a (final) TLB invalidate and free
+ * all (remaining) queued pages.
+ *
+ * - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
+ *
+ * Defaults to flushing at tlb_end_vma() to reset the range; helps when
+ * there are large holes between the VMAs.
+ *
+ * - tlb_remove_page() / __tlb_remove_page()
+ * - tlb_remove_page_size() / __tlb_remove_page_size()
+ *
+ * __tlb_remove_page_size() is the basic primitive that queues a page for
+ * freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
+ * boolean indicating if the queue is (now) full and a call to
+ * tlb_flush_mmu() is required.
+ *
+ * tlb_remove_page() and tlb_remove_page_size() imply the call to
+ * tlb_flush_mmu() when required and has no return value.
+ *
+ * - tlb_change_page_size()
+ *
+ * call before __tlb_remove_page*() to set the current page-size; implies a
+ * possible tlb_flush_mmu() call.
+ *
+ * - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
+ *
+ * tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
+ * related state, like the range)
+ *
+ * tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
+ * whatever pages are still batched.
+ *
+ * - mmu_gather::fullmm
+ *
+ * A flag set by tlb_gather_mmu() to indicate we're going to free
+ * the entire mm; this allows a number of optimizations.
+ *
+ * - We can ignore tlb_{start,end}_vma(); because we don't
+ * care about ranges. Everything will be shot down.
+ *
+ * - (RISC) architectures that use ASIDs can cycle to a new ASID
+ * and delay the invalidation until ASID space runs out.
+ *
+ * - mmu_gather::need_flush_all
+ *
+ * A flag that can be set by the arch code if it wants to force
+ * flush the entire TLB irrespective of the range. For instance
+ * x86-PAE needs this when changing top-level entries.
+ *
+ * And allows the architecture to provide and implement tlb_flush():
+ *
+ * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
+ * use of:
+ *
+ * - mmu_gather::start / mmu_gather::end
+ *
+ * which provides the range that needs to be flushed to cover the pages to
+ * be freed.
+ *
+ * - mmu_gather::freed_tables
+ *
+ * set when we freed page table pages
+ *
+ * - tlb_get_unmap_shift() / tlb_get_unmap_size()
+ *
+ * returns the smallest TLB entry size unmapped in this range.
+ *
+ * If an architecture does not provide tlb_flush() a default implementation
+ * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
+ * specified, in which case we'll default to flush_tlb_mm().
+ *
+ * Additionally there are a few opt-in features:
+ *
+ * HAVE_MMU_GATHER_PAGE_SIZE
+ *
+ * This ensures we call tlb_flush() every time tlb_change_page_size() actually
+ * changes the size and provides mmu_gather::page_size to tlb_flush().
+ *
+ * HAVE_RCU_TABLE_FREE
+ *
+ * This provides tlb_remove_table(), to be used instead of tlb_remove_page()
+ * for page directories (__p*_free_tlb()). This provides separate freeing of
+ * the page-table pages themselves in a semi-RCU fashion (see comment below).
+ * Useful if your architecture doesn't use IPIs for remote TLB invalidates
+ * and therefore doesn't naturally serialize with software page-table walkers.
+ *
+ * When used, an architecture is expected to provide __tlb_remove_table()
+ * which does the actual freeing of these pages.
+ *
+ * HAVE_RCU_TABLE_NO_INVALIDATE
+ *
+ * This makes HAVE_RCU_TABLE_FREE avoid calling tlb_flush_mmu_tlbonly() before
+ * freeing the page-table pages. This can be avoided if you use
+ * HAVE_RCU_TABLE_FREE and your architecture does _NOT_ use the Linux
+ * page-tables natively.
+ *
+ * MMU_GATHER_NO_RANGE
+ *
+ * Use this if your architecture lacks an efficient flush_tlb_range().
+ */
+
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
* Semi RCU freeing of the page directories.
@@ -60,11 +191,11 @@ struct mmu_table_batch {
#define MAX_TABLE_BATCH \
((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
-extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
#endif
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
/*
* If we can't allocate a page to make a big batch of page pointers
* to work on, then just handle a few from the on-stack structure.
@@ -89,14 +220,21 @@ struct mmu_gather_batch {
*/
#define MAX_GATHER_BATCH_COUNT (10000UL/MAX_GATHER_BATCH)
-/* struct mmu_gather is an opaque type used by the mm code for passing around
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+ int page_size);
+#endif
+
+/*
+ * struct mmu_gather is an opaque type used by the mm code for passing around
* any data needed by arch specific code for tlb_remove_page.
*/
struct mmu_gather {
struct mm_struct *mm;
+
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
struct mmu_table_batch *batch;
#endif
+
unsigned long start;
unsigned long end;
/*
@@ -124,23 +262,30 @@ struct mmu_gather {
unsigned int cleared_puds : 1;
unsigned int cleared_p4ds : 1;
+ /*
+ * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
+ */
+ unsigned int vma_exec : 1;
+ unsigned int vma_huge : 1;
+
+ unsigned int batch_count;
+
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
struct mmu_gather_batch *active;
struct mmu_gather_batch local;
struct page *__pages[MMU_GATHER_BUNDLE];
- unsigned int batch_count;
- int page_size;
-};
-#define HAVE_GENERIC_MMU_GATHER
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+ unsigned int page_size;
+#endif
+#endif
+};
void arch_tlb_gather_mmu(struct mmu_gather *tlb,
struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end, bool force);
-void tlb_flush_mmu_free(struct mmu_gather *tlb);
-extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
- int page_size);
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
unsigned long address,
@@ -163,8 +308,94 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
tlb->cleared_pmds = 0;
tlb->cleared_puds = 0;
tlb->cleared_p4ds = 0;
+ /*
+ * Do not reset mmu_gather::vma_* fields here, we do not
+ * call into tlb_start_vma() again to set them if there is an
+ * intermediate flush.
+ */
+}
+
+#ifdef CONFIG_MMU_GATHER_NO_RANGE
+
+#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
+#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
+#endif
+
+/*
+ * When an architecture does not have efficient means of range flushing TLBs
+ * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
+ * range small. We equally don't have to worry about page granularity or other
+ * things.
+ *
+ * All we need to do is issue a full flush for any !0 range.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+ if (tlb->end)
+ flush_tlb_mm(tlb->mm);
+}
+
+static inline void
+tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+
+#define tlb_end_vma tlb_end_vma
+static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+
+#else /* CONFIG_MMU_GATHER_NO_RANGE */
+
+#ifndef tlb_flush
+
+#if defined(tlb_start_vma) || defined(tlb_end_vma)
+#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
+#endif
+
+/*
+ * When an architecture does not provide its own tlb_flush() implementation
+ * but does have a reasonably efficient flush_tlb_range() implementation,
+ * use that.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+ if (tlb->fullmm || tlb->need_flush_all) {
+ flush_tlb_mm(tlb->mm);
+ } else if (tlb->end) {
+ struct vm_area_struct vma = {
+ .vm_mm = tlb->mm,
+ .vm_flags = (tlb->vma_exec ? VM_EXEC : 0) |
+ (tlb->vma_huge ? VM_HUGETLB : 0),
+ };
+
+ flush_tlb_range(&vma, tlb->start, tlb->end);
+ }
+}
+
+static inline void
+tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+ /*
+ * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
+ * mips-4k) flush only large pages.
+ *
+ * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
+ * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
+ * range.
+ *
+ * We rely on tlb_end_vma() to issue a flush, such that when we reset
+ * these values the batch is empty.
+ */
+ tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB);
+ tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}
+#else
+
+static inline void
+tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+
+#endif
+
+#endif /* CONFIG_MMU_GATHER_NO_RANGE */
+
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
if (!tlb->end)
@@ -196,21 +427,18 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
-#ifndef tlb_remove_check_page_size_change
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+static inline void tlb_change_page_size(struct mmu_gather *tlb,
unsigned int page_size)
{
- /*
- * We don't care about page size change, just update
- * mmu_gather page size here so that debug checks
- * doesn't throw false warning.
- */
-#ifdef CONFIG_DEBUG_VM
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+ if (tlb->page_size && tlb->page_size != page_size) {
+ if (!tlb->fullmm)
+ tlb_flush_mmu(tlb);
+ }
+
tlb->page_size = page_size;
#endif
}
-#endif
static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
@@ -237,17 +465,30 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
* the vmas are adjusted to only cover the region to be torn down.
*/
#ifndef tlb_start_vma
-#define tlb_start_vma(tlb, vma) do { } while (0)
-#endif
+static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+ if (tlb->fullmm)
+ return;
-#define __tlb_end_vma(tlb, vma) \
- do { \
- if (!tlb->fullmm) \
- tlb_flush_mmu_tlbonly(tlb); \
- } while (0)
+ tlb_update_vma_flags(tlb, vma);
+ flush_cache_range(vma, vma->vm_start, vma->vm_end);
+}
+#endif
#ifndef tlb_end_vma
-#define tlb_end_vma __tlb_end_vma
+static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+ if (tlb->fullmm)
+ return;
+
+ /*
+ * Do a TLB flush and reset the range at VMA boundaries; this avoids
+ * the ranges growing with the unused space between consecutive VMAs,
+ * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
+ * this.
+ */
+ tlb_flush_mmu_tlbonly(tlb);
+}
#endif
#ifndef __tlb_remove_tlb_entry
@@ -372,6 +613,4 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
#endif /* CONFIG_MMU */
-#define tlb_migrate_finish(mm) do {} while (0)
-
#endif /* _ASM_GENERIC__TLB_H */
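To tie the new comment block together, a simplified sketch of the documented unhook -> invalidate -> free ordering, modelled loosely on how the core mm unmap path drives this API (the helper is hypothetical and the page-table walk is elided):

	#include <linux/mm.h>
	#include <asm/tlb.h>

	static void example_unmap_vma(struct vm_area_struct *vma,
				      unsigned long start, unsigned long end)
	{
		struct mmu_gather tlb;

		tlb_gather_mmu(&tlb, vma->vm_mm, start, end);
		tlb_start_vma(&tlb, vma);	/* cache flush + vma_exec/vma_huge */

		/*
		 * 1) unhook pages: for each present pte in [start, end)
		 *	tlb_remove_tlb_entry(&tlb, pte, addr);
		 *	tlb_remove_page(&tlb, page);	// may flush if the batch fills
		 */

		tlb_end_vma(&tlb, vma);		/* 2) TLB invalidate, reset the range */
		tlb_finish_mmu(&tlb, start, end);	/* 3) final flush + free batches */
	}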
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index f8f6f04c4453..bbb9e332f2fe 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -844,6 +844,7 @@
EXIT_CALL \
*(.discard) \
*(.discard.*) \
+ *(.modinfo) \
}
/**
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
index 852eaa9cd4db..0fdb542c70cd 100644
--- a/include/crypto/aes.h
+++ b/include/crypto/aes.h
@@ -28,10 +28,10 @@ struct crypto_aes_ctx {
u32 key_length;
};
-extern const u32 crypto_ft_tab[4][256];
-extern const u32 crypto_fl_tab[4][256];
-extern const u32 crypto_it_tab[4][256];
-extern const u32 crypto_il_tab[4][256];
+extern const u32 crypto_ft_tab[4][256] ____cacheline_aligned;
+extern const u32 crypto_fl_tab[4][256] ____cacheline_aligned;
+extern const u32 crypto_it_tab[4][256] ____cacheline_aligned;
+extern const u32 crypto_il_tab[4][256] ____cacheline_aligned;
int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len);
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index 2d690494568c..8884046659a0 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -19,14 +19,20 @@
*
* @base: Common attributes for async crypto requests
* @src: Source data
- * @dst: Destination data
+ * For verify op this is signature + digest, in that case
+ * total size of @src is @src_len + @dst_len.
+ * @dst: Destination data (Should be NULL for verify op)
* @src_len: Size of the input buffer
- * @dst_len: Size of the output buffer. It needs to be at least
- * as big as the expected result depending on the operation
+ * For verify op it is the size of the signature part of @src; this is
+ * the part that the cipher operates on.
+ * @dst_len: Size of @dst buffer (for all ops except verify).
+ * It needs to be at least as big as the expected result
+ * depending on the operation.
* After operation it will be updated with the actual size of the
* result.
* In case of error where the dst sgl size was insufficient,
* it will be updated to the size required for the operation.
+ * For verify op this is size of digest part in @src.
* @__ctx: Start of private context data
*/
struct akcipher_request {
@@ -55,10 +61,9 @@ struct crypto_akcipher {
* algorithm. In case of error, where the dst_len was insufficient,
* the req->dst_len will be updated to the size required for the
* operation
- * @verify: Function performs a sign operation as defined by public key
- * algorithm. In case of error, where the dst_len was insufficient,
- * the req->dst_len will be updated to the size required for the
- * operation
+ * @verify: Function performs a complete verify operation as defined by the
+ * public key algorithm, returning the verification status. Requires
+ * the digest value as an input parameter.
* @encrypt: Function performs an encrypt operation as defined by public key
* algorithm. In case of error, where the dst_len was insufficient,
* the req->dst_len will be updated to the size required for the
@@ -69,10 +74,10 @@ struct crypto_akcipher {
* operation
* @set_pub_key: Function invokes the algorithm specific set public key
* function, which knows how to decode and interpret
- * the BER encoded public key
+ * the BER encoded public key and parameters
* @set_priv_key: Function invokes the algorithm specific set private key
* function, which knows how to decode and interpret
- * the BER encoded private key
+ * the BER encoded private key and parameters
* @max_size: Function returns dest buffer size required for a given key.
* @init: Initialize the cryptographic transformation object.
* This function is used to initialize the cryptographic
@@ -238,9 +243,10 @@ static inline void akcipher_request_set_callback(struct akcipher_request *req,
*
* @req: public key request
* @src: ptr to input scatter list
- * @dst: ptr to output scatter list
+ * @dst: ptr to output scatter list or NULL for verify op
* @src_len: size of the src input scatter list to be processed
- * @dst_len: size of the dst output scatter list
+ * @dst_len: size of the dst output scatter list or size of signature
+ * portion in @src for verify op
*/
static inline void akcipher_request_set_crypt(struct akcipher_request *req,
struct scatterlist *src,
@@ -343,14 +349,18 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req)
}
/**
- * crypto_akcipher_verify() - Invoke public key verify operation
+ * crypto_akcipher_verify() - Invoke public key signature verification
*
- * Function invokes the specific public key verify operation for a given
- * public key algorithm
+ * Function invokes the specific public key signature verification operation
+ * for a given public key algorithm.
*
* @req: asymmetric key request
*
- * Return: zero on success; error code in case of error
+ * Note: req->dst should be NULL, req->src should point to an SG of size
+ * (req->src_len + req->dst_len), containing the signature (of req->src_len
+ * length) with the digest appended (of req->dst_len length).
+ *
+ * Return: zero on verification success; error code in case of error.
*/
static inline int crypto_akcipher_verify(struct akcipher_request *req)
{
@@ -369,11 +379,12 @@ static inline int crypto_akcipher_verify(struct akcipher_request *req)
* crypto_akcipher_set_pub_key() - Invoke set public key operation
*
* Function invokes the algorithm specific set key function, which knows
- * how to decode and interpret the encoded key
+ * how to decode and interpret the encoded key and parameters
*
* @tfm: tfm handle
- * @key: BER encoded public key
- * @keylen: length of the key
+ * @key: BER encoded public key, algo OID, paramlen, BER encoded
+ * parameters
+ * @keylen: length of the key (not including other data)
*
* Return: zero on success; error code in case of error
*/
@@ -390,11 +401,12 @@ static inline int crypto_akcipher_set_pub_key(struct crypto_akcipher *tfm,
* crypto_akcipher_set_priv_key() - Invoke set private key operation
*
* Function invokes the algorithm specific set key function, which knows
- * how to decode and interpret the encoded key
+ * how to decode and interpret the encoded key and parameters
*
* @tfm: tfm handle
- * @key: BER encoded private key
- * @keylen: length of the key
+ * @key: BER encoded private key, algo OID, paramlen, BER encoded
+ * parameters
+ * @keylen: length of the key (not including other data)
*
* Return: zero on success; error code in case of error
*/
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index 1e64f354c2b8..23169f4d87e6 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -18,27 +18,11 @@
#include <crypto/hash.h>
#include <crypto/skcipher.h>
-struct cryptd_ablkcipher {
- struct crypto_ablkcipher base;
-};
-
-static inline struct cryptd_ablkcipher *__cryptd_ablkcipher_cast(
- struct crypto_ablkcipher *tfm)
-{
- return (struct cryptd_ablkcipher *)tfm;
-}
-
-/* alg_name should be algorithm to be cryptd-ed */
-struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
- u32 type, u32 mask);
-struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm);
-bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm);
-void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm);
-
struct cryptd_skcipher {
struct crypto_skcipher base;
};
+/* alg_name should be algorithm to be cryptd-ed */
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
u32 type, u32 mask);
struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm);
diff --git a/include/crypto/des.h b/include/crypto/des.h
index d4094d58ac54..72c7c8e5a5a7 100644
--- a/include/crypto/des.h
+++ b/include/crypto/des.h
@@ -6,6 +6,11 @@
#ifndef __CRYPTO_DES_H
#define __CRYPTO_DES_H
+#include <crypto/skcipher.h>
+#include <linux/compiler.h>
+#include <linux/fips.h>
+#include <linux/string.h>
+
#define DES_KEY_SIZE 8
#define DES_EXPKEY_WORDS 32
#define DES_BLOCK_SIZE 8
@@ -14,6 +19,44 @@
#define DES3_EDE_EXPKEY_WORDS (3 * DES_EXPKEY_WORDS)
#define DES3_EDE_BLOCK_SIZE DES_BLOCK_SIZE
+static inline int __des3_verify_key(u32 *flags, const u8 *key)
+{
+ int err = -EINVAL;
+ u32 K[6];
+
+ memcpy(K, key, DES3_EDE_KEY_SIZE);
+
+ if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
+ !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
+ (fips_enabled ||
+ (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)))
+ goto bad;
+
+ if (unlikely(!((K[0] ^ K[4]) | (K[1] ^ K[5]))) && fips_enabled)
+ goto bad;
+
+ err = 0;
+
+out:
+ memzero_explicit(K, DES3_EDE_KEY_SIZE);
+
+ return err;
+
+bad:
+ *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ goto out;
+}
+
+static inline int des3_verify_key(struct crypto_skcipher *tfm, const u8 *key)
+{
+ u32 flags;
+ int err;
+
+ flags = crypto_skcipher_get_flags(tfm);
+ err = __des3_verify_key(&flags, key);
+ crypto_skcipher_set_flags(tfm, flags);
+ return err;
+}
extern unsigned long des_ekey(u32 *pe, const u8 *k);
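
A short, hypothetical sketch (not from the patch) of how a driver's 3DES setkey might use the new helper above: reject weak or degenerate keys before programming or expanding the key. The key-expansion step is deliberately left as a placeholder.

/* Illustrative only. */
static int example_des3_setkey(struct crypto_skcipher *tfm,
			       const u8 *key, unsigned int keylen)
{
	int err;

	if (keylen != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	err = des3_verify_key(tfm, key);	/* sets CRYPTO_TFM_RES_WEAK_KEY on failure */
	if (err)
		return err;

	/* ... expand the key or program the hardware here ... */
	return 0;
}
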
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 3b31c1b349ae..d21bea2c4382 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -146,8 +146,6 @@ struct ahash_alg {
struct shash_desc {
struct crypto_shash *tfm;
- u32 flags;
-
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
@@ -819,6 +817,7 @@ static inline void *shash_desc_ctx(struct shash_desc *desc)
* cipher handle must point to a keyed message digest cipher in order for this
* function to succeed.
*
+ * Context: Any context.
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
@@ -835,6 +834,7 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
* crypto_shash_update and crypto_shash_final. The parameters have the same
* meaning as discussed for those separate three functions.
*
+ * Context: Any context.
* Return: 0 if the message digest creation was successful; < 0 if an error
* occurred
*/
@@ -850,6 +850,7 @@ int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
* caller-allocated output buffer out which must have sufficient size (e.g. by
* calling crypto_shash_descsize).
*
+ * Context: Any context.
* Return: 0 if the export creation was successful; < 0 if an error occurred
*/
static inline int crypto_shash_export(struct shash_desc *desc, void *out)
@@ -866,6 +867,7 @@ static inline int crypto_shash_export(struct shash_desc *desc, void *out)
* the input buffer. That buffer should have been generated with the
* crypto_ahash_export function.
*
+ * Context: Any context.
* Return: 0 if the import was successful; < 0 if an error occurred
*/
static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
@@ -886,6 +888,7 @@ static inline int crypto_shash_import(struct shash_desc *desc, const void *in)
* operational state handle. Any potentially existing state created by
* previous operations is discarded.
*
+ * Context: Any context.
* Return: 0 if the message digest initialization was successful; < 0 if an
* error occurred
*/
@@ -907,6 +910,7 @@ static inline int crypto_shash_init(struct shash_desc *desc)
*
* Updates the message digest state of the operational state handle.
*
+ * Context: Any context.
* Return: 0 if the message digest update was successful; < 0 if an error
* occurred
*/
@@ -923,6 +927,7 @@ int crypto_shash_update(struct shash_desc *desc, const u8 *data,
* into the output buffer. The caller must ensure that the output buffer is
* large enough by using crypto_shash_digestsize.
*
+ * Context: Any context.
* Return: 0 if the message digest creation was successful; < 0 if an error
* occurred
*/
@@ -939,6 +944,7 @@ int crypto_shash_final(struct shash_desc *desc, u8 *out);
* crypto_shash_update and crypto_shash_final. The parameters have the same
* meaning as discussed for those separate functions.
*
+ * Context: Any context.
* Return: 0 if the message digest creation was successful; < 0 if an error
* occurred
*/
diff --git a/include/crypto/internal/simd.h b/include/crypto/internal/simd.h
index f18344518e32..d2316242a988 100644
--- a/include/crypto/internal/simd.h
+++ b/include/crypto/internal/simd.h
@@ -6,6 +6,11 @@
#ifndef _CRYPTO_INTERNAL_SIMD_H
#define _CRYPTO_INTERNAL_SIMD_H
+#include <linux/percpu.h>
+#include <linux/types.h>
+
+/* skcipher support */
+
struct simd_skcipher_alg;
struct skcipher_alg;
@@ -22,4 +27,43 @@ int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
struct simd_skcipher_alg **simd_algs);
+/* AEAD support */
+
+struct simd_aead_alg;
+struct aead_alg;
+
+struct simd_aead_alg *simd_aead_create_compat(const char *algname,
+ const char *drvname,
+ const char *basename);
+struct simd_aead_alg *simd_aead_create(const char *algname,
+ const char *basename);
+void simd_aead_free(struct simd_aead_alg *alg);
+
+int simd_register_aeads_compat(struct aead_alg *algs, int count,
+ struct simd_aead_alg **simd_algs);
+
+void simd_unregister_aeads(struct aead_alg *algs, int count,
+ struct simd_aead_alg **simd_algs);
+
+/*
+ * crypto_simd_usable() - is it allowed at this time to use SIMD instructions or
+ * access the SIMD register file?
+ *
+ * This delegates to may_use_simd(), except that this also returns false if SIMD
+ * in crypto code has been temporarily disabled on this CPU by the crypto
+ * self-tests, in order to test the no-SIMD fallback code. This override is
+ * currently limited to configurations where the extra self-tests are enabled,
+ * because it might be a bit too invasive to be part of the regular self-tests.
+ *
+ * This is a macro so that <asm/simd.h>, which some architectures don't have,
+ * doesn't have to be included directly here.
+ */
+#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
+DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test);
+#define crypto_simd_usable() \
+ (may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test))
+#else
+#define crypto_simd_usable() may_use_simd()
+#endif
+
#endif /* _CRYPTO_INTERNAL_SIMD_H */
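
The typical call pattern for crypto_simd_usable() looks roughly like the sketch below (illustrative, not part of the patch). kernel_fpu_begin()/kernel_fpu_end() are the x86 spelling of the SIMD begin/end primitives, and scalar_fallback()/simd_fast_path() are hypothetical driver functions.

/* Illustrative only: fall back to a scalar implementation when SIMD is
 * not allowed here, or has been disabled for the extra self-tests.
 */
static void example_transform(u8 *dst, const u8 *src, unsigned int len)
{
	if (!crypto_simd_usable()) {
		scalar_fallback(dst, src, len);		/* hypothetical fallback */
		return;
	}

	kernel_fpu_begin();
	simd_fast_path(dst, src, len);			/* hypothetical SIMD path */
	kernel_fpu_end();
}
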
diff --git a/include/crypto/morus1280_glue.h b/include/crypto/morus1280_glue.h
index ad2aa743dd99..5cefddb1991f 100644
--- a/include/crypto/morus1280_glue.h
+++ b/include/crypto/morus1280_glue.h
@@ -47,16 +47,7 @@ int crypto_morus1280_glue_setauthsize(struct crypto_aead *tfm,
int crypto_morus1280_glue_encrypt(struct aead_request *req);
int crypto_morus1280_glue_decrypt(struct aead_request *req);
-int cryptd_morus1280_glue_setkey(struct crypto_aead *aead, const u8 *key,
- unsigned int keylen);
-int cryptd_morus1280_glue_setauthsize(struct crypto_aead *aead,
- unsigned int authsize);
-int cryptd_morus1280_glue_encrypt(struct aead_request *req);
-int cryptd_morus1280_glue_decrypt(struct aead_request *req);
-int cryptd_morus1280_glue_init_tfm(struct crypto_aead *aead);
-void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead);
-
-#define MORUS1280_DECLARE_ALGS(id, driver_name, priority) \
+#define MORUS1280_DECLARE_ALG(id, driver_name, priority) \
static const struct morus1280_glue_ops crypto_morus1280_##id##_ops = {\
.init = crypto_morus1280_##id##_init, \
.ad = crypto_morus1280_##id##_ad, \
@@ -77,55 +68,29 @@ void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead);
{ \
} \
\
- static struct aead_alg crypto_morus1280_##id##_algs[] = {\
- { \
- .setkey = crypto_morus1280_glue_setkey, \
- .setauthsize = crypto_morus1280_glue_setauthsize, \
- .encrypt = crypto_morus1280_glue_encrypt, \
- .decrypt = crypto_morus1280_glue_decrypt, \
- .init = crypto_morus1280_##id##_init_tfm, \
- .exit = crypto_morus1280_##id##_exit_tfm, \
- \
- .ivsize = MORUS_NONCE_SIZE, \
- .maxauthsize = MORUS_MAX_AUTH_SIZE, \
- .chunksize = MORUS1280_BLOCK_SIZE, \
- \
- .base = { \
- .cra_flags = CRYPTO_ALG_INTERNAL, \
- .cra_blocksize = 1, \
- .cra_ctxsize = sizeof(struct morus1280_ctx), \
- .cra_alignmask = 0, \
- \
- .cra_name = "__morus1280", \
- .cra_driver_name = "__"driver_name, \
- \
- .cra_module = THIS_MODULE, \
- } \
- }, { \
- .setkey = cryptd_morus1280_glue_setkey, \
- .setauthsize = cryptd_morus1280_glue_setauthsize, \
- .encrypt = cryptd_morus1280_glue_encrypt, \
- .decrypt = cryptd_morus1280_glue_decrypt, \
- .init = cryptd_morus1280_glue_init_tfm, \
- .exit = cryptd_morus1280_glue_exit_tfm, \
+ static struct aead_alg crypto_morus1280_##id##_alg = { \
+ .setkey = crypto_morus1280_glue_setkey, \
+ .setauthsize = crypto_morus1280_glue_setauthsize, \
+ .encrypt = crypto_morus1280_glue_encrypt, \
+ .decrypt = crypto_morus1280_glue_decrypt, \
+ .init = crypto_morus1280_##id##_init_tfm, \
+ .exit = crypto_morus1280_##id##_exit_tfm, \
+ \
+ .ivsize = MORUS_NONCE_SIZE, \
+ .maxauthsize = MORUS_MAX_AUTH_SIZE, \
+ .chunksize = MORUS1280_BLOCK_SIZE, \
+ \
+ .base = { \
+ .cra_flags = CRYPTO_ALG_INTERNAL, \
+ .cra_blocksize = 1, \
+ .cra_ctxsize = sizeof(struct morus1280_ctx), \
+ .cra_alignmask = 0, \
+ .cra_priority = priority, \
\
- .ivsize = MORUS_NONCE_SIZE, \
- .maxauthsize = MORUS_MAX_AUTH_SIZE, \
- .chunksize = MORUS1280_BLOCK_SIZE, \
+ .cra_name = "__morus1280", \
+ .cra_driver_name = "__"driver_name, \
\
- .base = { \
- .cra_flags = CRYPTO_ALG_ASYNC, \
- .cra_blocksize = 1, \
- .cra_ctxsize = sizeof(struct crypto_aead *), \
- .cra_alignmask = 0, \
- \
- .cra_priority = priority, \
- \
- .cra_name = "morus1280", \
- .cra_driver_name = driver_name, \
- \
- .cra_module = THIS_MODULE, \
- } \
+ .cra_module = THIS_MODULE, \
} \
}
diff --git a/include/crypto/morus640_glue.h b/include/crypto/morus640_glue.h
index df8e1103ff94..0ee6266cb26c 100644
--- a/include/crypto/morus640_glue.h
+++ b/include/crypto/morus640_glue.h
@@ -47,16 +47,7 @@ int crypto_morus640_glue_setauthsize(struct crypto_aead *tfm,
int crypto_morus640_glue_encrypt(struct aead_request *req);
int crypto_morus640_glue_decrypt(struct aead_request *req);
-int cryptd_morus640_glue_setkey(struct crypto_aead *aead, const u8 *key,
- unsigned int keylen);
-int cryptd_morus640_glue_setauthsize(struct crypto_aead *aead,
- unsigned int authsize);
-int cryptd_morus640_glue_encrypt(struct aead_request *req);
-int cryptd_morus640_glue_decrypt(struct aead_request *req);
-int cryptd_morus640_glue_init_tfm(struct crypto_aead *aead);
-void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead);
-
-#define MORUS640_DECLARE_ALGS(id, driver_name, priority) \
+#define MORUS640_DECLARE_ALG(id, driver_name, priority) \
static const struct morus640_glue_ops crypto_morus640_##id##_ops = {\
.init = crypto_morus640_##id##_init, \
.ad = crypto_morus640_##id##_ad, \
@@ -77,55 +68,29 @@ void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead);
{ \
} \
\
- static struct aead_alg crypto_morus640_##id##_algs[] = {\
- { \
- .setkey = crypto_morus640_glue_setkey, \
- .setauthsize = crypto_morus640_glue_setauthsize, \
- .encrypt = crypto_morus640_glue_encrypt, \
- .decrypt = crypto_morus640_glue_decrypt, \
- .init = crypto_morus640_##id##_init_tfm, \
- .exit = crypto_morus640_##id##_exit_tfm, \
- \
- .ivsize = MORUS_NONCE_SIZE, \
- .maxauthsize = MORUS_MAX_AUTH_SIZE, \
- .chunksize = MORUS640_BLOCK_SIZE, \
- \
- .base = { \
- .cra_flags = CRYPTO_ALG_INTERNAL, \
- .cra_blocksize = 1, \
- .cra_ctxsize = sizeof(struct morus640_ctx), \
- .cra_alignmask = 0, \
- \
- .cra_name = "__morus640", \
- .cra_driver_name = "__"driver_name, \
- \
- .cra_module = THIS_MODULE, \
- } \
- }, { \
- .setkey = cryptd_morus640_glue_setkey, \
- .setauthsize = cryptd_morus640_glue_setauthsize, \
- .encrypt = cryptd_morus640_glue_encrypt, \
- .decrypt = cryptd_morus640_glue_decrypt, \
- .init = cryptd_morus640_glue_init_tfm, \
- .exit = cryptd_morus640_glue_exit_tfm, \
+ static struct aead_alg crypto_morus640_##id##_alg = {\
+ .setkey = crypto_morus640_glue_setkey, \
+ .setauthsize = crypto_morus640_glue_setauthsize, \
+ .encrypt = crypto_morus640_glue_encrypt, \
+ .decrypt = crypto_morus640_glue_decrypt, \
+ .init = crypto_morus640_##id##_init_tfm, \
+ .exit = crypto_morus640_##id##_exit_tfm, \
+ \
+ .ivsize = MORUS_NONCE_SIZE, \
+ .maxauthsize = MORUS_MAX_AUTH_SIZE, \
+ .chunksize = MORUS640_BLOCK_SIZE, \
+ \
+ .base = { \
+ .cra_flags = CRYPTO_ALG_INTERNAL, \
+ .cra_blocksize = 1, \
+ .cra_ctxsize = sizeof(struct morus640_ctx), \
+ .cra_alignmask = 0, \
+ .cra_priority = priority, \
\
- .ivsize = MORUS_NONCE_SIZE, \
- .maxauthsize = MORUS_MAX_AUTH_SIZE, \
- .chunksize = MORUS640_BLOCK_SIZE, \
+ .cra_name = "__morus640", \
+ .cra_driver_name = "__"driver_name, \
\
- .base = { \
- .cra_flags = CRYPTO_ALG_ASYNC, \
- .cra_blocksize = 1, \
- .cra_ctxsize = sizeof(struct crypto_aead *), \
- .cra_alignmask = 0, \
- \
- .cra_priority = priority, \
- \
- .cra_name = "morus640", \
- .cra_driver_name = driver_name, \
- \
- .cra_module = THIS_MODULE, \
- } \
+ .cra_module = THIS_MODULE, \
} \
}
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index be626eac9113..712fe1214b5f 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -15,6 +15,7 @@
#define _LINUX_PUBLIC_KEY_H
#include <linux/keyctl.h>
+#include <linux/oid_registry.h>
/*
* Cryptographic data for the public-key subtype of the asymmetric key type.
@@ -25,6 +26,9 @@
struct public_key {
void *key;
u32 keylen;
+ enum OID algo;
+ void *params;
+ u32 paramlen;
bool key_is_private;
const char *id_type;
const char *pkey_algo;
diff --git a/include/crypto/streebog.h b/include/crypto/streebog.h
index 856e32af8657..cae1b4a01971 100644
--- a/include/crypto/streebog.h
+++ b/include/crypto/streebog.h
@@ -23,7 +23,10 @@ struct streebog_uint512 {
};
struct streebog_state {
- u8 buffer[STREEBOG_BLOCK_SIZE];
+ union {
+ u8 buffer[STREEBOG_BLOCK_SIZE];
+ struct streebog_uint512 m;
+ };
struct streebog_uint512 hash;
struct streebog_uint512 h;
struct streebog_uint512 N;
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 824a5ed4e216..e937ff2beb04 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -452,6 +452,12 @@ void drm_atomic_private_obj_fini(struct drm_private_obj *obj);
struct drm_private_state * __must_check
drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
struct drm_private_obj *obj);
+struct drm_private_state *
+drm_atomic_get_old_private_obj_state(struct drm_atomic_state *state,
+ struct drm_private_obj *obj);
+struct drm_private_state *
+drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state,
+ struct drm_private_obj *obj);
/**
* drm_atomic_get_existing_crtc_state - get crtc state, if it exists
diff --git a/include/drm/drm_audio_component.h b/include/drm/drm_audio_component.h
index 93a386be38fa..a45f93487039 100644
--- a/include/drm/drm_audio_component.h
+++ b/include/drm/drm_audio_component.h
@@ -19,14 +19,17 @@ struct drm_audio_component_ops {
* @get_power: get the POWER_DOMAIN_AUDIO power well
*
* Request the power well to be turned on.
+ *
+ * Returns a wakeref cookie to be passed back to the corresponding
+ * call to @put_power.
*/
- void (*get_power)(struct device *);
+ unsigned long (*get_power)(struct device *);
/**
* @put_power: put the POWER_DOMAIN_AUDIO power well
*
* Allow the power well to be turned off.
*/
- void (*put_power)(struct device *);
+ void (*put_power)(struct device *, unsigned long);
/**
* @codec_wake_override: Enable/disable codec wake signal
*/
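
A hedged sketch of the new contract (not part of the patch): the cookie returned by @get_power must be handed back to the matching @put_power call.

/* Illustrative only. */
static void example_use_power_well(struct drm_audio_component *acomp)
{
	unsigned long cookie;

	cookie = acomp->ops->get_power(acomp->dev);
	/* ... touch codec registers while the AUDIO power well is held ... */
	acomp->ops->put_power(acomp->dev, cookie);
}
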
diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h
index 86bff9841b54..871008118bab 100644
--- a/include/drm/drm_auth.h
+++ b/include/drm/drm_auth.h
@@ -50,7 +50,6 @@ struct drm_lock_data {
*
* @refcount: Refcount for this master object.
* @dev: Link back to the DRM device
- * @lock: DRI1 lock information.
* @driver_priv: Pointer to driver-private information.
* @lessor: Lease holder
* @lessee_id: id for lessees. Owners always have id 0
@@ -80,7 +79,6 @@ struct drm_master {
* &drm_device.master_mutex.
*/
struct idr magic_map;
- struct drm_lock_data lock;
void *driver_priv;
/* Tree of display resource leases, each of which is a drm_master struct
@@ -95,6 +93,10 @@ struct drm_master {
struct list_head lessees;
struct idr leases;
struct idr lessee_idr;
+ /* private: */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+ struct drm_lock_data lock;
+#endif
};
struct drm_master *drm_master_get(struct drm_master *master);
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 9da8c93f7976..d4428913a4e1 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -244,14 +244,13 @@ struct drm_bridge_funcs {
*/
struct drm_bridge_timings {
/**
- * @sampling_edge:
+ * @input_bus_flags:
*
- * Tells whether the bridge samples the digital input signal
- * from the display engine on the positive or negative edge of the
- * clock, this should reuse the DRM_BUS_FLAG_PIXDATA_[POS|NEG]EDGE
- * bitwise flags from the DRM connector (bit 2 and 3 valid).
+ * Tells which additional settings this bridge requires for the pixel
+ * data on the bus (such as pixel signal polarity). See also
+ * &drm_display_info->bus_flags.
*/
- u32 sampling_edge;
+ u32 input_bus_flags;
/**
* @setup_time_ps:
*
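
For context, a bridge driver might fill the renamed field like this (illustrative values, not part of the patch); the DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE symbol comes from the drm_connector.h change later in this series.

/* Illustrative only: a bridge that samples pixel data on the falling edge. */
static const struct drm_bridge_timings example_timings = {
	.input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
	.setup_time_ps = 1000,
	.hold_time_ps = 1000,
};
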
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index 97fc498dc767..987ff16b9420 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -38,7 +38,7 @@
void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
void drm_clflush_sg(struct sg_table *st);
void drm_clflush_virt_range(void *addr, unsigned long length);
-u64 drm_get_max_iomem(void);
+bool drm_need_swiotlb(int dma_bits);
static inline bool drm_arch_can_wc_memory(void)
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index 8b552b1a6ce9..268b2cf0052a 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -90,7 +90,7 @@ struct drm_client_dev {
int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
const char *name, const struct drm_client_funcs *funcs);
void drm_client_release(struct drm_client_dev *client);
-void drm_client_add(struct drm_client_dev *client);
+void drm_client_register(struct drm_client_dev *client);
void drm_client_dev_unregister(struct drm_device *dev);
void drm_client_dev_hotplug(struct drm_device *dev);
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 8fe22abb1e10..02a131202add 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -253,6 +253,96 @@ enum drm_panel_orientation {
DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+/*
+ * This is a consolidated colorimetry list supported by the HDMI and
+ * DP protocol standards. The respective connectors will register a
+ * property with the subset of this list supported by that protocol.
+ * Userspace then selects the colorspace through that property, which
+ * the driver creates and exposes.
+ */
+
+/* For Default case, driver will set the colorspace */
+#define DRM_MODE_COLORIMETRY_DEFAULT 0
+/* CEA 861 Normal Colorimetry options */
+#define DRM_MODE_COLORIMETRY_NO_DATA 0
+#define DRM_MODE_COLORIMETRY_SMPTE_170M_YCC 1
+#define DRM_MODE_COLORIMETRY_BT709_YCC 2
+/* CEA 861 Extended Colorimetry Options */
+#define DRM_MODE_COLORIMETRY_XVYCC_601 3
+#define DRM_MODE_COLORIMETRY_XVYCC_709 4
+#define DRM_MODE_COLORIMETRY_SYCC_601 5
+#define DRM_MODE_COLORIMETRY_OPYCC_601 6
+#define DRM_MODE_COLORIMETRY_OPRGB 7
+#define DRM_MODE_COLORIMETRY_BT2020_CYCC 8
+#define DRM_MODE_COLORIMETRY_BT2020_RGB 9
+#define DRM_MODE_COLORIMETRY_BT2020_YCC 10
+/* Additional Colorimetry extension added as part of CTA 861.G */
+#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65 11
+#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER 12
+
+/**
+ * enum drm_bus_flags - bus_flags info for &drm_display_info
+ *
+ * This enum defines signal polarities and clock edge information for signals on
+ * a bus as bitmask flags.
+ *
+ * The clock edge information is conveyed by two sets of symbols,
+ * DRM_BUS_FLAG_\*_DRIVE_\* and DRM_BUS_FLAG_\*_SAMPLE_\*. When this enum is
+ * used to describe a bus from the point of view of the transmitter, the
+ * \*_DRIVE_\* flags should be used. When used from the point of view of the
+ * receiver, the \*_SAMPLE_\* flags should be used. The \*_DRIVE_\* and
+ * \*_SAMPLE_\* flags alias each other, with the \*_SAMPLE_POSEDGE and
+ * \*_SAMPLE_NEGEDGE flags being equal to \*_DRIVE_NEGEDGE and \*_DRIVE_POSEDGE
+ * respectively. This simplifies code as signals are usually sampled on the
+ * opposite edge of the driving edge. Transmitters and receivers may however
+ * need to take other signal timings into account to convert between driving
+ * and sample edges.
+ *
+ * @DRM_BUS_FLAG_DE_LOW: The Data Enable signal is active low
+ * @DRM_BUS_FLAG_DE_HIGH: The Data Enable signal is active high
+ * @DRM_BUS_FLAG_PIXDATA_POSEDGE: Legacy value, do not use
+ * @DRM_BUS_FLAG_PIXDATA_NEGEDGE: Legacy value, do not use
+ * @DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE: Data is driven on the rising edge of
+ * the pixel clock
+ * @DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE: Data is driven on the falling edge of
+ * the pixel clock
+ * @DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE: Data is sampled on the rising edge of
+ * the pixel clock
+ * @DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE: Data is sampled on the falling edge of
+ * the pixel clock
+ * @DRM_BUS_FLAG_DATA_MSB_TO_LSB: Data is transmitted MSB to LSB on the bus
+ * @DRM_BUS_FLAG_DATA_LSB_TO_MSB: Data is transmitted LSB to MSB on the bus
+ * @DRM_BUS_FLAG_SYNC_POSEDGE: Legacy value, do not use
+ * @DRM_BUS_FLAG_SYNC_NEGEDGE: Legacy value, do not use
+ * @DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE: Sync signals are driven on the rising
+ * edge of the pixel clock
+ * @DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE: Sync signals are driven on the falling
+ * edge of the pixel clock
+ * @DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE: Sync signals are sampled on the rising
+ * edge of the pixel clock
+ * @DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE: Sync signals are sampled on the falling
+ * edge of the pixel clock
+ */
+enum drm_bus_flags {
+ DRM_BUS_FLAG_DE_LOW = BIT(0),
+ DRM_BUS_FLAG_DE_HIGH = BIT(1),
+ DRM_BUS_FLAG_PIXDATA_POSEDGE = BIT(2),
+ DRM_BUS_FLAG_PIXDATA_NEGEDGE = BIT(3),
+ DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE = DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE = DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ DRM_BUS_FLAG_DATA_MSB_TO_LSB = BIT(4),
+ DRM_BUS_FLAG_DATA_LSB_TO_MSB = BIT(5),
+ DRM_BUS_FLAG_SYNC_POSEDGE = BIT(6),
+ DRM_BUS_FLAG_SYNC_NEGEDGE = BIT(7),
+ DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE = DRM_BUS_FLAG_SYNC_POSEDGE,
+ DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE = DRM_BUS_FLAG_SYNC_NEGEDGE,
+ DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE = DRM_BUS_FLAG_SYNC_NEGEDGE,
+ DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE = DRM_BUS_FLAG_SYNC_POSEDGE,
+};
+
/**
* struct drm_display_info - runtime data about the connected sink
*
@@ -266,26 +356,16 @@ enum drm_panel_orientation {
*/
struct drm_display_info {
/**
- * @name: Name of the display.
- */
- char name[DRM_DISPLAY_INFO_LEN];
-
- /**
* @width_mm: Physical width in mm.
*/
- unsigned int width_mm;
+ unsigned int width_mm;
+
/**
* @height_mm: Physical height in mm.
*/
unsigned int height_mm;
/**
- * @pixel_clock: Maximum pixel clock supported by the sink, in units of
- * 100Hz. This mismatches the clock in &drm_display_mode (which is in
- * kHZ), because that's what the EDID uses as base unit.
- */
- unsigned int pixel_clock;
- /**
* @bpc: Maximum bits per color channel. Used by HDMI and DP outputs.
*/
unsigned int bpc;
@@ -328,24 +408,10 @@ struct drm_display_info {
*/
unsigned int num_bus_formats;
-#define DRM_BUS_FLAG_DE_LOW (1<<0)
-#define DRM_BUS_FLAG_DE_HIGH (1<<1)
-/* drive data on pos. edge */
-#define DRM_BUS_FLAG_PIXDATA_POSEDGE (1<<2)
-/* drive data on neg. edge */
-#define DRM_BUS_FLAG_PIXDATA_NEGEDGE (1<<3)
-/* data is transmitted MSB to LSB on the bus */
-#define DRM_BUS_FLAG_DATA_MSB_TO_LSB (1<<4)
-/* data is transmitted LSB to MSB on the bus */
-#define DRM_BUS_FLAG_DATA_LSB_TO_MSB (1<<5)
-/* drive sync on pos. edge */
-#define DRM_BUS_FLAG_SYNC_POSEDGE (1<<6)
-/* drive sync on neg. edge */
-#define DRM_BUS_FLAG_SYNC_NEGEDGE (1<<7)
-
/**
* @bus_flags: Additional information (like pixel signal polarity) for
- * the pixel data on the bus, using DRM_BUS_FLAGS\_ defines.
+ * the pixel data on the bus, using &enum drm_bus_flags values
+ * DRM_BUS_FLAGS\_.
*/
u32 bus_flags;
@@ -503,6 +569,13 @@ struct drm_connector_state {
unsigned int content_protection;
/**
+ * @colorspace: State variable for the connector property used to request
+ * a colorspace change on the sink. This is most commonly used to switch
+ * to wider color gamuts like BT2020.
+ */
+ u32 colorspace;
+
+ /**
* @writeback_job: Writeback job for writeback connectors
*
* Holds the framebuffer and out-fence for a writeback connector. As
@@ -995,6 +1068,12 @@ struct drm_connector {
struct drm_property *content_protection_property;
/**
+ * @colorspace_property: Connector property used to select a colorspace
+ * supported by the sink.
+ */
+ struct drm_property *colorspace_property;
+
+ /**
* @path_blob_ptr:
*
* DRM blob property data for the DP MST path property. This should only
@@ -1269,6 +1348,7 @@ int drm_connector_attach_vrr_capable_property(
int drm_connector_attach_content_protection_property(
struct drm_connector *connector);
int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
+int drm_mode_create_colorspace_property(struct drm_connector *connector);
int drm_mode_create_content_type_property(struct drm_device *dev);
void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
const struct drm_connector_state *conn_state);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 85abd3fe9e83..58ad983d7cd6 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -78,7 +78,7 @@ struct drm_plane_helper_funcs;
/**
* struct drm_crtc_state - mutable CRTC state
*
- * Note that the distinction between @enable and @active is rather subtile:
+ * Note that the distinction between @enable and @active is rather subtle:
* Flipping @active while @enable is set without changing anything else may
* never return in a failure from the &drm_mode_config_funcs.atomic_check
* callback. Userspace assumes that a DPMS On will always succeed. In other
@@ -472,7 +472,7 @@ struct drm_crtc_funcs {
/**
* @destroy:
*
- * Clean up plane resources. This is only called at driver unload time
+ * Clean up CRTC resources. This is only called at driver unload time
* through drm_mode_config_cleanup() since a CRTC cannot be hotplugged
* in DRM.
*/
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index d5e092dccf3e..7f9ef709b2b6 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -306,7 +306,7 @@ struct drm_device {
/* Everything below here is for legacy driver, never use! */
/* private: */
-
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
/* Context handle management - linked list of context handles */
struct list_head ctxlist;
@@ -353,6 +353,7 @@ struct drm_device {
/* Scatter gather memory */
struct drm_sg_mem *sg;
+#endif
};
#endif
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 570f9d03b2eb..68ca736c548d 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -91,6 +91,13 @@ enum drm_driver_feature {
* submission.
*/
DRIVER_SYNCOBJ = BIT(5),
+ /**
+ * @DRIVER_SYNCOBJ_TIMELINE:
+ *
+ * Driver supports the timeline flavor of &drm_syncobj for explicit
+ * synchronization of command submission.
+ */
+ DRIVER_SYNCOBJ_TIMELINE = BIT(6),
/* IMPORTANT: Below are all the legacy flags, add new ones above. */
@@ -152,9 +159,9 @@ enum drm_driver_feature {
/**
* struct drm_driver - DRM driver structure
*
- * This structure represent the common code for a family of cards. There will
- * one drm_device for each card present in this family. It contains lots of
- * vfunc entries, and a pile of those probably should be moved to more
+ * This structure represents the common code for a family of cards. There will be
+ * one &struct drm_device for each card present in this family. It contains lots
+ * of vfunc entries, and a pile of those probably should be moved to more
* appropriate places like &drm_mode_config_funcs or into a new operations
* structure for GEM drivers.
*/
@@ -718,6 +725,9 @@ extern unsigned int drm_debug;
int drm_dev_init(struct drm_device *dev,
struct drm_driver *driver,
struct device *parent);
+int devm_drm_dev_init(struct device *parent,
+ struct drm_device *dev,
+ struct drm_driver *driver);
void drm_dev_fini(struct drm_device *dev);
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
diff --git a/include/drm/drm_dsc.h b/include/drm/drm_dsc.h
index 9c26f083c70f..887954cbfc60 100644
--- a/include/drm/drm_dsc.h
+++ b/include/drm/drm_dsc.h
@@ -101,9 +101,9 @@ struct drm_dsc_config {
*/
u16 slice_height;
/**
- * @enable422: True for 4_2_2 sampling, false for 4_4_4 sampling
+ * @simple_422: True if simple 4_2_2 mode is enabled, false otherwise
*/
- bool enable422;
+ bool simple_422;
/**
* @pic_width: Width of the input display frame in pixels
*/
@@ -601,8 +601,9 @@ struct drm_dsc_pps_infoframe {
struct drm_dsc_picture_parameter_set pps_payload;
} __packed;
-void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp);
-void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
+void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header);
+void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_sdp,
const struct drm_dsc_config *dsc_cfg);
+int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg);
#endif /* _DRM_DSC_H_ */
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 8dc1a081fb36..9d3b5b93102c 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -331,6 +331,7 @@ struct cea_sad {
struct drm_encoder;
struct drm_connector;
+struct drm_connector_state;
struct drm_display_mode;
int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
@@ -358,6 +359,11 @@ int
drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
struct drm_connector *connector,
const struct drm_display_mode *mode);
+
+void
+drm_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state);
+
void
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
struct drm_connector *connector,
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 286d58efed5d..40af2866c26a 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -68,10 +68,8 @@ struct drm_fb_helper_crtc {
* according to the largest width/height (so it is large enough for all CRTCs
* to scanout). But the fbdev width/height is sized to the minimum width/
* height of all the displays. This ensures that fbcon fits on the smallest
- * of the attached displays.
- *
- * So what is passed to drm_fb_helper_fill_var() should be fb_width/fb_height,
- * rather than the surface size.
+ * of the attached displays. fb_width/fb_height are used by
+ * drm_fb_helper_fill_info() to fill out the &fb_info.var structure.
*/
struct drm_fb_helper_surface_size {
u32 fb_width;
@@ -104,29 +102,6 @@ struct drm_fb_helper_funcs {
*/
int (*fb_probe)(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes);
-
- /**
- * @initial_config:
- *
- * Driver callback to setup an initial fbdev display configuration.
- * Drivers can use this callback to tell the fbdev emulation what the
- * preferred initial configuration is. This is useful to implement
- * smooth booting where the fbdev (and subsequently all userspace) never
- * changes the mode, but always inherits the existing configuration.
- *
- * This callback is optional.
- *
- * RETURNS:
- *
- * The driver should return true if a suitable initial configuration has
- * been filled out and false when the fbdev helper should fall back to
- * the default probing logic.
- */
- bool (*initial_config)(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_crtc **crtcs,
- struct drm_display_mode **modes,
- struct drm_fb_offset *offsets,
- bool *enabled, int width, int height);
};
struct drm_fb_helper_connector {
@@ -289,10 +264,9 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper);
void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper);
-void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
- uint32_t fb_width, uint32_t fb_height);
-void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
- uint32_t depth);
+void drm_fb_helper_fill_info(struct fb_info *info,
+ struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes);
void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
@@ -418,14 +392,10 @@ static inline void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
{
}
-static inline void drm_fb_helper_fill_var(struct fb_info *info,
- struct drm_fb_helper *fb_helper,
- uint32_t fb_width, uint32_t fb_height)
-{
-}
-
-static inline void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
- uint32_t depth)
+static inline void
+drm_fb_helper_fill_info(struct fb_info *info,
+ struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
{
}
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 6710b612e2f6..67af60bb527a 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -335,7 +335,9 @@ struct drm_file {
struct drm_prime_file_private prime;
/* private: */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
unsigned long lock_count; /* DRI1 legacy lock count */
+#endif
};
/**
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
new file mode 100644
index 000000000000..085d63faee12
--- /dev/null
+++ b/include/drm/drm_format_helper.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2016 Noralf Trønnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_DRM_FORMAT_HELPER_H
+#define __LINUX_DRM_FORMAT_HELPER_H
+
+struct drm_framebuffer;
+struct drm_rect;
+
+void drm_fb_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip);
+void drm_fb_memcpy_dstclip(void __iomem *dst, void *vaddr,
+ struct drm_framebuffer *fb,
+ struct drm_rect *clip);
+void drm_fb_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip);
+void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr,
+ struct drm_framebuffer *fb,
+ struct drm_rect *clip, bool swab);
+void drm_fb_xrgb8888_to_rgb565_dstclip(void __iomem *dst, unsigned int dst_pitch,
+ void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip, bool swab);
+void drm_fb_xrgb8888_to_rgb888_dstclip(void __iomem *dst, unsigned int dst_pitch,
+ void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip);
+void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip);
+
+#endif /* __LINUX_DRM_FORMAT_HELPER_H */
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index f0b34c977ec5..c23016748e3f 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -32,6 +32,7 @@
struct drm_clip_rect;
struct drm_device;
struct drm_file;
+struct drm_format_info;
struct drm_framebuffer;
struct drm_gem_object;
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index c95727425284..5047c7ee25f5 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -35,6 +35,7 @@
*/
#include <linux/kref.h>
+#include <linux/reservation.h>
#include <drm/drm_vma_manager.h>
@@ -263,6 +264,24 @@ struct drm_gem_object {
struct dma_buf_attachment *import_attach;
/**
+ * @resv:
+ *
+ * Pointer to reservation object associated with the this GEM object.
+ *
+ * Normally (@resv == &@_resv) except for imported GEM objects.
+ */
+ struct reservation_object *resv;
+
+ /**
+ * @_resv:
+ *
+ * A reservation object for this GEM object.
+ *
+ * This is unused for imported GEM objects.
+ */
+ struct reservation_object _resv;
+
+ /**
* @funcs:
*
* Optional GEM object functions. If this is set, it will be used instead of the
@@ -362,7 +381,20 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
bool dirty, bool accessed);
+int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
+ int count, struct drm_gem_object ***objs_out);
struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
+long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
+ bool wait_all, unsigned long timeout);
+int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
+ struct ww_acquire_ctx *acquire_ctx);
+void drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
+ struct ww_acquire_ctx *acquire_ctx);
+int drm_gem_fence_array_add(struct xarray *fence_array,
+ struct dma_fence *fence);
+int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
+ struct drm_gem_object *obj,
+ bool write);
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
u32 handle, u64 *offset);
int drm_gem_dumb_destroy(struct drm_file *file,
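
A hedged sketch of the new reservation helpers in use (not part of the patch): lock a set of GEM objects for a job submission and unlock them when done. The objs/count pair is assumed to come from drm_gem_objects_lookup().

/* Illustrative only. */
static int example_lock_bos(struct drm_gem_object **objs, int count)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ret = drm_gem_lock_reservations(objs, count, &ctx);
	if (ret)
		return ret;

	/* ... attach fences / set up the job here ... */

	drm_gem_unlock_reservations(objs, count, &ctx);
	return 0;
}
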
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
new file mode 100644
index 000000000000..038b6d313447
--- /dev/null
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __DRM_GEM_SHMEM_HELPER_H__
+#define __DRM_GEM_SHMEM_HELPER_H__
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_prime.h>
+
+struct dma_buf_attachment;
+struct drm_mode_create_dumb;
+struct drm_printer;
+struct sg_table;
+
+/**
+ * struct drm_gem_shmem_object - GEM object backed by shmem
+ */
+struct drm_gem_shmem_object {
+ /**
+ * @base: Base GEM object
+ */
+ struct drm_gem_object base;
+
+ /**
+ * @pages_lock: Protects the page table and use count
+ */
+ struct mutex pages_lock;
+
+ /**
+ * @pages: Page table
+ */
+ struct page **pages;
+
+ /**
+ * @pages_use_count:
+ *
+ * Reference count on the pages table.
+ * The pages are put when the count reaches zero.
+ */
+ unsigned int pages_use_count;
+
+ /**
+ * @pages_mark_dirty_on_put:
+ *
+ * Mark pages as dirty when they are put.
+ */
+ unsigned int pages_mark_dirty_on_put : 1;
+
+ /**
+ * @pages_mark_accessed_on_put:
+ *
+ * Mark pages as accessed when they are put.
+ */
+ unsigned int pages_mark_accessed_on_put : 1;
+
+ /**
+ * @sgt: Scatter/gather table for imported PRIME buffers
+ */
+ struct sg_table *sgt;
+
+ /**
+ * @vmap_lock: Protects the vmap address and use count
+ */
+ struct mutex vmap_lock;
+
+ /**
+ * @vaddr: Kernel virtual address of the backing memory
+ */
+ void *vaddr;
+
+ /**
+ * @vmap_use_count:
+ *
+ * Reference count on the virtual address.
+ * The address is unmapped when the count reaches zero.
+ */
+ unsigned int vmap_use_count;
+};
+
+#define to_drm_gem_shmem_obj(obj) \
+ container_of(obj, struct drm_gem_shmem_object, base)
+
+/**
+ * DEFINE_DRM_GEM_SHMEM_FOPS() - Macro to generate file operations for shmem drivers
+ * @name: name for the generated structure
+ *
+ * This macro autogenerates a suitable &struct file_operations for shmem based
+ * drivers, which can be assigned to &drm_driver.fops. Note that this structure
+ * cannot be shared between drivers, because it contains a reference to the
+ * current module using THIS_MODULE.
+ *
+ * Note that the declaration is already marked as static - if you need a
+ * non-static version of this you're probably doing it wrong and will break the
+ * THIS_MODULE reference by accident.
+ */
+#define DEFINE_DRM_GEM_SHMEM_FOPS(name) \
+ static const struct file_operations name = {\
+ .owner = THIS_MODULE,\
+ .open = drm_open,\
+ .release = drm_release,\
+ .unlocked_ioctl = drm_ioctl,\
+ .compat_ioctl = drm_compat_ioctl,\
+ .poll = drm_poll,\
+ .read = drm_read,\
+ .llseek = noop_llseek,\
+ .mmap = drm_gem_shmem_mmap, \
+ }
+
+struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
+void drm_gem_shmem_free_object(struct drm_gem_object *obj);
+
+int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
+int drm_gem_shmem_pin(struct drm_gem_object *obj);
+void drm_gem_shmem_unpin(struct drm_gem_object *obj);
+void *drm_gem_shmem_vmap(struct drm_gem_object *obj);
+void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr);
+
+struct drm_gem_shmem_object *
+drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
+ struct drm_device *dev, size_t size,
+ uint32_t *handle);
+int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+
+int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma);
+
+extern const struct vm_operations_struct drm_gem_shmem_vm_ops;
+
+void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj);
+
+struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *
+drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
+struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj);
+
+/**
+ * DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations
+ *
+ * This macro provides a shortcut for setting the shmem GEM operations in
+ * the &drm_driver structure.
+ */
+#define DRM_GEM_SHMEM_DRIVER_OPS \
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
+ .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
+ .gem_prime_mmap = drm_gem_prime_mmap, \
+ .dumb_create = drm_gem_shmem_dumb_create
+
+#endif /* __DRM_GEM_SHMEM_HELPER_H__ */
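
A minimal sketch of wiring the new shmem helpers into a driver (illustrative, not part of the patch); the field set is deliberately reduced, and a real driver typically also hooks the GEM vtable entries (e.g. .gem_free_object_unlocked to drm_gem_shmem_free_object, as shown).

/* Illustrative only. */
DEFINE_DRM_GEM_SHMEM_FOPS(example_fops);

static struct drm_driver example_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.fops			= &example_fops,
	.gem_free_object_unlocked = drm_gem_shmem_free_object,
	DRM_GEM_SHMEM_DRIVER_OPS,
	.name			= "example",
	.desc			= "example shmem-backed driver",
	.date			= "20190101",
	.major			= 1,
	.minor			= 0,
};
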
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
index 7260b31af276..f243408ecf26 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/drm_hdcp.h
@@ -13,6 +13,7 @@
/* Period of hdcp checks (to ensure we're still authenticated) */
#define DRM_HDCP_CHECK_PERIOD_MS (128 * 16)
+#define DRM_HDCP2_CHECK_PERIOD_MS 500
/* Shared lengths/masks between HDMI/DVI/DisplayPort */
#define DRM_HDCP_AN_LEN 8
@@ -68,7 +69,6 @@
#define HDCP_2_2_REP_SEND_ACK 15
#define HDCP_2_2_REP_STREAM_MANAGE 16
#define HDCP_2_2_REP_STREAM_READY 17
-#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
#define HDCP_2_2_RTX_LEN 8
#define HDCP_2_2_RRX_LEN 8
@@ -219,11 +219,6 @@ struct hdcp2_rep_stream_ready {
u8 m_prime[HDCP_2_2_MPRIME_LEN];
} __packed;
-struct hdcp2_dp_errata_stream_type {
- u8 msg_id;
- u8 stream_type;
-} __packed;
-
/* HDCP2.2 TIMEOUTs in mSec */
#define HDCP_2_2_CERT_TIMEOUT_MS 100
#define HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS 1000
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
index 3e99ab69c122..2182a56ac421 100644
--- a/include/drm/drm_legacy.h
+++ b/include/drm/drm_legacy.h
@@ -162,8 +162,6 @@ int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, unsigned int token);
void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
-void drm_legacy_master_rmmaps(struct drm_device *dev,
- struct drm_master *master);
struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index be4fed97e727..083f16747369 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -138,6 +138,23 @@ enum drm_mode_status {
.vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \
.vscan = (vs), .flags = (f)
+/**
+ * DRM_SIMPLE_MODE - Simple display mode
+ * @hd: Horizontal resolution, width
+ * @vd: Vertical resolution, height
+ * @hd_mm: Display width in millimeters
+ * @vd_mm: Display height in millimeters
+ *
+ * This macro initializes a &drm_display_mode that only contains info about
+ * resolution and physical size.
+ */
+#define DRM_SIMPLE_MODE(hd, vd, hd_mm, vd_mm) \
+ .type = DRM_MODE_TYPE_DRIVER, .clock = 1 /* pass validation */, \
+ .hdisplay = (hd), .hsync_start = (hd), .hsync_end = (hd), \
+ .htotal = (hd), .vdisplay = (vd), .vsync_start = (vd), \
+ .vsync_end = (vd), .vtotal = (vd), .width_mm = (hd_mm), \
+ .height_mm = (vd_mm)
+
#define CRTC_INTERLACE_HALVE_V (1 << 0) /* halve V values for interlacing */
#define CRTC_STEREO_DOUBLE (1 << 1) /* adjust timings for stereo modes */
#define CRTC_NO_DBLSCAN (1 << 2) /* don't adjust doublescan */
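
Typical use of the DRM_SIMPLE_MODE() macro added above looks like this (illustrative, invented numbers): a fixed-mode panel described only by resolution and physical size.

/* Illustrative only. */
static const struct drm_display_mode example_mode = {
	DRM_SIMPLE_MODE(320, 240, 58, 43),
};
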
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index cfb7be40bed7..f9c94c2a1364 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -49,6 +49,8 @@
*/
enum mode_set_atomic;
+struct drm_writeback_connector;
+struct drm_writeback_job;
/**
* struct drm_crtc_helper_funcs - helper operations for CRTCs
@@ -418,6 +420,8 @@ struct drm_crtc_helper_funcs {
* Drivers can use the @old_crtc_state input parameter if the operations
* needed to enable the CRTC don't depend solely on the new state but
* also on the transition between the old state and the new state.
+ *
+ * This function is optional.
*/
void (*atomic_enable)(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state);
@@ -441,6 +445,8 @@ struct drm_crtc_helper_funcs {
* parameter @old_crtc_state which could be used to access the old
* state. Atomic drivers should consider to use this one instead
* of @disable.
+ *
+ * This function is optional.
*/
void (*atomic_disable)(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state);
@@ -989,6 +995,11 @@ struct drm_connector_helper_funcs {
*/
void (*atomic_commit)(struct drm_connector *connector,
struct drm_connector_state *state);
+
+ int (*prepare_writeback_job)(struct drm_writeback_connector *connector,
+ struct drm_writeback_job *job);
+ void (*cleanup_writeback_job)(struct drm_writeback_connector *connector,
+ struct drm_writeback_job *job);
};
/**
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index afbc3beef089..3a4247319e63 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -30,6 +30,7 @@
#include <linux/printk.h>
#include <linux/seq_file.h>
#include <linux/device.h>
+#include <linux/debugfs.h>
/**
* DOC: print
@@ -84,6 +85,7 @@ void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf);
__printf(2, 3)
void drm_printf(struct drm_printer *p, const char *f, ...);
void drm_puts(struct drm_printer *p, const char *str);
+void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset);
__printf(2, 0)
/**
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index 0311c9fdbd2f..6cf7243a1dc5 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -27,6 +27,7 @@
#define __DRM_SYNCOBJ_H__
#include <linux/dma-fence.h>
+#include <linux/dma-fence-chain.h>
struct drm_file;
@@ -112,6 +113,10 @@ drm_syncobj_fence_get(struct drm_syncobj *syncobj)
struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
u32 handle);
+void drm_syncobj_add_point(struct drm_syncobj *syncobj,
+ struct dma_fence_chain *chain,
+ struct dma_fence *fence,
+ uint64_t point);
void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
struct dma_fence *fence);
int drm_syncobj_find_fence(struct drm_file *file_private,
diff --git a/include/drm/drm_utils.h b/include/drm/drm_utils.h
index a803988d8579..70775748d243 100644
--- a/include/drm/drm_utils.h
+++ b/include/drm/drm_utils.h
@@ -10,6 +10,10 @@
#ifndef __DRM_UTILS_H__
#define __DRM_UTILS_H__
+#include <linux/types.h>
+
int drm_get_panel_orientation_quirk(int width, int height);
+signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec);
+
#endif
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index c7987daeaed0..76ac5e97a559 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -29,6 +29,18 @@
#include <linux/spinlock.h>
#include <linux/types.h>
+/* We make up offsets for buffer objects so we can recognize them at
+ * mmap time. pgoff in mmap is an unsigned long, so we need to make sure
+ * that the faked up offset will fit
+ */
+#if BITS_PER_LONG == 64
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 256)
+#else
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
+#endif
+
struct drm_file;
struct drm_vma_offset_file {
diff --git a/include/drm/drm_writeback.h b/include/drm/drm_writeback.h
index 23df9d463003..777c14c847f0 100644
--- a/include/drm/drm_writeback.h
+++ b/include/drm/drm_writeback.h
@@ -80,6 +80,20 @@ struct drm_writeback_connector {
struct drm_writeback_job {
/**
+ * @connector:
+ *
+ * Back-pointer to the writeback connector associated with the job
+ */
+ struct drm_writeback_connector *connector;
+
+ /**
+ * @prepared:
+ *
+ * Set when the job has been prepared with drm_writeback_prepare_job()
+ */
+ bool prepared;
+
+ /**
* @cleanup_work:
*
* Used to allow drm_writeback_signal_completion to defer dropping the
@@ -98,7 +112,7 @@ struct drm_writeback_job {
* @fb:
*
* Framebuffer to be written to by the writeback connector. Do not set
- * directly, use drm_atomic_set_writeback_fb_for_connector()
+ * directly, use drm_writeback_set_fb()
*/
struct drm_framebuffer *fb;
@@ -108,6 +122,13 @@ struct drm_writeback_job {
* Fence which will signal once the writeback has completed
*/
struct dma_fence *out_fence;
+
+ /**
+ * @priv:
+ *
+ * Driver-private data
+ */
+ void *priv;
};
static inline struct drm_writeback_connector *
@@ -122,8 +143,13 @@ int drm_writeback_connector_init(struct drm_device *dev,
const struct drm_encoder_helper_funcs *enc_helper_funcs,
const u32 *formats, int n_formats);
+int drm_writeback_set_fb(struct drm_connector_state *conn_state,
+ struct drm_framebuffer *fb);
+
+int drm_writeback_prepare_job(struct drm_writeback_job *job);
+
void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector,
- struct drm_writeback_job *job);
+ struct drm_connector_state *conn_state);
void drm_writeback_cleanup_job(struct drm_writeback_job *job);
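
A hypothetical pair of connector helpers using the new writeback job hooks and the @priv field added above (not part of the patch): allocate per-job driver state in prepare and release it in cleanup. struct example_wb_state and the kzalloc()/kfree() usage (which needs <linux/slab.h>) are assumptions for the example.

/* Illustrative only. */
struct example_wb_state { u32 scratch; };	/* hypothetical per-job data */

static int example_prepare_wb_job(struct drm_writeback_connector *connector,
				  struct drm_writeback_job *job)
{
	job->priv = kzalloc(sizeof(struct example_wb_state), GFP_KERNEL);
	return job->priv ? 0 : -ENOMEM;
}

static void example_cleanup_wb_job(struct drm_writeback_connector *connector,
				   struct drm_writeback_job *job)
{
	kfree(job->priv);
}
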
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index d2fad7b0fcf6..6477da22af28 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -108,8 +108,10 @@
INTEL_VGA_DEVICE(0x2e42, info), /* B43_G */ \
INTEL_VGA_DEVICE(0x2e92, info) /* B43_G.1 */
-#define INTEL_PINEVIEW_IDS(info) \
- INTEL_VGA_DEVICE(0xa001, info), \
+#define INTEL_PINEVIEW_G_IDS(info) \
+ INTEL_VGA_DEVICE(0xa001, info)
+
+#define INTEL_PINEVIEW_M_IDS(info) \
INTEL_VGA_DEVICE(0xa011, info)
#define INTEL_IRONLAKE_D_IDS(info) \
@@ -166,7 +168,18 @@
#define INTEL_IVB_Q_IDS(info) \
INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */
+#define INTEL_HSW_ULT_GT1_IDS(info) \
+ INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \
+ INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \
+ INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \
+ INTEL_VGA_DEVICE(0x0A06, info) /* ULT GT1 mobile */
+
+#define INTEL_HSW_ULX_GT1_IDS(info) \
+ INTEL_VGA_DEVICE(0x0A0E, info) /* ULX GT1 mobile */
+
#define INTEL_HSW_GT1_IDS(info) \
+ INTEL_HSW_ULT_GT1_IDS(info), \
+ INTEL_HSW_ULX_GT1_IDS(info), \
INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \
INTEL_VGA_DEVICE(0x040a, info), /* GT1 server */ \
INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \
@@ -175,20 +188,26 @@
INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \
INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \
INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \
- INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \
- INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \
- INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \
INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \
INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \
INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \
INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \
INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
- INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
- INTEL_VGA_DEVICE(0x0A0E, info), /* ULX GT1 mobile */ \
INTEL_VGA_DEVICE(0x0D06, info) /* CRW GT1 mobile */
+#define INTEL_HSW_ULT_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
+ INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
+ INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \
+ INTEL_VGA_DEVICE(0x0A16, info) /* ULT GT2 mobile */
+
+#define INTEL_HSW_ULX_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x0A1E, info) /* ULX GT2 mobile */
+
#define INTEL_HSW_GT2_IDS(info) \
+ INTEL_HSW_ULT_GT2_IDS(info), \
+ INTEL_HSW_ULX_GT2_IDS(info), \
INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \
INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \
INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \
@@ -197,9 +216,6 @@
INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \
INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \
INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \
- INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \
- INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \
- INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \
INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
@@ -207,11 +223,17 @@
INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \
INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
- INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
- INTEL_VGA_DEVICE(0x0A1E, info), /* ULX GT2 mobile */ \
INTEL_VGA_DEVICE(0x0D16, info) /* CRW GT2 mobile */
+#define INTEL_HSW_ULT_GT3_IDS(info) \
+ INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
+ INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
+ INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
+ INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
+ INTEL_VGA_DEVICE(0x0A2E, info) /* ULT GT3 reserved */
+
#define INTEL_HSW_GT3_IDS(info) \
+ INTEL_HSW_ULT_GT3_IDS(info), \
INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \
INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \
INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \
@@ -220,16 +242,11 @@
INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \
INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \
INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \
- INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \
- INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \
- INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \
INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
INTEL_VGA_DEVICE(0x0D2E, info), /* CRW GT3 reserved */ \
INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
- INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
- INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \
INTEL_VGA_DEVICE(0x0D26, info) /* CRW GT3 mobile */
#define INTEL_HSW_IDS(info) \
@@ -245,35 +262,59 @@
INTEL_VGA_DEVICE(0x0157, info), \
INTEL_VGA_DEVICE(0x0155, info)
-#define INTEL_BDW_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \
+#define INTEL_BDW_ULT_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x1606, info), /* GT1 ULT */ \
- INTEL_VGA_DEVICE(0x160B, info), /* GT1 Iris */ \
- INTEL_VGA_DEVICE(0x160E, info), /* GT1 ULX */ \
+ INTEL_VGA_DEVICE(0x160B, info) /* GT1 Iris */
+
+#define INTEL_BDW_ULX_GT1_IDS(info) \
+ INTEL_VGA_DEVICE(0x160E, info) /* GT1 ULX */
+
+#define INTEL_BDW_GT1_IDS(info) \
+ INTEL_BDW_ULT_GT1_IDS(info), \
+ INTEL_BDW_ULX_GT1_IDS(info), \
+ INTEL_VGA_DEVICE(0x1602, info), /* GT1 ULT */ \
INTEL_VGA_DEVICE(0x160A, info), /* GT1 Server */ \
INTEL_VGA_DEVICE(0x160D, info) /* GT1 Workstation */
-#define INTEL_BDW_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
+#define INTEL_BDW_ULT_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x1616, info), /* GT2 ULT */ \
- INTEL_VGA_DEVICE(0x161B, info), /* GT2 ULT */ \
- INTEL_VGA_DEVICE(0x161E, info), /* GT2 ULX */ \
+ INTEL_VGA_DEVICE(0x161B, info) /* GT2 ULT */
+
+#define INTEL_BDW_ULX_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x161E, info) /* GT2 ULX */
+
+#define INTEL_BDW_GT2_IDS(info) \
+ INTEL_BDW_ULT_GT2_IDS(info), \
+ INTEL_BDW_ULX_GT2_IDS(info), \
+ INTEL_VGA_DEVICE(0x1612, info), /* GT2 Halo */ \
INTEL_VGA_DEVICE(0x161A, info), /* GT2 Server */ \
INTEL_VGA_DEVICE(0x161D, info) /* GT2 Workstation */
+#define INTEL_BDW_ULT_GT3_IDS(info) \
+ INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x162B, info) /* Iris */
+
+#define INTEL_BDW_ULX_GT3_IDS(info) \
+ INTEL_VGA_DEVICE(0x162E, info) /* ULX */
+
#define INTEL_BDW_GT3_IDS(info) \
+ INTEL_BDW_ULT_GT3_IDS(info), \
+ INTEL_BDW_ULX_GT3_IDS(info), \
INTEL_VGA_DEVICE(0x1622, info), /* ULT */ \
- INTEL_VGA_DEVICE(0x1626, info), /* ULT */ \
- INTEL_VGA_DEVICE(0x162B, info), /* Iris */ \
- INTEL_VGA_DEVICE(0x162E, info), /* ULX */\
INTEL_VGA_DEVICE(0x162A, info), /* Server */ \
INTEL_VGA_DEVICE(0x162D, info) /* Workstation */
+#define INTEL_BDW_ULT_RSVD_IDS(info) \
+ INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \
+ INTEL_VGA_DEVICE(0x163B, info) /* Iris */
+
+#define INTEL_BDW_ULX_RSVD_IDS(info) \
+ INTEL_VGA_DEVICE(0x163E, info) /* ULX */
+
#define INTEL_BDW_RSVD_IDS(info) \
+ INTEL_BDW_ULT_RSVD_IDS(info), \
+ INTEL_BDW_ULX_RSVD_IDS(info), \
INTEL_VGA_DEVICE(0x1632, info), /* ULT */ \
- INTEL_VGA_DEVICE(0x1636, info), /* ULT */ \
- INTEL_VGA_DEVICE(0x163B, info), /* Iris */ \
- INTEL_VGA_DEVICE(0x163E, info), /* ULX */ \
INTEL_VGA_DEVICE(0x163A, info), /* Server */ \
INTEL_VGA_DEVICE(0x163D, info) /* Workstation */
@@ -289,25 +330,40 @@
INTEL_VGA_DEVICE(0x22b2, info), \
INTEL_VGA_DEVICE(0x22b3, info)
+#define INTEL_SKL_ULT_GT1_IDS(info) \
+ INTEL_VGA_DEVICE(0x1906, info) /* ULT GT1 */
+
+#define INTEL_SKL_ULX_GT1_IDS(info) \
+ INTEL_VGA_DEVICE(0x190E, info) /* ULX GT1 */
+
#define INTEL_SKL_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
- INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
+ INTEL_SKL_ULT_GT1_IDS(info), \
+ INTEL_SKL_ULX_GT1_IDS(info), \
INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
INTEL_VGA_DEVICE(0x190A, info) /* SRV GT1 */
-#define INTEL_SKL_GT2_IDS(info) \
+#define INTEL_SKL_ULT_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
- INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \
- INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \
+ INTEL_VGA_DEVICE(0x1921, info) /* ULT GT2F */
+
+#define INTEL_SKL_ULX_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x191E, info) /* ULX GT2 */
+
+#define INTEL_SKL_GT2_IDS(info) \
+ INTEL_SKL_ULT_GT2_IDS(info), \
+ INTEL_SKL_ULX_GT2_IDS(info), \
INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \
INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
+#define INTEL_SKL_ULT_GT3_IDS(info) \
+ INTEL_VGA_DEVICE(0x1926, info) /* ULT GT3 */
+
#define INTEL_SKL_GT3_IDS(info) \
+ INTEL_SKL_ULT_GT3_IDS(info), \
INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
INTEL_VGA_DEVICE(0x192D, info) /* SRV GT3 */
@@ -336,29 +392,44 @@
INTEL_VGA_DEVICE(0x3184, info), \
INTEL_VGA_DEVICE(0x3185, info)
-#define INTEL_KBL_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \
- INTEL_VGA_DEVICE(0x5915, info), /* ULX GT1.5 */ \
+#define INTEL_KBL_ULT_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x5906, info), /* ULT GT1 */ \
+ INTEL_VGA_DEVICE(0x5913, info) /* ULT GT1.5 */
+
+#define INTEL_KBL_ULX_GT1_IDS(info) \
INTEL_VGA_DEVICE(0x590E, info), /* ULX GT1 */ \
+ INTEL_VGA_DEVICE(0x5915, info) /* ULX GT1.5 */
+
+#define INTEL_KBL_GT1_IDS(info) \
+ INTEL_KBL_ULT_GT1_IDS(info), \
+ INTEL_KBL_ULX_GT1_IDS(info), \
INTEL_VGA_DEVICE(0x5902, info), /* DT GT1 */ \
INTEL_VGA_DEVICE(0x5908, info), /* Halo GT1 */ \
INTEL_VGA_DEVICE(0x590B, info), /* Halo GT1 */ \
INTEL_VGA_DEVICE(0x590A, info) /* SRV GT1 */
-#define INTEL_KBL_GT2_IDS(info) \
+#define INTEL_KBL_ULT_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
+ INTEL_VGA_DEVICE(0x5921, info) /* ULT GT2F */
+
+#define INTEL_KBL_ULX_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x591E, info) /* ULX GT2 */
+
+#define INTEL_KBL_GT2_IDS(info) \
+ INTEL_KBL_ULT_GT2_IDS(info), \
+ INTEL_KBL_ULX_GT2_IDS(info), \
INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \
- INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \
- INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \
INTEL_VGA_DEVICE(0x591B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x591A, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x591D, info) /* WKS GT2 */
+#define INTEL_KBL_ULT_GT3_IDS(info) \
+ INTEL_VGA_DEVICE(0x5926, info) /* ULT GT3 */
+
#define INTEL_KBL_GT3_IDS(info) \
+ INTEL_KBL_ULT_GT3_IDS(info), \
INTEL_VGA_DEVICE(0x5923, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x5926, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x5927, info) /* ULT GT3 */
#define INTEL_KBL_GT4_IDS(info) \
@@ -373,6 +444,30 @@
#define INTEL_AML_CFL_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x87CA, info)
+/* CML GT1 */
+#define INTEL_CML_GT1_IDS(info) \
+ INTEL_VGA_DEVICE(0x9B21, info), \
+ INTEL_VGA_DEVICE(0x9BAA, info), \
+ INTEL_VGA_DEVICE(0x9BAB, info), \
+ INTEL_VGA_DEVICE(0x9BAC, info), \
+ INTEL_VGA_DEVICE(0x9BA0, info), \
+ INTEL_VGA_DEVICE(0x9BA5, info), \
+ INTEL_VGA_DEVICE(0x9BA8, info), \
+ INTEL_VGA_DEVICE(0x9BA4, info), \
+ INTEL_VGA_DEVICE(0x9BA2, info)
+
+/* CML GT2 */
+#define INTEL_CML_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x9B41, info), \
+ INTEL_VGA_DEVICE(0x9BCA, info), \
+ INTEL_VGA_DEVICE(0x9BCB, info), \
+ INTEL_VGA_DEVICE(0x9BCC, info), \
+ INTEL_VGA_DEVICE(0x9BC0, info), \
+ INTEL_VGA_DEVICE(0x9BC5, info), \
+ INTEL_VGA_DEVICE(0x9BC8, info), \
+ INTEL_VGA_DEVICE(0x9BC4, info), \
+ INTEL_VGA_DEVICE(0x9BC2, info)
+
#define INTEL_KBL_IDS(info) \
INTEL_KBL_GT1_IDS(info), \
INTEL_KBL_GT2_IDS(info), \
@@ -436,10 +531,19 @@
INTEL_WHL_U_GT1_IDS(info), \
INTEL_WHL_U_GT2_IDS(info), \
INTEL_WHL_U_GT3_IDS(info), \
- INTEL_AML_CFL_GT2_IDS(info)
+ INTEL_AML_CFL_GT2_IDS(info), \
+ INTEL_CML_GT1_IDS(info), \
+ INTEL_CML_GT2_IDS(info)
/* CNL */
+#define INTEL_CNL_PORT_F_IDS(info) \
+ INTEL_VGA_DEVICE(0x5A54, info), \
+ INTEL_VGA_DEVICE(0x5A5C, info), \
+ INTEL_VGA_DEVICE(0x5A44, info), \
+ INTEL_VGA_DEVICE(0x5A4C, info)
+
#define INTEL_CNL_IDS(info) \
+ INTEL_CNL_PORT_F_IDS(info), \
INTEL_VGA_DEVICE(0x5A51, info), \
INTEL_VGA_DEVICE(0x5A59, info), \
INTEL_VGA_DEVICE(0x5A41, info), \
@@ -449,16 +553,11 @@
INTEL_VGA_DEVICE(0x5A42, info), \
INTEL_VGA_DEVICE(0x5A4A, info), \
INTEL_VGA_DEVICE(0x5A50, info), \
- INTEL_VGA_DEVICE(0x5A40, info), \
- INTEL_VGA_DEVICE(0x5A54, info), \
- INTEL_VGA_DEVICE(0x5A5C, info), \
- INTEL_VGA_DEVICE(0x5A44, info), \
- INTEL_VGA_DEVICE(0x5A4C, info)
+ INTEL_VGA_DEVICE(0x5A40, info)
/* ICL */
-#define INTEL_ICL_11_IDS(info) \
+#define INTEL_ICL_PORT_F_IDS(info) \
INTEL_VGA_DEVICE(0x8A50, info), \
- INTEL_VGA_DEVICE(0x8A51, info), \
INTEL_VGA_DEVICE(0x8A5C, info), \
INTEL_VGA_DEVICE(0x8A5D, info), \
INTEL_VGA_DEVICE(0x8A59, info), \
@@ -469,6 +568,18 @@
INTEL_VGA_DEVICE(0x8A57, info), \
INTEL_VGA_DEVICE(0x8A56, info), \
INTEL_VGA_DEVICE(0x8A71, info), \
- INTEL_VGA_DEVICE(0x8A70, info)
+ INTEL_VGA_DEVICE(0x8A70, info), \
+ INTEL_VGA_DEVICE(0x8A53, info)
+
+#define INTEL_ICL_11_IDS(info) \
+ INTEL_ICL_PORT_F_IDS(info), \
+ INTEL_VGA_DEVICE(0x8A51, info)
+
+/* EHL */
+#define INTEL_EHL_IDS(info) \
+ INTEL_VGA_DEVICE(0x4500, info), \
+ INTEL_VGA_DEVICE(0x4571, info), \
+ INTEL_VGA_DEVICE(0x4551, info), \
+ INTEL_VGA_DEVICE(0x4541, info)
#endif /* _I915_PCIIDS_H */
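
The aggregate *_GT*_IDS macros keep their old contents, so existing ID tables are unchanged; the new ULT/ULX sub-macros let a consumer name just those parts. A sketch of how they expand into a pci_device_id table; struct example_info stands in for whatever driver_data the real user passes (i915 passes its intel_device_info pointers):

#include <linux/pci.h>
#include <drm/i915_pciids.h>

struct example_info {
	unsigned int gen;	/* placeholder payload */
};

static const struct example_info example_hsw = { .gen = 7 };
static const struct example_info example_cml = { .gen = 9 };

static const struct pci_device_id example_ids[] = {
	/* Only the Haswell ULT GT2 variants. */
	INTEL_HSW_ULT_GT2_IDS(&example_hsw),
	/* The aggregate macro still pulls in ULT + ULX + the rest. */
	INTEL_HSW_GT1_IDS(&example_hsw),
	/* Newly added Comet Lake GT1 IDs. */
	INTEL_CML_GT1_IDS(&example_cml),
	{ }
};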
diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h
index f4ec2834bc22..af203b37d87a 100644
--- a/include/drm/tinydrm/mipi-dbi.h
+++ b/include/drm/tinydrm/mipi-dbi.h
@@ -12,7 +12,9 @@
#ifndef __LINUX_MIPI_DBI_H
#define __LINUX_MIPI_DBI_H
-#include <drm/tinydrm/tinydrm.h>
+#include <linux/mutex.h>
+#include <drm/drm_device.h>
+#include <drm/drm_simple_kms_helper.h>
struct drm_rect;
struct spi_device;
@@ -21,7 +23,6 @@ struct regulator;
/**
* struct mipi_dbi - MIPI DBI controller
- * @tinydrm: tinydrm base
* @spi: SPI device
* @enabled: Pipeline is enabled
* @cmdlock: Command lock
@@ -39,11 +40,20 @@ struct regulator;
* @regulator: power regulator (optional)
*/
struct mipi_dbi {
- struct tinydrm_device tinydrm;
+ /**
+ * @drm: DRM device
+ */
+ struct drm_device drm;
+
+ /**
+ * @pipe: Display pipe structure
+ */
+ struct drm_simple_display_pipe pipe;
+
struct spi_device *spi;
bool enabled;
struct mutex cmdlock;
- int (*command)(struct mipi_dbi *mipi, u8 cmd, u8 *param, size_t num);
+ int (*command)(struct mipi_dbi *mipi, u8 *cmd, u8 *param, size_t num);
const u8 *read_commands;
struct gpio_desc *dc;
u16 *tx_buf;
@@ -56,18 +66,17 @@ struct mipi_dbi {
struct regulator *regulator;
};
-static inline struct mipi_dbi *
-mipi_dbi_from_tinydrm(struct tinydrm_device *tdev)
+static inline struct mipi_dbi *drm_to_mipi_dbi(struct drm_device *drm)
{
- return container_of(tdev, struct mipi_dbi, tinydrm);
+ return container_of(drm, struct mipi_dbi, drm);
}
int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi,
struct gpio_desc *dc);
-int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
- const struct drm_simple_display_pipe_funcs *pipe_funcs,
- struct drm_driver *driver,
+int mipi_dbi_init(struct mipi_dbi *mipi,
+ const struct drm_simple_display_pipe_funcs *funcs,
const struct drm_display_mode *mode, unsigned int rotation);
+void mipi_dbi_release(struct drm_device *drm);
void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state);
void mipi_dbi_enable_flush(struct mipi_dbi *mipi,
@@ -82,6 +91,7 @@ u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len);
int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val);
int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len);
+int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len);
int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
struct drm_rect *clip, bool swap);
/**
@@ -99,7 +109,7 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
#define mipi_dbi_command(mipi, cmd, seq...) \
({ \
u8 d[] = { seq }; \
- mipi_dbi_command_buf(mipi, cmd, d, ARRAY_SIZE(d)); \
+ mipi_dbi_command_stackbuf(mipi, cmd, d, ARRAY_SIZE(d)); \
})
#ifdef CONFIG_DEBUG_FS
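
Since struct mipi_dbi now embeds the drm_device and the display pipe, a driver's drm_driver.release callback can recover its state with drm_to_mipi_dbi() and finish with the new mipi_dbi_release(); a minimal sketch of that callback follows (the probe/allocation side is omitted). Note also that the mipi_dbi_command() convenience macro now goes through mipi_dbi_command_stackbuf(), which, as the name suggests, copies the caller's on-stack byte array into a DMA-safe buffer before issuing the transfer.

#include <linux/slab.h>
#include <drm/tinydrm/mipi-dbi.h>

static void sketch_release(struct drm_device *drm)
{
	struct mipi_dbi *mipi = drm_to_mipi_dbi(drm);

	/* Tears down mode config and finalises the embedded drm_device. */
	mipi_dbi_release(drm);
	kfree(mipi);
}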
diff --git a/include/drm/tinydrm/tinydrm-helpers.h b/include/drm/tinydrm/tinydrm-helpers.h
index f0d598789e4d..7d259acb8826 100644
--- a/include/drm/tinydrm/tinydrm-helpers.h
+++ b/include/drm/tinydrm/tinydrm-helpers.h
@@ -11,8 +11,12 @@
#define __LINUX_TINYDRM_HELPERS_H
struct backlight_device;
+struct drm_device;
+struct drm_display_mode;
struct drm_framebuffer;
struct drm_rect;
+struct drm_simple_display_pipe;
+struct drm_simple_display_pipe_funcs;
struct spi_transfer;
struct spi_message;
struct spi_device;
@@ -33,15 +37,14 @@ static inline bool tinydrm_machine_little_endian(void)
#endif
}
-void tinydrm_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip);
-void tinydrm_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip);
-void tinydrm_xrgb8888_to_rgb565(u16 *dst, void *vaddr,
- struct drm_framebuffer *fb,
- struct drm_rect *clip, bool swap);
-void tinydrm_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip);
+int tinydrm_display_pipe_init(struct drm_device *drm,
+ struct drm_simple_display_pipe *pipe,
+ const struct drm_simple_display_pipe_funcs *funcs,
+ int connector_type,
+ const uint32_t *formats,
+ unsigned int format_count,
+ const struct drm_display_mode *mode,
+ unsigned int rotation);
size_t tinydrm_spi_max_transfer_size(struct spi_device *spi, size_t max_len);
bool tinydrm_spi_bpw_supported(struct spi_device *spi, u8 bpw);
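
tinydrm_display_pipe_init() no longer takes a struct tinydrm_device (the header carrying it is deleted below); callers now pass the drm_device and the pipe they embed themselves. A hedged sketch with placeholder funcs and mode arguments:

#include <linux/kernel.h>
#include <drm/drm_connector.h>
#include <drm/drm_fourcc.h>
#include <drm/tinydrm/tinydrm-helpers.h>

static int sketch_pipe_setup(struct drm_device *drm,
			     struct drm_simple_display_pipe *pipe,
			     const struct drm_simple_display_pipe_funcs *funcs,
			     const struct drm_display_mode *mode)
{
	static const uint32_t formats[] = {
		DRM_FORMAT_RGB565,
		DRM_FORMAT_XRGB8888,
	};

	return tinydrm_display_pipe_init(drm, pipe, funcs,
					 DRM_MODE_CONNECTOR_VIRTUAL,
					 formats, ARRAY_SIZE(formats),
					 mode, 0 /* rotation */);
}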
diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h
deleted file mode 100644
index 5621688edcc0..000000000000
--- a/include/drm/tinydrm/tinydrm.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (C) 2016 Noralf Trønnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __LINUX_TINYDRM_H
-#define __LINUX_TINYDRM_H
-
-#include <drm/drm_simple_kms_helper.h>
-
-struct drm_driver;
-
-/**
- * struct tinydrm_device - tinydrm device
- */
-struct tinydrm_device {
- /**
- * @drm: DRM device
- */
- struct drm_device *drm;
-
- /**
- * @pipe: Display pipe structure
- */
- struct drm_simple_display_pipe pipe;
-};
-
-static inline struct tinydrm_device *
-pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
-{
- return container_of(pipe, struct tinydrm_device, pipe);
-}
-
-/**
- * TINYDRM_MODE - tinydrm display mode
- * @hd: Horizontal resolution, width
- * @vd: Vertical resolution, height
- * @hd_mm: Display width in millimeters
- * @vd_mm: Display height in millimeters
- *
- * This macro creates a &drm_display_mode for use with tinydrm.
- */
-#define TINYDRM_MODE(hd, vd, hd_mm, vd_mm) \
- .hdisplay = (hd), \
- .hsync_start = (hd), \
- .hsync_end = (hd), \
- .htotal = (hd), \
- .vdisplay = (vd), \
- .vsync_start = (vd), \
- .vsync_end = (vd), \
- .vtotal = (vd), \
- .width_mm = (hd_mm), \
- .height_mm = (vd_mm), \
- .type = DRM_MODE_TYPE_DRIVER, \
- .clock = 1 /* pass validation */
-
-int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
- struct drm_driver *driver);
-int devm_tinydrm_register(struct tinydrm_device *tdev);
-void tinydrm_shutdown(struct tinydrm_device *tdev);
-
-int
-tinydrm_display_pipe_init(struct tinydrm_device *tdev,
- const struct drm_simple_display_pipe_funcs *funcs,
- int connector_type,
- const uint32_t *formats,
- unsigned int format_count,
- const struct drm_display_mode *mode,
- unsigned int rotation);
-
-#endif /* __LINUX_TINYDRM_H */
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index cbf3180cb612..129dabbc002d 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -420,7 +420,6 @@ extern struct ttm_bo_global {
/**
* Protected by ttm_global_mutex.
*/
- unsigned int use_count;
struct list_head device_list;
/**
@@ -597,7 +596,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_driver *driver,
struct address_space *mapping,
- uint64_t file_page_offset, bool need_dma32);
+ bool need_dma32);
/**
* ttm_bo_unmap_virtual
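
ttm_bo_device_init() loses the per-driver file_page_offset argument; the mmap offset range now appears to be set up inside TTM itself using the shared DRM_FILE_PAGE_OFFSET_* constants added earlier in this diff. Callers simply drop one parameter; a sketch of the updated call, with sketch_bo_driver as a placeholder:

#include <drm/ttm/ttm_bo_driver.h>

extern struct ttm_bo_driver sketch_bo_driver;	/* placeholder driver ops */

static int sketch_ttm_setup(struct ttm_bo_device *bdev,
			    struct address_space *mapping,
			    bool need_dma32)
{
	/* Previously a driver-local page offset was passed as an extra
	 * argument; that responsibility is gone from the caller. */
	return ttm_bo_device_init(bdev, &sketch_bo_driver, mapping, need_dma32);
}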
diff --git a/include/dt-bindings/clock/axg-audio-clkc.h b/include/dt-bindings/clock/axg-audio-clkc.h
index fd9c362099d9..75901c636893 100644
--- a/include/dt-bindings/clock/axg-audio-clkc.h
+++ b/include/dt-bindings/clock/axg-audio-clkc.h
@@ -7,26 +7,6 @@
#ifndef __AXG_AUDIO_CLKC_BINDINGS_H
#define __AXG_AUDIO_CLKC_BINDINGS_H
-#define AUD_CLKID_SLV_SCLK0 9
-#define AUD_CLKID_SLV_SCLK1 10
-#define AUD_CLKID_SLV_SCLK2 11
-#define AUD_CLKID_SLV_SCLK3 12
-#define AUD_CLKID_SLV_SCLK4 13
-#define AUD_CLKID_SLV_SCLK5 14
-#define AUD_CLKID_SLV_SCLK6 15
-#define AUD_CLKID_SLV_SCLK7 16
-#define AUD_CLKID_SLV_SCLK8 17
-#define AUD_CLKID_SLV_SCLK9 18
-#define AUD_CLKID_SLV_LRCLK0 19
-#define AUD_CLKID_SLV_LRCLK1 20
-#define AUD_CLKID_SLV_LRCLK2 21
-#define AUD_CLKID_SLV_LRCLK3 22
-#define AUD_CLKID_SLV_LRCLK4 23
-#define AUD_CLKID_SLV_LRCLK5 24
-#define AUD_CLKID_SLV_LRCLK6 25
-#define AUD_CLKID_SLV_LRCLK7 26
-#define AUD_CLKID_SLV_LRCLK8 27
-#define AUD_CLKID_SLV_LRCLK9 28
#define AUD_CLKID_DDR_ARB 29
#define AUD_CLKID_PDM 30
#define AUD_CLKID_TDMIN_A 31
@@ -90,5 +70,15 @@
#define AUD_CLKID_TDMOUT_A_LRCLK 134
#define AUD_CLKID_TDMOUT_B_LRCLK 135
#define AUD_CLKID_TDMOUT_C_LRCLK 136
+#define AUD_CLKID_SPDIFOUT_B 151
+#define AUD_CLKID_SPDIFOUT_B_CLK 152
+#define AUD_CLKID_TDM_MCLK_PAD0 155
+#define AUD_CLKID_TDM_MCLK_PAD1 156
+#define AUD_CLKID_TDM_LRCLK_PAD0 157
+#define AUD_CLKID_TDM_LRCLK_PAD1 158
+#define AUD_CLKID_TDM_LRCLK_PAD2 159
+#define AUD_CLKID_TDM_SCLK_PAD0 160
+#define AUD_CLKID_TDM_SCLK_PAD1 161
+#define AUD_CLKID_TDM_SCLK_PAD2 162
#endif /* __AXG_AUDIO_CLKC_BINDINGS_H */
diff --git a/include/dt-bindings/clock/exynos5410.h b/include/dt-bindings/clock/exynos5410.h
index f179eabbcdb7..86c2ad56c5ef 100644
--- a/include/dt-bindings/clock/exynos5410.h
+++ b/include/dt-bindings/clock/exynos5410.h
@@ -36,6 +36,7 @@
#define CLK_UART0 257
#define CLK_UART1 258
#define CLK_UART2 259
+#define CLK_UART3 260
#define CLK_I2C0 261
#define CLK_I2C1 262
#define CLK_I2C2 263
@@ -44,7 +45,7 @@
#define CLK_USI1 266
#define CLK_USI2 267
#define CLK_USI3 268
-#define CLK_UART3 260
+#define CLK_TSADC 270
#define CLK_PWM 279
#define CLK_MCT 315
#define CLK_WDT 316
diff --git a/include/dt-bindings/clock/g12a-aoclkc.h b/include/dt-bindings/clock/g12a-aoclkc.h
index 8db01ffbeb06..e916e49ff288 100644
--- a/include/dt-bindings/clock/g12a-aoclkc.h
+++ b/include/dt-bindings/clock/g12a-aoclkc.h
@@ -26,7 +26,9 @@
#define CLKID_AO_M4_FCLK 13
#define CLKID_AO_M4_HCLK 14
#define CLKID_AO_CLK81 15
+#define CLKID_AO_SAR_ADC_SEL 16
#define CLKID_AO_SAR_ADC_CLK 18
+#define CLKID_AO_CTS_OSCIN 19
#define CLKID_AO_32K 23
#define CLKID_AO_CEC 27
#define CLKID_AO_CTS_RTC_OSCIN 28
diff --git a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h
index 83b657038d1e..82c9e0c020b2 100644
--- a/include/dt-bindings/clock/g12a-clkc.h
+++ b/include/dt-bindings/clock/g12a-clkc.h
@@ -131,5 +131,10 @@
#define CLKID_MALI_1 174
#define CLKID_MALI 175
#define CLKID_MPLL_5OM 177
+#define CLKID_CPU_CLK 187
+#define CLKID_PCIE_PLL 201
+#define CLKID_VDEC_1 204
+#define CLKID_VDEC_HEVC 207
+#define CLKID_VDEC_HEVCF 210
#endif /* __G12A_CLKC_H */
diff --git a/include/dt-bindings/clock/imx7ulp-clock.h b/include/dt-bindings/clock/imx7ulp-clock.h
index 21d872e69cb1..6f66f9005c81 100644
--- a/include/dt-bindings/clock/imx7ulp-clock.h
+++ b/include/dt-bindings/clock/imx7ulp-clock.h
@@ -65,7 +65,6 @@
#define IMX7ULP_CLK_FLEXBUS 2
#define IMX7ULP_CLK_SEMA42_1 3
#define IMX7ULP_CLK_DMA_MUX1 4
-#define IMX7ULP_CLK_SNVS 5
#define IMX7ULP_CLK_CAAM 6
#define IMX7ULP_CLK_LPTPM4 7
#define IMX7ULP_CLK_LPTPM5 8
diff --git a/include/dt-bindings/clock/jz4725b-cgu.h b/include/dt-bindings/clock/jz4725b-cgu.h
index 460bbeff6ab8..31f1ab0fe42c 100644
--- a/include/dt-bindings/clock/jz4725b-cgu.h
+++ b/include/dt-bindings/clock/jz4725b-cgu.h
@@ -31,5 +31,6 @@
#define JZ4725B_CLK_TCU 22
#define JZ4725B_CLK_EXT512 23
#define JZ4725B_CLK_RTC 24
+#define JZ4725B_CLK_UDC_PHY 25
#endif /* __DT_BINDINGS_CLOCK_JZ4725B_CGU_H__ */
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
index 8067077a62ca..47556539f0ee 100644
--- a/include/dt-bindings/clock/meson8b-clkc.h
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -103,10 +103,14 @@
#define CLKID_MPLL1 94
#define CLKID_MPLL2 95
#define CLKID_NAND_CLK 112
-#define CLKID_ABP 124
#define CLKID_APB 124
#define CLKID_PERIPH 126
#define CLKID_AXI 128
#define CLKID_L2_DRAM 130
+#define CLKID_VPU 190
+#define CLKID_VDEC_1 196
+#define CLKID_VDEC_HCODEC 199
+#define CLKID_VDEC_2 202
+#define CLKID_VDEC_HEVC 206
#endif /* __MESON8B_CLKC_H */
diff --git a/include/dt-bindings/clock/mt8183-clk.h b/include/dt-bindings/clock/mt8183-clk.h
new file mode 100644
index 000000000000..0046506eb24c
--- /dev/null
+++ b/include/dt-bindings/clock/mt8183-clk.h
@@ -0,0 +1,422 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Weiyi Lu <weiyi.lu@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8183_H
+#define _DT_BINDINGS_CLK_MT8183_H
+
+/* APMIXED */
+#define CLK_APMIXED_ARMPLL_LL 0
+#define CLK_APMIXED_ARMPLL_L 1
+#define CLK_APMIXED_CCIPLL 2
+#define CLK_APMIXED_MAINPLL 3
+#define CLK_APMIXED_UNIV2PLL 4
+#define CLK_APMIXED_MSDCPLL 5
+#define CLK_APMIXED_MMPLL 6
+#define CLK_APMIXED_MFGPLL 7
+#define CLK_APMIXED_TVDPLL 8
+#define CLK_APMIXED_APLL1 9
+#define CLK_APMIXED_APLL2 10
+#define CLK_APMIXED_SSUSB_26M 11
+#define CLK_APMIXED_APPLL_26M 12
+#define CLK_APMIXED_MIPIC0_26M 13
+#define CLK_APMIXED_MDPLLGP_26M 14
+#define CLK_APMIXED_MMSYS_26M 15
+#define CLK_APMIXED_UFS_26M 16
+#define CLK_APMIXED_MIPIC1_26M 17
+#define CLK_APMIXED_MEMPLL_26M 18
+#define CLK_APMIXED_CLKSQ_LVPLL_26M 19
+#define CLK_APMIXED_MIPID0_26M 20
+#define CLK_APMIXED_MIPID1_26M 21
+#define CLK_APMIXED_NR_CLK 22
+
+/* TOPCKGEN */
+#define CLK_TOP_MUX_AXI 0
+#define CLK_TOP_MUX_MM 1
+#define CLK_TOP_MUX_CAM 2
+#define CLK_TOP_MUX_MFG 3
+#define CLK_TOP_MUX_CAMTG 4
+#define CLK_TOP_MUX_UART 5
+#define CLK_TOP_MUX_SPI 6
+#define CLK_TOP_MUX_MSDC50_0_HCLK 7
+#define CLK_TOP_MUX_MSDC50_0 8
+#define CLK_TOP_MUX_MSDC30_1 9
+#define CLK_TOP_MUX_MSDC30_2 10
+#define CLK_TOP_MUX_AUDIO 11
+#define CLK_TOP_MUX_AUD_INTBUS 12
+#define CLK_TOP_MUX_FPWRAP_ULPOSC 13
+#define CLK_TOP_MUX_SCP 14
+#define CLK_TOP_MUX_ATB 15
+#define CLK_TOP_MUX_SSPM 16
+#define CLK_TOP_MUX_DPI0 17
+#define CLK_TOP_MUX_SCAM 18
+#define CLK_TOP_MUX_AUD_1 19
+#define CLK_TOP_MUX_AUD_2 20
+#define CLK_TOP_MUX_DISP_PWM 21
+#define CLK_TOP_MUX_SSUSB_TOP_XHCI 22
+#define CLK_TOP_MUX_USB_TOP 23
+#define CLK_TOP_MUX_SPM 24
+#define CLK_TOP_MUX_I2C 25
+#define CLK_TOP_MUX_F52M_MFG 26
+#define CLK_TOP_MUX_SENINF 27
+#define CLK_TOP_MUX_DXCC 28
+#define CLK_TOP_MUX_CAMTG2 29
+#define CLK_TOP_MUX_AUD_ENG1 30
+#define CLK_TOP_MUX_AUD_ENG2 31
+#define CLK_TOP_MUX_FAES_UFSFDE 32
+#define CLK_TOP_MUX_FUFS 33
+#define CLK_TOP_MUX_IMG 34
+#define CLK_TOP_MUX_DSP 35
+#define CLK_TOP_MUX_DSP1 36
+#define CLK_TOP_MUX_DSP2 37
+#define CLK_TOP_MUX_IPU_IF 38
+#define CLK_TOP_MUX_CAMTG3 39
+#define CLK_TOP_MUX_CAMTG4 40
+#define CLK_TOP_MUX_PMICSPI 41
+#define CLK_TOP_SYSPLL_CK 42
+#define CLK_TOP_SYSPLL_D2 43
+#define CLK_TOP_SYSPLL_D3 44
+#define CLK_TOP_SYSPLL_D5 45
+#define CLK_TOP_SYSPLL_D7 46
+#define CLK_TOP_SYSPLL_D2_D2 47
+#define CLK_TOP_SYSPLL_D2_D4 48
+#define CLK_TOP_SYSPLL_D2_D8 49
+#define CLK_TOP_SYSPLL_D2_D16 50
+#define CLK_TOP_SYSPLL_D3_D2 51
+#define CLK_TOP_SYSPLL_D3_D4 52
+#define CLK_TOP_SYSPLL_D3_D8 53
+#define CLK_TOP_SYSPLL_D5_D2 54
+#define CLK_TOP_SYSPLL_D5_D4 55
+#define CLK_TOP_SYSPLL_D7_D2 56
+#define CLK_TOP_SYSPLL_D7_D4 57
+#define CLK_TOP_UNIVPLL_CK 58
+#define CLK_TOP_UNIVPLL_D2 59
+#define CLK_TOP_UNIVPLL_D3 60
+#define CLK_TOP_UNIVPLL_D5 61
+#define CLK_TOP_UNIVPLL_D7 62
+#define CLK_TOP_UNIVPLL_D2_D2 63
+#define CLK_TOP_UNIVPLL_D2_D4 64
+#define CLK_TOP_UNIVPLL_D2_D8 65
+#define CLK_TOP_UNIVPLL_D3_D2 66
+#define CLK_TOP_UNIVPLL_D3_D4 67
+#define CLK_TOP_UNIVPLL_D3_D8 68
+#define CLK_TOP_UNIVPLL_D5_D2 69
+#define CLK_TOP_UNIVPLL_D5_D4 70
+#define CLK_TOP_UNIVPLL_D5_D8 71
+#define CLK_TOP_APLL1_CK 72
+#define CLK_TOP_APLL1_D2 73
+#define CLK_TOP_APLL1_D4 74
+#define CLK_TOP_APLL1_D8 75
+#define CLK_TOP_APLL2_CK 76
+#define CLK_TOP_APLL2_D2 77
+#define CLK_TOP_APLL2_D4 78
+#define CLK_TOP_APLL2_D8 79
+#define CLK_TOP_TVDPLL_CK 80
+#define CLK_TOP_TVDPLL_D2 81
+#define CLK_TOP_TVDPLL_D4 82
+#define CLK_TOP_TVDPLL_D8 83
+#define CLK_TOP_TVDPLL_D16 84
+#define CLK_TOP_MSDCPLL_CK 85
+#define CLK_TOP_MSDCPLL_D2 86
+#define CLK_TOP_MSDCPLL_D4 87
+#define CLK_TOP_MSDCPLL_D8 88
+#define CLK_TOP_MSDCPLL_D16 89
+#define CLK_TOP_AD_OSC_CK 90
+#define CLK_TOP_OSC_D2 91
+#define CLK_TOP_OSC_D4 92
+#define CLK_TOP_OSC_D8 93
+#define CLK_TOP_OSC_D16 94
+#define CLK_TOP_F26M_CK_D2 95
+#define CLK_TOP_MFGPLL_CK 96
+#define CLK_TOP_UNIVP_192M_CK 97
+#define CLK_TOP_UNIVP_192M_D2 98
+#define CLK_TOP_UNIVP_192M_D4 99
+#define CLK_TOP_UNIVP_192M_D8 100
+#define CLK_TOP_UNIVP_192M_D16 101
+#define CLK_TOP_UNIVP_192M_D32 102
+#define CLK_TOP_MMPLL_CK 103
+#define CLK_TOP_MMPLL_D4 104
+#define CLK_TOP_MMPLL_D4_D2 105
+#define CLK_TOP_MMPLL_D4_D4 106
+#define CLK_TOP_MMPLL_D5 107
+#define CLK_TOP_MMPLL_D5_D2 108
+#define CLK_TOP_MMPLL_D5_D4 109
+#define CLK_TOP_MMPLL_D6 110
+#define CLK_TOP_MMPLL_D7 111
+#define CLK_TOP_CLK26M 112
+#define CLK_TOP_CLK13M 113
+#define CLK_TOP_ULPOSC 114
+#define CLK_TOP_UNIVP_192M 115
+#define CLK_TOP_MUX_APLL_I2S0 116
+#define CLK_TOP_MUX_APLL_I2S1 117
+#define CLK_TOP_MUX_APLL_I2S2 118
+#define CLK_TOP_MUX_APLL_I2S3 119
+#define CLK_TOP_MUX_APLL_I2S4 120
+#define CLK_TOP_MUX_APLL_I2S5 121
+#define CLK_TOP_APLL12_DIV0 122
+#define CLK_TOP_APLL12_DIV1 123
+#define CLK_TOP_APLL12_DIV2 124
+#define CLK_TOP_APLL12_DIV3 125
+#define CLK_TOP_APLL12_DIV4 126
+#define CLK_TOP_APLL12_DIVB 127
+#define CLK_TOP_UNIVPLL 128
+#define CLK_TOP_ARMPLL_DIV_PLL1 129
+#define CLK_TOP_ARMPLL_DIV_PLL2 130
+#define CLK_TOP_UNIVPLL_D3_D16 131
+#define CLK_TOP_NR_CLK 132
+
+/* CAMSYS */
+#define CLK_CAM_LARB6 0
+#define CLK_CAM_DFP_VAD 1
+#define CLK_CAM_CAM 2
+#define CLK_CAM_CAMTG 3
+#define CLK_CAM_SENINF 4
+#define CLK_CAM_CAMSV0 5
+#define CLK_CAM_CAMSV1 6
+#define CLK_CAM_CAMSV2 7
+#define CLK_CAM_CCU 8
+#define CLK_CAM_LARB3 9
+#define CLK_CAM_NR_CLK 10
+
+/* INFRACFG_AO */
+#define CLK_INFRA_PMIC_TMR 0
+#define CLK_INFRA_PMIC_AP 1
+#define CLK_INFRA_PMIC_MD 2
+#define CLK_INFRA_PMIC_CONN 3
+#define CLK_INFRA_SCPSYS 4
+#define CLK_INFRA_SEJ 5
+#define CLK_INFRA_APXGPT 6
+#define CLK_INFRA_ICUSB 7
+#define CLK_INFRA_GCE 8
+#define CLK_INFRA_THERM 9
+#define CLK_INFRA_I2C0 10
+#define CLK_INFRA_I2C1 11
+#define CLK_INFRA_I2C2 12
+#define CLK_INFRA_I2C3 13
+#define CLK_INFRA_PWM_HCLK 14
+#define CLK_INFRA_PWM1 15
+#define CLK_INFRA_PWM2 16
+#define CLK_INFRA_PWM3 17
+#define CLK_INFRA_PWM4 18
+#define CLK_INFRA_PWM 19
+#define CLK_INFRA_UART0 20
+#define CLK_INFRA_UART1 21
+#define CLK_INFRA_UART2 22
+#define CLK_INFRA_UART3 23
+#define CLK_INFRA_GCE_26M 24
+#define CLK_INFRA_CQ_DMA_FPC 25
+#define CLK_INFRA_BTIF 26
+#define CLK_INFRA_SPI0 27
+#define CLK_INFRA_MSDC0 28
+#define CLK_INFRA_MSDC1 29
+#define CLK_INFRA_MSDC2 30
+#define CLK_INFRA_MSDC0_SCK 31
+#define CLK_INFRA_DVFSRC 32
+#define CLK_INFRA_GCPU 33
+#define CLK_INFRA_TRNG 34
+#define CLK_INFRA_AUXADC 35
+#define CLK_INFRA_CPUM 36
+#define CLK_INFRA_CCIF1_AP 37
+#define CLK_INFRA_CCIF1_MD 38
+#define CLK_INFRA_AUXADC_MD 39
+#define CLK_INFRA_MSDC1_SCK 40
+#define CLK_INFRA_MSDC2_SCK 41
+#define CLK_INFRA_AP_DMA 42
+#define CLK_INFRA_XIU 43
+#define CLK_INFRA_DEVICE_APC 44
+#define CLK_INFRA_CCIF_AP 45
+#define CLK_INFRA_DEBUGSYS 46
+#define CLK_INFRA_AUDIO 47
+#define CLK_INFRA_CCIF_MD 48
+#define CLK_INFRA_DXCC_SEC_CORE 49
+#define CLK_INFRA_DXCC_AO 50
+#define CLK_INFRA_DRAMC_F26M 51
+#define CLK_INFRA_IRTX 52
+#define CLK_INFRA_DISP_PWM 53
+#define CLK_INFRA_CLDMA_BCLK 54
+#define CLK_INFRA_AUDIO_26M_BCLK 55
+#define CLK_INFRA_SPI1 56
+#define CLK_INFRA_I2C4 57
+#define CLK_INFRA_MODEM_TEMP_SHARE 58
+#define CLK_INFRA_SPI2 59
+#define CLK_INFRA_SPI3 60
+#define CLK_INFRA_UNIPRO_SCK 61
+#define CLK_INFRA_UNIPRO_TICK 62
+#define CLK_INFRA_UFS_MP_SAP_BCLK 63
+#define CLK_INFRA_MD32_BCLK 64
+#define CLK_INFRA_SSPM 65
+#define CLK_INFRA_UNIPRO_MBIST 66
+#define CLK_INFRA_SSPM_BUS_HCLK 67
+#define CLK_INFRA_I2C5 68
+#define CLK_INFRA_I2C5_ARBITER 69
+#define CLK_INFRA_I2C5_IMM 70
+#define CLK_INFRA_I2C1_ARBITER 71
+#define CLK_INFRA_I2C1_IMM 72
+#define CLK_INFRA_I2C2_ARBITER 73
+#define CLK_INFRA_I2C2_IMM 74
+#define CLK_INFRA_SPI4 75
+#define CLK_INFRA_SPI5 76
+#define CLK_INFRA_CQ_DMA 77
+#define CLK_INFRA_UFS 78
+#define CLK_INFRA_AES_UFSFDE 79
+#define CLK_INFRA_UFS_TICK 80
+#define CLK_INFRA_MSDC0_SELF 81
+#define CLK_INFRA_MSDC1_SELF 82
+#define CLK_INFRA_MSDC2_SELF 83
+#define CLK_INFRA_SSPM_26M_SELF 84
+#define CLK_INFRA_SSPM_32K_SELF 85
+#define CLK_INFRA_UFS_AXI 86
+#define CLK_INFRA_I2C6 87
+#define CLK_INFRA_AP_MSDC0 88
+#define CLK_INFRA_MD_MSDC0 89
+#define CLK_INFRA_USB 90
+#define CLK_INFRA_DEVMPU_BCLK 91
+#define CLK_INFRA_CCIF2_AP 92
+#define CLK_INFRA_CCIF2_MD 93
+#define CLK_INFRA_CCIF3_AP 94
+#define CLK_INFRA_CCIF3_MD 95
+#define CLK_INFRA_SEJ_F13M 96
+#define CLK_INFRA_AES_BCLK 97
+#define CLK_INFRA_I2C7 98
+#define CLK_INFRA_I2C8 99
+#define CLK_INFRA_FBIST2FPC 100
+#define CLK_INFRA_NR_CLK 101
+
+/* MFGCFG */
+#define CLK_MFG_BG3D 0
+#define CLK_MFG_NR_CLK 1
+
+/* IMG */
+#define CLK_IMG_OWE 0
+#define CLK_IMG_WPE_B 1
+#define CLK_IMG_WPE_A 2
+#define CLK_IMG_MFB 3
+#define CLK_IMG_RSC 4
+#define CLK_IMG_DPE 5
+#define CLK_IMG_FDVT 6
+#define CLK_IMG_DIP 7
+#define CLK_IMG_LARB2 8
+#define CLK_IMG_LARB5 9
+#define CLK_IMG_NR_CLK 10
+
+/* MMSYS_CONFIG */
+#define CLK_MM_SMI_COMMON 0
+#define CLK_MM_SMI_LARB0 1
+#define CLK_MM_SMI_LARB1 2
+#define CLK_MM_GALS_COMM0 3
+#define CLK_MM_GALS_COMM1 4
+#define CLK_MM_GALS_CCU2MM 5
+#define CLK_MM_GALS_IPU12MM 6
+#define CLK_MM_GALS_IMG2MM 7
+#define CLK_MM_GALS_CAM2MM 8
+#define CLK_MM_GALS_IPU2MM 9
+#define CLK_MM_MDP_DL_TXCK 10
+#define CLK_MM_IPU_DL_TXCK 11
+#define CLK_MM_MDP_RDMA0 12
+#define CLK_MM_MDP_RDMA1 13
+#define CLK_MM_MDP_RSZ0 14
+#define CLK_MM_MDP_RSZ1 15
+#define CLK_MM_MDP_TDSHP 16
+#define CLK_MM_MDP_WROT0 17
+#define CLK_MM_FAKE_ENG 18
+#define CLK_MM_DISP_OVL0 19
+#define CLK_MM_DISP_OVL0_2L 20
+#define CLK_MM_DISP_OVL1_2L 21
+#define CLK_MM_DISP_RDMA0 22
+#define CLK_MM_DISP_RDMA1 23
+#define CLK_MM_DISP_WDMA0 24
+#define CLK_MM_DISP_COLOR0 25
+#define CLK_MM_DISP_CCORR0 26
+#define CLK_MM_DISP_AAL0 27
+#define CLK_MM_DISP_GAMMA0 28
+#define CLK_MM_DISP_DITHER0 29
+#define CLK_MM_DISP_SPLIT 30
+#define CLK_MM_DSI0_MM 31
+#define CLK_MM_DSI0_IF 32
+#define CLK_MM_DPI_MM 33
+#define CLK_MM_DPI_IF 34
+#define CLK_MM_FAKE_ENG2 35
+#define CLK_MM_MDP_DL_RX 36
+#define CLK_MM_IPU_DL_RX 37
+#define CLK_MM_26M 38
+#define CLK_MM_MMSYS_R2Y 39
+#define CLK_MM_DISP_RSZ 40
+#define CLK_MM_MDP_WDMA0 41
+#define CLK_MM_MDP_AAL 42
+#define CLK_MM_MDP_CCORR 43
+#define CLK_MM_DBI_MM 44
+#define CLK_MM_DBI_IF 45
+#define CLK_MM_NR_CLK 46
+
+/* VDEC_GCON */
+#define CLK_VDEC_VDEC 0
+#define CLK_VDEC_LARB1 1
+#define CLK_VDEC_NR_CLK 2
+
+/* VENC_GCON */
+#define CLK_VENC_LARB 0
+#define CLK_VENC_VENC 1
+#define CLK_VENC_JPGENC 2
+#define CLK_VENC_NR_CLK 3
+
+/* AUDIO */
+#define CLK_AUDIO_TML 0
+#define CLK_AUDIO_DAC_PREDIS 1
+#define CLK_AUDIO_DAC 2
+#define CLK_AUDIO_ADC 3
+#define CLK_AUDIO_APLL_TUNER 4
+#define CLK_AUDIO_APLL2_TUNER 5
+#define CLK_AUDIO_24M 6
+#define CLK_AUDIO_22M 7
+#define CLK_AUDIO_AFE 8
+#define CLK_AUDIO_I2S4 9
+#define CLK_AUDIO_I2S3 10
+#define CLK_AUDIO_I2S2 11
+#define CLK_AUDIO_I2S1 12
+#define CLK_AUDIO_PDN_ADDA6_ADC 13
+#define CLK_AUDIO_TDM 14
+#define CLK_AUDIO_NR_CLK 15
+
+/* IPU_CONN */
+#define CLK_IPU_CONN_IPU 0
+#define CLK_IPU_CONN_AHB 1
+#define CLK_IPU_CONN_AXI 2
+#define CLK_IPU_CONN_ISP 3
+#define CLK_IPU_CONN_CAM_ADL 4
+#define CLK_IPU_CONN_IMG_ADL 5
+#define CLK_IPU_CONN_DAP_RX 6
+#define CLK_IPU_CONN_APB2AXI 7
+#define CLK_IPU_CONN_APB2AHB 8
+#define CLK_IPU_CONN_IPU_CAB1TO2 9
+#define CLK_IPU_CONN_IPU1_CAB1TO2 10
+#define CLK_IPU_CONN_IPU2_CAB1TO2 11
+#define CLK_IPU_CONN_CAB3TO3 12
+#define CLK_IPU_CONN_CAB2TO1 13
+#define CLK_IPU_CONN_CAB3TO1_SLICE 14
+#define CLK_IPU_CONN_NR_CLK 15
+
+/* IPU_ADL */
+#define CLK_IPU_ADL_CABGEN 0
+#define CLK_IPU_ADL_NR_CLK 1
+
+/* IPU_CORE0 */
+#define CLK_IPU_CORE0_JTAG 0
+#define CLK_IPU_CORE0_AXI 1
+#define CLK_IPU_CORE0_IPU 2
+#define CLK_IPU_CORE0_NR_CLK 3
+
+/* IPU_CORE1 */
+#define CLK_IPU_CORE1_JTAG 0
+#define CLK_IPU_CORE1_AXI 1
+#define CLK_IPU_CORE1_IPU 2
+#define CLK_IPU_CORE1_NR_CLK 3
+
+/* MCUCFG */
+#define CLK_MCU_MP0_SEL 0
+#define CLK_MCU_MP2_SEL 1
+#define CLK_MCU_BUS_SEL 2
+#define CLK_MCU_NR_CLK 3
+
+#endif /* _DT_BINDINGS_CLK_MT8183_H */
diff --git a/include/dt-bindings/clock/mt8516-clk.h b/include/dt-bindings/clock/mt8516-clk.h
new file mode 100644
index 000000000000..9cfca53cd78d
--- /dev/null
+++ b/include/dt-bindings/clock/mt8516-clk.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Copyright (c) 2019 BayLibre, SAS.
+ * Author: James Liao <jamesjj.liao@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT8516_H
+#define _DT_BINDINGS_CLK_MT8516_H
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_ARMPLL 0
+#define CLK_APMIXED_MAINPLL 1
+#define CLK_APMIXED_UNIVPLL 2
+#define CLK_APMIXED_MMPLL 3
+#define CLK_APMIXED_APLL1 4
+#define CLK_APMIXED_APLL2 5
+#define CLK_APMIXED_NR_CLK 6
+
+/* INFRACFG */
+
+#define CLK_IFR_MUX1_SEL 0
+#define CLK_IFR_ETH_25M_SEL 1
+#define CLK_IFR_I2C0_SEL 2
+#define CLK_IFR_I2C1_SEL 3
+#define CLK_IFR_I2C2_SEL 4
+#define CLK_IFR_NR_CLK 5
+
+/* TOPCKGEN */
+
+#define CLK_TOP_CLK_NULL 0
+#define CLK_TOP_I2S_INFRA_BCK 1
+#define CLK_TOP_MEMPLL 2
+#define CLK_TOP_DMPLL 3
+#define CLK_TOP_MAINPLL_D2 4
+#define CLK_TOP_MAINPLL_D4 5
+#define CLK_TOP_MAINPLL_D8 6
+#define CLK_TOP_MAINPLL_D16 7
+#define CLK_TOP_MAINPLL_D11 8
+#define CLK_TOP_MAINPLL_D22 9
+#define CLK_TOP_MAINPLL_D3 10
+#define CLK_TOP_MAINPLL_D6 11
+#define CLK_TOP_MAINPLL_D12 12
+#define CLK_TOP_MAINPLL_D5 13
+#define CLK_TOP_MAINPLL_D10 14
+#define CLK_TOP_MAINPLL_D20 15
+#define CLK_TOP_MAINPLL_D40 16
+#define CLK_TOP_MAINPLL_D7 17
+#define CLK_TOP_MAINPLL_D14 18
+#define CLK_TOP_UNIVPLL_D2 19
+#define CLK_TOP_UNIVPLL_D4 20
+#define CLK_TOP_UNIVPLL_D8 21
+#define CLK_TOP_UNIVPLL_D16 22
+#define CLK_TOP_UNIVPLL_D3 23
+#define CLK_TOP_UNIVPLL_D6 24
+#define CLK_TOP_UNIVPLL_D12 25
+#define CLK_TOP_UNIVPLL_D24 26
+#define CLK_TOP_UNIVPLL_D5 27
+#define CLK_TOP_UNIVPLL_D20 28
+#define CLK_TOP_MMPLL380M 29
+#define CLK_TOP_MMPLL_D2 30
+#define CLK_TOP_MMPLL_200M 31
+#define CLK_TOP_USB_PHY48M 32
+#define CLK_TOP_APLL1 33
+#define CLK_TOP_APLL1_D2 34
+#define CLK_TOP_APLL1_D4 35
+#define CLK_TOP_APLL1_D8 36
+#define CLK_TOP_APLL2 37
+#define CLK_TOP_APLL2_D2 38
+#define CLK_TOP_APLL2_D4 39
+#define CLK_TOP_APLL2_D8 40
+#define CLK_TOP_CLK26M 41
+#define CLK_TOP_CLK26M_D2 42
+#define CLK_TOP_AHB_INFRA_D2 43
+#define CLK_TOP_NFI1X 44
+#define CLK_TOP_ETH_D2 45
+#define CLK_TOP_THEM 46
+#define CLK_TOP_APDMA 47
+#define CLK_TOP_I2C0 48
+#define CLK_TOP_I2C1 49
+#define CLK_TOP_AUXADC1 50
+#define CLK_TOP_NFI 51
+#define CLK_TOP_NFIECC 52
+#define CLK_TOP_DEBUGSYS 53
+#define CLK_TOP_PWM 54
+#define CLK_TOP_UART0 55
+#define CLK_TOP_UART1 56
+#define CLK_TOP_BTIF 57
+#define CLK_TOP_USB 58
+#define CLK_TOP_FLASHIF_26M 59
+#define CLK_TOP_AUXADC2 60
+#define CLK_TOP_I2C2 61
+#define CLK_TOP_MSDC0 62
+#define CLK_TOP_MSDC1 63
+#define CLK_TOP_NFI2X 64
+#define CLK_TOP_PMICWRAP_AP 65
+#define CLK_TOP_SEJ 66
+#define CLK_TOP_MEMSLP_DLYER 67
+#define CLK_TOP_SPI 68
+#define CLK_TOP_APXGPT 69
+#define CLK_TOP_AUDIO 70
+#define CLK_TOP_PMICWRAP_MD 71
+#define CLK_TOP_PMICWRAP_CONN 72
+#define CLK_TOP_PMICWRAP_26M 73
+#define CLK_TOP_AUX_ADC 74
+#define CLK_TOP_AUX_TP 75
+#define CLK_TOP_MSDC2 76
+#define CLK_TOP_RBIST 77
+#define CLK_TOP_NFI_BUS 78
+#define CLK_TOP_GCE 79
+#define CLK_TOP_TRNG 80
+#define CLK_TOP_SEJ_13M 81
+#define CLK_TOP_AES 82
+#define CLK_TOP_PWM_B 83
+#define CLK_TOP_PWM1_FB 84
+#define CLK_TOP_PWM2_FB 85
+#define CLK_TOP_PWM3_FB 86
+#define CLK_TOP_PWM4_FB 87
+#define CLK_TOP_PWM5_FB 88
+#define CLK_TOP_USB_1P 89
+#define CLK_TOP_FLASHIF_FREERUN 90
+#define CLK_TOP_66M_ETH 91
+#define CLK_TOP_133M_ETH 92
+#define CLK_TOP_FETH_25M 93
+#define CLK_TOP_FETH_50M 94
+#define CLK_TOP_FLASHIF_AXI 95
+#define CLK_TOP_USBIF 96
+#define CLK_TOP_UART2 97
+#define CLK_TOP_BSI 98
+#define CLK_TOP_RG_SPINOR 99
+#define CLK_TOP_RG_MSDC2 100
+#define CLK_TOP_RG_ETH 101
+#define CLK_TOP_RG_AUD1 102
+#define CLK_TOP_RG_AUD2 103
+#define CLK_TOP_RG_AUD_ENGEN1 104
+#define CLK_TOP_RG_AUD_ENGEN2 105
+#define CLK_TOP_RG_I2C 106
+#define CLK_TOP_RG_PWM_INFRA 107
+#define CLK_TOP_RG_AUD_SPDIF_IN 108
+#define CLK_TOP_RG_UART2 109
+#define CLK_TOP_RG_BSI 110
+#define CLK_TOP_RG_DBG_ATCLK 111
+#define CLK_TOP_RG_NFIECC 112
+#define CLK_TOP_RG_APLL1_D2_EN 113
+#define CLK_TOP_RG_APLL1_D4_EN 114
+#define CLK_TOP_RG_APLL1_D8_EN 115
+#define CLK_TOP_RG_APLL2_D2_EN 116
+#define CLK_TOP_RG_APLL2_D4_EN 117
+#define CLK_TOP_RG_APLL2_D8_EN 118
+#define CLK_TOP_APLL12_DIV0 119
+#define CLK_TOP_APLL12_DIV1 120
+#define CLK_TOP_APLL12_DIV2 121
+#define CLK_TOP_APLL12_DIV3 122
+#define CLK_TOP_APLL12_DIV4 123
+#define CLK_TOP_APLL12_DIV4B 124
+#define CLK_TOP_APLL12_DIV5 125
+#define CLK_TOP_APLL12_DIV5B 126
+#define CLK_TOP_APLL12_DIV6 127
+#define CLK_TOP_UART0_SEL 128
+#define CLK_TOP_EMI_DDRPHY_SEL 129
+#define CLK_TOP_AHB_INFRA_SEL 130
+#define CLK_TOP_MSDC0_SEL 131
+#define CLK_TOP_UART1_SEL 132
+#define CLK_TOP_MSDC1_SEL 133
+#define CLK_TOP_PMICSPI_SEL 134
+#define CLK_TOP_QAXI_AUD26M_SEL 135
+#define CLK_TOP_AUD_INTBUS_SEL 136
+#define CLK_TOP_NFI2X_PAD_SEL 137
+#define CLK_TOP_NFI1X_PAD_SEL 138
+#define CLK_TOP_DDRPHYCFG_SEL 139
+#define CLK_TOP_USB_78M_SEL 140
+#define CLK_TOP_SPINOR_SEL 141
+#define CLK_TOP_MSDC2_SEL 142
+#define CLK_TOP_ETH_SEL 143
+#define CLK_TOP_AUD1_SEL 144
+#define CLK_TOP_AUD2_SEL 145
+#define CLK_TOP_AUD_ENGEN1_SEL 146
+#define CLK_TOP_AUD_ENGEN2_SEL 147
+#define CLK_TOP_I2C_SEL 148
+#define CLK_TOP_AUD_I2S0_M_SEL 149
+#define CLK_TOP_AUD_I2S1_M_SEL 150
+#define CLK_TOP_AUD_I2S2_M_SEL 151
+#define CLK_TOP_AUD_I2S3_M_SEL 152
+#define CLK_TOP_AUD_I2S4_M_SEL 153
+#define CLK_TOP_AUD_I2S5_M_SEL 154
+#define CLK_TOP_AUD_SPDIF_B_SEL 155
+#define CLK_TOP_PWM_SEL 156
+#define CLK_TOP_SPI_SEL 157
+#define CLK_TOP_AUD_SPDIFIN_SEL 158
+#define CLK_TOP_UART2_SEL 159
+#define CLK_TOP_BSI_SEL 160
+#define CLK_TOP_DBG_ATCLK_SEL 161
+#define CLK_TOP_CSW_NFIECC_SEL 162
+#define CLK_TOP_NFIECC_SEL 163
+#define CLK_TOP_APLL12_CK_DIV0 164
+#define CLK_TOP_APLL12_CK_DIV1 165
+#define CLK_TOP_APLL12_CK_DIV2 166
+#define CLK_TOP_APLL12_CK_DIV3 167
+#define CLK_TOP_APLL12_CK_DIV4 168
+#define CLK_TOP_APLL12_CK_DIV4B 169
+#define CLK_TOP_APLL12_CK_DIV5 170
+#define CLK_TOP_APLL12_CK_DIV5B 171
+#define CLK_TOP_APLL12_CK_DIV6 172
+#define CLK_TOP_USB_78M 173
+#define CLK_TOP_MSDC0_INFRA 174
+#define CLK_TOP_MSDC1_INFRA 175
+#define CLK_TOP_MSDC2_INFRA 176
+#define CLK_TOP_NR_CLK 177
+
+#endif /* _DT_BINDINGS_CLK_MT8516_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-qcs404.h b/include/dt-bindings/clock/qcom,gcc-qcs404.h
index 6ceb55ed72c6..454b3f43f538 100644
--- a/include/dt-bindings/clock/qcom,gcc-qcs404.h
+++ b/include/dt-bindings/clock/qcom,gcc-qcs404.h
@@ -146,6 +146,10 @@
#define GCC_MDP_TBU_CLK 138
#define GCC_QDSS_DAP_CLK 139
#define GCC_DCC_XO_CLK 140
+#define GCC_CDSP_CFG_AHB_CLK 143
+#define GCC_BIMC_CDSP_CLK 144
+#define GCC_CDSP_TBU_CLK 145
+#define GCC_CDSP_BIMC_CLK_SRC 146
#define GCC_GENI_IR_BCR 0
#define GCC_USB_HS_BCR 1
@@ -161,5 +165,6 @@
#define GCC_PCIE_0_LINK_DOWN_BCR 11
#define GCC_PCIEPHY_0_PHY_BCR 12
#define GCC_EMAC_BCR 13
+#define GCC_CDSP_RESTART 14
#endif
diff --git a/include/dt-bindings/clock/qcom,turingcc-qcs404.h b/include/dt-bindings/clock/qcom,turingcc-qcs404.h
new file mode 100644
index 000000000000..838faef57c67
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,turingcc-qcs404.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019, Linaro Ltd
+ */
+
+#ifndef _DT_BINDINGS_CLK_TURING_QCS404_H
+#define _DT_BINDINGS_CLK_TURING_QCS404_H
+
+#define TURING_Q6SS_Q6_AXIM_CLK 0
+#define TURING_Q6SS_AHBM_AON_CLK 1
+#define TURING_WRAPPER_AON_CLK 2
+#define TURING_Q6SS_AHBS_AON_CLK 3
+#define TURING_WRAPPER_QOS_AHBS_AON_CLK 4
+
+#endif
diff --git a/include/dt-bindings/clock/sifive-fu540-prci.h b/include/dt-bindings/clock/sifive-fu540-prci.h
new file mode 100644
index 000000000000..6a0b70a37d78
--- /dev/null
+++ b/include/dt-bindings/clock/sifive-fu540-prci.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ * Wesley Terpstra
+ * Paul Walmsley
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_SIFIVE_FU540_PRCI_H
+#define __DT_BINDINGS_CLOCK_SIFIVE_FU540_PRCI_H
+
+/* Clock indexes for use by Device Tree data and the PRCI driver */
+
+#define PRCI_CLK_COREPLL 0
+#define PRCI_CLK_DDRPLL 1
+#define PRCI_CLK_GEMGXLPLL 2
+#define PRCI_CLK_TLCLK 3
+
+#endif
diff --git a/include/dt-bindings/clock/stm32fx-clock.h b/include/dt-bindings/clock/stm32fx-clock.h
index 58d8b515be55..7d34e297049c 100644
--- a/include/dt-bindings/clock/stm32fx-clock.h
+++ b/include/dt-bindings/clock/stm32fx-clock.h
@@ -54,7 +54,10 @@
#define CLK_I2C3 28
#define CLK_I2C4 29
#define CLK_LPTIMER 30
-
-#define END_PRIMARY_CLK_F7 31
+#define CLK_PLL_SRC 31
+#define CLK_DFSDM1 32
+#define CLK_ADFSDM1 33
+#define CLK_F769_DSI 34
+#define END_PRIMARY_CLK_F7 35
#endif
diff --git a/include/dt-bindings/clock/sun5i-ccu.h b/include/dt-bindings/clock/sun5i-ccu.h
index 81f34d477aeb..2e6b9ddcc24e 100644
--- a/include/dt-bindings/clock/sun5i-ccu.h
+++ b/include/dt-bindings/clock/sun5i-ccu.h
@@ -100,7 +100,7 @@
#define CLK_AVS 96
#define CLK_HDMI 97
#define CLK_GPU 98
-
+#define CLK_MBUS 99
#define CLK_IEP 100
#endif /* _DT_BINDINGS_CLK_SUN5I_H_ */
diff --git a/include/dt-bindings/iio/temperature/thermocouple.h b/include/dt-bindings/iio/temperature/thermocouple.h
new file mode 100644
index 000000000000..ce037f5238ac
--- /dev/null
+++ b/include/dt-bindings/iio/temperature/thermocouple.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_TEMPERATURE_THERMOCOUPLE_H
+#define _DT_BINDINGS_TEMPERATURE_THERMOCOUPLE_H
+
+
+#define THERMOCOUPLE_TYPE_B 0x00
+#define THERMOCOUPLE_TYPE_E 0x01
+#define THERMOCOUPLE_TYPE_J 0x02
+#define THERMOCOUPLE_TYPE_K 0x03
+#define THERMOCOUPLE_TYPE_N 0x04
+#define THERMOCOUPLE_TYPE_R 0x05
+#define THERMOCOUPLE_TYPE_S 0x06
+#define THERMOCOUPLE_TYPE_T 0x07
+
+#endif /* _DT_BINDINGS_TEMPERATURE_THERMOCOUPLE_H */
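
These constants are meant to be shared between device trees and the IIO drivers that parse them. A sketch of the consuming side; the "thermocouple-type" property name is taken from my reading of the matching binding and should be treated as an assumption:

#include <linux/errno.h>
#include <linux/of.h>
#include <dt-bindings/iio/temperature/thermocouple.h>

static int sketch_read_tc_type(struct device_node *np, u32 *type)
{
	int ret;

	/* Assumed property name; see the binding document for the real one. */
	ret = of_property_read_u32(np, "thermocouple-type", type);
	if (ret)
		return ret;

	/* Example policy: only accept types the hardware can measure. */
	if (*type != THERMOCOUPLE_TYPE_K && *type != THERMOCOUPLE_TYPE_J)
		return -EINVAL;

	return 0;
}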
diff --git a/include/dt-bindings/phy/phy-am654-serdes.h b/include/dt-bindings/phy/phy-am654-serdes.h
new file mode 100644
index 000000000000..e8d901729ed9
--- /dev/null
+++ b/include/dt-bindings/phy/phy-am654-serdes.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for AM654 SERDES.
+ */
+
+#ifndef _DT_BINDINGS_AM654_SERDES
+#define _DT_BINDINGS_AM654_SERDES
+
+#define AM654_SERDES_CMU_REFCLK 0
+#define AM654_SERDES_LO_REFCLK 1
+#define AM654_SERDES_RO_REFCLK 2
+
+#endif /* _DT_BINDINGS_AM654_SERDES */
diff --git a/include/dt-bindings/pinctrl/stm32-pinfunc.h b/include/dt-bindings/pinctrl/stm32-pinfunc.h
index b5a2174a6386..e6fb8ada3f4d 100644
--- a/include/dt-bindings/pinctrl/stm32-pinfunc.h
+++ b/include/dt-bindings/pinctrl/stm32-pinfunc.h
@@ -32,5 +32,11 @@
#define STM32_PINMUX(port, line, mode) (((PIN_NO(port, line)) << 8) | (mode))
+/* package information */
+#define STM32MP_PKG_AA 0x1
+#define STM32MP_PKG_AB 0x2
+#define STM32MP_PKG_AC 0x4
+#define STM32MP_PKG_AD 0x8
+
#endif /* _DT_BINDINGS_STM32_PINFUNC_H */
diff --git a/include/dt-bindings/reset/amlogic,meson-g12a-reset.h b/include/dt-bindings/reset/amlogic,meson-g12a-reset.h
index 8063e8314eef..6d487c5eba2c 100644
--- a/include/dt-bindings/reset/amlogic,meson-g12a-reset.h
+++ b/include/dt-bindings/reset/amlogic,meson-g12a-reset.h
@@ -51,7 +51,10 @@
#define RESET_SD_EMMC_A 44
#define RESET_SD_EMMC_B 45
#define RESET_SD_EMMC_C 46
-/* 47-60 */
+/* 47 */
+#define RESET_USB_PHY20 48
+#define RESET_USB_PHY21 49
+/* 50-60 */
#define RESET_AUDIO_CODEC 61
/* 62-63 */
/* RESET2 */
diff --git a/include/keys/trusted.h b/include/keys/trusted.h
index adbcb6817826..0071298b9b28 100644
--- a/include/keys/trusted.h
+++ b/include/keys/trusted.h
@@ -38,7 +38,7 @@ enum {
int TSS_authhmac(unsigned char *digest, const unsigned char *key,
unsigned int keylen, unsigned char *h1,
- unsigned char *h2, unsigned char h3, ...);
+ unsigned char *h2, unsigned int h3, ...);
int TSS_checkhmac1(unsigned char *buffer,
const uint32_t command,
const unsigned char *ononce,
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index d5dcebd7aad3..98440df7fe42 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -141,10 +141,14 @@ enum acpi_address_range_id {
/* Table Handlers */
+union acpi_subtable_headers {
+ struct acpi_subtable_header common;
+ struct acpi_hmat_structure hmat;
+};
typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table);
-typedef int (*acpi_tbl_entry_handler)(struct acpi_subtable_header *header,
+typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header,
const unsigned long end);
/* Debugger support */
@@ -513,7 +517,8 @@ extern bool osc_pc_lpi_support_confirmed;
#define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004
#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008
#define OSC_PCI_MSI_SUPPORT 0x00000010
-#define OSC_PCI_SUPPORT_MASKS 0x0000001f
+#define OSC_PCI_HPX_TYPE_3_SUPPORT 0x00000100
+#define OSC_PCI_SUPPORT_MASKS 0x0000011f
/* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */
#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001
@@ -669,12 +674,14 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
return false;
}
-static inline const char *
-acpi_dev_get_first_match_name(const char *hid, const char *uid, s64 hrv)
+static inline struct acpi_device *
+acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
{
return NULL;
}
+static inline void acpi_dev_put(struct acpi_device *adev) {}
+
static inline bool is_acpi_node(struct fwnode_handle *fwnode)
{
return false;
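
Subtable walkers now receive a union rather than a bare struct acpi_subtable_header, so common fields are reached through ->common while the HMAT parser can use ->hmat. A hedged sketch of a handler that could be passed to acpi_table_parse_entries() after this change:

#include <linux/acpi.h>
#include <linux/errno.h>

static int sketch_subtable_handler(union acpi_subtable_headers *header,
				   const unsigned long end)
{
	struct acpi_subtable_header *common = &header->common;

	/* Basic bounds check against the end of the table. */
	if ((unsigned long)common + common->length > end)
		return -EINVAL;

	pr_debug("subtable type %u, length %u\n",
		 common->type, common->length);
	return 0;
}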
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 38cd77b39a64..723e4dfa1c14 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -26,6 +26,14 @@
#define IORT_IRQ_MASK(irq) (irq & 0xffffffffULL)
#define IORT_IRQ_TRIGGER_MASK(irq) ((irq >> 32) & 0xffffffffULL)
+/*
+ * PMCG model identifiers for use in smmu pmu driver. Please note
+ * that this is purely for the use of software and has nothing to
+ * do with hardware or with IORT specification.
+ */
+#define IORT_SMMU_V3_PMCG_GENERIC 0x00000000 /* Generic SMMUv3 PMCG */
+#define IORT_SMMU_V3_PMCG_HISI_HIP08 0x00000001 /* HiSilicon HIP08 PMCG */
+
int iort_register_domain_token(int trans_id, phys_addr_t base,
struct fwnode_handle *fw_node);
void iort_deregister_domain_token(int trans_id);
diff --git a/include/linux/alcor_pci.h b/include/linux/alcor_pci.h
index da973e8a2da8..4416df597526 100644
--- a/include/linux/alcor_pci.h
+++ b/include/linux/alcor_pci.h
@@ -23,7 +23,7 @@
#define AU6601_BASE_CLOCK 31000000
#define AU6601_MIN_CLOCK 150000
#define AU6601_MAX_CLOCK 208000000
-#define AU6601_MAX_DMA_SEGMENTS 1
+#define AU6601_MAX_DMA_SEGMENTS 64
#define AU6601_MAX_PIO_SEGMENTS 1
#define AU6601_MAX_DMA_BLOCK_SIZE 0x1000
#define AU6601_MAX_PIO_BLOCK_SIZE 0x200
diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h
index d0c3be77c18e..b6e0cbeaf533 100644
--- a/include/linux/amba/clcd.h
+++ b/include/linux/amba/clcd.h
@@ -124,38 +124,11 @@ struct clcd_board {
struct amba_device;
struct clk;
-/**
- * struct clcd_vendor_data - holds hardware (IP-block) vendor-specific
- * variant information
- *
- * @clock_timregs: the CLCD needs to be clocked when accessing the
- * timer registers, or the hardware will hang.
- * @packed_24_bit_pixels: this variant supports 24bit packed pixel data,
- * so that RGB accesses 3 bytes at a time, not just on even 32bit
- * boundaries, packing the pixel data in memory. ST Microelectronics
- * have this.
- * @st_bitmux_control: ST Microelectronics have implemented output
- * bit line multiplexing into the CLCD control register. This indicates
- * that we need to use this.
- * @init_board: custom board init function for this variant
- * @init_panel: custom panel init function for this variant
- */
-struct clcd_vendor_data {
- bool clock_timregs;
- bool packed_24_bit_pixels;
- bool st_bitmux_control;
- int (*init_board)(struct amba_device *adev,
- struct clcd_board *board);
- int (*init_panel)(struct clcd_fb *fb,
- struct device_node *panel);
-};
-
/* this data structure describes each frame buffer device we find */
struct clcd_fb {
struct fb_info fb;
struct amba_device *dev;
struct clk *clk;
- struct clcd_vendor_data *vendor;
struct clcd_panel *panel;
struct clcd_board *board;
void *board_data;
@@ -257,10 +230,6 @@ static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs)
else
val |= CNTL_LCDBPP16_444;
break;
- case 24:
- /* Modified variant supporting 24 bit packed pixels */
- val |= CNTL_ST_LCDBPP24_PACKED;
- break;
case 32:
val |= CNTL_LCDBPP24;
break;
diff --git a/include/linux/armada-37xx-rwtm-mailbox.h b/include/linux/armada-37xx-rwtm-mailbox.h
new file mode 100644
index 000000000000..57bb54f6767a
--- /dev/null
+++ b/include/linux/armada-37xx-rwtm-mailbox.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * rWTM BIU Mailbox driver for Armada 37xx
+ *
+ * Author: Marek Behun <marek.behun@nic.cz>
+ */
+
+#ifndef _LINUX_ARMADA_37XX_RWTM_MAILBOX_H_
+#define _LINUX_ARMADA_37XX_RWTM_MAILBOX_H_
+
+#include <linux/types.h>
+
+struct armada_37xx_rwtm_tx_msg {
+ u16 command;
+ u32 args[16];
+};
+
+struct armada_37xx_rwtm_rx_msg {
+ u32 retval;
+ u32 status[16];
+};
+
+#endif /* _LINUX_ARMADA_37XX_RWTM_MAILBOX_H_ */
diff --git a/include/linux/atalk.h b/include/linux/atalk.h
index d5cfc0b15b76..f6034ba774be 100644
--- a/include/linux/atalk.h
+++ b/include/linux/atalk.h
@@ -108,7 +108,7 @@ static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb)
#define AARP_RESOLVE_TIME (10 * HZ)
extern struct datalink_proto *ddp_dl, *aarp_dl;
-extern void aarp_proto_init(void);
+extern int aarp_proto_init(void);
/* Inter module exports */
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 1e69d9fe16da..43a23e28ba23 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -86,6 +86,29 @@ struct audit_field {
u32 op;
};
+enum audit_ntp_type {
+ AUDIT_NTP_OFFSET,
+ AUDIT_NTP_FREQ,
+ AUDIT_NTP_STATUS,
+ AUDIT_NTP_TAI,
+ AUDIT_NTP_TICK,
+ AUDIT_NTP_ADJUST,
+
+ AUDIT_NTP_NVALS /* count */
+};
+
+#ifdef CONFIG_AUDITSYSCALL
+struct audit_ntp_val {
+ long long oldval, newval;
+};
+
+struct audit_ntp_data {
+ struct audit_ntp_val vals[AUDIT_NTP_NVALS];
+};
+#else
+struct audit_ntp_data {};
+#endif
+
extern int is_audit_feature_set(int which);
extern int __init audit_register_class(int class, unsigned *list);
@@ -365,6 +388,8 @@ extern void __audit_log_capset(const struct cred *new, const struct cred *old);
extern void __audit_mmap_fd(int fd, int flags);
extern void __audit_log_kern_module(char *name);
extern void __audit_fanotify(unsigned int response);
+extern void __audit_tk_injoffset(struct timespec64 offset);
+extern void __audit_ntp_log(const struct audit_ntp_data *ad);
static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp)
{
@@ -467,6 +492,39 @@ static inline void audit_fanotify(unsigned int response)
__audit_fanotify(response);
}
+static inline void audit_tk_injoffset(struct timespec64 offset)
+{
+ /* ignore no-op events */
+ if (offset.tv_sec == 0 && offset.tv_nsec == 0)
+ return;
+
+ if (!audit_dummy_context())
+ __audit_tk_injoffset(offset);
+}
+
+static inline void audit_ntp_init(struct audit_ntp_data *ad)
+{
+ memset(ad, 0, sizeof(*ad));
+}
+
+static inline void audit_ntp_set_old(struct audit_ntp_data *ad,
+ enum audit_ntp_type type, long long val)
+{
+ ad->vals[type].oldval = val;
+}
+
+static inline void audit_ntp_set_new(struct audit_ntp_data *ad,
+ enum audit_ntp_type type, long long val)
+{
+ ad->vals[type].newval = val;
+}
+
+static inline void audit_ntp_log(const struct audit_ntp_data *ad)
+{
+ if (!audit_dummy_context())
+ __audit_ntp_log(ad);
+}
+
extern int audit_n_rules;
extern int audit_signals;
#else /* CONFIG_AUDITSYSCALL */
@@ -580,6 +638,23 @@ static inline void audit_log_kern_module(char *name)
static inline void audit_fanotify(unsigned int response)
{ }
+static inline void audit_tk_injoffset(struct timespec64 offset)
+{ }
+
+static inline void audit_ntp_init(struct audit_ntp_data *ad)
+{ }
+
+static inline void audit_ntp_set_old(struct audit_ntp_data *ad,
+ enum audit_ntp_type type, long long val)
+{ }
+
+static inline void audit_ntp_set_new(struct audit_ntp_data *ad,
+ enum audit_ntp_type type, long long val)
+{ }
+
+static inline void audit_ntp_log(const struct audit_ntp_data *ad)
+{ }
+
static inline void audit_ptrace(struct task_struct *t)
{ }
#define audit_n_rules 0
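
The new NTP helpers are designed to bracket an adjustment: initialize the record, capture the old value, apply the change, capture the new value, then log. A minimal sketch follows, with the surrounding function invented purely for illustration.

#include <linux/audit.h>

/* Illustrative only: audit a frequency adjustment. */
static void example_adjust_freq(long long *freq, long long newfreq)
{
	struct audit_ntp_data ad;

	audit_ntp_init(&ad);
	audit_ntp_set_old(&ad, AUDIT_NTP_FREQ, *freq);

	*freq = newfreq;				/* the actual change */

	audit_ntp_set_new(&ad, AUDIT_NTP_FREQ, *freq);
	audit_ntp_log(&ad);	/* no-op when auditing is not active */
}

Because the !CONFIG_AUDITSYSCALL stubs above compile to empty bodies, callers need no ifdefs of their own.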
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index f111c780ef1d..f31521dcb09a 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -151,21 +151,6 @@ static inline void balloon_page_delete(struct page *page)
list_del(&page->lru);
}
-static inline bool __is_movable_balloon_page(struct page *page)
-{
- return false;
-}
-
-static inline bool balloon_page_movable(struct page *page)
-{
- return false;
-}
-
-static inline bool isolated_balloon_page(struct page *page)
-{
- return false;
-}
-
static inline bool balloon_page_isolate(struct page *page)
{
return false;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index bb6090aa165d..ea73df36529a 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -1,19 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2001 Jens Axboe <axboe@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public Licens
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
*/
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H
@@ -120,19 +107,22 @@ static inline bool bio_full(struct bio *bio)
return bio->bi_vcnt >= bio->bi_max_vecs;
}
-#define mp_bvec_for_each_segment(bv, bvl, i, iter_all) \
- for (bv = bvec_init_iter_all(&iter_all); \
- (iter_all.done < (bvl)->bv_len) && \
- (mp_bvec_next_segment((bvl), &iter_all), 1); \
- iter_all.done += bv->bv_len, i += 1)
+static inline bool bio_next_segment(const struct bio *bio,
+ struct bvec_iter_all *iter)
+{
+ if (iter->idx >= bio->bi_vcnt)
+ return false;
+
+ bvec_advance(&bio->bi_io_vec[iter->idx], iter);
+ return true;
+}
/*
* drivers should _never_ use the all version - the bio may have been split
* before it got to the driver and the driver won't own all of it
*/
-#define bio_for_each_segment_all(bvl, bio, i, iter_all) \
- for (i = 0, iter_all.idx = 0; iter_all.idx < (bio)->bi_vcnt; iter_all.idx++) \
- mp_bvec_for_each_segment(bvl, &((bio)->bi_io_vec[iter_all.idx]), i, iter_all)
+#define bio_for_each_segment_all(bvl, bio, iter) \
+ for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )
static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
unsigned bytes)
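
The net effect for callers is that bio_for_each_segment_all() loses its integer index argument and only needs the bvec_iter_all state. A short sketch of the updated calling convention; the function itself is invented for illustration.

#include <linux/bio.h>

/* Illustrative: walk every single-page segment of a bio. */
static unsigned int example_segment_bytes(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;
	unsigned int bytes = 0;

	/* Previously: bio_for_each_segment_all(bvec, bio, i, iter_all) */
	bio_for_each_segment_all(bvec, bio, iter_all)
		bytes += bvec->bv_len;

	return bytes;
}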
diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h
index 50fb0dee23e8..d35b8ec1c485 100644
--- a/include/linux/bitrev.h
+++ b/include/linux/bitrev.h
@@ -34,41 +34,41 @@ static inline u32 __bitrev32(u32 x)
#define __constant_bitrev32(x) \
({ \
- u32 __x = x; \
- __x = (__x >> 16) | (__x << 16); \
- __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \
- __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
- __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
- __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
- __x; \
+ u32 ___x = x; \
+ ___x = (___x >> 16) | (___x << 16); \
+ ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8); \
+ ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \
+ ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \
+ ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \
+ ___x; \
})
#define __constant_bitrev16(x) \
({ \
- u16 __x = x; \
- __x = (__x >> 8) | (__x << 8); \
- __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \
- __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \
- __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \
- __x; \
+ u16 ___x = x; \
+ ___x = (___x >> 8) | (___x << 8); \
+ ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4); \
+ ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2); \
+ ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1); \
+ ___x; \
})
#define __constant_bitrev8x4(x) \
({ \
- u32 __x = x; \
- __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
- __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
- __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
- __x; \
+ u32 ___x = x; \
+ ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \
+ ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \
+ ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \
+ ___x; \
})
#define __constant_bitrev8(x) \
({ \
- u8 __x = x; \
- __x = (__x >> 4) | (__x << 4); \
- __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \
- __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \
- __x; \
+ u8 ___x = x; \
+ ___x = (___x >> 4) | (___x << 4); \
+ ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2); \
+ ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1); \
+ ___x; \
})
#define bitrev32(x) \
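
The rename from __x to ___x is presumably there to avoid self-shadowing: if a caller passes an expression that itself mentions a variable named __x, the old macros would initialize their temporary from the just-declared, uninitialized local of the same name. A stripped-down illustration of that hazard; the macro and function below are invented for the example.

#include <linux/types.h>

/* Old-style macro with a single-underscore temporary. */
#define OLD_BITREV8(x)					\
({							\
	u8 __x = x;					\
	__x = (__x >> 4) | (__x << 4);			\
	__x;						\
})

static u8 example(u8 __x)	/* caller happens to use the name __x */
{
	/* Expands to "u8 __x = __x;" -- the local shadows the argument. */
	return OLD_BITREV8(__x);
}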
diff --git a/include/linux/blk-mq-rdma.h b/include/linux/blk-mq-rdma.h
index 7b6ecf9ac4c3..5cc5f0f36218 100644
--- a/include/linux/blk-mq-rdma.h
+++ b/include/linux/blk-mq-rdma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BLK_MQ_RDMA_H
#define _LINUX_BLK_MQ_RDMA_H
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b0c814bcc7e3..15d1aa53d96c 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -57,7 +57,6 @@ struct blk_mq_hw_ctx {
unsigned int queue_num;
atomic_t nr_active;
- unsigned int nr_expired;
struct hlist_node cpuhp_dead;
struct kobject kobj;
@@ -71,6 +70,8 @@ struct blk_mq_hw_ctx {
struct dentry *sched_debugfs_dir;
#endif
+ struct list_head hctx_list;
+
/* Must be the last member - see also blk_mq_hw_ctx_size(). */
struct srcu_struct srcu[0];
};
@@ -300,11 +301,10 @@ void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
- bool kick_requeue_list);
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
bool blk_mq_complete_request(struct request *rq);
+void blk_mq_complete_request_sync(struct request *rq);
bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list,
struct bio *bio);
bool blk_mq_queue_stopped(struct request_queue *q);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index d66bf5f32610..be418275763c 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -215,20 +215,24 @@ struct bio {
/*
* bio flags
*/
-#define BIO_SEG_VALID 1 /* bi_phys_segments valid */
-#define BIO_CLONED 2 /* doesn't own data */
-#define BIO_BOUNCED 3 /* bio is a bounce bio */
-#define BIO_USER_MAPPED 4 /* contains user pages */
-#define BIO_NULL_MAPPED 5 /* contains invalid user pages */
-#define BIO_QUIET 6 /* Make BIO Quiet */
-#define BIO_CHAIN 7 /* chained bio, ->bi_remaining in effect */
-#define BIO_REFFED 8 /* bio has elevated ->bi_cnt */
-#define BIO_THROTTLED 9 /* This bio has already been subjected to
+enum {
+ BIO_NO_PAGE_REF, /* don't put release vec pages */
+ BIO_SEG_VALID, /* bi_phys_segments valid */
+ BIO_CLONED, /* doesn't own data */
+ BIO_BOUNCED, /* bio is a bounce bio */
+ BIO_USER_MAPPED, /* contains user pages */
+ BIO_NULL_MAPPED, /* contains invalid user pages */
+ BIO_QUIET, /* Make BIO Quiet */
+ BIO_CHAIN, /* chained bio, ->bi_remaining in effect */
+ BIO_REFFED, /* bio has elevated ->bi_cnt */
+ BIO_THROTTLED, /* This bio has already been subjected to
* throttling rules. Don't do it again. */
-#define BIO_TRACE_COMPLETION 10 /* bio_endio() should trace the final completion
+ BIO_TRACE_COMPLETION, /* bio_endio() should trace the final completion
* of this bio. */
-#define BIO_QUEUE_ENTERED 11 /* can use blk_queue_enter_live() */
-#define BIO_TRACKED 12 /* set if bio goes through the rq_qos path */
+ BIO_QUEUE_ENTERED, /* can use blk_queue_enter_live() */
+ BIO_TRACKED, /* set if bio goes through the rq_qos path */
+ BIO_FLAG_LAST
+};
/* See BVEC_POOL_OFFSET below before adding new flags */
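
The flags remain plain bit indices consumed by the existing helpers in bio.h, so call sites do not change when the #defines become enum constants. A tiny sketch, with the function invented for illustration.

#include <linux/bio.h>

/* Illustrative: BIO_QUIET is now an enum constant, used exactly as before. */
static bool example_should_log_error(struct bio *bio)
{
	return !bio_flagged(bio, BIO_QUIET);
}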
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0de92b29f589..1aafeb923e7b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -50,6 +50,9 @@ struct blk_stat_callback;
/* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS 16
+/* Doing classic polling */
+#define BLK_MQ_POLL_CLASSIC -1
+
/*
* Maximum number of blkcg policies allowed to be registered concurrently.
* Defined here to simplify include dependency.
@@ -532,6 +535,13 @@ struct request_queue {
struct mutex sysfs_lock;
+ /*
+ * for reusing dead hctx instance in case of updating
+ * nr_hw_queues
+ */
+ struct list_head unused_hctx_list;
+ spinlock_t unused_hctx_lock;
+
atomic_t mq_freeze_depth;
#if defined(CONFIG_BLK_DEV_BSG)
@@ -545,7 +555,6 @@ struct request_queue {
struct rcu_head rcu_head;
wait_queue_head_t mq_freeze_wq;
struct percpu_ref q_usage_counter;
- struct list_head all_q_node;
struct blk_mq_tag_set *tag_set;
struct list_head tag_set_list;
@@ -638,6 +647,13 @@ static inline bool blk_account_rq(struct request *rq)
#define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
+#define rq_dma_dir(rq) \
+ (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
+#define dma_map_bvec(dev, bv, dir, attrs) \
+ dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
+ (dir), (attrs))
+
static inline bool queue_is_mq(struct request_queue *q)
{
return q->mq_ops;
@@ -929,6 +945,17 @@ static inline unsigned int blk_rq_payload_bytes(struct request *rq)
return blk_rq_bytes(rq);
}
+/*
+ * Return the first full biovec in the request. The caller needs to check that
+ * there are any bvecs before calling this helper.
+ */
+static inline struct bio_vec req_bvec(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return rq->special_vec;
+ return mp_bvec_iter_bvec(rq->bio->bi_io_vec, rq->bio->bi_iter);
+}
+
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
int op)
{
@@ -1049,7 +1076,6 @@ extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
-extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
dma_drain_needed_fn *dma_drain_needed,
@@ -1545,6 +1571,17 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
}
+/*
+ * Return the first bvec that contains integrity data. Only drivers that are
+ * limited to a single integrity segment should use this helper.
+ */
+static inline struct bio_vec *rq_integrity_vec(struct request *rq)
+{
+ if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1))
+ return NULL;
+ return rq->bio->bi_integrity->bip_vec;
+}
+
#else /* CONFIG_BLK_DEV_INTEGRITY */
struct bio;
@@ -1619,6 +1656,11 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
return 0;
}
+static inline struct bio_vec *rq_integrity_vec(struct request *rq)
+{
+ return NULL;
+}
+
#endif /* CONFIG_BLK_DEV_INTEGRITY */
struct block_device_operations {
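
Here is a hedged sketch of how a driver that handles one data segment per request might combine the new req_bvec(), rq_dma_dir() and dma_map_bvec() helpers; the device pointer, helper name and error handling are placeholders, not taken from this patch.

#include <linux/blkdev.h>
#include <linux/dma-mapping.h>

/* Illustrative: DMA-map the single data bvec of a request. */
static int example_map_request(struct device *dev, struct request *rq,
			       dma_addr_t *dma)
{
	struct bio_vec bv;

	if (!blk_rq_nr_phys_segments(rq))
		return -EINVAL;		/* caller must ensure data exists */

	bv = req_bvec(rq);
	*dma = dma_map_bvec(dev, &bv, rq_dma_dir(rq), 0);
	if (dma_mapping_error(dev, *dma))
		return -ENOMEM;

	return 0;
}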
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index a4c644c1c091..cb3c6b3b89c8 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -17,6 +17,8 @@ struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
+struct ctl_table;
+struct ctl_table_header;
#ifdef CONFIG_CGROUP_BPF
@@ -109,6 +111,12 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
short access, enum bpf_attach_type type);
+int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
+ struct ctl_table *table, int write,
+ void __user *buf, size_t *pcount,
+ loff_t *ppos, void **new_buf,
+ enum bpf_attach_type type);
+
static inline enum bpf_cgroup_storage_type cgroup_storage_type(
struct bpf_map *map)
{
@@ -253,6 +261,18 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
\
__ret; \
})
+
+
+#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled) \
+ __ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
+ buf, count, pos, nbuf, \
+ BPF_CGROUP_SYSCTL); \
+ __ret; \
+})
+
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
@@ -321,6 +341,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos,nbuf) ({ 0; })
#define for_each_cgroup_storage_type(stype) for (; false; )
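
The new hook is meant to be invoked from the sysctl proc handler before the access is performed, with the attached BPF_CGROUP_SYSCTL programs deciding whether it may proceed. A hedged sketch of such a call site follows; the wrapper function and the return-convention comment are assumptions mirroring the other BPF_CGROUP_RUN_PROG_* macros, not definitions from this header.

#include <linux/bpf-cgroup.h>
#include <linux/sysctl.h>

/* Illustrative: gate a sysctl read/write on the attached cgroup programs. */
static int example_sysctl_permission(struct ctl_table_header *head,
				     struct ctl_table *table, int write,
				     void __user *buf, size_t *count,
				     loff_t *ppos, void **new_buf)
{
	/* Expected to return 0 to allow, or a negative errno to reject. */
	return BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf,
					  count, ppos, new_buf);
}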
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index a2132e09dc1c..59631dd0777c 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -57,6 +57,12 @@ struct bpf_map_ops {
const struct btf *btf,
const struct btf_type *key_type,
const struct btf_type *value_type);
+
+ /* Direct value access helpers. */
+ int (*map_direct_value_addr)(const struct bpf_map *map,
+ u64 *imm, u32 off);
+ int (*map_direct_value_meta)(const struct bpf_map *map,
+ u64 imm, u32 *off);
};
struct bpf_map {
@@ -81,7 +87,8 @@ struct bpf_map {
struct btf *btf;
u32 pages;
bool unpriv_array;
- /* 51 bytes hole */
+ bool frozen; /* write-once */
+ /* 48 bytes hole */
/* The 3rd and 4th cacheline with misc members to avoid false sharing
* particularly with refcounting.
@@ -177,6 +184,7 @@ enum bpf_arg_type {
ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */
+ ARG_PTR_TO_MAP_VALUE_OR_NULL, /* pointer to stack used as map value or NULL */
/* the following constraints used to prototype bpf_memcmp() and other
* functions that access data on eBPF program stack
@@ -193,9 +201,11 @@ enum bpf_arg_type {
ARG_PTR_TO_CTX, /* pointer to context */
ARG_ANYTHING, /* any (initialized) argument is ok */
- ARG_PTR_TO_SOCKET, /* pointer to bpf_sock */
ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */
ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
+ ARG_PTR_TO_INT, /* pointer to int */
+ ARG_PTR_TO_LONG, /* pointer to long */
+ ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */
};
/* type of values returned from helper functions */
@@ -206,6 +216,7 @@ enum bpf_return_type {
RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */
RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */
+ RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */
};
/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
@@ -263,6 +274,7 @@ enum bpf_reg_type {
PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */
PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */
PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
+ PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */
};
/* The information passed from prog-specific *_is_valid_access
@@ -352,6 +364,7 @@ struct bpf_prog_aux {
u32 used_map_cnt;
u32 max_ctx_offset;
u32 max_pkt_offset;
+ u32 max_tp_access;
u32 stack_depth;
u32 id;
u32 func_cnt; /* used by non-func prog as the number of func progs */
@@ -421,8 +434,38 @@ struct bpf_array {
};
};
+#define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */
#define MAX_TAIL_CALL_CNT 32
+#define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \
+ BPF_F_RDONLY_PROG | \
+ BPF_F_WRONLY | \
+ BPF_F_WRONLY_PROG)
+
+#define BPF_MAP_CAN_READ BIT(0)
+#define BPF_MAP_CAN_WRITE BIT(1)
+
+static inline u32 bpf_map_flags_to_cap(struct bpf_map *map)
+{
+ u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
+
+ /* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is
+ * not possible.
+ */
+ if (access_flags & BPF_F_RDONLY_PROG)
+ return BPF_MAP_CAN_READ;
+ else if (access_flags & BPF_F_WRONLY_PROG)
+ return BPF_MAP_CAN_WRITE;
+ else
+ return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE;
+}
+
+static inline bool bpf_map_flags_access_ok(u32 access_flags)
+{
+ return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) !=
+ (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG);
+}
+
struct bpf_event_entry {
struct perf_event *event;
struct file *perf_file;
@@ -446,14 +489,6 @@ typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type,
u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
-int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
- union bpf_attr __user *uattr);
-int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
- union bpf_attr __user *uattr);
-int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
- const union bpf_attr *kattr,
- union bpf_attr __user *uattr);
-
/* an array of programs to be executed under rcu_lock.
*
* Typical usage:
@@ -511,7 +546,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
} \
_out: \
rcu_read_unlock(); \
- preempt_enable_no_resched(); \
+ preempt_enable(); \
_ret; \
})
@@ -644,6 +679,13 @@ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
int array_map_alloc_check(union bpf_attr *attr);
+int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
@@ -755,6 +797,27 @@ static inline struct bpf_prog *bpf_prog_get_type_path(const char *name,
{
return ERR_PTR(-EOPNOTSUPP);
}
+
+static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ return -ENOTSUPP;
+}
+
+static inline int bpf_prog_test_run_skb(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ return -ENOTSUPP;
+}
+
+static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ return -ENOTSUPP;
+}
#endif /* CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
@@ -930,6 +993,8 @@ extern const struct bpf_func_proto bpf_sk_redirect_map_proto;
extern const struct bpf_func_proto bpf_spin_lock_proto;
extern const struct bpf_func_proto bpf_spin_unlock_proto;
extern const struct bpf_func_proto bpf_get_local_storage_proto;
+extern const struct bpf_func_proto bpf_strtol_proto;
+extern const struct bpf_func_proto bpf_strtoul_proto;
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
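
The two new inline helpers encode a simple capability model: a map is program-readable and/or program-writable depending on BPF_F_RDONLY_PROG / BPF_F_WRONLY_PROG, and setting both flags is invalid. A small sketch of the kind of check a map implementation could perform; the function is invented for illustration.

#include <linux/bpf.h>
#include <linux/errno.h>

/* Illustrative: refuse program-side writes to a read-only-for-programs map. */
static int example_check_prog_write(struct bpf_map *map)
{
	if (!bpf_map_flags_access_ok(map->map_flags))
		return -EINVAL;		/* both PROG flags set: invalid */

	if (!(bpf_map_flags_to_cap(map) & BPF_MAP_CAN_WRITE))
		return -EACCES;

	return 0;
}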
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 08bf2f1fe553..5a9975678d6f 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -25,9 +25,11 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe)
BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint)
BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event)
BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint)
+BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable)
#endif
#ifdef CONFIG_CGROUP_BPF
BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SYSCTL, cg_sysctl)
#endif
#ifdef CONFIG_BPF_LIRC_MODE2
BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2)
@@ -59,6 +61,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
#ifdef CONFIG_NET
BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops)
#if defined(CONFIG_BPF_STREAM_PARSER)
BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 69f7a3449eda..1305ccbd8fe6 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -66,6 +66,46 @@ struct bpf_reg_state {
* same reference to the socket, to determine proper reference freeing.
*/
u32 id;
+ /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
+ * from a pointer-cast helper, bpf_sk_fullsock() and
+ * bpf_tcp_sock().
+ *
+ * Consider the following where "sk" is a reference counted
+ * pointer returned from "sk = bpf_sk_lookup_tcp();":
+ *
+ * 1: sk = bpf_sk_lookup_tcp();
+ * 2: if (!sk) { return 0; }
+ * 3: fullsock = bpf_sk_fullsock(sk);
+ * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
+ * 5: tp = bpf_tcp_sock(fullsock);
+ * 6: if (!tp) { bpf_sk_release(sk); return 0; }
+ * 7: bpf_sk_release(sk);
+ * 8: snd_cwnd = tp->snd_cwnd; // verifier will complain
+ *
+ * After bpf_sk_release(sk) at line 7, both "fullsock" ptr and
+ * "tp" ptr should be invalidated also. In order to do that,
+ * the reg holding "fullsock" and "sk" need to remember
+ * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
+ * such that the verifier can reset all regs which have
+ * ref_obj_id matching the sk_reg->id.
+ *
+ * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
+ * sk_reg->id will stay as NULL-marking purpose only.
+ * After NULL-marking is done, sk_reg->id can be reset to 0.
+ *
+ * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
+ * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
+ *
+ * After "tp = bpf_tcp_sock(fullsock);" at line 5,
+ * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
+ * which is the same as sk_reg->ref_obj_id.
+ *
+ * From the verifier perspective, if sk, fullsock and tp
+ * are not NULL, they are the same ptr with different
+ * reg->type. In particular, bpf_sk_release(tp) is also
+ * allowed and has the same effect as bpf_sk_release(sk).
+ */
+ u32 ref_obj_id;
/* For scalar types (SCALAR_VALUE), this represents our knowledge of
* the actual value.
* For pointer types, this represents the variable part of the offset
@@ -167,6 +207,7 @@ struct bpf_verifier_state {
struct bpf_verifier_state_list {
struct bpf_verifier_state state;
struct bpf_verifier_state_list *next;
+ int miss_cnt, hit_cnt;
};
/* Possible states for alu_state member. */
@@ -183,6 +224,10 @@ struct bpf_insn_aux_data {
unsigned long map_state; /* pointer/poison value for maps */
s32 call_imm; /* saved imm field of call insn */
u32 alu_limit; /* limit for add/sub register with pointer */
+ struct {
+ u32 map_index; /* index into used_maps[] */
+ u32 map_off; /* offset from value base address */
+ };
};
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
int sanitize_stack_off; /* stack slot to be cleared */
@@ -208,6 +253,12 @@ static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
return log->len_used >= log->len_total - 1;
}
+#define BPF_LOG_LEVEL1 1
+#define BPF_LOG_LEVEL2 2
+#define BPF_LOG_STATS 4
+#define BPF_LOG_LEVEL (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
+#define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS)
+
static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
return log->level && log->ubuf && !bpf_verifier_log_full(log);
@@ -234,6 +285,7 @@ struct bpf_verifier_env {
bool strict_alignment; /* perform strict pointer alignment checks */
struct bpf_verifier_state *cur_state; /* current verifier state */
struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
+ struct bpf_verifier_state_list *free_list;
struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
u32 used_map_cnt; /* number of used maps */
u32 id_gen; /* used to generate unique reg IDs */
@@ -243,7 +295,27 @@ struct bpf_verifier_env {
const struct bpf_line_info *prev_linfo;
struct bpf_verifier_log log;
struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
+ struct {
+ int *insn_state;
+ int *insn_stack;
+ int cur_stack;
+ } cfg;
u32 subprog_cnt;
+ /* number of instructions analyzed by the verifier */
+ u32 insn_processed;
+ /* total verification time */
+ u64 verification_time;
+ /* maximum number of verifier states kept in 'branching' instructions */
+ u32 max_states_per_insn;
+ /* total number of allocated verifier states */
+ u32 total_states;
+ /* some states are freed during program analysis.
+ * this is peak number of states. this number dominates kernel
+ * memory consumption during verification
+ */
+ u32 peak_states;
+ /* longest register parentage chain walked for liveness marking */
+ u32 longest_mark_read_walk;
};
__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
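
The log level is now treated as a bitmask (two verbosity levels plus a statistics bit) rather than a plain integer. Two trivial accessors showing how the bits compose; both functions are invented for the example.

#include <linux/bpf_verifier.h>
#include <linux/types.h>

static bool example_want_verbose(const struct bpf_verifier_log *log)
{
	return log->level & BPF_LOG_LEVEL2;	/* extra per-insn detail */
}

static bool example_want_stats(const struct bpf_verifier_log *log)
{
	return log->level & BPF_LOG_STATS;	/* verification statistics */
}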
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 9cd00a37b8d3..6db2d9a6e503 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -148,6 +148,22 @@
#define BCM_LED_SRC_OFF 0xe /* Tied high */
#define BCM_LED_SRC_ON 0xf /* Tied low */
+/*
+ * Broadcom Multicolor LED configurations (expansion register 4)
+ */
+#define BCM_EXP_MULTICOLOR (MII_BCM54XX_EXP_SEL_ER + 0x04)
+#define BCM_LED_MULTICOLOR_IN_PHASE BIT(8)
+#define BCM_LED_MULTICOLOR_LINK_ACT 0x0
+#define BCM_LED_MULTICOLOR_SPEED 0x1
+#define BCM_LED_MULTICOLOR_ACT_FLASH 0x2
+#define BCM_LED_MULTICOLOR_FDX 0x3
+#define BCM_LED_MULTICOLOR_OFF 0x4
+#define BCM_LED_MULTICOLOR_ON 0x5
+#define BCM_LED_MULTICOLOR_ALT 0x6
+#define BCM_LED_MULTICOLOR_FLASH 0x7
+#define BCM_LED_MULTICOLOR_LINK 0x8
+#define BCM_LED_MULTICOLOR_ACT 0x9
+#define BCM_LED_MULTICOLOR_PROGRAM 0xa
/*
* BCM5482: Shadow registers
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h
index 7f14517a559b..960988d42f77 100644
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -1,24 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* BSG helper library
*
* Copyright (C) 2008 James Smart, Emulex Corporation
* Copyright (C) 2011 Red Hat, Inc. All rights reserved.
* Copyright (C) 2011 Mike Christie
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _BLK_BSG_
#define _BLK_BSG_
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 455d31b55828..64cdf2a23d42 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -51,6 +51,7 @@ bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
const struct btf_member *m,
u32 expected_offset, u32 expected_size);
int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t);
+bool btf_type_is_void(const struct btf_type *t);
#ifdef CONFIG_BPF_SYSCALL
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index f6275c4da13a..a032f01e928c 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -1,21 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* bvec iterator
*
* Copyright (C) 2001 Ming Lei <ming.lei@canonical.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- *
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public Licens
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
*/
#ifndef __LINUX_BVEC_ITER_H
#define __LINUX_BVEC_ITER_H
@@ -51,11 +38,6 @@ struct bvec_iter_all {
unsigned done;
};
-static inline struct page *bvec_nth_page(struct page *page, int idx)
-{
- return idx == 0 ? page : nth_page(page, idx);
-}
-
/*
* various member access, note that bio_data should of course not be used
* on highmem page vectors
@@ -92,8 +74,8 @@ static inline struct page *bvec_nth_page(struct page *page, int idx)
PAGE_SIZE - bvec_iter_offset((bvec), (iter)))
#define bvec_iter_page(bvec, iter) \
- bvec_nth_page(mp_bvec_iter_page((bvec), (iter)), \
- mp_bvec_iter_page_idx((bvec), (iter)))
+ (mp_bvec_iter_page((bvec), (iter)) + \
+ mp_bvec_iter_page_idx((bvec), (iter)))
#define bvec_iter_bvec(bvec, iter) \
((struct bio_vec) { \
@@ -145,26 +127,32 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv,
static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all)
{
- iter_all->bv.bv_page = NULL;
iter_all->done = 0;
+ iter_all->idx = 0;
return &iter_all->bv;
}
-static inline void mp_bvec_next_segment(const struct bio_vec *bvec,
- struct bvec_iter_all *iter_all)
+static inline void bvec_advance(const struct bio_vec *bvec,
+ struct bvec_iter_all *iter_all)
{
struct bio_vec *bv = &iter_all->bv;
- if (bv->bv_page) {
- bv->bv_page = nth_page(bv->bv_page, 1);
+ if (iter_all->done) {
+ bv->bv_page++;
bv->bv_offset = 0;
} else {
- bv->bv_page = bvec->bv_page;
- bv->bv_offset = bvec->bv_offset;
+ bv->bv_page = bvec->bv_page + (bvec->bv_offset >> PAGE_SHIFT);
+ bv->bv_offset = bvec->bv_offset & ~PAGE_MASK;
}
bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset,
bvec->bv_len - iter_all->done);
+ iter_all->done += bv->bv_len;
+
+ if (iter_all->done == bvec->bv_len) {
+ iter_all->idx++;
+ iter_all->done = 0;
+ }
}
/*
@@ -177,7 +165,7 @@ static inline void mp_bvec_last_segment(const struct bio_vec *bvec,
unsigned total = bvec->bv_offset + bvec->bv_len;
unsigned last_page = (total - 1) / PAGE_SIZE;
- seg->bv_page = bvec_nth_page(bvec->bv_page, last_page);
+ seg->bv_page = bvec->bv_page + last_page;
/* the whole segment is inside the last page */
if (bvec->bv_offset >= last_page * PAGE_SIZE) {
@@ -189,9 +177,4 @@ static inline void mp_bvec_last_segment(const struct bio_vec *bvec,
}
}
-#define mp_bvec_for_each_page(pg, bv, i) \
- for (i = (bv)->bv_offset / PAGE_SIZE; \
- (i <= (((bv)->bv_offset + (bv)->bv_len - 1) / PAGE_SIZE)) && \
- (pg = bvec_nth_page((bv)->bv_page, i)); i += 1)
-
#endif /* __LINUX_BVEC_ITER_H */
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index a420c07904bc..337d5049ff93 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -294,6 +294,8 @@ extern void ceph_destroy_client(struct ceph_client *client);
extern int __ceph_open_session(struct ceph_client *client,
unsigned long started);
extern int ceph_open_session(struct ceph_client *client);
+int ceph_wait_for_latest_osdmap(struct ceph_client *client,
+ unsigned long timeout);
/* pagevec.c */
extern void ceph_release_page_vector(struct page **pages, int num_pages);
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 1c70803e9f77..77258d276f93 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -65,6 +65,12 @@ enum {
* specified at mount time and thus is implemented here.
*/
CGRP_CPUSET_CLONE_CHILDREN,
+
+ /* Control group has to be frozen. */
+ CGRP_FREEZE,
+
+ /* Cgroup is frozen. */
+ CGRP_FROZEN,
};
/* cgroup_root->flags */
@@ -317,6 +323,25 @@ struct cgroup_rstat_cpu {
struct cgroup *updated_next; /* NULL iff not on the list */
};
+struct cgroup_freezer_state {
+ /* Should the cgroup and its descendants be frozen. */
+ bool freeze;
+
+ /* Should the cgroup actually be frozen? */
+ int e_freeze;
+
+ /* Fields below are protected by css_set_lock */
+
+ /* Number of frozen descendant cgroups */
+ int nr_frozen_descendants;
+
+ /*
+ * Number of tasks, which are counted as frozen:
+ * frozen, SIGSTOPped, and PTRACEd.
+ */
+ int nr_frozen_tasks;
+};
+
struct cgroup {
/* self css with NULL ->ss, points back to this cgroup */
struct cgroup_subsys_state self;
@@ -349,6 +374,11 @@ struct cgroup {
* Dying cgroups are cgroups which were deleted by a user,
* but are still existing because someone else is holding a reference.
* max_descendants is a maximum allowed number of descent cgroups.
+ *
+ * nr_descendants and nr_dying_descendants are protected
+ * by cgroup_mutex and css_set_lock. It's fine to read them holding
+ * any of cgroup_mutex and css_set_lock; for writing both locks
+ * should be held.
*/
int nr_descendants;
int nr_dying_descendants;
@@ -448,6 +478,9 @@ struct cgroup {
/* If there is block congestion on this cgroup. */
atomic_t congestion_count;
+ /* Used to store internal freezer state */
+ struct cgroup_freezer_state freezer;
+
/* ids of the ancestors at each level including self */
int ancestor_ids[];
};
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 81f58b4a5418..c0077adeea83 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -881,4 +881,47 @@ static inline void put_cgroup_ns(struct cgroup_namespace *ns)
free_cgroup_ns(ns);
}
+#ifdef CONFIG_CGROUPS
+
+void cgroup_enter_frozen(void);
+void cgroup_leave_frozen(bool always_leave);
+void cgroup_update_frozen(struct cgroup *cgrp);
+void cgroup_freeze(struct cgroup *cgrp, bool freeze);
+void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
+ struct cgroup *dst);
+
+static inline bool cgroup_task_freeze(struct task_struct *task)
+{
+ bool ret;
+
+ if (task->flags & PF_KTHREAD)
+ return false;
+
+ rcu_read_lock();
+ ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags);
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static inline bool cgroup_task_frozen(struct task_struct *task)
+{
+ return task->frozen;
+}
+
+#else /* !CONFIG_CGROUPS */
+
+static inline void cgroup_enter_frozen(void) { }
+static inline void cgroup_leave_frozen(bool always_leave) { }
+static inline bool cgroup_task_freeze(struct task_struct *task)
+{
+ return false;
+}
+static inline bool cgroup_task_frozen(struct task_struct *task)
+{
+ return false;
+}
+
+#endif /* !CONFIG_CGROUPS */
+
#endif /* _LINUX_CGROUP_H */
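
A hedged sketch of how the freezer entry points are intended to pair up: a task that notices CGRP_FREEZE on its cgroup parks itself between cgroup_enter_frozen() and cgroup_leave_frozen(). Everything below (the function, the sleep pattern, the 'true' argument) is illustrative; the real call sites belong to the signal-delivery path elsewhere in the series.

#include <linux/cgroup.h>
#include <linux/sched.h>

/* Illustrative: park the current task while its cgroup is being frozen. */
static void example_maybe_freeze_current(void)
{
	if (!cgroup_task_freeze(current))
		return;

	cgroup_enter_frozen();
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();			/* sleep until woken, e.g. on thaw */
	__set_current_state(TASK_RUNNING);
	cgroup_leave_frozen(true);
}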
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index b7cf80a71293..491d992d045d 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -24,7 +24,7 @@
#define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */
#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */
/* unused */
-#define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */
+ /* unused */
#define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */
#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */
#define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
@@ -251,19 +251,40 @@ struct clk_ops {
};
/**
+ * struct clk_parent_data - clk parent information
+ * @hw: parent clk_hw pointer (used for clk providers with internal clks)
+ * @fw_name: parent name local to provider registering clk
+ * @name: globally unique parent name (used as a fallback)
+ * @index: parent index local to provider registering clk (if @fw_name absent)
+ */
+struct clk_parent_data {
+ const struct clk_hw *hw;
+ const char *fw_name;
+ const char *name;
+ int index;
+};
+
+/**
* struct clk_init_data - holds init data that's common to all clocks and is
* shared between the clock provider and the common clock framework.
*
* @name: clock name
* @ops: operations this clock supports
* @parent_names: array of string names for all possible parents
+ * @parent_data: array of parent data for all possible parents (when some
+ * parents are external to the clk controller)
+ * @parent_hws: array of pointers to all possible parents (when all parents
+ * are internal to the clk controller)
* @num_parents: number of possible parents
* @flags: framework-level hints and quirks
*/
struct clk_init_data {
const char *name;
const struct clk_ops *ops;
+ /* Only one of the following three should be assigned */
const char * const *parent_names;
+ const struct clk_parent_data *parent_data;
+ const struct clk_hw **parent_hws;
u8 num_parents;
unsigned long flags;
};
@@ -307,7 +328,6 @@ struct clk_fixed_rate {
struct clk_hw hw;
unsigned long fixed_rate;
unsigned long fixed_accuracy;
- u8 flags;
};
#define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw)
@@ -349,6 +369,9 @@ void of_fixed_clk_setup(struct device_node *np);
* of this register, and mask of gate bits are in higher 16-bit of this
* register. While setting the gate bits, higher 16-bit should also be
* updated to indicate changing gate bits.
+ * CLK_GATE_BIG_ENDIAN - by default little endian register accesses are used for
+ * the gate register. Setting this flag makes the register accesses big
+ * endian.
*/
struct clk_gate {
struct clk_hw hw;
@@ -362,6 +385,7 @@ struct clk_gate {
#define CLK_GATE_SET_TO_DISABLE BIT(0)
#define CLK_GATE_HIWORD_MASK BIT(1)
+#define CLK_GATE_BIG_ENDIAN BIT(2)
extern const struct clk_ops clk_gate_ops;
struct clk *clk_register_gate(struct device *dev, const char *name,
@@ -417,6 +441,9 @@ struct clk_div_table {
* CLK_DIVIDER_MAX_AT_ZERO - For dividers which are like CLK_DIVIDER_ONE_BASED
* except when the value read from the register is zero, the divisor is
* 2^width of the field.
+ * CLK_DIVIDER_BIG_ENDIAN - By default little endian register accesses are used
+ * for the divider register. Setting this flag makes the register accesses
+ * big endian.
*/
struct clk_divider {
struct clk_hw hw;
@@ -438,6 +465,7 @@ struct clk_divider {
#define CLK_DIVIDER_ROUND_CLOSEST BIT(4)
#define CLK_DIVIDER_READ_ONLY BIT(5)
#define CLK_DIVIDER_MAX_AT_ZERO BIT(6)
+#define CLK_DIVIDER_BIG_ENDIAN BIT(7)
extern const struct clk_ops clk_divider_ops;
extern const struct clk_ops clk_divider_ro_ops;
@@ -499,8 +527,13 @@ void clk_hw_unregister_divider(struct clk_hw *hw);
* register, and mask of mux bits are in higher 16-bit of this register.
* While setting the mux bits, higher 16-bit should also be updated to
* indicate changing mux bits.
+ * CLK_MUX_READ_ONLY - The mux registers can't be written, only read in the
+ * .get_parent clk_op.
* CLK_MUX_ROUND_CLOSEST - Use the parent rate that is closest to the desired
* frequency.
+ * CLK_MUX_BIG_ENDIAN - By default little endian register accesses are used for
+ * the mux register. Setting this flag makes the register accesses big
+ * endian.
*/
struct clk_mux {
struct clk_hw hw;
@@ -519,6 +552,7 @@ struct clk_mux {
#define CLK_MUX_HIWORD_MASK BIT(2)
#define CLK_MUX_READ_ONLY BIT(3) /* mux can't be changed */
#define CLK_MUX_ROUND_CLOSEST BIT(4)
+#define CLK_MUX_BIG_ENDIAN BIT(5)
extern const struct clk_ops clk_mux_ops;
extern const struct clk_ops clk_mux_ro_ops;
@@ -602,6 +636,9 @@ void clk_hw_unregister_fixed_factor(struct clk_hw *hw);
* is the value read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED
* is set then the numerator and denominator are both the value read
* plus one.
+ * CLK_FRAC_DIVIDER_BIG_ENDIAN - By default little endian register accesses are
+ * used for the divider register. Setting this flag makes the register
+ * accesses big endian.
*/
struct clk_fractional_divider {
struct clk_hw hw;
@@ -622,6 +659,7 @@ struct clk_fractional_divider {
#define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw)
#define CLK_FRAC_DIVIDER_ZERO_BASED BIT(0)
+#define CLK_FRAC_DIVIDER_BIG_ENDIAN BIT(1)
extern const struct clk_ops clk_fractional_divider_ops;
struct clk *clk_register_fractional_divider(struct device *dev,
@@ -654,6 +692,9 @@ void clk_hw_unregister_fractional_divider(struct clk_hw *hw);
* leaving the parent rate unmodified.
* CLK_MULTIPLIER_ROUND_CLOSEST - Makes the best calculated divider to be
* rounded to the closest integer instead of the down one.
+ * CLK_MULTIPLIER_BIG_ENDIAN - By default little endian register accesses are
+ * used for the multiplier register. Setting this flag makes the register
+ * accesses big endian.
*/
struct clk_multiplier {
struct clk_hw hw;
@@ -668,6 +709,7 @@ struct clk_multiplier {
#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0)
#define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1)
+#define CLK_MULTIPLIER_BIG_ENDIAN BIT(2)
extern const struct clk_ops clk_multiplier_ops;
@@ -712,16 +754,19 @@ struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
unsigned long flags);
void clk_hw_unregister_composite(struct clk_hw *hw);
-/***
- * struct clk_gpio_gate - gpio gated clock
+/**
+ * struct clk_gpio - gpio gated clock
*
* @hw: handle between common and hardware-specific interfaces
* @gpiod: gpio descriptor
*
- * Clock with a gpio control for enabling and disabling the parent clock.
- * Implements .enable, .disable and .is_enabled
+ * Clock with a gpio control for enabling and disabling the parent clock
+ * or switching between two parents by asserting or deasserting the gpio.
+ *
+ * Implements .enable, .disable and .is_enabled or
+ * .get_parent, .set_parent and .determine_rate depending on which clk_ops
+ * is used.
*/
-
struct clk_gpio {
struct clk_hw hw;
struct gpio_desc *gpiod;
@@ -738,16 +783,6 @@ struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name,
unsigned long flags);
void clk_hw_unregister_gpio_gate(struct clk_hw *hw);
-/**
- * struct clk_gpio_mux - gpio controlled clock multiplexer
- *
- * @hw: see struct clk_gpio
- * @gpiod: gpio descriptor to select the parent of this clock multiplexer
- *
- * Clock with a gpio control for selecting the parent clock.
- * Implements .get_parent, .set_parent and .determine_rate
- */
-
extern const struct clk_ops clk_gpio_mux_ops;
struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
@@ -757,22 +792,12 @@ struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name,
unsigned long flags);
void clk_hw_unregister_gpio_mux(struct clk_hw *hw);
-/**
- * clk_register - allocate a new clock, register it and return an opaque cookie
- * @dev: device that is registering this clock
- * @hw: link to hardware-specific clock data
- *
- * clk_register is the primary interface for populating the clock tree with new
- * clock nodes. It returns a pointer to the newly allocated struct clk which
- * cannot be dereferenced by driver code but may be used in conjuction with the
- * rest of the clock API. In the event of an error clk_register will return an
- * error code; drivers must test for an error code after calling clk_register.
- */
struct clk *clk_register(struct device *dev, struct clk_hw *hw);
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw);
int __must_check clk_hw_register(struct device *dev, struct clk_hw *hw);
int __must_check devm_clk_hw_register(struct device *dev, struct clk_hw *hw);
+int __must_check of_clk_hw_register(struct device_node *node, struct clk_hw *hw);
void clk_unregister(struct clk *clk);
void devm_clk_unregister(struct device *dev, struct clk *clk);
@@ -993,37 +1018,6 @@ static inline int of_clk_detect_critical(struct device_node *np, int index,
}
#endif /* CONFIG_OF */
-/*
- * wrap access to peripherals in accessor routines
- * for improved portability across platforms
- */
-
-#if IS_ENABLED(CONFIG_PPC)
-
-static inline u32 clk_readl(u32 __iomem *reg)
-{
- return ioread32be(reg);
-}
-
-static inline void clk_writel(u32 val, u32 __iomem *reg)
-{
- iowrite32be(val, reg);
-}
-
-#else /* platform dependent I/O accessors */
-
-static inline u32 clk_readl(u32 __iomem *reg)
-{
- return readl(reg);
-}
-
-static inline void clk_writel(u32 val, u32 __iomem *reg)
-{
- writel(val, reg);
-}
-
-#endif /* platform dependent I/O accessors */
-
void clk_gate_restore_context(struct clk_hw *hw);
#endif /* CONFIG_COMMON_CLK */
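
A hedged example of the new parent description: instead of a flat parent_names array, a provider can describe each possible parent by clk_hw pointer, firmware (DT clock-names) name, or global name. All identifiers below are invented for illustration.

#include <linux/clk-provider.h>
#include <linux/kernel.h>

/* Illustrative parent list: DT-local names with global-name fallbacks. */
static const struct clk_parent_data example_parents[] = {
	{ .fw_name = "xtal",  .name = "xtal" },
	{ .fw_name = "pllin", .name = "ext_pllin" },
};

static const struct clk_init_data example_init = {
	.name		= "example_mux",
	.ops		= &clk_mux_ops,
	.parent_data	= example_parents,	/* instead of .parent_names */
	.num_parents	= ARRAY_SIZE(example_parents),
};

Per the kerneldoc above, only one of parent_names, parent_data or parent_hws should be populated for a given clock.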
diff --git a/include/linux/clk.h b/include/linux/clk.h
index d8bc1a856b39..f689fc58d7be 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -811,6 +811,22 @@ static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
return true;
}
+static inline int clk_set_rate_range(struct clk *clk, unsigned long min,
+ unsigned long max)
+{
+ return 0;
+}
+
+static inline int clk_set_min_rate(struct clk *clk, unsigned long rate)
+{
+ return 0;
+}
+
+static inline int clk_set_max_rate(struct clk *clk, unsigned long rate)
+{
+ return 0;
+}
+
static inline int clk_set_parent(struct clk *clk, struct clk *parent)
{
return 0;
diff --git a/include/linux/clk/analogbits-wrpll-cln28hpc.h b/include/linux/clk/analogbits-wrpll-cln28hpc.h
new file mode 100644
index 000000000000..03279097e138
--- /dev/null
+++ b/include/linux/clk/analogbits-wrpll-cln28hpc.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ * Wesley Terpstra
+ * Paul Walmsley
+ */
+
+#ifndef __LINUX_CLK_ANALOGBITS_WRPLL_CLN28HPC_H
+#define __LINUX_CLK_ANALOGBITS_WRPLL_CLN28HPC_H
+
+#include <linux/types.h>
+
+/* DIVQ_VALUES: number of valid DIVQ values */
+#define DIVQ_VALUES 6
+
+/*
+ * Bit definitions for struct wrpll_cfg.flags
+ *
+ * WRPLL_FLAGS_BYPASS_FLAG: if set, the PLL is either in bypass, or should be
+ * programmed to enter bypass
+ * WRPLL_FLAGS_RESET_FLAG: if set, the PLL is in reset
+ * WRPLL_FLAGS_INT_FEEDBACK_FLAG: if set, the PLL is configured for internal
+ * feedback mode
+ * WRPLL_FLAGS_EXT_FEEDBACK_FLAG: if set, the PLL is configured for external
+ * feedback mode (not yet supported by this driver)
+ */
+#define WRPLL_FLAGS_BYPASS_SHIFT 0
+#define WRPLL_FLAGS_BYPASS_MASK BIT(WRPLL_FLAGS_BYPASS_SHIFT)
+#define WRPLL_FLAGS_RESET_SHIFT 1
+#define WRPLL_FLAGS_RESET_MASK BIT(WRPLL_FLAGS_RESET_SHIFT)
+#define WRPLL_FLAGS_INT_FEEDBACK_SHIFT 2
+#define WRPLL_FLAGS_INT_FEEDBACK_MASK BIT(WRPLL_FLAGS_INT_FEEDBACK_SHIFT)
+#define WRPLL_FLAGS_EXT_FEEDBACK_SHIFT 3
+#define WRPLL_FLAGS_EXT_FEEDBACK_MASK BIT(WRPLL_FLAGS_EXT_FEEDBACK_SHIFT)
+
+/**
+ * struct wrpll_cfg - WRPLL configuration values
+ * @divr: reference divider value (6 bits), as presented to the PLL signals
+ * @divf: feedback divider value (9 bits), as presented to the PLL signals
+ * @divq: output divider value (3 bits), as presented to the PLL signals
+ * @flags: PLL configuration flags. See above for more information
+ * @range: PLL loop filter range. See below for more information
+ * @output_rate_cache: cached output rates, swept across DIVQ
+ * @parent_rate: PLL refclk rate for which values are valid
+ * @max_r: maximum possible R divider value, given @parent_rate
+ * @init_r: initial R divider value to start the search from
+ *
+ * @divr, @divq, @divq, @range represent what the PLL expects to see
+ * on its input signals. Thus @divr and @divf are the actual divisors
+ * minus one. @divq is a power-of-two divider; for example, 1 =
+ * divide-by-2 and 6 = divide-by-64. 0 is an invalid @divq value.
+ *
+ * When initially passing a struct wrpll_cfg record, the
+ * record should be zero-initialized with the exception of the @flags
+ * field. The only flag bits that need to be set are either
+ * WRPLL_FLAGS_INT_FEEDBACK or WRPLL_FLAGS_EXT_FEEDBACK.
+ */
+struct wrpll_cfg {
+ u8 divr;
+ u8 divq;
+ u8 range;
+ u8 flags;
+ u16 divf;
+/* private: */
+ u32 output_rate_cache[DIVQ_VALUES];
+ unsigned long parent_rate;
+ u8 max_r;
+ u8 init_r;
+};
+
+int wrpll_configure_for_rate(struct wrpll_cfg *c, u32 target_rate,
+ unsigned long parent_rate);
+
+unsigned int wrpll_calc_max_lock_us(const struct wrpll_cfg *c);
+
+unsigned long wrpll_calc_output_rate(const struct wrpll_cfg *c,
+ unsigned long parent_rate);
+
+#endif /* __LINUX_CLK_ANALOGBITS_WRPLL_CLN28HPC_H */
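
A hedged sketch of the expected calling sequence from a clock driver's rate handling; register programming and I/O are omitted, and the function is invented for illustration.

#include <linux/clk/analogbits-wrpll-cln28hpc.h>

/* Illustrative: compute a PLL configuration, then report the exact rate. */
static long example_round_rate(u32 target_rate, unsigned long parent_rate)
{
	struct wrpll_cfg cfg = {
		.flags = WRPLL_FLAGS_INT_FEEDBACK_MASK,	/* per the kerneldoc */
	};
	int ret;

	ret = wrpll_configure_for_rate(&cfg, target_rate, parent_rate);
	if (ret)
		return ret;

	return wrpll_calc_output_rate(&cfg, parent_rate);
}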
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index 931ab05f771d..0c53f26ae3d3 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -74,6 +74,8 @@
#define AT91_PMC_USBDIV_4 (2 << 28)
#define AT91_PMC_USB96M (1 << 28) /* Divider by 2 Enable (PLLB only) */
+#define AT91_PMC_CPU_CKR 0x28 /* CPU Clock Register */
+
#define AT91_PMC_MCKR 0x30 /* Master Clock Register */
#define AT91_PMC_CSS (3 << 0) /* Master Clock Selection */
#define AT91_PMC_CSS_SLOW (0 << 0)
@@ -187,16 +189,8 @@
#define AT91_PMC_PCR 0x10c /* Peripheral Control Register [some SAM9 and SAMA5] */
#define AT91_PMC_PCR_PID_MASK 0x3f
-#define AT91_PMC_PCR_GCKCSS_OFFSET 8
-#define AT91_PMC_PCR_GCKCSS_MASK (0x7 << AT91_PMC_PCR_GCKCSS_OFFSET)
-#define AT91_PMC_PCR_GCKCSS(n) ((n) << AT91_PMC_PCR_GCKCSS_OFFSET) /* GCK Clock Source Selection */
#define AT91_PMC_PCR_CMD (0x1 << 12) /* Command (read=0, write=1) */
-#define AT91_PMC_PCR_DIV_OFFSET 16
-#define AT91_PMC_PCR_DIV_MASK (0x3 << AT91_PMC_PCR_DIV_OFFSET)
-#define AT91_PMC_PCR_DIV(n) ((n) << AT91_PMC_PCR_DIV_OFFSET) /* Divisor Value */
-#define AT91_PMC_PCR_GCKDIV_OFFSET 20
-#define AT91_PMC_PCR_GCKDIV_MASK (0xff << AT91_PMC_PCR_GCKDIV_OFFSET)
-#define AT91_PMC_PCR_GCKDIV(n) ((n) << AT91_PMC_PCR_GCKDIV_OFFSET) /* Generated Clock Divisor Value */
+#define AT91_PMC_PCR_GCKDIV_MASK GENMASK(27, 20)
#define AT91_PMC_PCR_EN (0x1 << 28) /* Enable */
#define AT91_PMC_PCR_GCKEN (0x1 << 29) /* GCK Enable */
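
With the per-field OFFSET/value macros collapsed into a single GENMASK, callers are presumably expected to lean on the generic bitfield helpers; FIELD_PREP() from linux/bitfield.h and the function below are an assumption about the intended usage, not something defined by this header.

#include <linux/bitfield.h>
#include <linux/clk/at91_pmc.h>
#include <linux/types.h>

/* Illustrative: compose a PMC_PCR write that enables a generated clock. */
static u32 example_pcr_value(unsigned int pid, unsigned int gckdiv)
{
	return (pid & AT91_PMC_PCR_PID_MASK) |
	       FIELD_PREP(AT91_PMC_PCR_GCKDIV_MASK, gckdiv) |
	       AT91_PMC_PCR_CMD | AT91_PMC_PCR_GCKEN;
}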
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 78872efc7be0..1e8ef96555ce 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -243,6 +243,7 @@ struct ti_clk_ll_ops {
#define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw)
+bool omap2_clk_is_hw_omap(struct clk_hw *hw);
int omap2_clk_disable_autoidle_all(void);
int omap2_clk_enable_autoidle_all(void);
int omap2_clk_allow_idle(struct clk *clk);
@@ -293,6 +294,7 @@ struct ti_clk_features {
#define TI_CLK_DISABLE_CLKDM_CONTROL BIT(2)
#define TI_CLK_ERRATA_I810 BIT(3)
#define TI_CLK_CLKCTRL_COMPAT BIT(4)
+#define TI_CLK_DEVICE_TYPE_GP BIT(5)
void ti_clk_setup_features(struct ti_clk_features *features);
const struct ti_clk_features *ti_clk_get_features(void);
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 445348facea9..d58aa0db05f9 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -67,7 +67,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
.line = __LINE__, \
}; \
______r = !!(cond); \
- ______f.miss_hit[______r]++; \
+ ______r ? ______f.miss_hit[1]++ : ______f.miss_hit[0]++;\
______r; \
}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h
index a1a959ba24ff..b0e35eec6499 100644
--- a/include/linux/coresight-pmu.h
+++ b/include/linux/coresight-pmu.h
@@ -12,11 +12,13 @@
/* ETMv3.5/PTM's ETMCR config bit */
#define ETM_OPT_CYCACC 12
+#define ETM_OPT_CTXTID 14
#define ETM_OPT_TS 28
#define ETM_OPT_RETSTK 29
/* ETMv4 CONFIGR programming bits for the ETM OPTs */
#define ETM4_CFG_BIT_CYCACC 4
+#define ETM4_CFG_BIT_CTXTID 6
#define ETM4_CFG_BIT_TS 11
#define ETM4_CFG_BIT_RETSTK 12
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 7b87965f7a65..62a520df8add 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -192,9 +192,10 @@ struct coresight_device {
*/
struct coresight_ops_sink {
int (*enable)(struct coresight_device *csdev, u32 mode, void *data);
- void (*disable)(struct coresight_device *csdev);
- void *(*alloc_buffer)(struct coresight_device *csdev, int cpu,
- void **pages, int nr_pages, bool overwrite);
+ int (*disable)(struct coresight_device *csdev);
+ void *(*alloc_buffer)(struct coresight_device *csdev,
+ struct perf_event *event, void **pages,
+ int nr_pages, bool overwrite);
void (*free_buffer)(void *config);
unsigned long (*update_buffer)(struct coresight_device *csdev,
struct perf_output_handle *handle,
diff --git a/include/linux/counter.h b/include/linux/counter.h
new file mode 100644
index 000000000000..a061cdcdef7c
--- /dev/null
+++ b/include/linux/counter.h
@@ -0,0 +1,510 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Counter interface
+ * Copyright (C) 2018 William Breathitt Gray
+ */
+#ifndef _COUNTER_H_
+#define _COUNTER_H_
+
+#include <linux/counter_enum.h>
+#include <linux/device.h>
+#include <linux/types.h>
+
+enum counter_count_direction {
+ COUNTER_COUNT_DIRECTION_FORWARD = 0,
+ COUNTER_COUNT_DIRECTION_BACKWARD
+};
+extern const char *const counter_count_direction_str[2];
+
+enum counter_count_mode {
+ COUNTER_COUNT_MODE_NORMAL = 0,
+ COUNTER_COUNT_MODE_RANGE_LIMIT,
+ COUNTER_COUNT_MODE_NON_RECYCLE,
+ COUNTER_COUNT_MODE_MODULO_N
+};
+extern const char *const counter_count_mode_str[4];
+
+struct counter_device;
+struct counter_signal;
+
+/**
+ * struct counter_signal_ext - Counter Signal extensions
+ * @name: attribute name
+ * @read: read callback for this attribute; may be NULL
+ * @write: write callback for this attribute; may be NULL
+ * @priv: data private to the driver
+ */
+struct counter_signal_ext {
+ const char *name;
+ ssize_t (*read)(struct counter_device *counter,
+ struct counter_signal *signal, void *priv, char *buf);
+ ssize_t (*write)(struct counter_device *counter,
+ struct counter_signal *signal, void *priv,
+ const char *buf, size_t len);
+ void *priv;
+};
+
+/**
+ * struct counter_signal - Counter Signal node
+ * @id: unique ID used to identify signal
+ * @name: device-specific Signal name; ideally, this should match the name
+ * as it appears in the datasheet documentation
+ * @ext: optional array of Counter Signal extensions
+ * @num_ext: number of Counter Signal extensions specified in @ext
+ * @priv: optional private data supplied by driver
+ */
+struct counter_signal {
+ int id;
+ const char *name;
+
+ const struct counter_signal_ext *ext;
+ size_t num_ext;
+
+ void *priv;
+};
+
+/**
+ * struct counter_signal_enum_ext - Signal enum extension attribute
+ * @items: Array of strings
+ * @num_items: Number of items specified in @items
+ * @set: Set callback function; may be NULL
+ * @get: Get callback function; may be NULL
+ *
+ * The counter_signal_enum_ext structure can be used to implement enum style
+ * Signal extension attributes. Enum style attributes are those which have a set
+ * of strings that map to unsigned integer values. The Generic Counter Signal
+ * enum extension helper code takes care of mapping between value and string, as
+ * well as generating a "_available" file which contains a list of all available
+ * items. The get callback is used to query the currently active item; the index
+ * of the item within the respective items array is returned via the 'item'
+ * parameter. The set callback is called when the attribute is updated; the
+ * 'item' parameter contains the index of the newly activated item within the
+ * respective items array.
+ */
+struct counter_signal_enum_ext {
+ const char * const *items;
+ size_t num_items;
+ int (*get)(struct counter_device *counter,
+ struct counter_signal *signal, size_t *item);
+ int (*set)(struct counter_device *counter,
+ struct counter_signal *signal, size_t item);
+};
+
+/**
+ * COUNTER_SIGNAL_ENUM() - Initialize Signal enum extension
+ * @_name: Attribute name
+ * @_e: Pointer to a counter_signal_enum_ext structure
+ *
+ * This should usually be used together with COUNTER_SIGNAL_ENUM_AVAILABLE()
+ */
+#define COUNTER_SIGNAL_ENUM(_name, _e) \
+{ \
+ .name = (_name), \
+ .read = counter_signal_enum_read, \
+ .write = counter_signal_enum_write, \
+ .priv = (_e) \
+}
+
+/**
+ * COUNTER_SIGNAL_ENUM_AVAILABLE() - Initialize Signal enum available extension
+ * @_name: Attribute name ("_available" will be appended to the name)
+ * @_e: Pointer to a counter_signal_enum_ext structure
+ *
+ * Creates a read only attribute that lists all the available enum items in a
+ * newline separated list. This should usually be used together with
+ * COUNTER_SIGNAL_ENUM()
+ */
+#define COUNTER_SIGNAL_ENUM_AVAILABLE(_name, _e) \
+{ \
+ .name = (_name "_available"), \
+ .read = counter_signal_enum_available_read, \
+ .priv = (_e) \
+}
+
+enum counter_synapse_action {
+ COUNTER_SYNAPSE_ACTION_NONE = 0,
+ COUNTER_SYNAPSE_ACTION_RISING_EDGE,
+ COUNTER_SYNAPSE_ACTION_FALLING_EDGE,
+ COUNTER_SYNAPSE_ACTION_BOTH_EDGES
+};
+
+/**
+ * struct counter_synapse - Counter Synapse node
+ * @action: index of current action mode
+ * @actions_list: array of available action modes
+ * @num_actions: number of action modes specified in @actions_list
+ * @signal: pointer to associated signal
+ */
+struct counter_synapse {
+ size_t action;
+ const enum counter_synapse_action *actions_list;
+ size_t num_actions;
+
+ struct counter_signal *signal;
+};
+
+struct counter_count;
+
+/**
+ * struct counter_count_ext - Counter Count extension
+ * @name: attribute name
+ * @read: read callback for this attribute; may be NULL
+ * @write: write callback for this attribute; may be NULL
+ * @priv: data private to the driver
+ */
+struct counter_count_ext {
+ const char *name;
+ ssize_t (*read)(struct counter_device *counter,
+ struct counter_count *count, void *priv, char *buf);
+ ssize_t (*write)(struct counter_device *counter,
+ struct counter_count *count, void *priv,
+ const char *buf, size_t len);
+ void *priv;
+};
+
+enum counter_count_function {
+ COUNTER_COUNT_FUNCTION_INCREASE = 0,
+ COUNTER_COUNT_FUNCTION_DECREASE,
+ COUNTER_COUNT_FUNCTION_PULSE_DIRECTION,
+ COUNTER_COUNT_FUNCTION_QUADRATURE_X1_A,
+ COUNTER_COUNT_FUNCTION_QUADRATURE_X1_B,
+ COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A,
+ COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B,
+ COUNTER_COUNT_FUNCTION_QUADRATURE_X4
+};
+
+/**
+ * struct counter_count - Counter Count node
+ * @id: unique ID used to identify Count
+ * @name: device-specific Count name; ideally, this should match
+ * the name as it appears in the datasheet documentation
+ * @function: index of current function mode
+ * @functions_list: array of available function modes
+ * @num_functions: number of function modes specified in @functions_list
+ * @synapses: array of synapses for initialization
+ * @num_synapses: number of synapses specified in @synapses
+ * @ext: optional array of Counter Count extensions
+ * @num_ext: number of Counter Count extensions specified in @ext
+ * @priv: optional private data supplied by driver
+ */
+struct counter_count {
+ int id;
+ const char *name;
+
+ size_t function;
+ const enum counter_count_function *functions_list;
+ size_t num_functions;
+
+ struct counter_synapse *synapses;
+ size_t num_synapses;
+
+ const struct counter_count_ext *ext;
+ size_t num_ext;
+
+ void *priv;
+};
+
+/**
+ * struct counter_count_enum_ext - Count enum extension attribute
+ * @items: Array of strings
+ * @num_items: Number of items specified in @items
+ * @set: Set callback function; may be NULL
+ * @get: Get callback function; may be NULL
+ *
+ * The counter_count_enum_ext structure can be used to implement enum style
+ * Count extension attributes. Enum style attributes are those which have a set
+ * of strings that map to unsigned integer values. The Generic Counter Count
+ * enum extension helper code takes care of mapping between value and string, as
+ * well as generating a "_available" file which contains a list of all available
+ * items. The get callback is used to query the currently active item; the index
+ * of the item within the respective items array is returned via the 'item'
+ * parameter. The set callback is called when the attribute is updated; the
+ * 'item' parameter contains the index of the newly activated item within the
+ * respective items array.
+ */
+struct counter_count_enum_ext {
+ const char * const *items;
+ size_t num_items;
+ int (*get)(struct counter_device *counter, struct counter_count *count,
+ size_t *item);
+ int (*set)(struct counter_device *counter, struct counter_count *count,
+ size_t item);
+};
+
+/**
+ * COUNTER_COUNT_ENUM() - Initialize Count enum extension
+ * @_name: Attribute name
+ * @_e: Pointer to a counter_count_enum_ext structure
+ *
+ * This should usually be used together with COUNTER_COUNT_ENUM_AVAILABLE()
+ */
+#define COUNTER_COUNT_ENUM(_name, _e) \
+{ \
+ .name = (_name), \
+ .read = counter_count_enum_read, \
+ .write = counter_count_enum_write, \
+ .priv = (_e) \
+}
+
+/**
+ * COUNTER_COUNT_ENUM_AVAILABLE() - Initialize Count enum available extension
+ * @_name: Attribute name ("_available" will be appended to the name)
+ * @_e: Pointer to a counter_count_enum_ext structure
+ *
+ * Creates a read only attribute that lists all the available enum items in a
+ * newline separated list. This should usually be used together with
+ * COUNTER_COUNT_ENUM()
+ */
+#define COUNTER_COUNT_ENUM_AVAILABLE(_name, _e) \
+{ \
+ .name = (_name "_available"), \
+ .read = counter_count_enum_available_read, \
+ .priv = (_e) \
+}
+
+/**
+ * struct counter_device_attr_group - internal container for attribute group
+ * @attr_group: Counter sysfs attributes group
+ * @attr_list: list to keep track of created Counter sysfs attributes
+ * @num_attr: number of Counter sysfs attributes
+ */
+struct counter_device_attr_group {
+ struct attribute_group attr_group;
+ struct list_head attr_list;
+ size_t num_attr;
+};
+
+/**
+ * struct counter_device_state - internal state container for a Counter device
+ * @id: unique ID used to identify the Counter
+ * @dev: internal device structure
+ * @groups_list: attribute groups list (for Signals, Counts, and ext)
+ * @num_groups: number of attribute groups containers
+ * @groups: Counter sysfs attribute groups (to populate @dev.groups)
+ */
+struct counter_device_state {
+ int id;
+ struct device dev;
+ struct counter_device_attr_group *groups_list;
+ size_t num_groups;
+ const struct attribute_group **groups;
+};
+
+/**
+ * struct counter_signal_read_value - Opaque Signal read value
+ * @buf: string representation of Signal read value
+ * @len: length of string in @buf
+ */
+struct counter_signal_read_value {
+ char *buf;
+ size_t len;
+};
+
+/**
+ * struct counter_count_read_value - Opaque Count read value
+ * @buf: string representation of Count read value
+ * @len: length of string in @buf
+ */
+struct counter_count_read_value {
+ char *buf;
+ size_t len;
+};
+
+/**
+ * struct counter_count_write_value - Opaque Count write value
+ * @buf: string representation of Count write value
+ */
+struct counter_count_write_value {
+ const char *buf;
+};
+
+/**
+ * struct counter_ops - Callbacks from driver
+ * @signal_read: optional read callback for Signal attribute. The read
+ * value of the respective Signal should be passed back via
+ * the val parameter. val points to an opaque type which
+ * should be set only by calling the
+ * counter_signal_read_value_set function from within the
+ * signal_read callback.
+ * @count_read: optional read callback for Count attribute. The read
+ * value of the respective Count should be passed back via
+ * the val parameter. val points to an opaque type which
+ * should be set only by calling the
+ * counter_count_read_value_set function from within the
+ * count_read callback.
+ * @count_write: optional write callback for Count attribute. The write
+ * value for the respective Count is passed in via the val
+ * parameter. val points to an opaque type which should be
+ * accessed only by calling the
+ * counter_count_write_value_get function.
+ * @function_get: function to get the current count function mode. Returns
+ * 0 on success and negative error code on error. The index
+ * of the respective Count's returned function mode should
+ * be passed back via the function parameter.
+ * @function_set: function to set the count function mode. function is the
+ * index of the requested function mode from the respective
+ * Count's functions_list array.
+ * @action_get: function to get the current action mode. Returns 0 on
+ * success and negative error code on error. The index of
+ * the respective Signal's returned action mode should be
+ * passed back via the action parameter.
+ * @action_set: function to set the action mode. action is the index of
+ * the requested action mode from the respective Synapse's
+ * actions_list array.
+ */
+struct counter_ops {
+ int (*signal_read)(struct counter_device *counter,
+ struct counter_signal *signal,
+ struct counter_signal_read_value *val);
+ int (*count_read)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_count_read_value *val);
+ int (*count_write)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_count_write_value *val);
+ int (*function_get)(struct counter_device *counter,
+ struct counter_count *count, size_t *function);
+ int (*function_set)(struct counter_device *counter,
+ struct counter_count *count, size_t function);
+ int (*action_get)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse, size_t *action);
+ int (*action_set)(struct counter_device *counter,
+ struct counter_count *count,
+ struct counter_synapse *synapse, size_t action);
+};
+
+/**
+ * struct counter_device_ext - Counter device extension
+ * @name: attribute name
+ * @read: read callback for this attribute; may be NULL
+ * @write: write callback for this attribute; may be NULL
+ * @priv: data private to the driver
+ */
+struct counter_device_ext {
+ const char *name;
+ ssize_t (*read)(struct counter_device *counter, void *priv, char *buf);
+ ssize_t (*write)(struct counter_device *counter, void *priv,
+ const char *buf, size_t len);
+ void *priv;
+};
+
+/**
+ * struct counter_device_enum_ext - Counter enum extension attribute
+ * @items: Array of strings
+ * @num_items: Number of items specified in @items
+ * @set: Set callback function; may be NULL
+ * @get: Get callback function; may be NULL
+ *
+ * The counter_device_enum_ext structure can be used to implement enum style
+ * Counter extension attributes. Enum style attributes are those which have a
+ * set of strings that map to unsigned integer values. The Generic Counter enum
+ * extension helper code takes care of mapping between value and string, as well
+ * as generating a "_available" file which contains a list of all available
+ * items. The get callback is used to query the currently active item; the index
+ * of the item within the respective items array is returned via the 'item'
+ * parameter. The set callback is called when the attribute is updated; the
+ * 'item' parameter contains the index of the newly activated item within the
+ * respective items array.
+ */
+struct counter_device_enum_ext {
+ const char * const *items;
+ size_t num_items;
+ int (*get)(struct counter_device *counter, size_t *item);
+ int (*set)(struct counter_device *counter, size_t item);
+};
+
+/**
+ * COUNTER_DEVICE_ENUM() - Initialize Counter enum extension
+ * @_name: Attribute name
+ * @_e: Pointer to a counter_device_enum_ext structure
+ *
+ * This should usually be used together with COUNTER_DEVICE_ENUM_AVAILABLE()
+ */
+#define COUNTER_DEVICE_ENUM(_name, _e) \
+{ \
+ .name = (_name), \
+ .read = counter_device_enum_read, \
+ .write = counter_device_enum_write, \
+ .priv = (_e) \
+}
+
+/**
+ * COUNTER_DEVICE_ENUM_AVAILABLE() - Initialize Counter enum available extension
+ * @_name: Attribute name ("_available" will be appended to the name)
+ * @_e: Pointer to a counter_device_enum_ext structure
+ *
+ * Creates a read only attribute that lists all the available enum items in a
+ * newline separated list. This should usually be used together with
+ * COUNTER_DEVICE_ENUM()
+ */
+#define COUNTER_DEVICE_ENUM_AVAILABLE(_name, _e) \
+{ \
+ .name = (_name "_available"), \
+ .read = counter_device_enum_available_read, \
+ .priv = (_e) \
+}
+
+/**
+ * struct counter_device - Counter data structure
+ * @name: name of the device as it appears in the datasheet
+ * @parent: optional parent device providing the counters
+ * @device_state: internal device state container
+ * @ops: callbacks from driver
+ * @signals: array of Signals
+ * @num_signals: number of Signals specified in @signals
+ * @counts: array of Counts
+ * @num_counts: number of Counts specified in @counts
+ * @ext: optional array of Counter device extensions
+ * @num_ext: number of Counter device extensions specified in @ext
+ * @priv: optional private data supplied by driver
+ */
+struct counter_device {
+ const char *name;
+ struct device *parent;
+ struct counter_device_state *device_state;
+
+ const struct counter_ops *ops;
+
+ struct counter_signal *signals;
+ size_t num_signals;
+ struct counter_count *counts;
+ size_t num_counts;
+
+ const struct counter_device_ext *ext;
+ size_t num_ext;
+
+ void *priv;
+};
+
+enum counter_signal_level {
+ COUNTER_SIGNAL_LEVEL_LOW = 0,
+ COUNTER_SIGNAL_LEVEL_HIGH
+};
+
+enum counter_signal_value_type {
+ COUNTER_SIGNAL_LEVEL = 0
+};
+
+enum counter_count_value_type {
+ COUNTER_COUNT_POSITION = 0,
+};
+
+void counter_signal_read_value_set(struct counter_signal_read_value *const val,
+ const enum counter_signal_value_type type,
+ void *const data);
+void counter_count_read_value_set(struct counter_count_read_value *const val,
+ const enum counter_count_value_type type,
+ void *const data);
+int counter_count_write_value_get(void *const data,
+ const enum counter_count_value_type type,
+ const struct counter_count_write_value *const val);
+
+int counter_register(struct counter_device *const counter);
+void counter_unregister(struct counter_device *const counter);
+int devm_counter_register(struct device *dev,
+ struct counter_device *const counter);
+void devm_counter_unregister(struct device *dev,
+ struct counter_device *const counter);
+
+#endif /* _COUNTER_H_ */
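
The counter_ops kernel-doc above describes a callback-driven model where a driver fills in a counter_device and hands it to (devm_)counter_register(). A minimal sketch of that wiring follows; the foo_* names, the platform-device probe context, the register behind readl() and the use of an unsigned long for the COUNTER_COUNT_POSITION data are illustrative assumptions, not requirements of this header.

#include <linux/counter.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv {
        void __iomem *base;             /* assumed MMIO register window */
};

static int foo_count_read(struct counter_device *counter,
                          struct counter_count *count,
                          struct counter_count_read_value *val)
{
        struct foo_priv *priv = counter->priv;
        unsigned long position = readl(priv->base);     /* assumed count register at offset 0 */

        /* Opaque read values must be set through the helper, per counter_ops. */
        counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &position);
        return 0;
}

static int foo_function_get(struct counter_device *counter,
                            struct counter_count *count, size_t *function)
{
        *function = 0;  /* only "increase" is offered below */
        return 0;
}

static const struct counter_ops foo_ops = {
        .count_read = foo_count_read,
        .function_get = foo_function_get,
};

static const enum counter_count_function foo_functions[] = {
        COUNTER_COUNT_FUNCTION_INCREASE,
};

static struct counter_count foo_counts[] = {
        {
                .id = 0,
                .name = "Count 0",
                .functions_list = foo_functions,
                .num_functions = ARRAY_SIZE(foo_functions),
        },
};

static int foo_counter_probe(struct platform_device *pdev)
{
        struct counter_device *counter;
        struct foo_priv *priv;

        counter = devm_kzalloc(&pdev->dev, sizeof(*counter), GFP_KERNEL);
        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!counter || !priv)
                return -ENOMEM;

        priv->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(priv->base))
                return PTR_ERR(priv->base);

        counter->name = dev_name(&pdev->dev);
        counter->parent = &pdev->dev;
        counter->ops = &foo_ops;
        counter->counts = foo_counts;
        counter->num_counts = ARRAY_SIZE(foo_counts);
        counter->priv = priv;

        /* The devm_ variant unregisters automatically when the driver detaches. */
        return devm_counter_register(&pdev->dev, counter);
}

static struct platform_driver foo_counter_driver = {
        .probe = foo_counter_probe,
        .driver = {
                .name = "foo-counter",  /* hypothetical platform device name */
        },
};
module_platform_driver(foo_counter_driver);

MODULE_LICENSE("GPL v2");
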
diff --git a/include/linux/counter_enum.h b/include/linux/counter_enum.h
new file mode 100644
index 000000000000..9f917298a88f
--- /dev/null
+++ b/include/linux/counter_enum.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Counter interface enum functions
+ * Copyright (C) 2018 William Breathitt Gray
+ */
+#ifndef _COUNTER_ENUM_H_
+#define _COUNTER_ENUM_H_
+
+#include <linux/types.h>
+
+struct counter_device;
+struct counter_signal;
+struct counter_count;
+
+ssize_t counter_signal_enum_read(struct counter_device *counter,
+ struct counter_signal *signal, void *priv,
+ char *buf);
+ssize_t counter_signal_enum_write(struct counter_device *counter,
+ struct counter_signal *signal, void *priv,
+ const char *buf, size_t len);
+
+ssize_t counter_signal_enum_available_read(struct counter_device *counter,
+ struct counter_signal *signal,
+ void *priv, char *buf);
+
+ssize_t counter_count_enum_read(struct counter_device *counter,
+ struct counter_count *count, void *priv,
+ char *buf);
+ssize_t counter_count_enum_write(struct counter_device *counter,
+ struct counter_count *count, void *priv,
+ const char *buf, size_t len);
+
+ssize_t counter_count_enum_available_read(struct counter_device *counter,
+ struct counter_count *count,
+ void *priv, char *buf);
+
+ssize_t counter_device_enum_read(struct counter_device *counter, void *priv,
+ char *buf);
+ssize_t counter_device_enum_write(struct counter_device *counter, void *priv,
+ const char *buf, size_t len);
+
+ssize_t counter_device_enum_available_read(struct counter_device *counter,
+ void *priv, char *buf);
+
+#endif /* _COUNTER_ENUM_H_ */
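
Together with the COUNTER_COUNT_ENUM()/COUNTER_COUNT_ENUM_AVAILABLE() initializers from counter.h, these helpers let a driver expose an enum-style Count attribute with almost no sysfs code of its own. A hedged sketch follows; the bar_* names and the cached count_mode state are assumptions for illustration only.

#include <linux/counter.h>
#include <linux/kernel.h>

struct bar_priv {
        size_t count_mode[4];           /* assumed per-Count cached mode */
};

static const char *const bar_count_modes[] = {
        "normal",
        "range limit",
        "non-recycle",
        "modulo-n",
};

static int bar_count_mode_get(struct counter_device *counter,
                              struct counter_count *count, size_t *item)
{
        struct bar_priv *priv = counter->priv;

        *item = priv->count_mode[count->id];
        return 0;
}

static int bar_count_mode_set(struct counter_device *counter,
                              struct counter_count *count, size_t item)
{
        struct bar_priv *priv = counter->priv;

        /* A real driver would also program the hardware here. */
        priv->count_mode[count->id] = item;
        return 0;
}

static struct counter_count_enum_ext bar_count_mode_enum = {
        .items = bar_count_modes,
        .num_items = ARRAY_SIZE(bar_count_modes),
        .get = bar_count_mode_get,
        .set = bar_count_mode_set,
};

/* Yields a "count_mode" attribute plus a read-only "count_mode_available". */
static const struct counter_count_ext bar_count_ext[] = {
        COUNTER_COUNT_ENUM("count_mode", &bar_count_mode_enum),
        COUNTER_COUNT_ENUM_AVAILABLE("count_mode", &bar_count_mode_enum),
};
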
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 9c703a0abe6e..cc4980bb0f65 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -44,7 +44,7 @@
*/
#define CPER_REC_LEN 256
/*
- * Severity difinition for error_severity in struct cper_record_header
+ * Severity definition for error_severity in struct cper_record_header
* and section_severity in struct cper_section_descriptor
*/
enum {
@@ -55,24 +55,21 @@ enum {
};
/*
- * Validation bits difinition for validation_bits in struct
+ * Validation bits definition for validation_bits in struct
* cper_record_header. If set, corresponding fields in struct
* cper_record_header contain valid information.
- *
- * corresponds platform_id
*/
#define CPER_VALID_PLATFORM_ID 0x0001
-/* corresponds timestamp */
#define CPER_VALID_TIMESTAMP 0x0002
-/* corresponds partition_id */
#define CPER_VALID_PARTITION_ID 0x0004
/*
* Notification type used to generate error record, used in
- * notification_type in struct cper_record_header
- *
- * Corrected Machine Check
+ * notification_type in struct cper_record_header. These UUIDs are defined
+ * in the UEFI spec v2.7, sec N.2.1.
*/
+
+/* Corrected Machine Check */
#define CPER_NOTIFY_CMC \
GUID_INIT(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \
0xEB, 0xD4, 0xF8, 0x90)
@@ -122,14 +119,11 @@ enum {
#define CPER_SEC_REV 0x0100
/*
- * Validation bits difinition for validation_bits in struct
+ * Validation bits definition for validation_bits in struct
* cper_section_descriptor. If set, corresponding fields in struct
* cper_section_descriptor contain valid information.
- *
- * corresponds fru_id
*/
#define CPER_SEC_VALID_FRU_ID 0x1
-/* corresponds fru_text */
#define CPER_SEC_VALID_FRU_TEXT 0x2
/*
@@ -165,10 +159,11 @@ enum {
/*
* Section type definitions, used in section_type field in struct
- * cper_section_descriptor
- *
- * Processor Generic
+ * cper_section_descriptor. These UUIDs are defined in the UEFI spec
+ * v2.7, sec N.2.2.
*/
+
+/* Processor Generic */
#define CPER_SEC_PROC_GENERIC \
GUID_INIT(0x9876CCAD, 0x47B4, 0x4bdb, 0xB6, 0x5E, 0x16, 0xF1, \
0x93, 0xC4, 0xF3, 0xDB)
@@ -325,220 +320,223 @@ enum {
*/
#pragma pack(1)
+/* Record Header, UEFI v2.7 sec N.2.1 */
struct cper_record_header {
char signature[CPER_SIG_SIZE]; /* must be CPER_SIG_RECORD */
- __u16 revision; /* must be CPER_RECORD_REV */
- __u32 signature_end; /* must be CPER_SIG_END */
- __u16 section_count;
- __u32 error_severity;
- __u32 validation_bits;
- __u32 record_length;
- __u64 timestamp;
+ u16 revision; /* must be CPER_RECORD_REV */
+ u32 signature_end; /* must be CPER_SIG_END */
+ u16 section_count;
+ u32 error_severity;
+ u32 validation_bits;
+ u32 record_length;
+ u64 timestamp;
guid_t platform_id;
guid_t partition_id;
guid_t creator_id;
guid_t notification_type;
- __u64 record_id;
- __u32 flags;
- __u64 persistence_information;
- __u8 reserved[12]; /* must be zero */
+ u64 record_id;
+ u32 flags;
+ u64 persistence_information;
+ u8 reserved[12]; /* must be zero */
};
+/* Section Descriptor, UEFI v2.7 sec N.2.2 */
struct cper_section_descriptor {
- __u32 section_offset; /* Offset in bytes of the
+ u32 section_offset; /* Offset in bytes of the
* section body from the base
* of the record header */
- __u32 section_length;
- __u16 revision; /* must be CPER_RECORD_REV */
- __u8 validation_bits;
- __u8 reserved; /* must be zero */
- __u32 flags;
+ u32 section_length;
+ u16 revision; /* must be CPER_RECORD_REV */
+ u8 validation_bits;
+ u8 reserved; /* must be zero */
+ u32 flags;
guid_t section_type;
guid_t fru_id;
- __u32 section_severity;
- __u8 fru_text[20];
+ u32 section_severity;
+ u8 fru_text[20];
};
-/* Generic Processor Error Section */
+/* Generic Processor Error Section, UEFI v2.7 sec N.2.4.1 */
struct cper_sec_proc_generic {
- __u64 validation_bits;
- __u8 proc_type;
- __u8 proc_isa;
- __u8 proc_error_type;
- __u8 operation;
- __u8 flags;
- __u8 level;
- __u16 reserved;
- __u64 cpu_version;
+ u64 validation_bits;
+ u8 proc_type;
+ u8 proc_isa;
+ u8 proc_error_type;
+ u8 operation;
+ u8 flags;
+ u8 level;
+ u16 reserved;
+ u64 cpu_version;
char cpu_brand[128];
- __u64 proc_id;
- __u64 target_addr;
- __u64 requestor_id;
- __u64 responder_id;
- __u64 ip;
+ u64 proc_id;
+ u64 target_addr;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 ip;
};
-/* IA32/X64 Processor Error Section */
+/* IA32/X64 Processor Error Section, UEFI v2.7 sec N.2.4.2 */
struct cper_sec_proc_ia {
- __u64 validation_bits;
- __u64 lapic_id;
- __u8 cpuid[48];
+ u64 validation_bits;
+ u64 lapic_id;
+ u8 cpuid[48];
};
-/* IA32/X64 Processor Error Information Structure */
+/* IA32/X64 Processor Error Information Structure, UEFI v2.7 sec N.2.4.2.1 */
struct cper_ia_err_info {
guid_t err_type;
- __u64 validation_bits;
- __u64 check_info;
- __u64 target_id;
- __u64 requestor_id;
- __u64 responder_id;
- __u64 ip;
+ u64 validation_bits;
+ u64 check_info;
+ u64 target_id;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 ip;
};
-/* IA32/X64 Processor Context Information Structure */
+/* IA32/X64 Processor Context Information Structure, UEFI v2.7 sec N.2.4.2.2 */
struct cper_ia_proc_ctx {
- __u16 reg_ctx_type;
- __u16 reg_arr_size;
- __u32 msr_addr;
- __u64 mm_reg_addr;
+ u16 reg_ctx_type;
+ u16 reg_arr_size;
+ u32 msr_addr;
+ u64 mm_reg_addr;
};
-/* ARM Processor Error Section */
+/* ARM Processor Error Section, UEFI v2.7 sec N.2.4.4 */
struct cper_sec_proc_arm {
- __u32 validation_bits;
- __u16 err_info_num; /* Number of Processor Error Info */
- __u16 context_info_num; /* Number of Processor Context Info Records*/
- __u32 section_length;
- __u8 affinity_level;
- __u8 reserved[3]; /* must be zero */
- __u64 mpidr;
- __u64 midr;
- __u32 running_state; /* Bit 0 set - Processor running. PSCI = 0 */
- __u32 psci_state;
+ u32 validation_bits;
+ u16 err_info_num; /* Number of Processor Error Info */
+ u16 context_info_num; /* Number of Processor Context Info Records */
+ u32 section_length;
+ u8 affinity_level;
+ u8 reserved[3]; /* must be zero */
+ u64 mpidr;
+ u64 midr;
+ u32 running_state; /* Bit 0 set - Processor running. PSCI = 0 */
+ u32 psci_state;
};
-/* ARM Processor Error Information Structure */
+/* ARM Processor Error Information Structure, UEFI v2.7 sec N.2.4.4.1 */
struct cper_arm_err_info {
- __u8 version;
- __u8 length;
- __u16 validation_bits;
- __u8 type;
- __u16 multiple_error;
- __u8 flags;
- __u64 error_info;
- __u64 virt_fault_addr;
- __u64 physical_fault_addr;
+ u8 version;
+ u8 length;
+ u16 validation_bits;
+ u8 type;
+ u16 multiple_error;
+ u8 flags;
+ u64 error_info;
+ u64 virt_fault_addr;
+ u64 physical_fault_addr;
};
-/* ARM Processor Context Information Structure */
+/* ARM Processor Context Information Structure, UEFI v2.7 sec N.2.4.4.2 */
struct cper_arm_ctx_info {
- __u16 version;
- __u16 type;
- __u32 size;
+ u16 version;
+ u16 type;
+ u32 size;
};
-/* Old Memory Error Section UEFI 2.1, 2.2 */
+/* Old Memory Error Section, UEFI v2.1, v2.2 */
struct cper_sec_mem_err_old {
- __u64 validation_bits;
- __u64 error_status;
- __u64 physical_addr;
- __u64 physical_addr_mask;
- __u16 node;
- __u16 card;
- __u16 module;
- __u16 bank;
- __u16 device;
- __u16 row;
- __u16 column;
- __u16 bit_pos;
- __u64 requestor_id;
- __u64 responder_id;
- __u64 target_id;
- __u8 error_type;
+ u64 validation_bits;
+ u64 error_status;
+ u64 physical_addr;
+ u64 physical_addr_mask;
+ u16 node;
+ u16 card;
+ u16 module;
+ u16 bank;
+ u16 device;
+ u16 row;
+ u16 column;
+ u16 bit_pos;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 target_id;
+ u8 error_type;
};
-/* Memory Error Section UEFI >= 2.3 */
+/* Memory Error Section (UEFI >= v2.3), UEFI v2.7 sec N.2.5 */
struct cper_sec_mem_err {
- __u64 validation_bits;
- __u64 error_status;
- __u64 physical_addr;
- __u64 physical_addr_mask;
- __u16 node;
- __u16 card;
- __u16 module;
- __u16 bank;
- __u16 device;
- __u16 row;
- __u16 column;
- __u16 bit_pos;
- __u64 requestor_id;
- __u64 responder_id;
- __u64 target_id;
- __u8 error_type;
- __u8 reserved;
- __u16 rank;
- __u16 mem_array_handle; /* card handle in UEFI 2.4 */
- __u16 mem_dev_handle; /* module handle in UEFI 2.4 */
+ u64 validation_bits;
+ u64 error_status;
+ u64 physical_addr;
+ u64 physical_addr_mask;
+ u16 node;
+ u16 card;
+ u16 module;
+ u16 bank;
+ u16 device;
+ u16 row;
+ u16 column;
+ u16 bit_pos;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 target_id;
+ u8 error_type;
+ u8 reserved;
+ u16 rank;
+ u16 mem_array_handle; /* "card handle" in UEFI 2.4 */
+ u16 mem_dev_handle; /* "module handle" in UEFI 2.4 */
};
struct cper_mem_err_compact {
- __u64 validation_bits;
- __u16 node;
- __u16 card;
- __u16 module;
- __u16 bank;
- __u16 device;
- __u16 row;
- __u16 column;
- __u16 bit_pos;
- __u64 requestor_id;
- __u64 responder_id;
- __u64 target_id;
- __u16 rank;
- __u16 mem_array_handle;
- __u16 mem_dev_handle;
+ u64 validation_bits;
+ u16 node;
+ u16 card;
+ u16 module;
+ u16 bank;
+ u16 device;
+ u16 row;
+ u16 column;
+ u16 bit_pos;
+ u64 requestor_id;
+ u64 responder_id;
+ u64 target_id;
+ u16 rank;
+ u16 mem_array_handle;
+ u16 mem_dev_handle;
};
+/* PCI Express Error Section, UEFI v2.7 sec N.2.7 */
struct cper_sec_pcie {
- __u64 validation_bits;
- __u32 port_type;
+ u64 validation_bits;
+ u32 port_type;
struct {
- __u8 minor;
- __u8 major;
- __u8 reserved[2];
+ u8 minor;
+ u8 major;
+ u8 reserved[2];
} version;
- __u16 command;
- __u16 status;
- __u32 reserved;
+ u16 command;
+ u16 status;
+ u32 reserved;
struct {
- __u16 vendor_id;
- __u16 device_id;
- __u8 class_code[3];
- __u8 function;
- __u8 device;
- __u16 segment;
- __u8 bus;
- __u8 secondary_bus;
- __u16 slot;
- __u8 reserved;
+ u16 vendor_id;
+ u16 device_id;
+ u8 class_code[3];
+ u8 function;
+ u8 device;
+ u16 segment;
+ u8 bus;
+ u8 secondary_bus;
+ u16 slot;
+ u8 reserved;
} device_id;
struct {
- __u32 lower;
- __u32 upper;
+ u32 lower;
+ u32 upper;
} serial_number;
struct {
- __u16 secondary_status;
- __u16 control;
+ u16 secondary_status;
+ u16 control;
} bridge;
- __u8 capability[60];
- __u8 aer_info[96];
+ u8 capability[60];
+ u8 aer_info[96];
};
/* Reset to default packing */
#pragma pack()
-extern const char * const cper_proc_error_type_strs[4];
+extern const char *const cper_proc_error_type_strs[4];
u64 cper_next_record_id(void);
const char *cper_severity_str(unsigned int);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 5041357d0297..3813fe45effd 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -57,6 +57,8 @@ extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_l1tf(struct device *dev,
struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_mds(struct device *dev,
+ struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
@@ -137,9 +139,26 @@ static inline int disable_nonboot_cpus(void)
return freeze_secondary_cpus(0);
}
extern void enable_nonboot_cpus(void);
+
+static inline int suspend_disable_secondary_cpus(void)
+{
+ int cpu = 0;
+
+ if (IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU))
+ cpu = -1;
+
+ return freeze_secondary_cpus(cpu);
+}
+static inline void suspend_enable_secondary_cpus(void)
+{
+ return enable_nonboot_cpus();
+}
+
#else /* !CONFIG_PM_SLEEP_SMP */
static inline int disable_nonboot_cpus(void) { return 0; }
static inline void enable_nonboot_cpus(void) {}
+static inline int suspend_disable_secondary_cpus(void) { return 0; }
+static inline void suspend_enable_secondary_cpus(void) { }
#endif /* !CONFIG_PM_SLEEP_SMP */
void cpu_startup_entry(enum cpuhp_state state);
@@ -175,6 +194,7 @@ enum cpuhp_smt_control {
CPU_SMT_DISABLED,
CPU_SMT_FORCE_DISABLED,
CPU_SMT_NOT_SUPPORTED,
+ CPU_SMT_NOT_IMPLEMENTED,
};
#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
@@ -182,9 +202,33 @@ extern enum cpuhp_smt_control cpu_smt_control;
extern void cpu_smt_disable(bool force);
extern void cpu_smt_check_topology(void);
#else
-# define cpu_smt_control (CPU_SMT_ENABLED)
+# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED)
static inline void cpu_smt_disable(bool force) { }
static inline void cpu_smt_check_topology(void) { }
#endif
+/*
+ * These are used for a global "mitigations=" cmdline option for toggling
+ * optional CPU mitigations.
+ */
+enum cpu_mitigations {
+ CPU_MITIGATIONS_OFF,
+ CPU_MITIGATIONS_AUTO,
+ CPU_MITIGATIONS_AUTO_NOSMT,
+};
+
+extern enum cpu_mitigations cpu_mitigations;
+
+/* mitigations=off */
+static inline bool cpu_mitigations_off(void)
+{
+ return cpu_mitigations == CPU_MITIGATIONS_OFF;
+}
+
+/* mitigations=auto,nosmt */
+static inline bool cpu_mitigations_auto_nosmt(void)
+{
+ return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
+}
+
#endif /* _LINUX_CPU_H_ */
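
The new cpu_mitigations_off()/cpu_mitigations_auto_nosmt() helpers are meant to be consulted by architecture mitigation-selection code. The sketch below only shows the intended pattern; the foo_* routines are hypothetical stand-ins for real vulnerability checks and mitigation hooks.

#include <linux/cpu.h>
#include <linux/init.h>

/* Stand-ins for real architecture checks and mitigation hooks. */
static bool __init foo_vulnerable(void)
{
        return true;
}

static void __init foo_enable_mitigation(void) { }
static void __init foo_disable_smt_for_mitigation(void) { }

static void __init foo_mitigation_select(void)
{
        /* "mitigations=off" disables every optional mitigation. */
        if (cpu_mitigations_off() || !foo_vulnerable())
                return;

        foo_enable_mitigation();

        /* "mitigations=auto,nosmt" additionally asks for SMT to be disabled. */
        if (cpu_mitigations_auto_nosmt())
                foo_disable_smt_for_mitigation();
}
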
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index b160e98076e3..684caf067003 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -178,6 +178,11 @@ static inline struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { }
#endif
+static inline bool policy_is_inactive(struct cpufreq_policy *policy)
+{
+ return cpumask_empty(policy->cpus);
+}
+
static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
return cpumask_weight(policy->cpus) > 1;
@@ -193,8 +198,14 @@ unsigned int cpufreq_quick_get_max(unsigned int cpu);
void disable_cpufreq(void);
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
+
+struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
+void cpufreq_cpu_release(struct cpufreq_policy *policy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
+int cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_policy *new_policy);
void cpufreq_update_policy(unsigned int cpu);
+void cpufreq_update_limits(unsigned int cpu);
bool have_governor_per_policy(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
@@ -322,6 +333,9 @@ struct cpufreq_driver {
/* should be defined, if possible */
unsigned int (*get)(unsigned int cpu);
+ /* Called to update policy limits on firmware notifications. */
+ void (*update_limits)(unsigned int cpu);
+
/* optional */
int (*bios_limit)(int cpu, unsigned int *limit);
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index e78281d07b70..6a381594608c 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -147,6 +147,7 @@ enum cpuhp_state {
CPUHP_AP_X86_VDSO_VMA_ONLINE,
CPUHP_AP_IRQ_AFFINITY_ONLINE,
CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS,
+ CPUHP_AP_X86_INTEL_EPB_ONLINE,
CPUHP_AP_PERF_ONLINE,
CPUHP_AP_PERF_X86_ONLINE,
CPUHP_AP_PERF_X86_UNCORE_ONLINE,
@@ -170,6 +171,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
+ CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
CPUHP_AP_WATCHDOG_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 3b39472324a3..bb9a0db89f1a 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -83,6 +83,7 @@ struct cpuidle_device {
unsigned int use_deepest_state:1;
unsigned int poll_time_limit:1;
unsigned int cpu;
+ ktime_t next_hrtimer;
int last_residency;
struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX];
diff --git a/include/linux/cred.h b/include/linux/cred.h
index ddd45bb74887..efb6edf32de7 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -138,7 +138,7 @@ struct cred {
#ifdef CONFIG_KEYS
unsigned char jit_keyring; /* default keyring to attach requested
* keys to */
- struct key __rcu *session_keyring; /* keyring inherited over fork */
+ struct key *session_keyring; /* keyring inherited over fork */
struct key *process_keyring; /* keyring private to this process */
struct key *thread_keyring; /* keyring private to this thread */
struct key *request_key_auth; /* assumed request_key authority */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 60996e64c579..f14e587c5d5d 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -176,7 +176,6 @@ struct dentry_operations {
* typically using d_splice_alias. */
#define DCACHE_REFERENCED 0x00000040 /* Recently used, don't discard. */
-#define DCACHE_RCUACCESS 0x00000080 /* Entry has ever been RCU-visible */
#define DCACHE_CANT_MOUNT 0x00000100
#define DCACHE_GENOCIDE 0x00000200
@@ -212,11 +211,12 @@ struct dentry_operations {
#define DCACHE_MAY_FREE 0x00800000
#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */
-#define DCACHE_ENCRYPTED_WITH_KEY 0x02000000 /* dir is encrypted with a valid key */
+#define DCACHE_ENCRYPTED_NAME 0x02000000 /* Encrypted name (dir key was unavailable) */
#define DCACHE_OP_REAL 0x04000000
#define DCACHE_PAR_LOOKUP 0x10000000 /* being looked up (with parent locked shared) */
#define DCACHE_DENTRY_CURSOR 0x20000000
+#define DCACHE_NORCU 0x40000000 /* No RCU delay for freeing */
extern seqlock_t rename_lock;
@@ -235,7 +235,6 @@ extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op
/* allocate/de-allocate */
extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
extern struct dentry * d_alloc_anon(struct super_block *);
-extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
wait_queue_head_t *);
extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
@@ -594,7 +593,7 @@ static inline struct inode *d_real_inode(const struct dentry *dentry)
}
struct name_snapshot {
- const unsigned char *name;
+ struct qstr name;
unsigned char inline_name[DNAME_INLINE_LEN];
};
void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *);
diff --git a/include/linux/device.h b/include/linux/device.h
index b425a7ee04ce..e85264fb6616 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -49,8 +49,6 @@ struct bus_attribute {
ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
};
-#define BUS_ATTR(_name, _mode, _show, _store) \
- struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
#define BUS_ATTR_RW(_name) \
struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
#define BUS_ATTR_RO(_name) \
@@ -978,18 +976,14 @@ struct dev_links_info {
* a higher-level representation of the device.
*/
struct device {
+ struct kobject kobj;
struct device *parent;
struct device_private *p;
- struct kobject kobj;
const char *init_name; /* initial name of the device */
const struct device_type *type;
- struct mutex mutex; /* mutex to synchronize calls to
- * its driver.
- */
-
struct bus_type *bus; /* type of bus device is on */
struct device_driver *driver; /* which driver has allocated this
device */
@@ -997,6 +991,10 @@ struct device {
core doesn't touch it */
void *driver_data; /* Driver data, set and get with
dev_set_drvdata/dev_get_drvdata */
+ struct mutex mutex; /* mutex to synchronize calls to
+ * its driver.
+ */
+
struct dev_links_info links;
struct dev_pm_info power;
struct dev_pm_domain *pm_domain;
@@ -1011,9 +1009,6 @@ struct device {
struct list_head msi_list;
#endif
-#ifdef CONFIG_NUMA
- int numa_node; /* NUMA node this device is close to */
-#endif
const struct dma_map_ops *dma_ops;
u64 *dma_mask; /* dma mask (if dma'able device) */
u64 coherent_dma_mask;/* Like dma_mask, but for
@@ -1042,6 +1037,9 @@ struct device {
struct device_node *of_node; /* associated device tree node */
struct fwnode_handle *fwnode; /* firmware device node */
+#ifdef CONFIG_NUMA
+ int numa_node; /* NUMA node this device is close to */
+#endif
dev_t devt; /* dev_t, creates the sysfs "dev" */
u32 id; /* device instance */
@@ -1231,7 +1229,7 @@ static inline void device_lock_assert(struct device *dev)
static inline struct device_node *dev_of_node(struct device *dev)
{
- if (!IS_ENABLED(CONFIG_OF))
+ if (!IS_ENABLED(CONFIG_OF) || !dev)
return NULL;
return dev->of_node;
}
diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h
new file mode 100644
index 000000000000..934a442db8ac
--- /dev/null
+++ b/include/linux/dma-fence-chain.h
@@ -0,0 +1,81 @@
+/*
+ * fence-chain: chain fences together in a timeline
+ *
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ * Authors:
+ * Christian König <christian.koenig@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __LINUX_DMA_FENCE_CHAIN_H
+#define __LINUX_DMA_FENCE_CHAIN_H
+
+#include <linux/dma-fence.h>
+#include <linux/irq_work.h>
+
+/**
+ * struct dma_fence_chain - fence to represent a node of a fence chain
+ * @base: fence base class
+ * @lock: spinlock for fence handling
+ * @prev: previous fence of the chain
+ * @prev_seqno: original previous seqno before garbage collection
+ * @fence: encapsulated fence
+ * @cb: callback structure for signaling
+ * @work: irq work item for signaling
+ */
+struct dma_fence_chain {
+ struct dma_fence base;
+ spinlock_t lock;
+ struct dma_fence __rcu *prev;
+ u64 prev_seqno;
+ struct dma_fence *fence;
+ struct dma_fence_cb cb;
+ struct irq_work work;
+};
+
+extern const struct dma_fence_ops dma_fence_chain_ops;
+
+/**
+ * to_dma_fence_chain - cast a fence to a dma_fence_chain
+ * @fence: fence to cast to a dma_fence_chain
+ *
+ * Returns NULL if the fence is not a dma_fence_chain,
+ * or the dma_fence_chain otherwise.
+ */
+static inline struct dma_fence_chain *
+to_dma_fence_chain(struct dma_fence *fence)
+{
+ if (!fence || fence->ops != &dma_fence_chain_ops)
+ return NULL;
+
+ return container_of(fence, struct dma_fence_chain, base);
+}
+
+/**
+ * dma_fence_chain_for_each - iterate over all fences in chain
+ * @iter: current fence
+ * @head: starting point
+ *
+ * Iterate over all fences in the chain. A reference to the current fence is
+ * held while inside the loop and must be dropped when breaking out of it.
+ */
+#define dma_fence_chain_for_each(iter, head) \
+ for (iter = dma_fence_get(head); iter; \
+ iter = dma_fence_chain_walk(iter))
+
+struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence);
+int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno);
+void dma_fence_chain_init(struct dma_fence_chain *chain,
+ struct dma_fence *prev,
+ struct dma_fence *fence,
+ uint64_t seqno);
+
+#endif /* __LINUX_DMA_FENCE_CHAIN_H */
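
Because dma_fence_chain_walk() transfers the reference as it advances, code that leaves dma_fence_chain_for_each() early still owns a reference on the current element. A short sketch of that pattern, assuming the caller already holds a reference on head:

#include <linux/dma-fence-chain.h>

/*
 * Return the first unsignaled fence in the chain with a reference held
 * (the caller must dma_fence_put() it), or NULL if everything signaled.
 */
static struct dma_fence *first_unsignaled(struct dma_fence *head)
{
        struct dma_fence *fence;

        dma_fence_chain_for_each(fence, head) {
                if (!dma_fence_is_signaled(fence))
                        return fence;   /* keeps the iterator's reference */
        }

        return NULL;    /* normal loop exit already dropped the reference */
}
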
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 6b788467b2e3..974717d6ac0c 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -112,6 +112,14 @@ struct dma_fence_cb {
*/
struct dma_fence_ops {
/**
+ * @use_64bit_seqno:
+ *
+ * True if this dma_fence implementation uses 64bit seqno, false
+ * otherwise.
+ */
+ bool use_64bit_seqno;
+
+ /**
* @get_driver_name:
*
* Returns the driver name. This is a callback to allow drivers to
@@ -410,18 +418,19 @@ dma_fence_is_signaled(struct dma_fence *fence)
* __dma_fence_is_later - return if f1 is chronologically later than f2
* @f1: the first fence's seqno
* @f2: the second fence's seqno from the same context
+ * @ops: dma_fence_ops associated with the seqno
*
* Returns true if f1 is chronologically later than f2. Both fences must be
* from the same context, since a seqno is not common across contexts.
*/
-static inline bool __dma_fence_is_later(u64 f1, u64 f2)
+static inline bool __dma_fence_is_later(u64 f1, u64 f2,
+ const struct dma_fence_ops *ops)
{
/* This is for backward compatibility with drivers which can only handle
- * 32bit sequence numbers. Use a 64bit compare when any of the higher
- * bits are none zero, otherwise use a 32bit compare with wrap around
- * handling.
+ * 32bit sequence numbers. Use a 64bit compare when the driver says to
+ * do so.
*/
- if (upper_32_bits(f1) || upper_32_bits(f2))
+ if (ops->use_64bit_seqno)
return f1 > f2;
return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0;
@@ -441,7 +450,7 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
if (WARN_ON(f1->context != f2->context))
return false;
- return __dma_fence_is_later(f1->seqno, f2->seqno);
+ return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops);
}
/**
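
A small worked example of the comparison change: with the default 32-bit wrap-around compare, seqno 2 is treated as later than 0xfffffffe, whereas an ops with use_64bit_seqno set compares the full values. The two ops structs below are empty stand-ins used only to select the comparison mode.

#include <linux/bug.h>
#include <linux/dma-fence.h>

/* Illustrative stand-ins: only the use_64bit_seqno flag matters here. */
static const struct dma_fence_ops old_style_ops;        /* use_64bit_seqno is false */
static const struct dma_fence_ops new_style_ops = {
        .use_64bit_seqno = true,
};

static void seqno_compare_demo(void)
{
        /* 32-bit wrap-around compare: 2 is considered later than 0xfffffffe. */
        WARN_ON(!__dma_fence_is_later(0x2, 0xfffffffe, &old_style_ops));

        /* Full 64-bit compare: 2 < 0xfffffffe, so it is not later. */
        WARN_ON(__dma_fence_is_later(0x2, 0xfffffffe, &new_style_ops));
}
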
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 75e60be91e5f..6309a721394b 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -267,9 +267,9 @@ size_t dma_direct_max_mapping_size(struct device *dev);
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
- if (dev && dev->dma_ops)
+ if (dev->dma_ops)
return dev->dma_ops;
- return get_arch_dma_ops(dev ? dev->bus : NULL);
+ return get_arch_dma_ops(dev->bus);
}
static inline void set_dma_ops(struct device *dev,
@@ -650,7 +650,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
static inline u64 dma_get_mask(struct device *dev)
{
- if (dev && dev->dma_mask && *dev->dma_mask)
+ if (dev->dma_mask && *dev->dma_mask)
return *dev->dma_mask;
return DMA_BIT_MASK(32);
}
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index 69b36ed31a99..9741767e400f 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -72,6 +72,12 @@ static inline void arch_sync_dma_for_cpu_all(struct device *dev)
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
+#ifdef CONFIG_ARCH_HAS_DMA_PREP_COHERENT
void arch_dma_prep_coherent(struct page *page, size_t size);
+#else
+static inline void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+}
+#endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */
#endif /* _LINUX_DMA_NONCOHERENT_H */
diff --git a/include/linux/dma/idma64.h b/include/linux/dma/idma64.h
new file mode 100644
index 000000000000..621cfae60554
--- /dev/null
+++ b/include/linux/dma/idma64.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions for the Intel integrated DMA 64-bit
+ *
+ * Copyright (C) 2019 Intel Corporation
+ */
+
+#ifndef __LINUX_DMA_IDMA64_H__
+#define __LINUX_DMA_IDMA64_H__
+
+/* Platform driver name */
+#define LPSS_IDMA64_DRIVER_NAME "idma64"
+
+#endif /* __LINUX_DMA_IDMA64_H__ */
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index c46fdb36700b..8de8c4f15163 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -102,9 +102,7 @@ const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list);
extern const char * dmi_get_system_info(int field);
extern const struct dmi_device * dmi_find_device(int type, const char *name,
const struct dmi_device *from);
-extern void dmi_scan_machine(void);
-extern void dmi_memdev_walk(void);
-extern void dmi_set_dump_stack_arch_desc(void);
+extern void dmi_setup(void);
extern bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp);
extern int dmi_get_bios_year(void);
extern int dmi_name_in_vendors(const char *str);
@@ -122,9 +120,7 @@ static inline int dmi_check_system(const struct dmi_system_id *list) { return 0;
static inline const char * dmi_get_system_info(int field) { return NULL; }
static inline const struct dmi_device * dmi_find_device(int type, const char *name,
const struct dmi_device *from) { return NULL; }
-static inline void dmi_scan_machine(void) { return; }
-static inline void dmi_memdev_walk(void) { }
-static inline void dmi_set_dump_stack_arch_desc(void) { }
+static inline void dmi_setup(void) { }
static inline bool dmi_get_date(int field, int *yearp, int *monthp, int *dayp)
{
if (yearp)
diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h
new file mode 100644
index 000000000000..3911e0586478
--- /dev/null
+++ b/include/linux/dsa/8021q.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+
+#ifndef _NET_DSA_8021Q_H
+#define _NET_DSA_8021Q_H
+
+#include <linux/types.h>
+
+struct dsa_switch;
+struct sk_buff;
+struct net_device;
+struct packet_type;
+
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q)
+
+int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index,
+ bool enabled);
+
+struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
+ u16 tpid, u16 tci);
+
+struct sk_buff *dsa_8021q_rcv(struct sk_buff *skb, struct net_device *netdev,
+ struct packet_type *pt, u16 *tpid, u16 *tci);
+
+u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port);
+
+u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port);
+
+int dsa_8021q_rx_switch_id(u16 vid);
+
+int dsa_8021q_rx_source_port(u16 vid);
+
+#else
+
+static inline int dsa_port_setup_8021q_tagging(struct dsa_switch *ds,
+ int index, bool enabled)
+{
+ return 0;
+}
+
+static inline struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb,
+ struct net_device *netdev, u16 tpid, u16 tci)
+{
+ return NULL;
+}
+
+static inline struct sk_buff *dsa_8021q_rcv(struct sk_buff *skb,
+ struct net_device *netdev, struct packet_type *pt, u16 *tpid, u16 *tci)
+{
+ return NULL;
+}
+
+static inline u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port)
+{
+ return 0;
+}
+
+static inline u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port)
+{
+ return 0;
+}
+
+static inline int dsa_8021q_rx_switch_id(u16 vid)
+{
+ return 0;
+}
+
+static inline int dsa_8021q_rx_source_port(u16 vid)
+{
+ return 0;
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */
+
+#endif /* _NET_DSA_8021Q_H */
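
The tagging scheme encodes the switch index and source port into the receive VLAN ID, which the tagger later decodes with dsa_8021q_rx_switch_id()/dsa_8021q_rx_source_port(). A sketch of that round trip, assuming ds is a registered dsa_switch:

#include <linux/bug.h>
#include <linux/dsa/8021q.h>
#include <net/dsa.h>

static void dsa_8021q_roundtrip_demo(struct dsa_switch *ds, int port)
{
        u16 rx_vid = dsa_8021q_rx_vid(ds, port);

        /* Decoding the RX VID recovers the originating switch and port. */
        WARN_ON(dsa_8021q_rx_switch_id(rx_vid) != ds->index);
        WARN_ON(dsa_8021q_rx_source_port(rx_vid) != port);
}
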
diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h
new file mode 100644
index 000000000000..603a02e5a8cb
--- /dev/null
+++ b/include/linux/dsa/sja1105.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+
+/* Included by drivers/net/dsa/sja1105/sja1105.h and net/dsa/tag_sja1105.c */
+
+#ifndef _NET_DSA_SJA1105_H
+#define _NET_DSA_SJA1105_H
+
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <net/dsa.h>
+
+#define ETH_P_SJA1105 ETH_P_DSA_8021Q
+
+/* IEEE 802.3 Annex 57A: Slow Protocols PDUs (01:80:C2:xx:xx:xx) */
+#define SJA1105_LINKLOCAL_FILTER_A 0x0180C2000000ull
+#define SJA1105_LINKLOCAL_FILTER_A_MASK 0xFFFFFF000000ull
+/* IEEE 1588 Annex F: Transport of PTP over Ethernet (01:1B:19:xx:xx:xx) */
+#define SJA1105_LINKLOCAL_FILTER_B 0x011B19000000ull
+#define SJA1105_LINKLOCAL_FILTER_B_MASK 0xFFFFFF000000ull
+
+enum sja1105_frame_type {
+ SJA1105_FRAME_TYPE_NORMAL = 0,
+ SJA1105_FRAME_TYPE_LINK_LOCAL,
+};
+
+struct sja1105_skb_cb {
+ enum sja1105_frame_type type;
+};
+
+#define SJA1105_SKB_CB(skb) \
+ ((struct sja1105_skb_cb *)DSA_SKB_CB_PRIV(skb))
+
+struct sja1105_port {
+ struct dsa_port *dp;
+ int mgmt_slot;
+};
+
+#endif /* _NET_DSA_SJA1105_H */
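
The filter/mask pairs above classify frames as link-local by destination MAC. A sketch of the comparison a tagger might perform (the function name is illustrative; skb is assumed to carry a valid Ethernet header):

#include <linux/dsa/sja1105.h>

static bool sja1105_is_link_local_demo(const struct sk_buff *skb)
{
        const struct ethhdr *hdr = eth_hdr(skb);
        u64 dmac = ether_addr_to_u64(hdr->h_dest);

        if ((dmac & SJA1105_LINKLOCAL_FILTER_A_MASK) ==
            SJA1105_LINKLOCAL_FILTER_A)
                return true;
        if ((dmac & SJA1105_LINKLOCAL_FILTER_B_MASK) ==
            SJA1105_LINKLOCAL_FILTER_B)
                return true;

        return false;
}
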
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index c2be029b9b53..6c809440f319 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -71,6 +71,13 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor,
const struct net_device *dev,
const char *fmt, ...);
+struct ib_device;
+
+extern __printf(3, 4)
+void __dynamic_ibdev_dbg(struct _ddebug *descriptor,
+ const struct ib_device *ibdev,
+ const char *fmt, ...);
+
#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
static struct _ddebug __aligned(8) \
__attribute__((section("__verbose"))) name = { \
@@ -154,6 +161,10 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor,
_dynamic_func_call(fmt, __dynamic_netdev_dbg, \
dev, fmt, ##__VA_ARGS__)
+#define dynamic_ibdev_dbg(dev, fmt, ...) \
+ _dynamic_func_call(fmt, __dynamic_ibdev_dbg, \
+ dev, fmt, ##__VA_ARGS__)
+
#define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
groupsize, buf, len, ascii) \
_dynamic_func_call_no_desc(__builtin_constant_p(prefix_str) ? prefix_str : "hexdump", \
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 54357a258b35..6ebc2098cfe1 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1611,7 +1611,12 @@ efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
struct screen_info *si, efi_guid_t *proto,
unsigned long size);
-bool efi_runtime_disabled(void);
+#ifdef CONFIG_EFI
+extern bool efi_runtime_disabled(void);
+#else
+static inline bool efi_runtime_disabled(void) { return true; }
+#endif
+
extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
extern unsigned long efi_call_virt_save_flags(void);
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 2e9e2763bf47..6e8bc53740f0 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -31,6 +31,7 @@ struct elevator_mq_ops {
void (*exit_sched)(struct elevator_queue *);
int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
+ void (*depth_updated)(struct blk_mq_hw_ctx *);
bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index e2f3b21cd72a..25c0d049336f 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -33,7 +33,7 @@ struct device;
int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
unsigned char *arch_get_platform_mac_address(void);
int nvmem_get_mac_address(struct device *dev, void *addrbuf);
-u32 eth_get_headlen(void *data, unsigned int max_len);
+u32 eth_get_headlen(const struct net_device *dev, void *data, unsigned int len);
__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
extern const struct header_ops eth_header_ops;
@@ -449,6 +449,18 @@ static inline void eth_addr_dec(u8 *addr)
}
/**
+ * eth_addr_inc() - Increment the given MAC address.
+ * @addr: Pointer to a six-byte array containing Ethernet address to increment.
+ */
+static inline void eth_addr_inc(u8 *addr)
+{
+ u64 u = ether_addr_to_u64(addr);
+
+ u++;
+ u64_to_ether_addr(u, addr);
+}
+
+/**
* is_etherdev_addr - Tell if given Ethernet address belongs to the device.
* @dev: Pointer to a device structure
* @addr: Pointer to a six-byte array containing the Ethernet address
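
eth_addr_inc() mirrors the existing eth_addr_dec(). A small sketch of deriving consecutive addresses from a base address, for example for a set of hypothetical virtual ports; addresses are assumed u16-aligned as ether_addr_copy() requires, and overflow into the multicast range is not handled here:

#include <linux/etherdevice.h>

static void assign_port_addrs_demo(const u8 *base, u8 addrs[][ETH_ALEN],
                                   int count)
{
        int i;

        if (count < 1)
                return;

        ether_addr_copy(addrs[0], base);
        for (i = 1; i < count; i++) {
                ether_addr_copy(addrs[i], addrs[i - 1]);
                eth_addr_inc(addrs[i]);         /* base + i */
        }
}
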
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index f5740423b002..65559900d4d7 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -164,6 +164,10 @@ struct f2fs_checkpoint {
unsigned char sit_nat_version_bitmap[1];
} __packed;
+#define CP_CHKSUM_OFFSET 4092 /* default chksum offset in checkpoint */
+#define CP_MIN_CHKSUM_OFFSET \
+ (offsetof(struct f2fs_checkpoint, sit_nat_version_bitmap))
+
/*
* For orphan inode management
*/
@@ -198,11 +202,12 @@ struct f2fs_extent {
get_extra_isize(inode))
#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
#define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
-#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
+#define DEF_ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
+#define ADDRS_PER_BLOCK(inode) addrs_per_block(inode)
#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
#define ADDRS_PER_PAGE(page, inode) \
- (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK)
+ (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK(inode))
#define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1)
#define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2)
@@ -267,7 +272,7 @@ struct f2fs_inode {
} __packed;
struct direct_node {
- __le32 addr[ADDRS_PER_BLOCK]; /* array of data block address */
+ __le32 addr[DEF_ADDRS_PER_BLOCK]; /* array of data block address */
} __packed;
struct indirect_node {
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 6074aa064b54..7148bab96943 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -20,6 +20,7 @@
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
#include <net/sch_generic.h>
@@ -33,6 +34,8 @@ struct bpf_prog_aux;
struct xdp_rxq_info;
struct xdp_buff;
struct sock_reuseport;
+struct ctl_table;
+struct ctl_table_header;
/* ArgX, context and stack frame pointer register positions. Note,
* Arg1, Arg2, Arg3, etc are used as argument mappings of function
@@ -503,7 +506,6 @@ struct bpf_prog {
u16 pages; /* Number of allocated pages */
u16 jited:1, /* Is our filter JIT'ed? */
jit_requested:1,/* archs need to JIT the prog */
- undo_set_mem:1, /* Passed set_memory_ro() checkpoint */
gpl_compatible:1, /* Is filter GPL compatible? */
cb_access:1, /* Is control block accessed? */
dst_needed:1, /* Do we need dst entry? */
@@ -733,24 +735,15 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
- fp->undo_set_mem = 1;
+ set_vm_flush_reset_perms(fp);
set_memory_ro((unsigned long)fp, fp->pages);
}
-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
-{
- if (fp->undo_set_mem)
- set_memory_rw((unsigned long)fp, fp->pages);
-}
-
static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
+ set_vm_flush_reset_perms(hdr);
set_memory_ro((unsigned long)hdr, hdr->pages);
-}
-
-static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
-{
- set_memory_rw((unsigned long)hdr, hdr->pages);
+ set_memory_x((unsigned long)hdr, hdr->pages);
}
static inline struct bpf_binary_header *
@@ -788,7 +781,6 @@ void __bpf_prog_free(struct bpf_prog *fp);
static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
- bpf_prog_unlock_ro(fp);
__bpf_prog_free(fp);
}
@@ -1177,4 +1169,18 @@ struct bpf_sock_ops_kern {
*/
};
+struct bpf_sysctl_kern {
+ struct ctl_table_header *head;
+ struct ctl_table *table;
+ void *cur_val;
+ size_t cur_len;
+ void *new_val;
+ size_t new_len;
+ int new_updated;
+ int write;
+ loff_t *ppos;
+ /* Temporary "register" for indirect stores to ppos. */
+ u64 tmp_reg;
+};
+
#endif /* __LINUX_FILTER_H__ */
diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h
index 5be5dab50b13..01684d935580 100644
--- a/include/linux/firmware/intel/stratix10-smc.h
+++ b/include/linux/firmware/intel/stratix10-smc.h
@@ -309,4 +309,23 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
#define INTEL_SIP_SMC_FUNCID_RSU_UPDATE 12
#define INTEL_SIP_SMC_RSU_UPDATE \
INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_RSU_UPDATE)
+
+/*
+ * Request INTEL_SIP_SMC_ECC_DBE
+ *
+ * Sync call used by service driver at EL1 to alert EL3 that a Double
+ * Bit ECC error has occurred.
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_ECC_DBE
+ * a1 SysManager Double Bit Error value
+ * a2-7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK
+ */
+#define INTEL_SIP_SMC_FUNCID_ECC_DBE 13
+#define INTEL_SIP_SMC_ECC_DBE \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_ECC_DBE)
+
#endif
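
The register usage documented for INTEL_SIP_SMC_ECC_DBE maps directly onto an arm_smccc_smc() call. A hedged sketch follows; the function name and the origin of dbe_value are assumptions, not part of this header.

#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/firmware/intel/stratix10-smc.h>

static int stratix10_report_ecc_dbe_demo(u64 dbe_value)
{
        struct arm_smccc_res res;

        /* a0 = function ID, a1 = SysManager Double Bit Error value */
        arm_smccc_smc(INTEL_SIP_SMC_ECC_DBE, dbe_value, 0, 0, 0, 0, 0, 0,
                      &res);

        return res.a0 == INTEL_SIP_SMC_STATUS_OK ? 0 : -EIO;
}
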
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 8b42df09b04c..f7fdfe93e25d 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -158,14 +158,20 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define FMODE_OPENED ((__force fmode_t)0x80000)
#define FMODE_CREATED ((__force fmode_t)0x100000)
+/* File is stream-like */
+#define FMODE_STREAM ((__force fmode_t)0x200000)
+
/* File was opened by fanotify and shouldn't generate fanotify events */
#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
/* File is capable of returning -EAGAIN if I/O will block */
-#define FMODE_NOWAIT ((__force fmode_t)0x8000000)
+#define FMODE_NOWAIT ((__force fmode_t)0x8000000)
+
+/* File represents mount that needs unmounting */
+#define FMODE_NEED_UNMOUNT ((__force fmode_t)0x10000000)
/* File does not contribute to nr_files count */
-#define FMODE_NOACCOUNT ((__force fmode_t)0x20000000)
+#define FMODE_NOACCOUNT ((__force fmode_t)0x20000000)
/*
* Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
@@ -691,7 +697,10 @@ struct inode {
#ifdef CONFIG_IMA
atomic_t i_readcount; /* struct files open RO */
#endif
- const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
+ union {
+ const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
+ void (*free_inode)(struct inode *);
+ };
struct file_lock_context *i_flctx;
struct address_space i_data;
struct list_head i_devices;
@@ -966,7 +975,6 @@ static inline struct file *get_file(struct file *f)
#define get_file_rcu_many(x, cnt) \
atomic_long_add_unless(&(x)->f_count, (cnt), 0)
#define get_file_rcu(x) get_file_rcu_many((x), 1)
-#define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
#define file_count(x) atomic_long_read(&(x)->f_count)
#define MAX_NON_LFS ((1UL<<31) - 1)
@@ -1090,6 +1098,7 @@ struct file_lock {
struct {
struct list_head link; /* link in AFS vnode's pending_locks list */
int state; /* state of grant or error if -ve */
+ unsigned int debug_id;
} afs;
} fl_u;
} __randomize_layout;
@@ -1900,6 +1909,7 @@ extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
struct super_operations {
struct inode *(*alloc_inode)(struct super_block *sb);
void (*destroy_inode)(struct inode *);
+ void (*free_inode)(struct inode *);
void (*dirty_inode) (struct inode *, int flags);
int (*write_inode) (struct inode *, struct writeback_control *wbc);
@@ -1953,6 +1963,7 @@ struct super_operations {
#define S_DAX 0 /* Make all the DAX code disappear */
#endif
#define S_ENCRYPTED 16384 /* Encrypted file (using fs/crypto/) */
+#define S_CASEFOLD 32768 /* Casefolded file */
/*
* Note that nosuid etc flags are inode-specific: setting some file-system
@@ -1993,6 +2004,7 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags
#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
#define IS_DAX(inode) ((inode)->i_flags & S_DAX)
#define IS_ENCRYPTED(inode) ((inode)->i_flags & S_ENCRYPTED)
+#define IS_CASEFOLDED(inode) ((inode)->i_flags & S_CASEFOLD)
#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
(inode)->i_rdev == WHITEOUT_DEV)
@@ -2782,6 +2794,9 @@ extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
int datasync);
extern int vfs_fsync(struct file *file, int datasync);
+extern int sync_file_range(struct file *file, loff_t offset, loff_t nbytes,
+ unsigned int flags);
+
/*
* Sync the bytes written if this was a synchronous write. Expect ki_pos
* to already be updated for the write, and will return either the amount
@@ -3074,6 +3089,7 @@ extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t);
extern loff_t no_seek_end_llseek(struct file *, loff_t, int);
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
+extern int stream_open(struct inode * inode, struct file * filp);
#ifdef CONFIG_BLOCK
typedef void (dio_submit_t)(struct bio *bio, struct inode *inode,
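
Usage sketch for the new FMODE_STREAM/stream_open() pair (illustrative only; the driver names are invented): a character device whose file position is meaningless marks itself stream-like in ->open(), so the VFS ignores f_pos for it.

#include <linux/fs.h>
#include <linux/module.h>

/* Hypothetical ->open(): mark the file as stream-like (sets FMODE_STREAM). */
static int example_open(struct inode *inode, struct file *filp)
{
	return stream_open(inode, filp);
}

static const struct file_operations example_fops = {
	.owner  = THIS_MODULE,
	.open   = example_open,
	.llseek = no_llseek,
};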
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index eaca452088fa..1f966670c8dc 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -13,8 +13,10 @@
#define _LINUX_FS_CONTEXT_H
#include <linux/kernel.h>
+#include <linux/refcount.h>
#include <linux/errno.h>
#include <linux/security.h>
+#include <linux/mutex.h>
struct cred;
struct dentry;
@@ -35,6 +37,19 @@ enum fs_context_purpose {
};
/*
+ * Userspace usage phase for fsopen/fspick.
+ */
+enum fs_context_phase {
+ FS_CONTEXT_CREATE_PARAMS, /* Loading params for sb creation */
+ FS_CONTEXT_CREATING, /* A superblock is being created */
+ FS_CONTEXT_AWAITING_MOUNT, /* Superblock created, awaiting fsmount() */
+ FS_CONTEXT_AWAITING_RECONF, /* Awaiting initialisation for reconfiguration */
+ FS_CONTEXT_RECONF_PARAMS, /* Loading params for reconfiguration */
+ FS_CONTEXT_RECONFIGURING, /* Reconfiguring the superblock */
+ FS_CONTEXT_FAILED, /* Failed to correctly transition a context */
+};
+
+/*
* Type of parameter value.
*/
enum fs_value_type {
@@ -74,12 +89,14 @@ struct fs_parameter {
*/
struct fs_context {
const struct fs_context_operations *ops;
+ struct mutex uapi_mutex; /* Userspace access mutex */
struct file_system_type *fs_type;
void *fs_private; /* The filesystem's context */
struct dentry *root; /* The root and superblock */
struct user_namespace *user_ns; /* The user namespace for this mount */
struct net *net_ns; /* The network namespace for this mount */
const struct cred *cred; /* The mounter's credentials */
+ struct fc_log *log; /* Logging buffer */
const char *source; /* The source name (eg. dev path) */
const char *subtype; /* The subtype to set on the superblock */
void *security; /* Linux S&M options */
@@ -88,6 +105,7 @@ struct fs_context {
unsigned int sb_flags_mask; /* Superblock flags that were changed */
unsigned int lsm_flags; /* Information flags from the fs to the LSM */
enum fs_context_purpose purpose:8;
+ enum fs_context_phase phase:8; /* The phase the context is in */
bool need_free:1; /* Need to call ops->free() */
bool global:1; /* Goes into &init_user_ns */
};
@@ -135,15 +153,21 @@ extern int vfs_get_super(struct fs_context *fc,
extern const struct file_operations fscontext_fops;
-#ifdef CONFIG_PRINTK
+/*
+ * Mount error, warning and informational message logging. This structure is
+ * shareable between a mount and a subordinate mount.
+ */
+struct fc_log {
+ refcount_t usage;
+ u8 head; /* Insertion index in buffer[] */
+ u8 tail; /* Removal index in buffer[] */
+ u8 need_free; /* Mask of kfree'able items in buffer[] */
+ struct module *owner; /* Owner module for strings that don't then need freeing */
+ char *buffer[8];
+};
+
extern __attribute__((format(printf, 2, 3)))
void logfc(struct fs_context *fc, const char *fmt, ...);
-#else
-static inline __attribute__((format(printf, 2, 3)))
-void logfc(struct fs_context *fc, const char *fmt, ...)
-{
-}
-#endif
/**
* infof - Store supplementary informational message
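
A hedged sketch of how a filesystem might feed the new fc_log from its parameter parsing (the function and option names are invented; real filesystems usually go through the infof()/errorf() wrappers built on logfc()):

#include <linux/fs_context.h>
#include <linux/log2.h>

/* Hypothetical option check called from ->parse_param() or ->get_tree(). */
static int example_check_blocksize(struct fs_context *fc, unsigned int blocksize)
{
	if (!is_power_of_2(blocksize)) {
		/* Queued in fc->log and read back through the fsopen() fd. */
		logfc(fc, "example: invalid blocksize %u", blocksize);
		return -EINVAL;
	}
	return 0;
}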
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index e5194fc3983e..f7680ef1abd2 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -33,6 +33,7 @@ struct fscrypt_name {
u32 hash;
u32 minor_hash;
struct fscrypt_str crypto_buf;
+ bool is_ciphertext_name;
};
#define FSTR_INIT(n, l) { .name = n, .len = l }
@@ -79,7 +80,8 @@ struct fscrypt_ctx {
static inline bool fscrypt_has_encryption_key(const struct inode *inode)
{
- return (inode->i_crypt_info != NULL);
+ /* pairs with cmpxchg_release() in fscrypt_get_encryption_info() */
+ return READ_ONCE(inode->i_crypt_info) != NULL;
}
static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
@@ -88,9 +90,21 @@ static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
inode->i_sb->s_cop->dummy_context(inode);
}
+/*
+ * When d_splice_alias() moves a directory's encrypted alias to its decrypted
+ * alias as a result of the encryption key being added, DCACHE_ENCRYPTED_NAME
+ * must be cleared. Note that we don't have to support arbitrary moves of this
+ * flag because fscrypt doesn't allow encrypted aliases to be the source or
+ * target of a rename().
+ */
+static inline void fscrypt_handle_d_move(struct dentry *dentry)
+{
+ dentry->d_flags &= ~DCACHE_ENCRYPTED_NAME;
+}
+
/* crypto.c */
extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
-extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t);
+extern struct fscrypt_ctx *fscrypt_get_ctx(gfp_t);
extern void fscrypt_release_ctx(struct fscrypt_ctx *);
extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *,
unsigned int, unsigned int,
@@ -114,6 +128,7 @@ extern int fscrypt_inherit_context(struct inode *, struct inode *,
/* keyinfo.c */
extern int fscrypt_get_encryption_info(struct inode *);
extern void fscrypt_put_encryption_info(struct inode *);
+extern void fscrypt_free_inode(struct inode *);
/* fname.c */
extern int fscrypt_setup_filename(struct inode *, const struct qstr *,
@@ -214,13 +229,15 @@ extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
/* hooks.c */
extern int fscrypt_file_open(struct inode *inode, struct file *filp);
-extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir);
+extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir,
+ struct dentry *dentry);
extern int __fscrypt_prepare_rename(struct inode *old_dir,
struct dentry *old_dentry,
struct inode *new_dir,
struct dentry *new_dentry,
unsigned int flags);
-extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry);
+extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry,
+ struct fscrypt_name *fname);
extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
unsigned int max_len,
struct fscrypt_str *disk_link);
@@ -230,6 +247,11 @@ extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target,
extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr,
unsigned int max_size,
struct delayed_call *done);
+static inline void fscrypt_set_ops(struct super_block *sb,
+ const struct fscrypt_operations *s_cop)
+{
+ sb->s_cop = s_cop;
+}
#else /* !CONFIG_FS_ENCRYPTION */
static inline bool fscrypt_has_encryption_key(const struct inode *inode)
@@ -242,13 +264,16 @@ static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
return false;
}
+static inline void fscrypt_handle_d_move(struct dentry *dentry)
+{
+}
+
/* crypto.c */
static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
}
-static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode,
- gfp_t gfp_flags)
+static inline struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
{
return ERR_PTR(-EOPNOTSUPP);
}
@@ -322,6 +347,10 @@ static inline void fscrypt_put_encryption_info(struct inode *inode)
return;
}
+static inline void fscrypt_free_inode(struct inode *inode)
+{
+}
+
/* fname.c */
static inline int fscrypt_setup_filename(struct inode *dir,
const struct qstr *iname,
@@ -330,7 +359,7 @@ static inline int fscrypt_setup_filename(struct inode *dir,
if (IS_ENCRYPTED(dir))
return -EOPNOTSUPP;
- memset(fname, 0, sizeof(struct fscrypt_name));
+ memset(fname, 0, sizeof(*fname));
fname->usr_fname = iname;
fname->disk_name.name = (unsigned char *)iname->name;
fname->disk_name.len = iname->len;
@@ -401,8 +430,8 @@ static inline int fscrypt_file_open(struct inode *inode, struct file *filp)
return 0;
}
-static inline int __fscrypt_prepare_link(struct inode *inode,
- struct inode *dir)
+static inline int __fscrypt_prepare_link(struct inode *inode, struct inode *dir,
+ struct dentry *dentry)
{
return -EOPNOTSUPP;
}
@@ -417,7 +446,8 @@ static inline int __fscrypt_prepare_rename(struct inode *old_dir,
}
static inline int __fscrypt_prepare_lookup(struct inode *dir,
- struct dentry *dentry)
+ struct dentry *dentry,
+ struct fscrypt_name *fname)
{
return -EOPNOTSUPP;
}
@@ -446,6 +476,12 @@ static inline const char *fscrypt_get_symlink(struct inode *inode,
{
return ERR_PTR(-EOPNOTSUPP);
}
+
+static inline void fscrypt_set_ops(struct super_block *sb,
+ const struct fscrypt_operations *s_cop)
+{
+}
+
#endif /* !CONFIG_FS_ENCRYPTION */
/**
@@ -497,7 +533,7 @@ static inline int fscrypt_prepare_link(struct dentry *old_dentry,
struct dentry *dentry)
{
if (IS_ENCRYPTED(dir))
- return __fscrypt_prepare_link(d_inode(old_dentry), dir);
+ return __fscrypt_prepare_link(d_inode(old_dentry), dir, dentry);
return 0;
}
@@ -538,27 +574,32 @@ static inline int fscrypt_prepare_rename(struct inode *old_dir,
* fscrypt_prepare_lookup - prepare to lookup a name in a possibly-encrypted directory
* @dir: directory being searched
* @dentry: filename being looked up
- * @flags: lookup flags
+ * @fname: (output) the name to use to search the on-disk directory
*
- * Prepare for ->lookup() in a directory which may be encrypted. Lookups can be
- * done with or without the directory's encryption key; without the key,
+ * Prepare for ->lookup() in a directory which may be encrypted by determining
+ * the name that will actually be used to search the directory on-disk. Lookups
+ * can be done with or without the directory's encryption key; without the key,
* filenames are presented in encrypted form. Therefore, we'll try to set up
* the directory's encryption key, but even without it the lookup can continue.
*
- * To allow invalidating stale dentries if the directory's encryption key is
- * added later, we also install a custom ->d_revalidate() method and use the
- * DCACHE_ENCRYPTED_WITH_KEY flag to indicate whether a given dentry is a
- * plaintext name (flag set) or a ciphertext name (flag cleared).
+ * This also installs a custom ->d_revalidate() method which will invalidate the
+ * dentry if it was created without the key and the key is later added.
*
- * Return: 0 on success, -errno if a problem occurred while setting up the
- * encryption key
+ * Return: 0 on success; -ENOENT if key is unavailable but the filename isn't a
+ * correctly formed encoded ciphertext name, so a negative dentry should be
+ * created; or another -errno code.
*/
static inline int fscrypt_prepare_lookup(struct inode *dir,
struct dentry *dentry,
- unsigned int flags)
+ struct fscrypt_name *fname)
{
if (IS_ENCRYPTED(dir))
- return __fscrypt_prepare_lookup(dir, dentry);
+ return __fscrypt_prepare_lookup(dir, dentry, fname);
+
+ memset(fname, 0, sizeof(*fname));
+ fname->usr_fname = &dentry->d_name;
+ fname->disk_name.name = (unsigned char *)dentry->d_name.name;
+ fname->disk_name.len = dentry->d_name.len;
return 0;
}
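
A rough sketch of the call pattern the reworked fscrypt_prepare_lookup() enables in a filesystem's ->lookup(); example_find_entry() is a made-up stand-in for the fs's on-disk directory search (see ext4/f2fs for real users), and it must search by fname.disk_name rather than dentry->d_name:

#include <linux/fs.h>
#include <linux/fscrypt.h>

/* Hypothetical directory search returning a referenced inode or NULL. */
static struct inode *example_find_entry(struct inode *dir,
					struct fscrypt_name *fname);

static struct dentry *example_lookup(struct inode *dir, struct dentry *dentry,
				     unsigned int flags)
{
	struct fscrypt_name fname;
	struct inode *inode = NULL;
	int err;

	err = fscrypt_prepare_lookup(dir, dentry, &fname);
	if (err == -ENOENT)	/* no key and not a valid no-key name */
		goto splice;	/* create a negative dentry */
	if (err)
		return ERR_PTR(err);

	inode = example_find_entry(dir, &fname);
	fscrypt_free_filename(&fname);
splice:
	return d_splice_alias(inode, dentry);
}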
diff --git a/include/linux/fsl/ftm.h b/include/linux/fsl/ftm.h
new file mode 100644
index 000000000000..d59011acf66c
--- /dev/null
+++ b/include/linux/fsl/ftm.h
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __FSL_FTM_H__
+#define __FSL_FTM_H__
+
+#define FTM_SC 0x0 /* Status And Control */
+#define FTM_CNT 0x4 /* Counter */
+#define FTM_MOD 0x8 /* Modulo */
+
+#define FTM_CNTIN 0x4C /* Counter Initial Value */
+#define FTM_STATUS 0x50 /* Capture And Compare Status */
+#define FTM_MODE 0x54 /* Features Mode Selection */
+#define FTM_SYNC 0x58 /* Synchronization */
+#define FTM_OUTINIT 0x5C /* Initial State For Channels Output */
+#define FTM_OUTMASK 0x60 /* Output Mask */
+#define FTM_COMBINE 0x64 /* Function For Linked Channels */
+#define FTM_DEADTIME 0x68 /* Deadtime Insertion Control */
+#define FTM_EXTTRIG 0x6C /* FTM External Trigger */
+#define FTM_POL 0x70 /* Channels Polarity */
+#define FTM_FMS 0x74 /* Fault Mode Status */
+#define FTM_FILTER 0x78 /* Input Capture Filter Control */
+#define FTM_FLTCTRL 0x7C /* Fault Control */
+#define FTM_QDCTRL 0x80 /* Quadrature Decoder Control And Status */
+#define FTM_CONF 0x84 /* Configuration */
+#define FTM_FLTPOL 0x88 /* FTM Fault Input Polarity */
+#define FTM_SYNCONF 0x8C /* Synchronization Configuration */
+#define FTM_INVCTRL 0x90 /* FTM Inverting Control */
+#define FTM_SWOCTRL 0x94 /* FTM Software Output Control */
+#define FTM_PWMLOAD 0x98 /* FTM PWM Load */
+
+#define FTM_SC_CLK_MASK_SHIFT 3
+#define FTM_SC_CLK_MASK (3 << FTM_SC_CLK_MASK_SHIFT)
+#define FTM_SC_TOF 0x80
+#define FTM_SC_TOIE 0x40
+#define FTM_SC_CPWMS 0x20
+#define FTM_SC_CLKS 0x18
+#define FTM_SC_PS_1 0x0
+#define FTM_SC_PS_2 0x1
+#define FTM_SC_PS_4 0x2
+#define FTM_SC_PS_8 0x3
+#define FTM_SC_PS_16 0x4
+#define FTM_SC_PS_32 0x5
+#define FTM_SC_PS_64 0x6
+#define FTM_SC_PS_128 0x7
+#define FTM_SC_PS_MASK 0x7
+
+#define FTM_MODE_FAULTIE 0x80
+#define FTM_MODE_FAULTM 0x60
+#define FTM_MODE_CAPTEST 0x10
+#define FTM_MODE_PWMSYNC 0x8
+#define FTM_MODE_WPDIS 0x4
+#define FTM_MODE_INIT 0x2
+#define FTM_MODE_FTMEN 0x1
+
+/* NXP Errata: The PHAFLTREN and PHBFLTREN bits are tied to zero internally
+ * and these bits cannot be set. Flextimer cannot use Filter in
+ * Quadrature Decoder Mode.
+ * https://community.nxp.com/thread/467648#comment-1010319
+ */
+#define FTM_QDCTRL_PHAFLTREN 0x80
+#define FTM_QDCTRL_PHBFLTREN 0x40
+#define FTM_QDCTRL_PHAPOL 0x20
+#define FTM_QDCTRL_PHBPOL 0x10
+#define FTM_QDCTRL_QUADMODE 0x8
+#define FTM_QDCTRL_QUADDIR 0x4
+#define FTM_QDCTRL_TOFDIR 0x2
+#define FTM_QDCTRL_QUADEN 0x1
+
+#define FTM_FMS_FAULTF 0x80
+#define FTM_FMS_WPEN 0x40
+#define FTM_FMS_FAULTIN 0x10
+#define FTM_FMS_FAULTF3 0x8
+#define FTM_FMS_FAULTF2 0x4
+#define FTM_FMS_FAULTF1 0x2
+#define FTM_FMS_FAULTF0 0x1
+
+#define FTM_CSC_BASE 0xC
+#define FTM_CSC_MSB 0x20
+#define FTM_CSC_MSA 0x10
+#define FTM_CSC_ELSB 0x8
+#define FTM_CSC_ELSA 0x4
+#define FTM_CSC(_channel) (FTM_CSC_BASE + ((_channel) * 8))
+
+#define FTM_CV_BASE 0x10
+#define FTM_CV(_channel) (FTM_CV_BASE + ((_channel) * 8))
+
+#define FTM_PS_MAX 7
+
+#endif
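
Illustrative use of the new header (not from this patch): the per-channel registers sit at an 8-byte stride, which FTM_CSC()/FTM_CV() encode. A driver might program a channel for edge-aligned, high-true output like this; the iomem base is assumed to come from the driver's probe, and some SoCs need big-endian accessors instead of writel().

#include <linux/fsl/ftm.h>
#include <linux/io.h>

/* Hypothetical helper: MSnB=1, ELSnB=1 selects edge-aligned high-true output;
 * the compare value goes into the channel value register. */
static void example_ftm_setup_channel(void __iomem *base, unsigned int ch,
				      u32 compare)
{
	writel(FTM_CSC_MSB | FTM_CSC_ELSB, base + FTM_CSC(ch));
	writel(compare, base + FTM_CV(ch));
}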
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 09587e2860b5..94972e8eb6d1 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -27,7 +27,7 @@ static inline int fsnotify_dirent(struct inode *dir, struct dentry *dentry,
__u32 mask)
{
return fsnotify(dir, mask, d_inode(dentry), FSNOTIFY_EVENT_INODE,
- dentry->d_name.name, 0);
+ &dentry->d_name, 0);
}
/* Notify this dentry's parent about a child's events. */
@@ -102,7 +102,7 @@ static inline void fsnotify_link_count(struct inode *inode)
* fsnotify_move - file old_name at old_dir was moved to new_name at new_dir
*/
static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
- const unsigned char *old_name,
+ const struct qstr *old_name,
int isdir, struct inode *target,
struct dentry *moved)
{
@@ -111,7 +111,7 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
__u32 old_dir_mask = FS_MOVED_FROM;
__u32 new_dir_mask = FS_MOVED_TO;
__u32 mask = FS_MOVE_SELF;
- const unsigned char *new_name = moved->d_name.name;
+ const struct qstr *new_name = &moved->d_name;
if (old_dir == new_dir)
old_dir_mask |= FS_DN_RENAME;
@@ -152,39 +152,6 @@ static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt)
}
/*
- * fsnotify_nameremove - a filename was removed from a directory
- *
- * This is mostly called under parent vfs inode lock so name and
- * dentry->d_parent should be stable. However there are some corner cases where
- * inode lock is not held. So to be on the safe side and be reselient to future
- * callers and out of tree users of d_delete(), we do not assume that d_parent
- * and d_name are stable and we use dget_parent() and
- * take_dentry_name_snapshot() to grab stable references.
- */
-static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
-{
- struct dentry *parent;
- struct name_snapshot name;
- __u32 mask = FS_DELETE;
-
- /* d_delete() of pseudo inode? (e.g. __ns_get_path() playing tricks) */
- if (IS_ROOT(dentry))
- return;
-
- if (isdir)
- mask |= FS_ISDIR;
-
- parent = dget_parent(dentry);
- take_dentry_name_snapshot(&name, dentry);
-
- fsnotify(d_inode(parent), mask, d_inode(dentry), FSNOTIFY_EVENT_INODE,
- name.name, 0);
-
- release_dentry_name_snapshot(&name);
- dput(parent);
-}
-
-/*
* fsnotify_inoderemove - an inode is going away
*/
static inline void fsnotify_inoderemove(struct inode *inode)
@@ -218,7 +185,7 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct
fsnotify_link_count(inode);
audit_inode_child(dir, new_dentry, AUDIT_TYPE_CHILD_CREATE);
- fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, new_dentry->d_name.name, 0);
+ fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, &new_dentry->d_name, 0);
}
/*
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index dfc28fcb4de8..a9f9dcc1e515 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -117,7 +117,7 @@ struct fsnotify_ops {
int (*handle_event)(struct fsnotify_group *group,
struct inode *inode,
u32 mask, const void *data, int data_type,
- const unsigned char *file_name, u32 cookie,
+ const struct qstr *file_name, u32 cookie,
struct fsnotify_iter_info *iter_info);
void (*free_group_priv)(struct fsnotify_group *group);
void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
@@ -350,11 +350,12 @@ struct fsnotify_mark {
/* main fsnotify call to send events */
extern int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
- const unsigned char *name, u32 cookie);
+ const struct qstr *name, u32 cookie);
extern int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask);
extern void __fsnotify_inode_delete(struct inode *inode);
extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
extern void fsnotify_sb_delete(struct super_block *sb);
+extern void fsnotify_nameremove(struct dentry *dentry, int isdir);
extern u32 fsnotify_get_cookie(void);
static inline int fsnotify_inode_watches_children(struct inode *inode)
@@ -505,7 +506,7 @@ static inline void fsnotify_init_event(struct fsnotify_event *event,
#else
static inline int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
- const unsigned char *name, u32 cookie)
+ const struct qstr *name, u32 cookie)
{
return 0;
}
@@ -524,6 +525,9 @@ static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
static inline void fsnotify_sb_delete(struct super_block *sb)
{}
+static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
+{}
+
static inline void fsnotify_update_flags(struct dentry *dentry)
{}
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 730876187344..20899919ead8 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -241,21 +241,11 @@ static inline void ftrace_free_mem(struct module *mod, void *start, void *end) {
#ifdef CONFIG_STACK_TRACER
-#define STACK_TRACE_ENTRIES 500
-
-struct stack_trace;
-
-extern unsigned stack_trace_index[];
-extern struct stack_trace stack_trace_max;
-extern unsigned long stack_trace_max_size;
-extern arch_spinlock_t stack_trace_max_lock;
-
extern int stack_tracer_enabled;
-void stack_trace_print(void);
-int
-stack_trace_sysctl(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp,
- loff_t *ppos);
+
+int stack_trace_sysctl(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
DECLARE_PER_CPU(int, disable_stack_tracer);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 06c0fd594097..8b5330dd5ac0 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -150,6 +150,13 @@ enum {
DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
};
+enum {
+ /* Poll even if events_poll_msecs is unset */
+ DISK_EVENT_FLAG_POLL = 1 << 0,
+ /* Forward events to udev */
+ DISK_EVENT_FLAG_UEVENT = 1 << 1,
+};
+
struct disk_part_tbl {
struct rcu_head rcu_head;
int len;
@@ -184,8 +191,8 @@ struct gendisk {
char disk_name[DISK_NAME_LEN]; /* name of major driver */
char *(*devnode)(struct gendisk *gd, umode_t *mode);
- unsigned int events; /* supported events */
- unsigned int async_events; /* async events, subset of all */
+ unsigned short events; /* supported events */
+ unsigned short event_flags; /* flags related to event processing */
/* Array of pointers to partitions indexed by partno.
* Protected with matching bdev lock but stat and other
@@ -610,6 +617,7 @@ struct unixware_disklabel {
extern int blk_alloc_devt(struct hd_struct *part, dev_t *devt);
extern void blk_free_devt(dev_t devt);
+extern void blk_invalidate_devt(dev_t devt);
extern dev_t blk_lookup_devt(const char *name, int partno);
extern char *disk_name (struct gendisk *hd, int partno, char *buf);
@@ -714,7 +722,7 @@ static inline void hd_free_part(struct hd_struct *part)
*/
static inline sector_t part_nr_sects_read(struct hd_struct *part)
{
-#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP)
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
sector_t nr_sects;
unsigned seq;
do {
@@ -722,7 +730,7 @@ static inline sector_t part_nr_sects_read(struct hd_struct *part)
nr_sects = part->nr_sects;
} while (read_seqcount_retry(&part->nr_sects_seq, seq));
return nr_sects;
-#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
sector_t nr_sects;
preempt_disable();
@@ -741,11 +749,11 @@ static inline sector_t part_nr_sects_read(struct hd_struct *part)
*/
static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
{
-#if BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_SMP)
+#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
write_seqcount_begin(&part->nr_sects_seq);
part->nr_sects = size;
write_seqcount_end(&part->nr_sects_seq);
-#elif BITS_PER_LONG==32 && defined(CONFIG_LBDAF) && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
preempt_disable();
part->nr_sects = size;
preempt_enable();
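
With events and event_flags now separate fields, a removable-media driver opts in to polling and uevent forwarding explicitly. A short sketch of the setup done before add_disk() (the function name is invented):

#include <linux/genhd.h>

/* Hypothetical probe fragment for a removable-media driver. */
static void example_setup_disk_events(struct gendisk *disk)
{
	/* Which media events the driver can report... */
	disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST;
	/* ...and how they are processed: poll even if events_poll_msecs is
	 * unset, and forward them to udev as uevents. */
	disk->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
}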
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
index 83f81ac53282..6cb82301d8e9 100644
--- a/include/linux/genl_magic_func.h
+++ b/include/linux/genl_magic_func.h
@@ -233,7 +233,6 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
{ \
handler \
.cmd = op_name, \
- .policy = CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy), \
},
#define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
@@ -290,7 +289,8 @@ static struct genl_family ZZZ_genl_family __ro_after_init = {
#ifdef GENL_MAGIC_FAMILY_HDRSZ
.hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ),
#endif
- .maxattr = ARRAY_SIZE(drbd_tla_nl_policy)-1,
+ .maxattr = ARRAY_SIZE(CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy))-1,
+ .policy = CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy),
.ops = ZZZ_genl_ops,
.n_ops = ARRAY_SIZE(ZZZ_genl_ops),
.mcgrps = ZZZ_genl_mcgrps,
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index fdab7de7490d..fb07b503dc45 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -585,12 +585,12 @@ static inline bool pm_suspended_storage(void)
}
#endif /* CONFIG_PM_SLEEP */
-#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
+#ifdef CONFIG_CONTIG_ALLOC
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
unsigned migratetype, gfp_t gfp_mask);
-extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
#endif
+void free_contig_range(unsigned long pfn, unsigned int nr_pages);
#ifdef CONFIG_CMA
/* CMA stuff */
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 01497910f023..a1d273c96016 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -227,9 +227,10 @@ struct gpio_irq_chip {
* @reg_dat: data (in) register for generic GPIO
* @reg_set: output set register (out=high) for generic GPIO
* @reg_clr: output clear register (out=low) for generic GPIO
- * @reg_dir: direction setting register for generic GPIO
- * @bgpio_dir_inverted: indicates that the direction register is inverted
- * (gpiolib private state variable)
+ * @reg_dir_out: direction out setting register for generic GPIO
+ * @reg_dir_in: direction in setting register for generic GPIO
+ * @bgpio_dir_unreadable: indicates that the direction register(s) cannot
+ * be read and we need to rely on our internal state tracking.
* @bgpio_bits: number of register bits used for a generic GPIO i.e.
* <register width> * 8
* @bgpio_lock: used to lock chip->bgpio_data. Also, this is needed to keep
@@ -237,7 +238,8 @@ struct gpio_irq_chip {
* @bgpio_data: shadowed data register for generic GPIO to clear/set bits
* safely.
* @bgpio_dir: shadowed direction register for generic GPIO to clear/set
- * direction safely.
+ * direction safely. A "1" in this word means the line is set as
+ * output.
*
* A gpio_chip can help platforms abstract various sources of GPIOs so
* they can all be accessed through a common programing interface.
@@ -298,8 +300,9 @@ struct gpio_chip {
void __iomem *reg_dat;
void __iomem *reg_set;
void __iomem *reg_clr;
- void __iomem *reg_dir;
- bool bgpio_dir_inverted;
+ void __iomem *reg_dir_out;
+ void __iomem *reg_dir_in;
+ bool bgpio_dir_unreadable;
int bgpio_bits;
spinlock_t bgpio_lock;
unsigned long bgpio_data;
@@ -614,6 +617,9 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
enum gpiod_flags flags);
void gpiochip_free_own_desc(struct gpio_desc *desc);
+void devprop_gpiochip_set_names(struct gpio_chip *chip,
+ const struct fwnode_handle *fwnode);
+
#else /* CONFIG_GPIOLIB */
static inline struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc)
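
Sketch of how an MMIO GPIO driver now describes its direction registers when registering through the generic bgpio helper; this assumes bgpio_init() keeps its usual signature (chip, dev, register width, dat, set, clr, dirout, dirin, flags), and the register offsets are invented:

#include <linux/gpio/driver.h>

/* Hypothetical probe fragment: data/set/clear registers plus a single
 * direction register where 1 = output, so the dir-in pointer is left NULL. */
static int example_register_mmio_gpio(struct device *dev, struct gpio_chip *gc,
				      void __iomem *regs)
{
	return bgpio_init(gc, dev, 4,		/* 4 bytes -> 32 lines */
			  regs + 0x00,		/* data in */
			  regs + 0x04,		/* set */
			  regs + 0x08,		/* clear */
			  regs + 0x0c,		/* direction out (1 = output) */
			  NULL,			/* no separate direction-in register */
			  0);
}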
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h
index 69673be10213..35f299d1f6a7 100644
--- a/include/linux/gpio/machine.h
+++ b/include/linux/gpio/machine.h
@@ -6,14 +6,16 @@
#include <linux/list.h>
enum gpio_lookup_flags {
- GPIO_ACTIVE_HIGH = (0 << 0),
- GPIO_ACTIVE_LOW = (1 << 0),
- GPIO_OPEN_DRAIN = (1 << 1),
- GPIO_OPEN_SOURCE = (1 << 2),
- GPIO_PERSISTENT = (0 << 3),
- GPIO_TRANSITORY = (1 << 3),
- GPIO_PULL_UP = (1 << 4),
- GPIO_PULL_DOWN = (1 << 5),
+ GPIO_ACTIVE_HIGH = (0 << 0),
+ GPIO_ACTIVE_LOW = (1 << 0),
+ GPIO_OPEN_DRAIN = (1 << 1),
+ GPIO_OPEN_SOURCE = (1 << 2),
+ GPIO_PERSISTENT = (0 << 3),
+ GPIO_TRANSITORY = (1 << 3),
+ GPIO_PULL_UP = (1 << 4),
+ GPIO_PULL_DOWN = (1 << 5),
+
+ GPIO_LOOKUP_FLAGS_DEFAULT = GPIO_ACTIVE_HIGH | GPIO_PERSISTENT,
};
/**
@@ -22,7 +24,7 @@ enum gpio_lookup_flags {
* @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO
* @con_id: name of the GPIO from the device's point of view
* @idx: index of the GPIO in case several GPIOs share the same name
- * @flags: mask of GPIO_* values
+ * @flags: bitmask of gpio_lookup_flags GPIO_* values
*
* gpiod_lookup is a lookup table for associating GPIOs to specific devices and
* functions using platform data.
@@ -32,7 +34,7 @@ struct gpiod_lookup {
u16 chip_hwnum;
const char *con_id;
unsigned int idx;
- enum gpio_lookup_flags flags;
+ unsigned long flags;
};
struct gpiod_lookup_table {
@@ -46,7 +48,7 @@ struct gpiod_lookup_table {
* @chip_label: name of the chip the GPIO belongs to
* @chip_hwnum: hardware number (i.e. relative to the chip) of the GPIO
* @line_name: consumer name for the hogged line
- * @lflags: mask of GPIO lookup flags
+ * @lflags: bitmask of gpio_lookup_flags GPIO_* values
* @dflags: GPIO flags used to specify the direction and value
*/
struct gpiod_hog {
@@ -54,7 +56,7 @@ struct gpiod_hog {
const char *chip_label;
u16 chip_hwnum;
const char *line_name;
- enum gpio_lookup_flags lflags;
+ unsigned long lflags;
int dflags;
};
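
Since @flags and @lflags are now plain bitmasks, board code can OR several gpio_lookup_flags together in a lookup table. A minimal sketch (chip label, consumer name and device name are invented):

#include <linux/gpio/machine.h>

static struct gpiod_lookup_table example_gpios_table = {
	.dev_id = "example-device",	/* device that will request the GPIO */
	.table = {
		/* "reset" GPIO: line 3 of gpiochip0, active low and open drain. */
		GPIO_LOOKUP_IDX("gpiochip0", 3, "reset", 0,
				GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN),
		{ },
	},
};

/* Registered from board/machine init code with:
 *	gpiod_add_lookup_table(&example_gpios_table);
 */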
diff --git a/include/linux/hid.h b/include/linux/hid.h
index f9707d1dcb58..ae9da674b749 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -382,6 +382,7 @@ struct hid_item {
#define HID_GROUP_WACOM 0x0101
#define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102
#define HID_GROUP_STEAM 0x0103
+#define HID_GROUP_LOGITECH_27MHZ_DEVICE 0x0104
/*
* HID protocol status
@@ -417,6 +418,7 @@ struct hid_global {
struct hid_local {
unsigned usage[HID_MAX_USAGES]; /* usage array */
+ u8 usage_size[HID_MAX_USAGES]; /* usage size array */
unsigned collection_index[HID_MAX_USAGES]; /* collection index array */
unsigned usage_index;
unsigned usage_minimum;
@@ -893,7 +895,7 @@ struct hid_field *hidinput_get_led_field(struct hid_device *hid);
unsigned int hidinput_count_leds(struct hid_device *hid);
__s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code);
void hid_output_report(struct hid_report *report, __u8 *data);
-void __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype);
+int __hid_request(struct hid_device *hid, struct hid_report *rep, int reqtype);
u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags);
struct hid_device *hid_allocate_device(void);
struct hid_report *hid_register_report(struct hid_device *device,
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index ad50b7b4f141..51ec27a84668 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -77,8 +77,34 @@
#include <linux/migrate.h>
#include <linux/memremap.h>
#include <linux/completion.h>
+#include <linux/mmu_notifier.h>
-struct hmm;
+
+/*
+ * struct hmm - HMM per mm struct
+ *
+ * @mm: mm struct this HMM struct is bound to
+ * @lock: lock protecting ranges list
+ * @ranges: list of range being snapshotted
+ * @mirrors: list of mirrors for this mm
+ * @mmu_notifier: mmu notifier to track updates to CPU page table
+ * @mirrors_sem: read/write semaphore protecting the mirrors list
+ * @wq: wait queue for user waiting on a range invalidation
+ * @notifiers: count of active mmu notifiers
+ * @dead: is the mm dead?
+ */
+struct hmm {
+ struct mm_struct *mm;
+ struct kref kref;
+ struct mutex lock;
+ struct list_head ranges;
+ struct list_head mirrors;
+ struct mmu_notifier mmu_notifier;
+ struct rw_semaphore mirrors_sem;
+ wait_queue_head_t wq;
+ long notifiers;
+ bool dead;
+};
/*
* hmm_pfn_flag_e - HMM flag enums
@@ -131,6 +157,7 @@ enum hmm_pfn_value_e {
/*
* struct hmm_range - track invalidation lock on virtual address range
*
+ * @hmm: the core HMM structure this range is active against
* @vma: the vm area struct for the range
* @list: all range lock are on a list
* @start: range virtual start address (inclusive)
@@ -138,10 +165,13 @@ enum hmm_pfn_value_e {
* @pfns: array of pfns (big enough for the range)
* @flags: pfn flags to match device driver page table
* @values: pfn value for some special case (none, special, error, ...)
+ * @default_flags: default flags for the range (write, read, ... see hmm doc)
+ * @pfn_flags_mask: allows masking pfn flags so that only default_flags matter
* @pfn_shifts: pfn shift value (should be <= PAGE_SHIFT)
* @valid: pfns array did not change since it has been fill by an HMM function
*/
struct hmm_range {
+ struct hmm *hmm;
struct vm_area_struct *vma;
struct list_head list;
unsigned long start;
@@ -149,41 +179,96 @@ struct hmm_range {
uint64_t *pfns;
const uint64_t *flags;
const uint64_t *values;
+ uint64_t default_flags;
+ uint64_t pfn_flags_mask;
+ uint8_t page_shift;
uint8_t pfn_shift;
bool valid;
};
/*
- * hmm_pfn_to_page() - return struct page pointed to by a valid HMM pfn
- * @range: range use to decode HMM pfn value
- * @pfn: HMM pfn value to get corresponding struct page from
- * Returns: struct page pointer if pfn is a valid HMM pfn, NULL otherwise
+ * hmm_range_page_shift() - return the page shift for the range
+ * @range: range being queried
+ * Returns: page shift (page size = 1 << page shift) for the range
+ */
+static inline unsigned hmm_range_page_shift(const struct hmm_range *range)
+{
+ return range->page_shift;
+}
+
+/*
+ * hmm_range_page_size() - return the page size for the range
+ * @range: range being queried
+ * Returns: page size for the range in bytes
+ */
+static inline unsigned long hmm_range_page_size(const struct hmm_range *range)
+{
+ return 1UL << hmm_range_page_shift(range);
+}
+
+/*
+ * hmm_range_wait_until_valid() - wait for range to be valid
+ * @range: range affected by invalidation to wait on
+ * @timeout: timeout for the wait, in ms (i.e. abort the wait after that period)
+ * Returns: true if the range is valid, false otherwise.
+ */
+static inline bool hmm_range_wait_until_valid(struct hmm_range *range,
+ unsigned long timeout)
+{
+ /* Check if the mm is dead */
+ if (range->hmm == NULL || range->hmm->dead || range->hmm->mm == NULL) {
+ range->valid = false;
+ return false;
+ }
+ if (range->valid)
+ return true;
+ wait_event_timeout(range->hmm->wq, range->valid || range->hmm->dead,
+ msecs_to_jiffies(timeout));
+ /* Return current valid status just in case we get lucky */
+ return range->valid;
+}
+
+/*
+ * hmm_range_valid() - test if a range is valid or not
+ * @range: range
+ * Returns: true if the range is valid, false otherwise.
+ */
+static inline bool hmm_range_valid(struct hmm_range *range)
+{
+ return range->valid;
+}
+
+/*
+ * hmm_device_entry_to_page() - return struct page pointed to by a device entry
+ * @range: range use to decode device entry value
+ * @entry: device entry value to get corresponding struct page from
+ * Returns: struct page pointer if the entry is valid, NULL otherwise
*
- * If the HMM pfn is valid (ie valid flag set) then return the struct page
- * matching the pfn value stored in the HMM pfn. Otherwise return NULL.
+ * If the device entry is valid (ie valid flag set) then return the struct page
+ * matching the entry value. Otherwise return NULL.
*/
-static inline struct page *hmm_pfn_to_page(const struct hmm_range *range,
- uint64_t pfn)
+static inline struct page *hmm_device_entry_to_page(const struct hmm_range *range,
+ uint64_t entry)
{
- if (pfn == range->values[HMM_PFN_NONE])
+ if (entry == range->values[HMM_PFN_NONE])
return NULL;
- if (pfn == range->values[HMM_PFN_ERROR])
+ if (entry == range->values[HMM_PFN_ERROR])
return NULL;
- if (pfn == range->values[HMM_PFN_SPECIAL])
+ if (entry == range->values[HMM_PFN_SPECIAL])
return NULL;
- if (!(pfn & range->flags[HMM_PFN_VALID]))
+ if (!(entry & range->flags[HMM_PFN_VALID]))
return NULL;
- return pfn_to_page(pfn >> range->pfn_shift);
+ return pfn_to_page(entry >> range->pfn_shift);
}
/*
- * hmm_pfn_to_pfn() - return pfn value store in a HMM pfn
- * @range: range use to decode HMM pfn value
- * @pfn: HMM pfn value to extract pfn from
- * Returns: pfn value if HMM pfn is valid, -1UL otherwise
+ * hmm_device_entry_to_pfn() - return pfn value store in a device entry
+ * @range: range use to decode device entry value
+ * @entry: device entry to extract pfn from
+ * Returns: pfn value if device entry is valid, -1UL otherwise
*/
-static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range,
- uint64_t pfn)
+static inline unsigned long
+hmm_device_entry_to_pfn(const struct hmm_range *range, uint64_t pfn)
{
if (pfn == range->values[HMM_PFN_NONE])
return -1UL;
@@ -197,31 +282,66 @@ static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range,
}
/*
- * hmm_pfn_from_page() - create a valid HMM pfn value from struct page
+ * hmm_device_entry_from_page() - create a valid device entry for a page
* @range: range use to encode HMM pfn value
- * @page: struct page pointer for which to create the HMM pfn
- * Returns: valid HMM pfn for the page
+ * @page: page for which to create the device entry
+ * Returns: valid device entry for the page
*/
-static inline uint64_t hmm_pfn_from_page(const struct hmm_range *range,
- struct page *page)
+static inline uint64_t hmm_device_entry_from_page(const struct hmm_range *range,
+ struct page *page)
{
return (page_to_pfn(page) << range->pfn_shift) |
range->flags[HMM_PFN_VALID];
}
/*
- * hmm_pfn_from_pfn() - create a valid HMM pfn value from pfn
+ * hmm_device_entry_from_pfn() - create a valid device entry value from pfn
* @range: range use to encode HMM pfn value
- * @pfn: pfn value for which to create the HMM pfn
- * Returns: valid HMM pfn for the pfn
+ * @pfn: pfn value for which to create the device entry
+ * Returns: valid device entry for the pfn
*/
-static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
- unsigned long pfn)
+static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
+ unsigned long pfn)
{
return (pfn << range->pfn_shift) |
range->flags[HMM_PFN_VALID];
}
+/*
+ * Old API:
+ * hmm_pfn_to_page()
+ * hmm_pfn_to_pfn()
+ * hmm_pfn_from_page()
+ * hmm_pfn_from_pfn()
+ *
+ * This is the OLD API; please use the new API. It is kept here to avoid
+ * cross-tree merge pain, i.e. we convert things to the new API in stages.
+ */
+static inline struct page *hmm_pfn_to_page(const struct hmm_range *range,
+ uint64_t pfn)
+{
+ return hmm_device_entry_to_page(range, pfn);
+}
+
+static inline unsigned long hmm_pfn_to_pfn(const struct hmm_range *range,
+ uint64_t pfn)
+{
+ return hmm_device_entry_to_pfn(range, pfn);
+}
+
+static inline uint64_t hmm_pfn_from_page(const struct hmm_range *range,
+ struct page *page)
+{
+ return hmm_device_entry_from_page(range, page);
+}
+
+static inline uint64_t hmm_pfn_from_pfn(const struct hmm_range *range,
+ unsigned long pfn)
+{
+ return hmm_device_entry_from_pfn(range, pfn);
+}
+
+
#if IS_ENABLED(CONFIG_HMM_MIRROR)
/*
@@ -353,43 +473,113 @@ struct hmm_mirror {
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
void hmm_mirror_unregister(struct hmm_mirror *mirror);
-
/*
- * To snapshot the CPU page table, call hmm_vma_get_pfns(), then take a device
- * driver lock that serializes device page table updates, then call
- * hmm_vma_range_done(), to check if the snapshot is still valid. The same
- * device driver page table update lock must also be used in the
- * hmm_mirror_ops.sync_cpu_device_pagetables() callback, so that CPU page
- * table invalidation serializes on it.
- *
- * YOU MUST CALL hmm_vma_range_done() ONCE AND ONLY ONCE EACH TIME YOU CALL
- * hmm_vma_get_pfns() WITHOUT ERROR !
- *
- * IF YOU DO NOT FOLLOW THE ABOVE RULE THE SNAPSHOT CONTENT MIGHT BE INVALID !
+ * hmm_mirror_mm_is_alive() - test if mm is still alive
+ * @mirror: the HMM mm mirror for which we want to lock the mmap_sem
+ * Returns: false if the mm is dead, true otherwise
+ *
+ * This is an optimization: it will not always accurately report a dead mm,
+ * i.e. there can be false negatives (the process is being killed but HMM has
+ * not yet been informed of that). It is only intended to optimize out cases
+ * where the driver is about to do something time consuming and it would be
+ * better to skip that work if the mm is dead.
*/
-int hmm_vma_get_pfns(struct hmm_range *range);
-bool hmm_vma_range_done(struct hmm_range *range);
+static inline bool hmm_mirror_mm_is_alive(struct hmm_mirror *mirror)
+{
+ struct mm_struct *mm;
+
+ if (!mirror || !mirror->hmm)
+ return false;
+ mm = READ_ONCE(mirror->hmm->mm);
+ if (mirror->hmm->dead || !mm)
+ return false;
+
+ return true;
+}
/*
- * Fault memory on behalf of device driver. Unlike handle_mm_fault(), this will
- * not migrate any device memory back to system memory. The HMM pfn array will
- * be updated with the fault result and current snapshot of the CPU page table
- * for the range.
- *
- * The mmap_sem must be taken in read mode before entering and it might be
- * dropped by the function if the block argument is false. In that case, the
- * function returns -EAGAIN.
- *
- * Return value does not reflect if the fault was successful for every single
- * address or not. Therefore, the caller must to inspect the HMM pfn array to
- * determine fault status for each address.
- *
- * Trying to fault inside an invalid vma will result in -EINVAL.
+ * Please see Documentation/vm/hmm.rst for how to use the range API.
+ */
+int hmm_range_register(struct hmm_range *range,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end,
+ unsigned page_shift);
+void hmm_range_unregister(struct hmm_range *range);
+long hmm_range_snapshot(struct hmm_range *range);
+long hmm_range_fault(struct hmm_range *range, bool block);
+long hmm_range_dma_map(struct hmm_range *range,
+ struct device *device,
+ dma_addr_t *daddrs,
+ bool block);
+long hmm_range_dma_unmap(struct hmm_range *range,
+ struct vm_area_struct *vma,
+ struct device *device,
+ dma_addr_t *daddrs,
+ bool dirty);
+
+/*
+ * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
*
- * See the function description in mm/hmm.c for further documentation.
+ * When waiting for mmu notifiers we need some kind of timeout, otherwise we
+ * could potentially wait forever; 1000ms, i.e. 1s, already sounds like a long
+ * time to wait.
*/
-int hmm_vma_fault(struct hmm_range *range, bool block);
+#define HMM_RANGE_DEFAULT_TIMEOUT 1000
+
+/* This is a temporary helper to avoid merge conflict between trees. */
+static inline bool hmm_vma_range_done(struct hmm_range *range)
+{
+ bool ret = hmm_range_valid(range);
+
+ hmm_range_unregister(range);
+ return ret;
+}
+
+/* This is a temporary helper to avoid merge conflict between trees. */
+static inline int hmm_vma_fault(struct hmm_range *range, bool block)
+{
+ long ret;
+
+ /*
+ * With the old API the driver must set each individual entries with
+ * the requested flags (valid, write, ...). So here we set the mask to
+ * keep intact the entries provided by the driver and zero out the
+ * default_flags.
+ */
+ range->default_flags = 0;
+ range->pfn_flags_mask = -1UL;
+
+ ret = hmm_range_register(range, range->vma->vm_mm,
+ range->start, range->end,
+ PAGE_SHIFT);
+ if (ret)
+ return (int)ret;
+
+ if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
+ /*
+ * The mmap_sem was taken by the driver; we release it here and
+ * return -EAGAIN, which corresponds to the mmap_sem having been
+ * dropped in the old API.
+ */
+ up_read(&range->vma->vm_mm->mmap_sem);
+ return -EAGAIN;
+ }
+
+ ret = hmm_range_fault(range, block);
+ if (ret <= 0) {
+ if (ret == -EBUSY || !ret) {
+ /* Same as above: drop mmap_sem to match the old API. */
+ up_read(&range->vma->vm_mm->mmap_sem);
+ ret = -EBUSY;
+ } else if (ret == -EAGAIN)
+ ret = -EBUSY;
+ hmm_range_unregister(range);
+ return ret;
+ }
+ return 0;
+}
/* Below are for HMM internal use only! Not to be used by device driver! */
void hmm_mm_destroy(struct mm_struct *mm);
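
The hmm_vma_fault() helper above already shows the faulting path; the snapshot path follows the same register / wait-until-valid / operate / unregister pattern. A condensed, hedged sketch (driver-side locking elided, range->pfns assumed to be sized by the caller, mmap_sem held for read around the snapshot as required by Documentation/vm/hmm.rst):

#include <linux/hmm.h>

/* Hypothetical driver helper: snapshot the CPU page table for [start, end). */
static long example_snapshot(struct mm_struct *mm, struct hmm_range *range,
			     unsigned long start, unsigned long end)
{
	long ret;

	ret = hmm_range_register(range, mm, start, end, PAGE_SHIFT);
	if (ret)
		return ret;

	if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
		hmm_range_unregister(range);
		return -EBUSY;
	}

	ret = hmm_range_snapshot(range);	/* number of pages or -errno */

	hmm_range_unregister(range);
	return ret;
}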
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 381e872bfde0..7cd5c150c21d 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -47,10 +47,8 @@ extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot,
int prot_numa);
-vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd, pfn_t pfn, bool write);
-vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
- pud_t *pud, pfn_t pfn, bool write);
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
enum transparent_hugepage_flag {
TRANSPARENT_HUGEPAGE_FLAG,
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index ea35263eb76b..edf476c8cfb9 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -123,9 +123,7 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
- struct vm_area_struct *vma,
- struct address_space *mapping,
+u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
pgoff_t idx, unsigned long address);
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
@@ -203,7 +201,6 @@ static inline void hugetlb_show_meminfo(void)
#define pud_huge(x) 0
#define is_hugepage_only_range(mm, addr, len) 0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
-#define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
src_addr, pagep) ({ BUG(); 0; })
#define huge_pte_offset(mm, address, sz) 0
@@ -234,6 +231,13 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
{
BUG();
}
+static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ unsigned int flags)
+{
+ BUG();
+ return 0;
+}
#endif /* !CONFIG_HUGETLB_PAGE */
/*
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 99e0c1b0b5fb..2b949fa501e1 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -40,6 +40,11 @@ enum hwmon_chip_attributes {
hwmon_chip_register_tz,
hwmon_chip_update_interval,
hwmon_chip_alarms,
+ hwmon_chip_samples,
+ hwmon_chip_curr_samples,
+ hwmon_chip_in_samples,
+ hwmon_chip_power_samples,
+ hwmon_chip_temp_samples,
};
#define HWMON_C_TEMP_RESET_HISTORY BIT(hwmon_chip_temp_reset_history)
@@ -49,6 +54,11 @@ enum hwmon_chip_attributes {
#define HWMON_C_REGISTER_TZ BIT(hwmon_chip_register_tz)
#define HWMON_C_UPDATE_INTERVAL BIT(hwmon_chip_update_interval)
#define HWMON_C_ALARMS BIT(hwmon_chip_alarms)
+#define HWMON_C_SAMPLES BIT(hwmon_chip_samples)
+#define HWMON_C_CURR_SAMPLES BIT(hwmon_chip_curr_samples)
+#define HWMON_C_IN_SAMPLES BIT(hwmon_chip_in_samples)
+#define HWMON_C_POWER_SAMPLES BIT(hwmon_chip_power_samples)
+#define HWMON_C_TEMP_SAMPLES BIT(hwmon_chip_temp_samples)
enum hwmon_temp_attributes {
hwmon_temp_input = 0,
@@ -365,6 +375,14 @@ struct hwmon_channel_info {
const u32 *config;
};
+#define HWMON_CHANNEL_INFO(stype, ...) \
+ (&(struct hwmon_channel_info) { \
+ .type = hwmon_##stype, \
+ .config = (u32 []) { \
+ __VA_ARGS__, 0 \
+ } \
+ })
+
/**
* Chip configuration
* @ops: Pointer to hwmon operations.
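
HWMON_CHANNEL_INFO() removes the per-channel boilerplate of declaring a config array plus a struct. A typical (illustrative) table for a chip with one temperature input; the array is then referenced from struct hwmon_chip_info and passed to devm_hwmon_device_register_with_info():

#include <linux/hwmon.h>

/* Each entry is a compound literal; the macro appends the 0 terminator
 * to the config array for you. */
static const struct hwmon_channel_info *example_hwmon_info[] = {
	HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
	HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX),
	NULL
};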
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 64698ec8f2ac..8b9a93c99c9b 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -141,6 +141,11 @@ struct hv_ring_buffer_info {
u32 ring_datasize; /* < ring_size */
u32 priv_read_index;
+ /*
+ * The ring buffer mutex lock. This lock prevents the ring buffer from
+ * being freed while the ring buffer is being accessed.
+ */
+ struct mutex ring_buffer_mutex;
};
@@ -1206,7 +1211,7 @@ struct hv_ring_buffer_debug_info {
};
-int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info);
/* Vmbus interface */
diff --git a/include/linux/i2c-algo-bit.h b/include/linux/i2c-algo-bit.h
index 69045df78e2d..7fd5575a368f 100644
--- a/include/linux/i2c-algo-bit.h
+++ b/include/linux/i2c-algo-bit.h
@@ -33,6 +33,7 @@ struct i2c_algo_bit_data {
minimum 5 us for standard-mode I2C and SMBus,
maximum 50 us for SMBus */
int timeout; /* in jiffies */
+ bool can_do_atomic; /* callbacks don't sleep, we can be atomic */
};
int i2c_bit_add_bus(struct i2c_adapter *);
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 383510b4f083..be27062f8ed1 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -499,9 +499,13 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
* @master_xfer: Issue a set of i2c transactions to the given I2C adapter
* defined by the msgs array, with num messages available to transfer via
* the adapter specified by adap.
+ * @master_xfer_atomic: same as @master_xfer, but it must be usable from atomic
+ * context, so e.g. PMICs can be accessed very late before shutdown. Optional.
* @smbus_xfer: Issue smbus transactions to the given I2C adapter. If this
* is not present, then the bus layer will try and convert the SMBus calls
* into I2C transfers instead.
+ * @smbus_xfer_atomic: same as @smbus_xfer, but it must be usable from atomic
+ * context, so e.g. PMICs can be accessed very late before shutdown. Optional.
* @functionality: Return the flags that this algorithm/adapter pair supports
* from the I2C_FUNC_* flags.
* @reg_slave: Register given client to I2C slave mode of this adapter
@@ -512,25 +516,33 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
* be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584
* to name two of the most common.
*
- * The return codes from the @master_xfer field should indicate the type of
- * error code that occurred during the transfer, as documented in the kernel
- * Documentation file Documentation/i2c/fault-codes.
+ * The return codes from the @master_xfer{_atomic} fields should indicate the
+ * type of error code that occurred during the transfer, as documented in the
+ * Kernel Documentation file Documentation/i2c/fault-codes.
*/
struct i2c_algorithm {
- /* If an adapter algorithm can't do I2C-level access, set master_xfer
- to NULL. If an adapter algorithm can do SMBus access, set
- smbus_xfer. If set to NULL, the SMBus protocol is simulated
- using common I2C messages */
- /* master_xfer should return the number of messages successfully
- processed, or a negative value on error */
+ /*
+ * If an adapter algorithm can't do I2C-level access, set master_xfer
+ * to NULL. If an adapter algorithm can do SMBus access, set
+ * smbus_xfer. If set to NULL, the SMBus protocol is simulated
+ * using common I2C messages.
+ *
+ * master_xfer should return the number of messages successfully
+ * processed, or a negative value on error
+ */
int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs,
int num);
- int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr,
- unsigned short flags, char read_write,
- u8 command, int size, union i2c_smbus_data *data);
+ int (*master_xfer_atomic)(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num);
+ int (*smbus_xfer)(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int size, union i2c_smbus_data *data);
+ int (*smbus_xfer_atomic)(struct i2c_adapter *adap, u16 addr,
+ unsigned short flags, char read_write,
+ u8 command, int size, union i2c_smbus_data *data);
/* To determine what the adapter supports */
- u32 (*functionality) (struct i2c_adapter *);
+ u32 (*functionality)(struct i2c_adapter *adap);
#if IS_ENABLED(CONFIG_I2C_SLAVE)
int (*reg_slave)(struct i2c_client *client);
@@ -682,7 +694,8 @@ struct i2c_adapter {
int retries;
struct device dev; /* the adapter device */
unsigned long locked_flags; /* owned by the I2C core */
-#define I2C_ALF_IS_SUSPENDED 0
+#define I2C_ALF_IS_SUSPENDED 0
+#define I2C_ALF_SUSPEND_REPORTED 1
int nr;
char name[48];
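
Sketch of an adapter advertising the new atomic callbacks next to the regular ones, matching the signatures above; example_do_xfer() is a hypothetical hardware routine where @atomic selects polling instead of interrupt/completion waiting:

#include <linux/i2c.h>

static int example_do_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
			   int num, bool atomic);	/* hypothetical */

static int example_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	return example_do_xfer(adap, msgs, num, false);	/* may sleep */
}

static int example_xfer_atomic(struct i2c_adapter *adap, struct i2c_msg *msgs,
			       int num)
{
	return example_do_xfer(adap, msgs, num, true);	/* must not sleep */
}

static u32 example_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm example_algo = {
	.master_xfer		= example_xfer,
	.master_xfer_atomic	= example_xfer_atomic,
	.functionality		= example_func,
};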
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 48703ec60d06..61f0a316c6ac 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1557,7 +1557,7 @@ struct ieee80211_vht_operation {
* struct ieee80211_he_cap_elem - HE capabilities element
*
* This structure is the "HE capabilities element" fixed fields as
- * described in P802.11ax_D3.0 section 9.4.2.237.2 and 9.4.2.237.3
+ * described in P802.11ax_D4.0 section 9.4.2.242.2 and 9.4.2.242.3
*/
struct ieee80211_he_cap_elem {
u8 mac_cap_info[6];
@@ -1619,12 +1619,12 @@ struct ieee80211_he_mcs_nss_supp {
* struct ieee80211_he_operation - HE capabilities element
*
* This structure is the "HE operation element" fields as
- * described in P802.11ax_D3.0 section 9.4.2.238
+ * described in P802.11ax_D4.0 section 9.4.2.243
*/
struct ieee80211_he_operation {
__le32 he_oper_params;
__le16 he_mcs_nss_set;
- /* Optional 0,1,3 or 4 bytes: depends on @he_oper_params */
+ /* Optional 0,1,3,4,5,7 or 8 bytes: depends on @he_oper_params */
u8 optional[0];
} __packed;
@@ -1632,7 +1632,7 @@ struct ieee80211_he_operation {
* struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field
*
* This structure is the "MU AC Parameter Record" fields as
- * described in P802.11ax_D2.0 section 9.4.2.240
+ * described in P802.11ax_D4.0 section 9.4.2.245
*/
struct ieee80211_he_mu_edca_param_ac_rec {
u8 aifsn;
@@ -1644,7 +1644,7 @@ struct ieee80211_he_mu_edca_param_ac_rec {
* struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element
*
* This structure is the "MU EDCA Parameter Set element" fields as
- * described in P802.11ax_D2.0 section 9.4.2.240
+ * described in P802.11ax_D4.0 section 9.4.2.245
*/
struct ieee80211_mu_edca_param_set {
u8 mu_qos_info;
@@ -2026,6 +2026,7 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00004000
#define IEEE80211_HE_OPERATION_CO_HOSTED_BSS 0x00008000
#define IEEE80211_HE_OPERATION_ER_SU_DISABLE 0x00010000
+#define IEEE80211_HE_OPERATION_6GHZ_OP_INFO 0x00020000
#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x3f000000
#define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24
#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000
@@ -2056,6 +2057,8 @@ ieee80211_he_oper_size(const u8 *he_oper_ie)
oper_len += 3;
if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS)
oper_len++;
+ if (he_oper_params & IEEE80211_HE_OPERATION_6GHZ_OP_INFO)
+ oper_len += 4;
/* Add the first byte (extension ID) to the total length */
oper_len++;
@@ -2487,6 +2490,7 @@ enum ieee80211_eid_ext {
WLAN_EID_EXT_HE_MU_EDCA = 38,
WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME = 52,
WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION = 55,
+ WLAN_EID_EXT_NON_INHERITANCE = 56,
};
/* Action category code */
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 627b788ba0ff..ef0819ced0fc 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -56,9 +56,6 @@ struct br_ip_list {
extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
-typedef int br_should_route_hook_t(struct sk_buff *skb);
-extern br_should_route_hook_t __rcu *br_should_route_hook;
-
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list);
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 7e84351fa2c0..6e9fb1932dde 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -69,6 +69,7 @@ struct ad_sigma_delta {
bool irq_dis;
bool bus_locked;
+ bool keep_cs_asserted;
uint8_t comm;
diff --git a/include/linux/iio/driver.h b/include/linux/iio/driver.h
index 7dfb10ee2669..f54a7bcdefe3 100644
--- a/include/linux/iio/driver.h
+++ b/include/linux/iio/driver.h
@@ -11,6 +11,7 @@
#ifndef _IIO_INKERN_H_
#define _IIO_INKERN_H_
+struct iio_dev;
struct iio_map;
/**
diff --git a/include/linux/iio/frequency/ad9523.h b/include/linux/iio/frequency/ad9523.h
index 12ce3ee427fd..621b93c0bcf9 100644
--- a/include/linux/iio/frequency/ad9523.h
+++ b/include/linux/iio/frequency/ad9523.h
@@ -129,8 +129,8 @@ enum cpole1_capacitor {
* @pll2_ndiv_b_cnt: PLL2 Feedback N-divider, B Counter, range 0..63.
* @pll2_freq_doubler_en: PLL2 frequency doubler enable.
* @pll2_r2_div: PLL2 R2 divider, range 0..31.
- * @pll2_vco_diff_m1: VCO1 divider, range 3..5.
- * @pll2_vco_diff_m2: VCO2 divider, range 3..5.
+ * @pll2_vco_div_m1: VCO1 divider, range 3..5.
+ * @pll2_vco_div_m2: VCO2 divider, range 3..5.
* @rpole2: PLL2 loop filter Rpole resistor value.
* @rzero: PLL2 loop filter Rzero resistor value.
* @cpole1: PLL2 loop filter Cpole capacitor value.
@@ -176,8 +176,8 @@ struct ad9523_platform_data {
unsigned char pll2_ndiv_b_cnt;
bool pll2_freq_doubler_en;
unsigned char pll2_r2_div;
- unsigned char pll2_vco_diff_m1; /* 3..5 */
- unsigned char pll2_vco_diff_m2; /* 3..5 */
+ unsigned char pll2_vco_div_m1; /* 3..5 */
+ unsigned char pll2_vco_div_m2; /* 3..5 */
/* Loop Filter PLL2 */
enum rpole2_resistor rpole2;
diff --git a/include/linux/iio/gyro/itg3200.h b/include/linux/iio/gyro/itg3200.h
index 2a820850f284..0a30fddccfb3 100644
--- a/include/linux/iio/gyro/itg3200.h
+++ b/include/linux/iio/gyro/itg3200.h
@@ -104,6 +104,7 @@
struct itg3200 {
struct i2c_client *i2c;
struct iio_trigger *trig;
+ struct iio_mount_matrix orientation;
};
enum ITG3200_SCAN_INDEX {
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index a74cb177dc6f..bb10c1bee301 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -130,8 +130,8 @@ struct iio_mount_matrix {
ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
const struct iio_chan_spec *chan, char *buf);
-int of_iio_read_mount_matrix(const struct device *dev, const char *propname,
- struct iio_mount_matrix *matrix);
+int iio_read_mount_matrix(struct device *dev, const char *propname,
+ struct iio_mount_matrix *matrix);
typedef const struct iio_mount_matrix *
(iio_get_mount_matrix_t)(const struct iio_dev *indio_dev,
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
index 360da7d18a3d..469a493f7ae0 100644
--- a/include/linux/iio/imu/adis.h
+++ b/include/linux/iio/imu/adis.h
@@ -21,6 +21,7 @@
#define ADIS_REG_PAGE_ID 0x00
struct adis;
+struct adis_burst;
/**
* struct adis_data - ADIS chip variant specific data
@@ -57,6 +58,7 @@ struct adis {
struct iio_trigger *trig;
const struct adis_data *data;
+ struct adis_burst *burst;
struct mutex txrx_lock;
struct spi_message msg;
@@ -232,6 +234,18 @@ int adis_single_conversion(struct iio_dev *indio_dev,
#ifdef CONFIG_IIO_ADIS_LIB_BUFFER
+/**
+ * struct adis_burst - ADIS data for burst transfers
+ * @en: burst mode enabled
+ * @reg_cmd: register command that triggers burst
+ * @extra_len: extra length to account in the SPI RX buffer
+ */
+struct adis_burst {
+ bool en;
+ unsigned int reg_cmd;
+ unsigned int extra_len;
+};
+
int adis_setup_buffer_and_trigger(struct adis *adis,
struct iio_dev *indio_dev, irqreturn_t (*trigger_handler)(int, void *));
void adis_cleanup_buffer_and_trigger(struct adis *adis,
diff --git a/include/linux/iio/timer/stm32-timer-trigger.h b/include/linux/iio/timer/stm32-timer-trigger.h
index d68add80ab86..cbb7c7ae6c0c 100644
--- a/include/linux/iio/timer/stm32-timer-trigger.h
+++ b/include/linux/iio/timer/stm32-timer-trigger.h
@@ -73,6 +73,15 @@
#define TIM17_OC1 "tim17_oc1"
+#if IS_REACHABLE(CONFIG_IIO_STM32_TIMER_TRIGGER)
bool is_stm32_timer_trigger(struct iio_trigger *trig);
-
+#else
+static inline bool is_stm32_timer_trigger(struct iio_trigger *trig)
+{
+#if IS_ENABLED(CONFIG_IIO_STM32_TIMER_TRIGGER)
+ pr_warn_once("stm32-timer-trigger not linked in\n");
+#endif
+ return false;
+}
+#endif
#endif
diff --git a/include/linux/ima.h b/include/linux/ima.h
index dc12fbcf484c..fd9f7cf4cdf5 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -31,7 +31,7 @@ extern void ima_post_path_mknod(struct dentry *dentry);
extern void ima_add_kexec_buffer(struct kimage *image);
#endif
-#if defined(CONFIG_X86) && defined(CONFIG_EFI)
+#if (defined(CONFIG_X86) && defined(CONFIG_EFI)) || defined(CONFIG_S390)
extern bool arch_ima_get_secureboot(void);
extern const char * const *arch_get_ima_policy(void);
#else
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index a64f21a97369..367dc2a0f84a 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -237,6 +237,20 @@ static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev)
return rtnl_dereference(dev->ip_ptr);
}
+/* called with rcu_read_lock or rtnl held */
+static inline bool ip_ignore_linkdown(const struct net_device *dev)
+{
+ struct in_device *in_dev;
+ bool rc = false;
+
+ in_dev = rcu_dereference_rtnl(dev->ip_ptr);
+ if (in_dev &&
+ IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
+ rc = true;
+
+ return rc;
+}
+
static inline struct neigh_parms *__in_dev_arp_parms_get_rcu(const struct net_device *dev)
{
struct in_device *in_dev = __in_dev_get_rcu(dev);
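The new ip_ignore_linkdown() helper only reports the per-device IGNORE_ROUTES_WITH_LINKDOWN setting; how a caller combines it with link state is left to the fib code. A minimal sketch, where nexthop_should_be_skipped() and the link_is_down flag are hypothetical:

static bool nexthop_should_be_skipped(const struct net_device *dev,
				      bool link_is_down)
{
	/* Skip a nexthop whose link is down when the device is configured
	 * to ignore routes with linkdown. */
	return link_is_down && ip_ignore_linkdown(dev);
}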
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index fa364de9db18..6925a18a5ca3 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -489,9 +489,11 @@ struct dmar_domain {
/* Domain ids per IOMMU. Use u16 since
* domain ids are 16 bit wide according
* to VT-d spec, section 9.3 */
+ unsigned int auxd_refcnt; /* Refcount of auxiliary attaching */
bool has_iotlb_device;
struct list_head devices; /* all devices' list */
+ struct list_head auxd; /* link to device's auxiliary list */
struct iova_domain iovad; /* iova's that belong to this domain */
struct dma_pte *pgd; /* virtual address */
@@ -510,6 +512,11 @@ struct dmar_domain {
2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
u64 max_addr; /* maximum mapped address */
+ int default_pasid; /*
+ * The default pasid used for non-SVM
+ * traffic on mediated devices.
+ */
+
struct iommu_domain domain; /* generic domain data structure for
iommu core */
};
@@ -559,6 +566,9 @@ struct device_domain_info {
struct list_head link; /* link to domain siblings */
struct list_head global; /* link to global list */
struct list_head table; /* link to pasid table */
+ struct list_head auxiliary_domains; /* auxiliary domains
+ * attached to this device
+ */
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
u16 pfsid; /* SRIOV physical function source ID */
@@ -568,6 +578,7 @@ struct device_domain_info {
u8 pri_enabled:1;
u8 ats_supported:1;
u8 ats_enabled:1;
+ u8 auxd_enabled:1; /* Multiple domains per device */
u8 ats_qdep;
struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
struct intel_iommu *iommu; /* IOMMU used by this device */
@@ -650,6 +661,7 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain);
int for_each_device_domain(int (*fn)(struct device_domain_info *info,
void *data), void *data);
void iommu_flush_write_buffer(struct intel_iommu *iommu);
+int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
#ifdef CONFIG_INTEL_IOMMU_SVM
int intel_svm_init(struct intel_iommu *iommu);
@@ -679,7 +691,6 @@ struct intel_svm {
struct list_head list;
};
-extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev);
extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
#endif
diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h
new file mode 100644
index 000000000000..16255c2ca2f4
--- /dev/null
+++ b/include/linux/intel-ish-client-if.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Intel ISH client Interface definitions
+ *
+ * Copyright (c) 2019, Intel Corporation.
+ */
+
+#ifndef _INTEL_ISH_CLIENT_IF_H_
+#define _INTEL_ISH_CLIENT_IF_H_
+
+struct ishtp_cl_device;
+struct ishtp_device;
+struct ishtp_cl;
+struct ishtp_fw_client;
+
+/* Client state */
+enum cl_state {
+ ISHTP_CL_INITIALIZING = 0,
+ ISHTP_CL_CONNECTING,
+ ISHTP_CL_CONNECTED,
+ ISHTP_CL_DISCONNECTING,
+ ISHTP_CL_DISCONNECTED
+};
+
+/**
+ * struct ishtp_cl_driver - ISHTP client driver struct
+ * @driver: driver instance on a bus
+ * @name: Name of the device for probe
+ * @guid: GUID of the firmware client this driver works with
+ * @probe: driver callback for device probe
+ * @remove: driver callback on device removal
+ * @reset: driver callback on device reset
+ * @pm: power management callbacks for the client device
+ *
+ * Client drivers define these callbacks to get probed/removed for an ISHTP
+ * client device.
+ */
+struct ishtp_cl_driver {
+ struct device_driver driver;
+ const char *name;
+ const guid_t *guid;
+ int (*probe)(struct ishtp_cl_device *dev);
+ int (*remove)(struct ishtp_cl_device *dev);
+ int (*reset)(struct ishtp_cl_device *dev);
+ const struct dev_pm_ops *pm;
+};
+
+/**
+ * struct ishtp_msg_data - ISHTP message data struct
+ * @size: Size of data in the *data
+ * @data: Pointer to data
+ */
+struct ishtp_msg_data {
+ uint32_t size;
+ unsigned char *data;
+};
+
+/*
+ * struct ishtp_cl_rb - request block structure
+ * @list: Link to list members
+ * @cl: ISHTP client instance
+ * @buffer: message header
+ * @buf_idx: Index into buffer
+ * @read_time: unused at this time
+ */
+struct ishtp_cl_rb {
+ struct list_head list;
+ struct ishtp_cl *cl;
+ struct ishtp_msg_data buffer;
+ unsigned long buf_idx;
+ unsigned long read_time;
+};
+
+int ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
+ struct module *owner);
+void ishtp_cl_driver_unregister(struct ishtp_cl_driver *driver);
+int ishtp_register_event_cb(struct ishtp_cl_device *device,
+ void (*read_cb)(struct ishtp_cl_device *));
+
+/* Get the device * from ishtp device instance */
+struct device *ishtp_device(struct ishtp_cl_device *cl_device);
+/* Trace interface for clients */
+void *ishtp_trace_callback(struct ishtp_cl_device *cl_device);
+/* Get device pointer of PCI device for DMA access */
+struct device *ishtp_get_pci_device(struct ishtp_cl_device *cl_device);
+
+struct ishtp_cl *ishtp_cl_allocate(struct ishtp_cl_device *cl_device);
+void ishtp_cl_free(struct ishtp_cl *cl);
+int ishtp_cl_link(struct ishtp_cl *cl);
+void ishtp_cl_unlink(struct ishtp_cl *cl);
+int ishtp_cl_disconnect(struct ishtp_cl *cl);
+int ishtp_cl_connect(struct ishtp_cl *cl);
+int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length);
+int ishtp_cl_flush_queues(struct ishtp_cl *cl);
+int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb);
+bool ishtp_cl_tx_empty(struct ishtp_cl *cl);
+struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl);
+void *ishtp_get_client_data(struct ishtp_cl *cl);
+void ishtp_set_client_data(struct ishtp_cl *cl, void *data);
+struct ishtp_device *ishtp_get_ishtp_device(struct ishtp_cl *cl);
+void ishtp_set_tx_ring_size(struct ishtp_cl *cl, int size);
+void ishtp_set_rx_ring_size(struct ishtp_cl *cl, int size);
+void ishtp_set_connection_state(struct ishtp_cl *cl, int state);
+void ishtp_cl_set_fw_client_id(struct ishtp_cl *cl, int fw_client_id);
+
+void ishtp_put_device(struct ishtp_cl_device *cl_dev);
+void ishtp_get_device(struct ishtp_cl_device *cl_dev);
+void ishtp_set_drvdata(struct ishtp_cl_device *cl_device, void *data);
+void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device);
+int ishtp_register_event_cb(struct ishtp_cl_device *device,
+ void (*read_cb)(struct ishtp_cl_device *));
+struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev,
+ const guid_t *uuid);
+int ishtp_get_fw_client_id(struct ishtp_fw_client *fw_client);
+int ish_hw_reset(struct ishtp_device *dev);
+#endif /* _INTEL_ISH_CLIENT_IF_H_ */
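To illustrate how this interface is meant to be consumed, here is a minimal client-driver sketch; the my_* names are hypothetical and error handling is reduced to the essentials:

static int my_ishtp_probe(struct ishtp_cl_device *cl_device)
{
	struct ishtp_cl *cl = ishtp_cl_allocate(cl_device);

	if (!cl)
		return -ENOMEM;
	ishtp_set_drvdata(cl_device, cl);
	return 0;
}

static int my_ishtp_remove(struct ishtp_cl_device *cl_device)
{
	struct ishtp_cl *cl = ishtp_get_drvdata(cl_device);

	ishtp_cl_free(cl);
	return 0;
}

static struct ishtp_cl_driver my_ishtp_driver = {
	.name	= "my-ishtp-client",
	.probe	= my_ishtp_probe,
	.remove	= my_ishtp_remove,
};

/* module init/exit would pair:
 *	ishtp_cl_driver_register(&my_ishtp_driver, THIS_MODULE);
 *	ishtp_cl_driver_unregister(&my_ishtp_driver);
 */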
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 690b238a44d5..c7eef32e7739 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -668,31 +668,6 @@ extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data);
-struct tasklet_hrtimer {
- struct hrtimer timer;
- struct tasklet_struct tasklet;
- enum hrtimer_restart (*function)(struct hrtimer *);
-};
-
-extern void
-tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
- enum hrtimer_restart (*function)(struct hrtimer *),
- clockid_t which_clock, enum hrtimer_mode mode);
-
-static inline
-void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
- const enum hrtimer_mode mode)
-{
- hrtimer_start(&ttimer->timer, time, mode);
-}
-
-static inline
-void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
-{
- hrtimer_cancel(&ttimer->timer);
- tasklet_kill(&ttimer->tasklet);
-}
-
/*
* Autoprobing for irqs:
*
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 47d5ae559329..76969a564831 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -12,6 +12,7 @@ enum io_pgtable_fmt {
ARM_64_LPAE_S1,
ARM_64_LPAE_S2,
ARM_V7S,
+ ARM_MALI_LPAE,
IO_PGTABLE_NUM_FMTS,
};
@@ -108,6 +109,11 @@ struct io_pgtable_cfg {
u32 nmrr;
u32 prrr;
} arm_v7s_cfg;
+
+ struct {
+ u64 transtab;
+ u64 memattr;
+ } arm_mali_lpae_cfg;
};
};
@@ -209,5 +215,6 @@ extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns;
#endif /* __IO_PGTABLE_H */
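A GPU driver is expected to allocate the new Mali LPAE format through the usual alloc_io_pgtable_ops() entry point and then program the returned arm_mali_lpae_cfg values into its MMU registers. A rough sketch, assuming driver-provided my_tlb_ops, dev and cookie, with example address widths:

	struct io_pgtable_ops *ops;
	struct io_pgtable_cfg cfg = {
		.pgsize_bitmap	= SZ_4K | SZ_2M,
		.ias		= 48,		/* example input address width */
		.oas		= 40,		/* example output address width */
		.tlb		= &my_tlb_ops,	/* driver TLB flush callbacks */
		.iommu_dev	= dev,
	};

	ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &cfg, cookie);
	if (!ops)
		return -ENOMEM;

	/* cfg.arm_mali_lpae_cfg.transtab and .memattr now hold the values to
	 * write into the GPU's address-space translation registers. */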
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 0fefb5455bda..2103b94cb1bf 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -53,6 +53,8 @@ struct vm_fault;
*/
#define IOMAP_NULL_ADDR -1ULL /* addr is not valid */
+struct iomap_page_ops;
+
struct iomap {
u64 addr; /* disk offset of mapping, bytes */
loff_t offset; /* file offset of mapping, bytes */
@@ -63,12 +65,22 @@ struct iomap {
struct dax_device *dax_dev; /* dax_dev for dax operations */
void *inline_data;
void *private; /* filesystem private */
+ const struct iomap_page_ops *page_ops;
+};
- /*
- * Called when finished processing a page in the mapping returned in
- * this iomap. At least for now this is only supported in the buffered
- * write path.
- */
+/*
+ * When a filesystem sets page_ops in an iomap mapping it returns, page_prepare
+ * and page_done will be called for each page written to. This only applies to
+ * buffered writes as unbuffered writes will not typically have pages
+ * associated with them.
+ *
+ * When page_prepare succeeds, page_done will always be called to do any
+ * cleanup work necessary. In that page_done call, @page will be NULL if the
+ * associated page could not be obtained.
+ */
+struct iomap_page_ops {
+ int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len,
+ struct iomap *iomap);
void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
struct page *page, struct iomap *iomap);
};
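A filesystem opts in by pointing iomap->page_ops at its own table from its ->iomap_begin() implementation. A minimal sketch, where the myfs_* names and the transaction helpers are assumptions rather than part of this patch:

static int myfs_iomap_page_prepare(struct inode *inode, loff_t pos,
				   unsigned len, struct iomap *iomap)
{
	return myfs_trans_begin(inode->i_sb);	/* assumed helper */
}

static void myfs_iomap_page_done(struct inode *inode, loff_t pos,
				 unsigned copied, struct page *page,
				 struct iomap *iomap)
{
	/* @page may be NULL if the page could not be obtained. */
	myfs_trans_end(inode->i_sb);		/* assumed helper */
}

static const struct iomap_page_ops myfs_iomap_page_ops = {
	.page_prepare	= myfs_iomap_page_prepare,
	.page_done	= myfs_iomap_page_done,
};

/* in ->iomap_begin():  iomap->page_ops = &myfs_iomap_page_ops; */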
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index ffbbc7e39cee..a815cf6f6f47 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -48,6 +48,7 @@ struct bus_type;
struct device;
struct iommu_domain;
struct notifier_block;
+struct iommu_sva;
/* iommu fault flags */
#define IOMMU_FAULT_READ 0x0
@@ -55,6 +56,8 @@ struct notifier_block;
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
struct device *, unsigned long, int, void *);
+typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *,
+ void *);
struct iommu_domain_geometry {
dma_addr_t aperture_start; /* First address that can be mapped */
@@ -156,6 +159,33 @@ struct iommu_resv_region {
enum iommu_resv_type type;
};
+/* Per device IOMMU features */
+enum iommu_dev_features {
+ IOMMU_DEV_FEAT_AUX, /* Aux-domain feature */
+ IOMMU_DEV_FEAT_SVA, /* Shared Virtual Addresses */
+};
+
+#define IOMMU_PASID_INVALID (-1U)
+
+/**
+ * struct iommu_sva_ops - device driver callbacks for an SVA context
+ *
+ * @mm_exit: called when the mm is about to be torn down by exit_mmap. After
+ * @mm_exit returns, the device must not issue any more transaction
+ * with the PASID given as argument.
+ *
+ * The @mm_exit handler is allowed to sleep. Be careful about the
+ * locks taken in @mm_exit, because they might lead to deadlocks if
+ * they are also held when dropping references to the mm. Consider the
+ * following call chain:
+ * mutex_lock(A); mmput(mm) -> exit_mm() -> @mm_exit() -> mutex_lock(A)
+ * Using mmput_async() prevents this scenario.
+ *
+ */
+struct iommu_sva_ops {
+ iommu_mm_exit_handler_t mm_exit;
+};
+
#ifdef CONFIG_IOMMU_API
/**
@@ -186,6 +216,14 @@ struct iommu_resv_region {
* @of_xlate: add OF master IDs to iommu grouping
* @is_attach_deferred: Check if domain attach should be deferred from iommu
* driver init to device driver init (default no)
+ * @dev_has/enable/disable_feat: per device entries to check/enable/disable
+ * iommu specific features.
+ * @dev_feat_enabled: check enabled feature
+ * @aux_attach/detach_dev: aux-domain specific attach/detach entries.
+ * @aux_get_pasid: get the pasid given an aux-domain
+ * @sva_bind: Bind process address space to device
+ * @sva_unbind: Unbind process address space from device
+ * @sva_get_pasid: Get PASID associated to a SVA handle
* @pgsize_bitmap: bitmap of all possible supported page sizes
*/
struct iommu_ops {
@@ -230,6 +268,22 @@ struct iommu_ops {
int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);
+ /* Per device IOMMU features */
+ bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
+ bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
+ int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
+ int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);
+
+ /* Aux-domain specific attach/detach entries */
+ int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev);
+ void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
+ int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);
+
+ struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
+ void *drvdata);
+ void (*sva_unbind)(struct iommu_sva *handle);
+ int (*sva_get_pasid)(struct iommu_sva *handle);
+
unsigned long pgsize_bitmap;
};
@@ -392,10 +446,22 @@ struct iommu_fwspec {
const struct iommu_ops *ops;
struct fwnode_handle *iommu_fwnode;
void *iommu_priv;
+ u32 flags;
unsigned int num_ids;
u32 ids[1];
};
+/* ATS is supported */
+#define IOMMU_FWSPEC_PCI_RC_ATS (1 << 0)
+
+/**
+ * struct iommu_sva - handle to a device-mm bond
+ */
+struct iommu_sva {
+ struct device *dev;
+ const struct iommu_sva_ops *ops;
+};
+
int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
@@ -416,6 +482,22 @@ static inline void dev_iommu_fwspec_set(struct device *dev,
int iommu_probe_device(struct device *dev);
void iommu_release_device(struct device *dev);
+bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features f);
+int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
+int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
+bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);
+int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev);
+void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev);
+int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev);
+
+struct iommu_sva *iommu_sva_bind_device(struct device *dev,
+ struct mm_struct *mm,
+ void *drvdata);
+void iommu_sva_unbind_device(struct iommu_sva *handle);
+int iommu_sva_set_ops(struct iommu_sva *handle,
+ const struct iommu_sva_ops *ops);
+int iommu_sva_get_pasid(struct iommu_sva *handle);
+
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
@@ -700,6 +782,68 @@ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
return NULL;
}
+static inline bool
+iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
+{
+ return false;
+}
+
+static inline bool
+iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
+{
+ return false;
+}
+
+static inline int
+iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
+{
+ return -ENODEV;
+}
+
+static inline int
+iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
+{
+ return -ENODEV;
+}
+
+static inline int
+iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
+{
+ return -ENODEV;
+}
+
+static inline void
+iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
+{
+}
+
+static inline int
+iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
+{
+ return -ENODEV;
+}
+
+static inline struct iommu_sva *
+iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
+{
+ return NULL;
+}
+
+static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
+{
+}
+
+static inline int iommu_sva_set_ops(struct iommu_sva *handle,
+ const struct iommu_sva_ops *ops)
+{
+ return -EINVAL;
+}
+
+static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
+{
+ return IOMMU_PASID_INVALID;
+}
+
#endif /* CONFIG_IOMMU_API */
#ifdef CONFIG_IOMMU_DEBUGFS
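Putting the new SVA pieces together, a device driver binds an mm, installs its mm_exit callback and retrieves the PASID roughly as below; the my_* names are illustrative and error handling is abbreviated:

static int my_mm_exit(struct device *dev, struct iommu_sva *handle, void *drvdata)
{
	/* Stop issuing transactions tagged with this PASID; may sleep. */
	return 0;
}

static const struct iommu_sva_ops my_sva_ops = {
	.mm_exit = my_mm_exit,
};

static int my_bind_process(struct device *dev, struct mm_struct *mm)
{
	struct iommu_sva *handle;
	int pasid;

	handle = iommu_sva_bind_device(dev, mm, NULL);
	if (IS_ERR_OR_NULL(handle))
		return handle ? PTR_ERR(handle) : -ENODEV;

	if (iommu_sva_set_ops(handle, &my_sva_ops)) {
		iommu_sva_unbind_device(handle);
		return -EINVAL;
	}

	pasid = iommu_sva_get_pasid(handle);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(handle);
		return -ENODEV;
	}

	/* program @pasid into the device and start issuing DMA */
	return 0;
}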
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 0b93bf96693e..28a5128405f8 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -76,6 +76,14 @@ struct iova_domain {
unsigned long start_pfn; /* Lower limit for this domain */
unsigned long dma_32bit_pfn;
unsigned long max32_alloc_size; /* Size of last failed allocation */
+ struct iova_fq __percpu *fq; /* Flush Queue */
+
+ atomic64_t fq_flush_start_cnt; /* Number of TLB flushes that
+ have been started */
+
+ atomic64_t fq_flush_finish_cnt; /* Number of TLB flushes that
+ have been finished */
+
struct iova anchor; /* rbtree lookup anchor */
struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
@@ -85,14 +93,6 @@ struct iova_domain {
iova_entry_dtor entry_dtor; /* IOMMU driver specific destructor for
iova entry */
- struct iova_fq __percpu *fq; /* Flush Queue */
-
- atomic64_t fq_flush_start_cnt; /* Number of TLB flushes that
- have been started */
-
- atomic64_t fq_flush_finish_cnt; /* Number of TLB flushes that
- have been finished */
-
struct timer_list fq_timer; /* Timer to regularily empty the
flush-queues */
atomic_t fq_timer_on; /* 1 when timer is active, 0
diff --git a/include/linux/irq.h b/include/linux/irq.h
index d6160d479b14..7ae8de5ad0f2 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -195,7 +195,7 @@ struct irq_data {
* IRQD_LEVEL - Interrupt is level triggered
* IRQD_WAKEUP_STATE - Interrupt is configured for wakeup
* from suspend
- * IRDQ_MOVE_PCNTXT - Interrupt can be moved in process
+ * IRQD_MOVE_PCNTXT - Interrupt can be moved in process
* context
* IRQD_IRQ_DISABLED - Disabled state of the interrupt
* IRQD_IRQ_MASKED - Masked state of the interrupt
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 626179077bb0..0f049b384ccd 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -158,8 +158,7 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq);
* Legacy platforms not converted to DT yet must use this to init
* their GIC
*/
-void gic_init(unsigned int nr, int start,
- void __iomem *dist , void __iomem *cpu);
+void gic_init(void __iomem *dist , void __iomem *cpu);
int gicv2m_init(struct fwnode_handle *parent_handle,
struct irq_domain *parent);
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 0f919d5fe84f..c2ffff5f9ae2 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1606,7 +1606,6 @@ static inline u32 jbd2_chksum(journal_t *journal, u32 crc,
JBD_MAX_CHECKSUM_SIZE);
desc.shash.tfm = journal->j_chksum_driver;
- desc.shash.flags = 0;
*(u32 *)desc.ctx = crc;
err = crypto_shash_update(&desc.shash, address, length);
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index fa928242567d..1b6d31da7cbc 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -297,6 +297,7 @@ static inline u64 jiffies_to_nsecs(const unsigned long j)
}
extern u64 jiffies64_to_nsecs(u64 j);
+extern u64 jiffies64_to_msecs(u64 j);
extern unsigned long __msecs_to_jiffies(const unsigned int m);
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
index a49f2b45b3f0..42710d5949ba 100644
--- a/include/linux/jump_label_ratelimit.h
+++ b/include/linux/jump_label_ratelimit.h
@@ -12,21 +12,79 @@ struct static_key_deferred {
struct delayed_work work;
};
-extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
-extern void static_key_deferred_flush(struct static_key_deferred *key);
+struct static_key_true_deferred {
+ struct static_key_true key;
+ unsigned long timeout;
+ struct delayed_work work;
+};
+
+struct static_key_false_deferred {
+ struct static_key_false key;
+ unsigned long timeout;
+ struct delayed_work work;
+};
+
+#define static_key_slow_dec_deferred(x) \
+ __static_key_slow_dec_deferred(&(x)->key, &(x)->work, (x)->timeout)
+#define static_branch_slow_dec_deferred(x) \
+ __static_key_slow_dec_deferred(&(x)->key.key, &(x)->work, (x)->timeout)
+
+#define static_key_deferred_flush(x) \
+ __static_key_deferred_flush((x), &(x)->work)
+
+extern void
+__static_key_slow_dec_deferred(struct static_key *key,
+ struct delayed_work *work,
+ unsigned long timeout);
+extern void __static_key_deferred_flush(void *key, struct delayed_work *work);
extern void
jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
+extern void jump_label_update_timeout(struct work_struct *work);
+
+#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl) \
+ struct static_key_true_deferred name = { \
+ .key = { STATIC_KEY_INIT_TRUE }, \
+ .timeout = (rl), \
+ .work = __DELAYED_WORK_INITIALIZER((name).work, \
+ jump_label_update_timeout, \
+ 0), \
+ }
+
+#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl) \
+ struct static_key_false_deferred name = { \
+ .key = { STATIC_KEY_INIT_FALSE }, \
+ .timeout = (rl), \
+ .work = __DELAYED_WORK_INITIALIZER((name).work, \
+ jump_label_update_timeout, \
+ 0), \
+ }
+
+#define static_branch_deferred_inc(x) static_branch_inc(&(x)->key)
+
#else /* !CONFIG_JUMP_LABEL */
struct static_key_deferred {
struct static_key key;
};
+struct static_key_true_deferred {
+ struct static_key_true key;
+};
+struct static_key_false_deferred {
+ struct static_key_false key;
+};
+#define DEFINE_STATIC_KEY_DEFERRED_TRUE(name, rl) \
+ struct static_key_true_deferred name = { STATIC_KEY_TRUE_INIT }
+#define DEFINE_STATIC_KEY_DEFERRED_FALSE(name, rl) \
+ struct static_key_false_deferred name = { STATIC_KEY_FALSE_INIT }
+
+#define static_branch_slow_dec_deferred(x) static_branch_dec(&(x)->key)
+
static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
STATIC_KEY_CHECK_USE(key);
static_key_slow_dec(&key->key);
}
-static inline void static_key_deferred_flush(struct static_key_deferred *key)
+static inline void static_key_deferred_flush(void *key)
{
STATIC_KEY_CHECK_USE(key);
}
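A plausible use of the deferred-key definitions pairs an immediate increment with a rate-limited decrement; my_feature_key and do_extra_work() are hypothetical:

DEFINE_STATIC_KEY_DEFERRED_FALSE(my_feature_key, HZ);

static void my_feature_get(void)
{
	static_branch_deferred_inc(&my_feature_key);
}

static void my_feature_put(void)
{
	/* The branch is only patched back out after the HZ timeout. */
	static_branch_slow_dec_deferred(&my_feature_key);
}

static void my_fast_path(void)
{
	if (static_branch_unlikely(&my_feature_key.key))
		do_extra_work();	/* assumed helper */
}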
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index 8c3f8c14eeaa..da676cdbd727 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -38,22 +38,13 @@ struct vmcoredd_node {
#ifdef CONFIG_PROC_KCORE
void __init kclist_add(struct kcore_list *, void *, size_t, int type);
-static inline
-void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
-{
- m->vaddr = (unsigned long)vaddr;
- kclist_add(m, addr, sz, KCORE_REMAP);
-}
+
+extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn));
#else
static inline
void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
{
}
-
-static inline
-void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
-{
-}
#endif
#endif /* _LINUX_KCORE_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 34a5036debd3..a3b59d143afb 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -17,6 +17,7 @@
#include <asm/byteorder.h>
#include <asm/div64.h>
#include <uapi/linux/kernel.h>
+#include <asm/div64.h>
#define STACK_MAGIC 0xdeadbeef
@@ -47,8 +48,8 @@
#define u64_to_user_ptr(x) ( \
{ \
- typecheck(u64, x); \
- (void __user *)(uintptr_t)x; \
+ typecheck(u64, (x)); \
+ (void __user *)(uintptr_t)(x); \
} \
)
@@ -175,18 +176,7 @@
#define _RET_IP_ (unsigned long)__builtin_return_address(0)
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
-#ifdef CONFIG_LBDAF
-# define sector_div(a, b) do_div(a, b)
-#else
-# define sector_div(n, b)( \
-{ \
- int _res; \
- _res = (n) % (b); \
- (n) /= (b); \
- _res; \
-} \
-)
-#endif
+#define sector_div(a, b) do_div(a, b)
/**
* upper_32_bits - return bits 32-63 of a number
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index c8893f663470..2bf477f86eb1 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -64,7 +64,7 @@ enum kernfs_root_flag {
KERNFS_ROOT_CREATE_DEACTIVATED = 0x0001,
/*
- * For regular flies, if the opener has CAP_DAC_OVERRIDE, open(2)
+ * For regular files, if the opener has CAP_DAC_OVERRIDE, open(2)
* succeeds regardless of the RW permissions. sysfs had an extra
* layer of enforcement where open(2) fails with -EACCES regardless
* of CAP_DAC_OVERRIDE if the permission doesn't have the
@@ -371,6 +371,11 @@ __poll_t kernfs_generic_poll(struct kernfs_open_file *of,
struct poll_table_struct *pt);
void kernfs_notify(struct kernfs_node *kn);
+int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
+ void *value, size_t size);
+int kernfs_xattr_set(struct kernfs_node *kn, const char *name,
+ const void *value, size_t size, int flags);
+
const void *kernfs_super_ns(struct super_block *sb);
int kernfs_get_tree(struct fs_context *fc);
void kernfs_free_fs_context(struct fs_context *fc);
@@ -473,6 +478,14 @@ static inline int kernfs_setattr(struct kernfs_node *kn,
static inline void kernfs_notify(struct kernfs_node *kn) { }
+static inline int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
+ void *value, size_t size)
+{ return -ENOSYS; }
+
+static inline int kernfs_xattr_set(struct kernfs_node *kn, const char *name,
+ const void *value, size_t size, int flags)
+{ return -ENOSYS; }
+
static inline const void *kernfs_super_ns(struct super_block *sb)
{ return NULL; }
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 1ab0d624fb36..e2ca0a292e21 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -139,7 +139,8 @@ static inline bool kobject_has_children(struct kobject *kobj)
struct kobj_type {
void (*release)(struct kobject *kobj);
const struct sysfs_ops *sysfs_ops;
- struct attribute **default_attrs;
+ struct attribute **default_attrs; /* use default_groups instead */
+ const struct attribute_group **default_groups;
const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
const void *(*namespace)(struct kobject *kobj);
void (*get_ownership)(struct kobject *kobj, kuid_t *uid, kgid_t *gid);
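With default_attrs marked as deprecated in favour of default_groups, a ktype would typically be declared along the following lines; the my_* names are illustrative and ATTRIBUTE_GROUPS() generates my_groups from my_attrs:

static struct attribute *my_attrs[] = {
	&my_foo_attr.attr,	/* assumed struct kobj_attribute */
	NULL,
};
ATTRIBUTE_GROUPS(my);

static struct kobj_type my_ktype = {
	.release	= my_kobj_release,	/* assumed release handler */
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_groups	= my_groups,
};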
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 201f0f2683f2..9a897256e481 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -173,6 +173,7 @@ struct kretprobe_instance {
struct kretprobe *rp;
kprobe_opcode_t *ret_addr;
struct task_struct *task;
+ void *fp;
char data[0];
};
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9d55c63db09b..640a03642766 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -28,6 +28,7 @@
#include <linux/irqbypass.h>
#include <linux/swait.h>
#include <linux/refcount.h>
+#include <linux/nospec.h>
#include <asm/signal.h>
#include <linux/kvm.h>
@@ -513,10 +514,10 @@ static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
- /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
- * the caller has read kvm->online_vcpus before (as is the case
- * for kvm_for_each_vcpu, for example).
- */
+ int num_vcpus = atomic_read(&kvm->online_vcpus);
+ i = array_index_nospec(i, num_vcpus);
+
+ /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
smp_rmb();
return kvm->vcpus[i];
}
@@ -600,6 +601,7 @@ void kvm_put_kvm(struct kvm *kvm);
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
+ as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
lockdep_is_held(&kvm->slots_lock) ||
!refcount_read(&kvm->users_count));
diff --git a/include/linux/list.h b/include/linux/list.h
index 79626b5ab36c..9e9a6403dbe4 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -207,7 +207,7 @@ static inline void list_bulk_move_tail(struct list_head *head,
}
/**
- * list_is_first -- tests whether @ list is the first entry in list @head
+ * list_is_first -- tests whether @list is the first entry in list @head
* @list: the entry to test
* @head: the head of the list
*/
@@ -271,6 +271,24 @@ static inline void list_rotate_left(struct list_head *head)
}
/**
+ * list_rotate_to_front() - Rotate list to specific item.
+ * @list: The desired new front of the list.
+ * @head: The head of the list.
+ *
+ * Rotates list so that @list becomes the new front of the list.
+ */
+static inline void list_rotate_to_front(struct list_head *list,
+ struct list_head *head)
+{
+ /*
+ * Deletes the list head from the list denoted by @head and
+ * places it as the tail of @list; this effectively rotates the
+ * list so that @list is at the front.
+ */
+ list_move_tail(head, list);
+}
+
+/**
* list_is_singular - tests whether a list has just one entry.
* @head: the list to test.
*/
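As a usage sketch for list_rotate_to_front(), with struct item and resume_from() made up for illustration:

struct item {
	struct list_head node;
	int id;
};

static void resume_from(struct list_head *head, struct item *it)
{
	/* After this call, it->node is the first entry on @head. */
	list_rotate_to_front(&it->node, head);
}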
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 53551f470722..a14bab1a0a3e 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -86,7 +86,6 @@ struct klp_func {
struct list_head node;
struct list_head stack_node;
unsigned long old_size, new_size;
- bool kobj_added;
bool nop;
bool patched;
bool transition;
@@ -141,7 +140,6 @@ struct klp_object {
struct list_head func_list;
struct list_head node;
struct module *mod;
- bool kobj_added;
bool dynamic;
bool patched;
};
@@ -170,7 +168,6 @@ struct klp_patch {
struct list_head list;
struct kobject kobj;
struct list_head obj_list;
- bool kobj_added;
bool enabled;
bool forced;
struct work_struct free_work;
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
index 053a4ef3d431..8c0cf1059443 100644
--- a/include/linux/lockd/bind.h
+++ b/include/linux/lockd/bind.h
@@ -46,6 +46,7 @@ struct nlmclnt_initdata {
int noresvport;
struct net *net;
const struct nlmclnt_operations *nlmclnt_ops;
+ const struct cred *cred;
};
/*
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index b065ef406770..c9b422dde542 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -70,6 +70,7 @@ struct nlm_host {
struct nsm_handle *h_nsmhandle; /* NSM status handle */
char *h_addrbuf; /* address eyecatcher */
struct net *net; /* host net */
+ const struct cred *h_cred;
char nodename[UNX_MAXNODENAME + 1];
const struct nlmclnt_operations *h_nlmclnt_ops; /* Callback ops for NLM users */
};
@@ -229,7 +230,8 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
const u32 version,
const char *hostname,
int noresvport,
- struct net *net);
+ struct net *net,
+ const struct cred *cred);
void nlmclnt_release_host(struct nlm_host *);
struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
const char *hostname,
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 79c3873d58ac..6e2377e6c1d6 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -66,6 +66,11 @@ struct lock_class_key {
extern struct lock_class_key __lockdep_no_validate__;
+struct lock_trace {
+ unsigned int nr_entries;
+ unsigned int offset;
+};
+
#define LOCKSTAT_POINTS 4
/*
@@ -100,7 +105,7 @@ struct lock_class {
* IRQ/softirq usage tracking bits:
*/
unsigned long usage_mask;
- struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES];
+ struct lock_trace usage_traces[XXX_LOCK_USAGE_STATES];
/*
* Generation counter, when doing certain classes of graph walking,
@@ -188,7 +193,7 @@ struct lock_list {
struct list_head entry;
struct lock_class *class;
struct lock_class *links_to;
- struct stack_trace trace;
+ struct lock_trace trace;
int distance;
/*
@@ -471,7 +476,7 @@ struct pin_cookie { };
#define NIL_COOKIE (struct pin_cookie){ }
-#define lockdep_pin_lock(l) ({ struct pin_cookie cookie; cookie; })
+#define lockdep_pin_lock(l) ({ struct pin_cookie cookie = { }; cookie; })
#define lockdep_repin_lock(l, c) do { (void)(l); (void)(c); } while (0)
#define lockdep_unpin_lock(l, c) do { (void)(l); (void)(c); } while (0)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index a9b8ff578b6b..47f58cfb6a19 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -127,7 +127,6 @@
* options cleanly (a filesystem may modify the data e.g. with strsep()).
* This also allows the original mount data to be stripped of security-
* specific options to avoid having to make filesystems aware of them.
- * @type the type of filesystem being mounted.
* @orig the original mount data copied from userspace.
* @copy copied data which will be passed to the security module.
* Returns 0 if the copy was successful.
@@ -160,6 +159,10 @@
* Parse a string of security data filling in the opts structure
* @options string containing all mount options known by the LSM
* @opts binary data structure usable by the LSM
+ * @move_mount:
+ * Check permission before a mount is moved.
+ * @from_path indicates the mount that is going to be moved.
+ * @to_path indicates the mountpoint that will be mounted upon.
* @dentry_init_security:
* Compute a context for a dentry as the inode is not yet available
* since NFSv4 has no label backed by an EA anyway.
@@ -320,10 +323,11 @@
* @new_dentry contains the dentry structure of the new link.
* Return 0 if permission is granted.
* @path_chmod:
- * Check for permission to change DAC's permission of a file or directory.
- * @dentry contains the dentry structure.
- * @mnt contains the vfsmnt structure.
- * @mode contains DAC's mode.
+ * Check for permission to change a mode of the file @path. The new
+ * mode is specified in @mode.
+ * @path contains the path structure of the file to change the mode.
+ * @mode contains the new DAC's permission, which is a bitmask of
+ * constants from <include/uapi/linux/stat.h>
* Return 0 if permission is granted.
* @path_chown:
* Check for permission to change owner/group of a file or directory.
@@ -445,6 +449,15 @@
* to abort the copy up. Note that the caller is responsible for reading
* and writing the xattrs as this hook is merely a filter.
*
+ * Security hooks for kernfs node operations
+ *
+ * @kernfs_init_security:
+ * Initialize the security context of a newly created kernfs node based
+ * on its own and its parent's attributes.
+ *
+ * @kn_dir the parent kernfs node
+ * @kn the new child kernfs node
+ *
* Security hooks for file operations
*
* @file_permission:
@@ -502,7 +515,7 @@
* Return 0 if permission is granted.
* @file_lock:
* Check permission before performing file locking operations.
- * Note: this hook mediates both flock and fcntl style locks.
+ * Note the hook mediates both flock and fcntl style locks.
* @file contains the file structure.
* @cmd contains the posix-translated lock operation to perform
* (e.g. F_RDLCK, F_WRLCK).
@@ -645,12 +658,12 @@
* @p contains the task_struct of process.
* @nice contains the new nice value.
* Return 0 if permission is granted.
- * @task_setioprio
+ * @task_setioprio:
* Check permission before setting the ioprio value of @p to @ioprio.
* @p contains the task_struct of process.
* @ioprio contains the new ioprio value
* Return 0 if permission is granted.
- * @task_getioprio
+ * @task_getioprio:
* Check permission before getting the ioprio value of @p.
* @p contains the task_struct of process.
* Return 0 if permission is granted.
@@ -672,17 +685,15 @@
* Return 0 if permission is granted.
* @task_setscheduler:
* Check permission before setting scheduling policy and/or parameters of
- * process @p based on @policy and @lp.
+ * process @p.
* @p contains the task_struct for process.
- * @policy contains the scheduling policy.
- * @lp contains the scheduling parameters.
* Return 0 if permission is granted.
* @task_getscheduler:
* Check permission before obtaining scheduling information for process
* @p.
* @p contains the task_struct for process.
* Return 0 if permission is granted.
- * @task_movememory
+ * @task_movememory:
* Check permission before moving memory owned by process @p.
* @p contains the task_struct for process.
* Return 0 if permission is granted.
@@ -769,9 +780,9 @@
* socket structure, but rather, the socket security information is stored
* in the associated inode. Typically, the inode alloc_security hook will
* allocate and and attach security information to
- * sock->inode->i_security. This hook may be used to update the
- * sock->inode->i_security field with additional information that wasn't
- * available when the inode was allocated.
+ * SOCK_INODE(sock)->i_security. This hook may be used to update the
+ * SOCK_INODE(sock)->i_security field with additional information that
+ * wasn't available when the inode was allocated.
* @sock contains the newly created socket structure.
* @family contains the requested protocol family.
* @type contains the requested communications type.
@@ -876,13 +887,13 @@
* @socket_getpeersec_dgram:
* This hook allows the security module to provide peer socket security
* state for udp sockets on a per-packet basis to userspace via
- * getsockopt SO_GETPEERSEC. The application must first have indicated
- * the IP_PASSSEC option via getsockopt. It can then retrieve the
+ * getsockopt SO_GETPEERSEC. The application must first have indicated
+ * the IP_PASSSEC option via getsockopt. It can then retrieve the
* security state returned by this hook for a packet via the SCM_SECURITY
* ancillary message type.
- * @skb is the skbuff for the packet being queried
- * @secdata is a pointer to a buffer in which to copy the security data
- * @seclen is the maximum length for @secdata
+ * @sock contains the peer socket. May be NULL.
+ * @skb is the sk_buff for the packet being queried. May be NULL.
+ * @secid pointer to store the secid of the packet.
* Return 0 on success, error on failure.
* @sk_alloc_security:
* Allocate and attach a security structure to the sk->sk_security field,
@@ -906,9 +917,9 @@
* @secmark_relabel_packet:
* check if the process should be allowed to relabel packets to
* the given secid
- * @security_secmark_refcount_inc
+ * @secmark_refcount_inc:
* tells the LSM to increment the number of secmark labeling rules loaded
- * @security_secmark_refcount_dec
+ * @secmark_refcount_dec:
* tells the LSM to decrement the number of secmark labeling rules loaded
* @req_classify_flow:
* Sets the flow's sid to the openreq sid.
@@ -1113,41 +1124,41 @@
*
* @msg_queue_alloc_security:
* Allocate and attach a security structure to the
- * msq->q_perm.security field. The security field is initialized to
+ * @perm->security field. The security field is initialized to
* NULL when the structure is first created.
- * @msq contains the message queue structure to be modified.
+ * @perm contains the IPC permissions of the message queue.
* Return 0 if operation was successful and permission is granted.
* @msg_queue_free_security:
- * Deallocate security structure for this message queue.
- * @msq contains the message queue structure to be modified.
+ * Deallocate security field @perm->security for the message queue.
+ * @perm contains the IPC permissions of the message queue.
* @msg_queue_associate:
* Check permission when a message queue is requested through the
- * msgget system call. This hook is only called when returning the
+ * msgget system call. This hook is only called when returning the
* message queue identifier for an existing message queue, not when a
* new message queue is created.
- * @msq contains the message queue to act upon.
+ * @perm contains the IPC permissions of the message queue.
* @msqflg contains the operation control flags.
* Return 0 if permission is granted.
* @msg_queue_msgctl:
* Check permission when a message control operation specified by @cmd
- * is to be performed on the message queue @msq.
- * The @msq may be NULL, e.g. for IPC_INFO or MSG_INFO.
- * @msq contains the message queue to act upon. May be NULL.
+ * is to be performed on the message queue with permissions @perm.
+ * The @perm may be NULL, e.g. for IPC_INFO or MSG_INFO.
+ * @perm contains the IPC permissions of the msg queue. May be NULL.
* @cmd contains the operation to be performed.
* Return 0 if permission is granted.
* @msg_queue_msgsnd:
* Check permission before a message, @msg, is enqueued on the message
- * queue, @msq.
- * @msq contains the message queue to send message to.
+ * queue with permissions @perm.
+ * @perm contains the IPC permissions of the message queue.
* @msg contains the message to be enqueued.
* @msqflg contains operational flags.
* Return 0 if permission is granted.
* @msg_queue_msgrcv:
* Check permission before a message, @msg, is removed from the message
- * queue, @msq. The @target task structure contains a pointer to the
+ * queue. The @target task structure contains a pointer to the
* process that will be receiving the message (not equal to the current
* process when inline receives are being performed).
- * @msq contains the message queue to retrieve message from.
+ * @perm contains the IPC permissions of the message queue.
* @msg contains the message destination.
* @target contains the task structure for recipient process.
* @type contains the type of message requested.
@@ -1157,34 +1168,34 @@
* Security hooks for System V Shared Memory Segments
*
* @shm_alloc_security:
- * Allocate and attach a security structure to the shp->shm_perm.security
- * field. The security field is initialized to NULL when the structure is
+ * Allocate and attach a security structure to the @perm->security
+ * field. The security field is initialized to NULL when the structure is
* first created.
- * @shp contains the shared memory structure to be modified.
+ * @perm contains the IPC permissions of the shared memory structure.
* Return 0 if operation was successful and permission is granted.
* @shm_free_security:
- * Deallocate the security struct for this memory segment.
- * @shp contains the shared memory structure to be modified.
+ * Deallocate the security structure @perm->security for the memory segment.
+ * @perm contains the IPC permissions of the shared memory structure.
* @shm_associate:
* Check permission when a shared memory region is requested through the
- * shmget system call. This hook is only called when returning the shared
+ * shmget system call. This hook is only called when returning the shared
* memory region identifier for an existing region, not when a new shared
* memory region is created.
- * @shp contains the shared memory structure to be modified.
+ * @perm contains the IPC permissions of the shared memory structure.
* @shmflg contains the operation control flags.
* Return 0 if permission is granted.
* @shm_shmctl:
* Check permission when a shared memory control operation specified by
- * @cmd is to be performed on the shared memory region @shp.
- * The @shp may be NULL, e.g. for IPC_INFO or SHM_INFO.
- * @shp contains shared memory structure to be modified.
+ * @cmd is to be performed on the shared memory region with permissions @perm.
+ * The @perm may be NULL, e.g. for IPC_INFO or SHM_INFO.
+ * @perm contains the IPC permissions of the shared memory structure.
* @cmd contains the operation to be performed.
* Return 0 if permission is granted.
* @shm_shmat:
* Check permissions prior to allowing the shmat system call to attach the
- * shared memory segment @shp to the data segment of the calling process.
- * The attaching address is specified by @shmaddr.
- * @shp contains the shared memory structure to be modified.
+ * shared memory segment with permissions @perm to the data segment of the
+ * calling process. The attaching address is specified by @shmaddr.
+ * @perm contains the IPC permissions of the shared memory structure.
* @shmaddr contains the address to attach memory region to.
* @shmflg contains the operational flags.
* Return 0 if permission is granted.
@@ -1192,34 +1203,34 @@
* Security hooks for System V Semaphores
*
* @sem_alloc_security:
- * Allocate and attach a security structure to the sma->sem_perm.security
- * field. The security field is initialized to NULL when the structure is
+ * Allocate and attach a security structure to the @perm->security
+ * field. The security field is initialized to NULL when the structure is
* first created.
- * @sma contains the semaphore structure
+ * @perm contains the IPC permissions of the semaphore.
* Return 0 if operation was successful and permission is granted.
* @sem_free_security:
- * deallocate security struct for this semaphore
- * @sma contains the semaphore structure.
+ * Deallocate security structure @perm->security for the semaphore.
+ * @perm contains the IPC permissions of the semaphore.
* @sem_associate:
* Check permission when a semaphore is requested through the semget
- * system call. This hook is only called when returning the semaphore
+ * system call. This hook is only called when returning the semaphore
* identifier for an existing semaphore, not when a new one must be
* created.
- * @sma contains the semaphore structure.
+ * @perm contains the IPC permissions of the semaphore.
* @semflg contains the operation control flags.
* Return 0 if permission is granted.
* @sem_semctl:
* Check permission when a semaphore operation specified by @cmd is to be
- * performed on the semaphore @sma. The @sma may be NULL, e.g. for
+ * performed on the semaphore. The @perm may be NULL, e.g. for
* IPC_INFO or SEM_INFO.
- * @sma contains the semaphore structure. May be NULL.
+ * @perm contains the IPC permissions of the semaphore. May be NULL.
* @cmd contains the operation to be performed.
* Return 0 if permission is granted.
* @sem_semop:
* Check permissions before performing operations on members of the
- * semaphore set @sma. If the @alter flag is nonzero, the semaphore set
+ * semaphore set. If the @alter flag is nonzero, the semaphore set
* may be modified.
- * @sma contains the semaphore structure.
+ * @perm contains the IPC permissions of the semaphore.
* @sops contains the operations to perform.
* @nsops contains the number of operations to perform.
* @alter contains the flag indicating whether changes are to be made.
@@ -1292,13 +1303,12 @@
* Check permission before accessing the kernel message ring or changing
* logging to the console.
* See the syslog(2) manual page for an explanation of the @type values.
- * @type contains the type of action.
- * @from_file indicates the context of action (if it came from /proc).
+ * @type contains the SYSLOG_ACTION_* constant from <include/linux/syslog.h>
* Return 0 if permission is granted.
* @settime:
* Check permission to change the system time.
- * struct timespec64 is defined in include/linux/time64.h and timezone
- * is defined in include/linux/time.h
+ * struct timespec64 is defined in <include/linux/time64.h> and timezone
+ * is defined in <include/linux/time.h>
* @ts contains new time
* @tz contains new timezone
* Return 0 if permission is granted.
@@ -1340,7 +1350,7 @@
* @audit_rule_init:
* Allocate and initialize an LSM audit rule structure.
* @field contains the required Audit action.
- * Fields flags are defined in include/linux/audit.h
+ * Fields flags are defined in <include/linux/audit.h>
* @op contains the operator the rule uses.
* @rulestr contains the context where the rule will be applied to.
* @lsmrule contains a pointer to receive the result.
@@ -1348,9 +1358,9 @@
* -EINVAL in case of an invalid rule.
*
* @audit_rule_known:
- * Specifies whether given @rule contains any fields related to
+ * Specifies whether given @krule contains any fields related to
* current LSM.
- * @rule contains the audit rule of interest.
+ * @krule contains the audit rule of interest.
* Return 1 in case of relation found, 0 otherwise.
*
* @audit_rule_match:
@@ -1359,13 +1369,13 @@
* @secid contains the security id in question.
* @field contains the field which relates to current LSM.
* @op contains the operator that will be used for matching.
- * @rule points to the audit rule that will be checked against.
+ * @lrule points to the audit rule that will be checked against.
* Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure.
*
* @audit_rule_free:
* Deallocate the LSM audit rule structure previously allocated by
* audit_rule_init.
- * @rule contains the allocated rule
+ * @lsmrule contains the allocated rule
*
* @inode_invalidate_secctx:
* Notify the security module that it must revalidate the security context
@@ -1378,9 +1388,7 @@
* this hook to initialize the security context in its incore inode to the
* value provided by the server for the file when the server returned the
* file's attributes to the client.
- *
* Must be called with inode->i_mutex locked.
- *
* @inode we wish to set the security context of.
* @ctx contains the string which we wish to set in the inode.
* @ctxlen contains the length of @ctx.
@@ -1393,9 +1401,7 @@
* this hook to change the security context in its incore inode and on the
* backing filesystem to a value provided by the client on a SETATTR
* operation.
- *
* Must be called with inode->i_mutex locked.
- *
* @dentry contains the inode we wish to set the security context of.
* @ctx contains the string which we wish to set in the inode.
* @ctxlen contains the length of @ctx.
@@ -1403,7 +1409,6 @@
* @inode_getsecctx:
* On success, returns 0 and fills out @ctx and @ctxlen with the security
* context for the given @inode.
- *
* @inode we wish to get the security context of.
* @ctx is a pointer in which to place the allocated security context.
* @ctxlen points to the place to put the length of @ctx.
@@ -1501,6 +1506,7 @@ union security_list_options {
unsigned long *set_kern_flags);
int (*sb_add_mnt_opt)(const char *option, const char *val, int len,
void **mnt_opts);
+ int (*move_mount)(const struct path *from_path, const struct path *to_path);
int (*dentry_init_security)(struct dentry *dentry, int mode,
const struct qstr *name, void **ctx,
u32 *ctxlen);
@@ -1578,6 +1584,9 @@ union security_list_options {
int (*inode_copy_up)(struct dentry *src, struct cred **new);
int (*inode_copy_up_xattr)(const char *name);
+ int (*kernfs_init_security)(struct kernfs_node *kn_dir,
+ struct kernfs_node *kn);
+
int (*file_permission)(struct file *file, int mask);
int (*file_alloc_security)(struct file *file);
void (*file_free_security)(struct file *file);
@@ -1640,28 +1649,28 @@ union security_list_options {
int (*msg_msg_alloc_security)(struct msg_msg *msg);
void (*msg_msg_free_security)(struct msg_msg *msg);
- int (*msg_queue_alloc_security)(struct kern_ipc_perm *msq);
- void (*msg_queue_free_security)(struct kern_ipc_perm *msq);
- int (*msg_queue_associate)(struct kern_ipc_perm *msq, int msqflg);
- int (*msg_queue_msgctl)(struct kern_ipc_perm *msq, int cmd);
- int (*msg_queue_msgsnd)(struct kern_ipc_perm *msq, struct msg_msg *msg,
+ int (*msg_queue_alloc_security)(struct kern_ipc_perm *perm);
+ void (*msg_queue_free_security)(struct kern_ipc_perm *perm);
+ int (*msg_queue_associate)(struct kern_ipc_perm *perm, int msqflg);
+ int (*msg_queue_msgctl)(struct kern_ipc_perm *perm, int cmd);
+ int (*msg_queue_msgsnd)(struct kern_ipc_perm *perm, struct msg_msg *msg,
int msqflg);
- int (*msg_queue_msgrcv)(struct kern_ipc_perm *msq, struct msg_msg *msg,
+ int (*msg_queue_msgrcv)(struct kern_ipc_perm *perm, struct msg_msg *msg,
struct task_struct *target, long type,
int mode);
- int (*shm_alloc_security)(struct kern_ipc_perm *shp);
- void (*shm_free_security)(struct kern_ipc_perm *shp);
- int (*shm_associate)(struct kern_ipc_perm *shp, int shmflg);
- int (*shm_shmctl)(struct kern_ipc_perm *shp, int cmd);
- int (*shm_shmat)(struct kern_ipc_perm *shp, char __user *shmaddr,
+ int (*shm_alloc_security)(struct kern_ipc_perm *perm);
+ void (*shm_free_security)(struct kern_ipc_perm *perm);
+ int (*shm_associate)(struct kern_ipc_perm *perm, int shmflg);
+ int (*shm_shmctl)(struct kern_ipc_perm *perm, int cmd);
+ int (*shm_shmat)(struct kern_ipc_perm *perm, char __user *shmaddr,
int shmflg);
- int (*sem_alloc_security)(struct kern_ipc_perm *sma);
- void (*sem_free_security)(struct kern_ipc_perm *sma);
- int (*sem_associate)(struct kern_ipc_perm *sma, int semflg);
- int (*sem_semctl)(struct kern_ipc_perm *sma, int cmd);
- int (*sem_semop)(struct kern_ipc_perm *sma, struct sembuf *sops,
+ int (*sem_alloc_security)(struct kern_ipc_perm *perm);
+ void (*sem_free_security)(struct kern_ipc_perm *perm);
+ int (*sem_associate)(struct kern_ipc_perm *perm, int semflg);
+ int (*sem_semctl)(struct kern_ipc_perm *perm, int cmd);
+ int (*sem_semop)(struct kern_ipc_perm *perm, struct sembuf *sops,
unsigned nsops, int alter);
int (*netlink_send)(struct sock *sk, struct sk_buff *skb);
@@ -1835,6 +1844,7 @@ struct security_hook_heads {
struct hlist_head sb_set_mnt_opts;
struct hlist_head sb_clone_mnt_opts;
struct hlist_head sb_add_mnt_opt;
+ struct hlist_head move_mount;
struct hlist_head dentry_init_security;
struct hlist_head dentry_create_files_as;
#ifdef CONFIG_SECURITY_PATH
@@ -1879,6 +1889,7 @@ struct security_hook_heads {
struct hlist_head inode_getsecid;
struct hlist_head inode_copy_up;
struct hlist_head inode_copy_up_xattr;
+ struct hlist_head kernfs_init_security;
struct hlist_head file_permission;
struct hlist_head file_alloc_security;
struct hlist_head file_free_security;
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 73d04743a2bb..af6b11d4d673 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -34,5 +34,6 @@
/* struct phy_device dev_flags definitions */
#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001
#define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002
+#define MARVELL_PHY_LED0_LINK_LED1_ACTIVE 0x00000004
#endif /* _MARVELL_PHY_H */
diff --git a/include/linux/math64.h b/include/linux/math64.h
index bb2c84afb80c..65bef21cdddb 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -284,4 +284,17 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
#define DIV64_U64_ROUND_UP(ll, d) \
({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
+/**
+ * DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer
+ * @dividend: unsigned 64bit dividend
+ * @divisor: unsigned 64bit divisor
+ *
+ * Divide unsigned 64bit dividend by unsigned 64bit divisor
+ * and round to closest integer.
+ *
+ * Return: dividend / divisor rounded to nearest integer
+ */
+#define DIV64_U64_ROUND_CLOSEST(dividend, divisor) \
+ ({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); })
+
#endif /* _LINUX_MATH64_H */
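
The rounding behaviour of the new DIV64_U64_ROUND_CLOSEST() macro is easiest to see with concrete numbers. The sketch below mirrors its arithmetic in plain userspace C; the only assumption is that div64_u64() collapses to the native 64-bit division operator, which is what it does on 64-bit hosts.

#include <stdint.h>
#include <stdio.h>

/* Userspace mirror of DIV64_U64_ROUND_CLOSEST(): adding half the divisor
 * before the truncating division rounds the result to the nearest integer. */
static uint64_t div64_u64_round_closest(uint64_t dividend, uint64_t divisor)
{
        return (dividend + divisor / 2) / divisor;
}

int main(void)
{
        /* 7 / 2 = 3.5  -> 4 (rounds up)   */
        printf("%llu\n", (unsigned long long)div64_u64_round_closest(7, 2));
        /* 7 / 3 = 2.33 -> 2 (rounds down) */
        printf("%llu\n", (unsigned long long)div64_u64_round_closest(7, 3));
        return 0;
}
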
diff --git a/include/linux/mdev.h b/include/linux/mdev.h
index d7aee90e5da5..89a52fd5756e 100644
--- a/include/linux/mdev.h
+++ b/include/linux/mdev.h
@@ -15,6 +15,20 @@
struct mdev_device;
+/*
+ * Called by the parent device driver to set the device which represents
+ * this mdev in IOMMU protection scope. By default, the iommu device is
+ * NULL, which indicates that vendor-defined isolation is used.
+ *
+ * @dev: the mediated device that iommu will isolate.
+ * @iommu_device: a pci device which represents the iommu for @dev.
+ *
+ * Return 0 for success, otherwise negative error value.
+ */
+int mdev_set_iommu_device(struct device *dev, struct device *iommu_device);
+
+struct device *mdev_get_iommu_device(struct device *dev);
+
/**
* struct mdev_parent_ops - Structure to be registered for each parent device to
* register the device to mdev module.
@@ -118,21 +132,20 @@ struct mdev_driver {
#define to_mdev_driver(drv) container_of(drv, struct mdev_driver, driver)
-extern void *mdev_get_drvdata(struct mdev_device *mdev);
-extern void mdev_set_drvdata(struct mdev_device *mdev, void *data);
-extern const guid_t *mdev_uuid(struct mdev_device *mdev);
+void *mdev_get_drvdata(struct mdev_device *mdev);
+void mdev_set_drvdata(struct mdev_device *mdev, void *data);
+const guid_t *mdev_uuid(struct mdev_device *mdev);
extern struct bus_type mdev_bus_type;
-extern int mdev_register_device(struct device *dev,
- const struct mdev_parent_ops *ops);
-extern void mdev_unregister_device(struct device *dev);
+int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops);
+void mdev_unregister_device(struct device *dev);
-extern int mdev_register_driver(struct mdev_driver *drv, struct module *owner);
-extern void mdev_unregister_driver(struct mdev_driver *drv);
+int mdev_register_driver(struct mdev_driver *drv, struct module *owner);
+void mdev_unregister_driver(struct mdev_driver *drv);
-extern struct device *mdev_parent_dev(struct mdev_device *mdev);
-extern struct device *mdev_dev(struct mdev_device *mdev);
-extern struct mdev_device *mdev_from_dev(struct device *dev);
+struct device *mdev_parent_dev(struct mdev_device *mdev);
+struct device *mdev_dev(struct mdev_device *mdev);
+struct mdev_device *mdev_from_dev(struct device *dev);
#endif /* MDEV_H */
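
To illustrate where the new mdev_set_iommu_device() hook is meant to be called, here is a hedged sketch of a parent driver's create callback; the my_parent structure, its pdev field and the drvdata layout are invented for the example, and only the mdev helpers declared above are assumed.

#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/mdev.h>
#include <linux/pci.h>

/* Hypothetical parent-driver state, not part of the mdev API. */
struct my_parent {
        struct pci_dev *pdev;
};

static int my_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
{
        struct my_parent *parent = dev_get_drvdata(mdev_parent_dev(mdev));
        int ret;

        /* Tell vfio/iommu that the parent PCI device is the isolation unit. */
        ret = mdev_set_iommu_device(mdev_dev(mdev), &parent->pdev->dev);
        if (ret)
                return ret;

        mdev_set_drvdata(mdev, parent);
        return 0;
}
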
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 3e99ae3ed87f..9dc16d5705a1 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -39,7 +39,8 @@ struct mdio_device {
/* Bus address of the MDIO device (0-31) */
int addr;
int flags;
- struct gpio_desc *reset;
+ struct gpio_desc *reset_gpio;
+ struct reset_control *reset_ctrl;
unsigned int reset_assert_delay;
unsigned int reset_deassert_delay;
};
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index 03b6ba2a63f8..52aa4821093a 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -1,4 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2013-2016, Intel Corporation. All rights reserved.
+ */
#ifndef _LINUX_MEI_CL_BUS_H
#define _LINUX_MEI_CL_BUS_H
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 294d5d80e150..676d3900e1bd 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -96,13 +96,14 @@ struct memblock {
extern struct memblock memblock;
extern int memblock_debug;
-#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
void memblock_discard(void);
#else
#define __init_memblock
#define __initdata_memblock
+static inline void memblock_discard(void) {}
#endif
#define memblock_dbg(fmt, ...) \
@@ -240,6 +241,47 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
+ unsigned long *out_spfn,
+ unsigned long *out_epfn);
+/**
+ * for_each_free_mem_pfn_range_in_zone - iterate through zone specific free
+ * memblock areas
+ * @i: u64 used as loop variable
+ * @zone: zone in which all of the memory blocks reside
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ *
+ * Walks over free (memory && !reserved) areas of memblock in a specific
+ * zone. Available once memblock and an empty zone are initialized. The main
+ * assumption is that the zone start, end, and pgdat have been associated.
+ * This way we can use the zone to determine the NUMA node, and whether a
+ * given part of the memblock is valid for the zone.
+ */
+#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end) \
+ for (i = 0, \
+ __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end); \
+ i != U64_MAX; \
+ __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
+
+/**
+ * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific
+ * free memblock areas from a given point
+ * @i: u64 used as loop variable
+ * @zone: zone in which all of the memory blocks reside
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ *
+ * Walks over free (memory && !reserved) areas of memblock in a specific
+ * zone, continuing from the current position. Available as soon as memblock is
+ * initialized.
+ */
+#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
+ for (; i != U64_MAX; \
+ __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
+#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
+
/**
* for_each_free_mem_range - iterate through free memblock areas
* @i: u64 used as loop variable
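
A hedged sketch of how the new zone-bound iterator might be used by deferred struct-page initialization: walk every free PFN span that falls inside a zone and hand it to a per-range worker. It assumes CONFIG_DEFERRED_STRUCT_PAGE_INIT; init_range() and the counter are placeholders, not kernel symbols.

#include <linux/memblock.h>
#include <linux/mmzone.h>

/* Placeholder for whatever per-range work the caller performs. */
static unsigned long init_range(unsigned long spfn, unsigned long epfn);

static unsigned long init_zone_free_ranges(struct zone *zone)
{
        unsigned long spfn, epfn, nr_done = 0;
        u64 i;

        /* Visits only (memory && !reserved) spans that lie inside @zone. */
        for_each_free_mem_pfn_range_in_zone(i, zone, &spfn, &epfn)
                nr_done += init_range(spfn, epfn);

        return nr_done;
}
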
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 1f3d880b7ca1..30561a954ee0 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -501,22 +501,6 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
int zid, int nr_pages);
-unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
- int nid, unsigned int lru_mask);
-
-static inline
-unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
-{
- struct mem_cgroup_per_node *mz;
- unsigned long nr_pages = 0;
- int zid;
-
- mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- for (zid = 0; zid < MAX_NR_ZONES; zid++)
- nr_pages += mz->lru_zone_size[zid][lru];
- return nr_pages;
-}
-
static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
enum lru_list lru, int zone_idx)
@@ -566,7 +550,10 @@ struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);
-/* idx can be of type enum memcg_stat_item or node_stat_item */
+/*
+ * idx can be of type enum memcg_stat_item or node_stat_item.
+ * Keep in sync with memcg_exact_page_state().
+ */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
int idx)
{
@@ -957,11 +944,6 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
return true;
}
-static inline unsigned long
-mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
-{
- return 0;
-}
static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
enum lru_list lru, int zone_idx)
@@ -969,13 +951,6 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
return 0;
}
-static inline unsigned long
-mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
- int nid, unsigned int lru_mask)
-{
- return 0;
-}
-
static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
return 0;
@@ -1114,6 +1089,12 @@ static inline void count_memcg_events(struct mem_cgroup *memcg,
{
}
+static inline void __count_memcg_events(struct mem_cgroup *memcg,
+ enum vm_event_item idx,
+ unsigned long count)
+{
+}
+
static inline void count_memcg_page_event(struct page *page,
int idx)
{
diff --git a/include/linux/memory.h b/include/linux/memory.h
index a6ddefc60517..e1dc1bb2b787 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -113,7 +113,7 @@ extern int register_memory_isolate_notifier(struct notifier_block *nb);
extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
int hotplug_memory_register(int nid, struct mem_section *section);
#ifdef CONFIG_MEMORY_HOTREMOVE
-extern int unregister_memory_section(struct mem_section *);
+extern void unregister_memory_section(struct mem_section *);
#endif
extern int memory_dev_init(void);
extern int memory_notify(unsigned long val, void *v);
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 8ade08c50d26..ae892eef8b82 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -54,6 +54,16 @@ enum {
};
/*
+ * Restrictions for the memory hotplug:
+ * flags: MHP_ flags
+ * altmap: alternative allocator for memmap array
+ */
+struct mhp_restrictions {
+ unsigned long flags;
+ struct vmem_altmap *altmap;
+};
+
+/*
* Zone resizing functions
*
 * Note: any attempt to resize a zone should have pgdat_resize_lock()
@@ -87,7 +97,8 @@ extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
extern int online_pages(unsigned long, unsigned long, int);
extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
unsigned long *valid_start, unsigned long *valid_end);
-extern void __offline_isolated_pages(unsigned long, unsigned long);
+extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
+ unsigned long end_pfn);
typedef void (*online_page_callback_t)(struct page *page, unsigned int order);
@@ -100,6 +111,8 @@ extern void __online_page_free(struct page *page);
extern int try_online_node(int nid);
+extern int arch_add_memory(int nid, u64 start, u64 size,
+ struct mhp_restrictions *restrictions);
extern u64 max_mem_size;
extern bool memhp_auto_online;
@@ -111,26 +124,33 @@ static inline bool movable_node_is_enabled(void)
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-extern int arch_remove_memory(int nid, u64 start, u64 size,
- struct vmem_altmap *altmap);
-extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
- unsigned long nr_pages, struct vmem_altmap *altmap);
+extern void arch_remove_memory(int nid, u64 start, u64 size,
+ struct vmem_altmap *altmap);
+extern void __remove_pages(struct zone *zone, unsigned long start_pfn,
+ unsigned long nr_pages, struct vmem_altmap *altmap);
#endif /* CONFIG_MEMORY_HOTREMOVE */
+/*
+ * Do we want sysfs memblock files created? This will allow userspace to online
+ * and offline memory explicitly. Lack of this bit means that the caller has to
+ * call move_pfn_range_to_zone to finish the initialization.
+ */
+
+#define MHP_MEMBLOCK_API (1<<0)
+
/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
- struct vmem_altmap *altmap, bool want_memblock);
+ struct mhp_restrictions *restrictions);
#ifndef CONFIG_ARCH_HAS_ADD_PAGES
static inline int add_pages(int nid, unsigned long start_pfn,
- unsigned long nr_pages, struct vmem_altmap *altmap,
- bool want_memblock)
+ unsigned long nr_pages, struct mhp_restrictions *restrictions)
{
- return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+ return __add_pages(nid, start_pfn, nr_pages, restrictions);
}
#else /* ARCH_HAS_ADD_PAGES */
int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
- struct vmem_altmap *altmap, bool want_memblock);
+ struct mhp_restrictions *restrictions);
#endif /* ARCH_HAS_ADD_PAGES */
#ifdef CONFIG_NUMA
@@ -331,8 +351,6 @@ extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
extern int __add_memory(int nid, u64 start, u64 size);
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource);
-extern int arch_add_memory(int nid, u64 start, u64 size,
- struct vmem_altmap *altmap, bool want_memblock);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages, struct vmem_altmap *altmap);
extern bool is_memblock_offlined(struct memory_block *mem);
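
The reworked hotplug entry points are easiest to read as a pair: the generic caller packs its policy into struct mhp_restrictions, and the architecture forwards that object to __add_pages(). A hedged sketch using only the declarations above; the direct-map step real architectures perform is reduced to a comment.

#include <linux/memory_hotplug.h>
#include <linux/mm.h>

/* Sketch of an architecture's arch_add_memory(): flags and altmap arrive
 * pre-packed in @restrictions and are simply forwarded to __add_pages(). */
int arch_add_memory(int nid, u64 start, u64 size,
                    struct mhp_restrictions *restrictions)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        /* A real architecture would map the range into the direct map here. */
        return __add_pages(nid, start_pfn, nr_pages, restrictions);
}

/* ...while the generic hotplug path builds the restrictions like so: */
static int example_hotplug(int nid, u64 start, u64 size)
{
        struct mhp_restrictions restrictions = {
                .flags = MHP_MEMBLOCK_API,      /* create sysfs memblock files */
        };

        return arch_add_memory(nid, start, size, &restrictions);
}
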
diff --git a/include/linux/mfd/altera-sysmgr.h b/include/linux/mfd/altera-sysmgr.h
new file mode 100644
index 000000000000..b1ef11a83872
--- /dev/null
+++ b/include/linux/mfd/altera-sysmgr.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2012 Linaro Ltd.
+ */
+
+#ifndef __LINUX_MFD_ALTERA_SYSMGR_H__
+#define __LINUX_MFD_ALTERA_SYSMGR_H__
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/firmware/intel/stratix10-smc.h>
+
+struct device_node;
+
+#ifdef CONFIG_MFD_ALTERA_SYSMGR
+struct regmap *altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np,
+ const char *property);
+#else
+static inline struct regmap *
+altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np,
+ const char *property)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+#endif
+
+#endif /* __LINUX_MFD_ALTERA_SYSMGR_H__ */
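
A consumer of the new header would typically resolve the System Manager regmap from a device-tree phandle at probe time. A minimal sketch, in which the property name and the wrapper function are made up for illustration:

#include <linux/mfd/altera-sysmgr.h>
#include <linux/of.h>
#include <linux/regmap.h>

static int my_get_sysmgr(struct device_node *np, struct regmap **out)
{
        struct regmap *map;

        /* "altr,sysmgr-syscon" is a hypothetical property name. */
        map = altr_sysmgr_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
        if (IS_ERR(map))
                return PTR_ERR(map);    /* -ENOTSUPP when the MFD is disabled */

        *out = map;
        return 0;
}
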
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 8f2a8918bfa3..cfa78bb4990f 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -23,7 +23,10 @@
#include <linux/mutex.h>
#define CROS_EC_DEV_NAME "cros_ec"
+#define CROS_EC_DEV_FP_NAME "cros_fp"
#define CROS_EC_DEV_PD_NAME "cros_pd"
+#define CROS_EC_DEV_TP_NAME "cros_tp"
+#define CROS_EC_DEV_ISH_NAME "cros_ish"
/*
* The EC is unresponsive for a time after a reboot command. Add a
@@ -120,6 +123,7 @@ struct cros_ec_command {
* @pkt_xfer: Send packet to EC and get response.
* @lock: One transaction at a time.
* @mkbp_event_supported: True if this EC supports the MKBP event protocol.
+ * @host_sleep_v1: True if this EC supports the sleep v1 command.
* @event_notifier: Interrupt event notifier for transport devices.
* @event_data: Raw payload transferred with the MKBP event.
* @event_size: Size in bytes of the event data.
@@ -153,6 +157,7 @@ struct cros_ec_device {
struct cros_ec_command *msg);
struct mutex lock;
bool mkbp_event_supported;
+ bool host_sleep_v1;
struct blocking_notifier_head event_notifier;
struct ec_response_get_next_event_v1 event_data;
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index fc91082d4c35..dcec96f01879 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -840,7 +840,7 @@ enum ec_feature_code {
* (Common Smart Battery System Interface Specification)
*/
EC_FEATURE_SMART_BATTERY = 18,
- /* EC can dectect when the host hangs. */
+ /* EC can detect when the host hangs. */
EC_FEATURE_HANG_DETECT = 19,
/* Report power information, for pit only */
EC_FEATURE_PMU = 20,
@@ -852,10 +852,42 @@ enum ec_feature_code {
EC_FEATURE_USB_MUX = 23,
/* Motion Sensor code has an internal software FIFO */
EC_FEATURE_MOTION_SENSE_FIFO = 24,
+ /* Support temporary secure vstore */
+ EC_FEATURE_VSTORE = 25,
+ /* EC decides on USB-C SS mux state, muxes configured by host */
+ EC_FEATURE_USBC_SS_MUX_VIRTUAL = 26,
/* EC has RTC feature that can be controlled by host commands */
EC_FEATURE_RTC = 27,
+ /* The MCU exposes a Fingerprint sensor */
+ EC_FEATURE_FINGERPRINT = 28,
+ /* The MCU exposes a Touchpad */
+ EC_FEATURE_TOUCHPAD = 29,
+ /* The MCU has RWSIG task enabled */
+ EC_FEATURE_RWSIG = 30,
+ /* EC has device events support */
+ EC_FEATURE_DEVICE_EVENT = 31,
+ /* EC supports the unified wake masks for LPC/eSPI systems */
+ EC_FEATURE_UNIFIED_WAKE_MASKS = 32,
+ /* EC supports 64-bit host events */
+ EC_FEATURE_HOST_EVENT64 = 33,
+ /* EC runs code in RAM (not in place, a.k.a. XIP) */
+ EC_FEATURE_EXEC_IN_RAM = 34,
/* EC supports CEC commands */
EC_FEATURE_CEC = 35,
+ /* EC supports tight sensor timestamping. */
+ EC_FEATURE_MOTION_SENSE_TIGHT_TIMESTAMPS = 36,
+ /*
+ * EC supports tablet mode detection aligned to Chrome and allows
+ * setting of threshold by host command using
+ * MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE.
+ */
+ EC_FEATURE_REFINED_TABLET_MODE_HYSTERESIS = 37,
+ /* EC supports audio codec. */
+ EC_FEATURE_AUDIO_CODEC = 38,
+ /* EC Supports SCP. */
+ EC_FEATURE_SCP = 39,
+ /* The MCU is an Integrated Sensor Hub */
+ EC_FEATURE_ISH = 40,
};
#define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32))
@@ -2729,6 +2761,63 @@ struct ec_params_host_sleep_event {
uint8_t sleep_event;
} __packed;
+/*
+ * Use a default timeout value (CONFIG_SLEEP_TIMEOUT_MS) for detecting sleep
+ * transition failures
+ */
+#define EC_HOST_SLEEP_TIMEOUT_DEFAULT 0
+
+/* Disable timeout detection for this sleep transition */
+#define EC_HOST_SLEEP_TIMEOUT_INFINITE 0xFFFF
+
+struct ec_params_host_sleep_event_v1 {
+ /* The type of sleep being entered or exited. */
+ uint8_t sleep_event;
+
+ /* Padding */
+ uint8_t reserved;
+ union {
+ /* Parameters that apply for suspend messages. */
+ struct {
+ /*
+ * The timeout in milliseconds between when this message
+ * is received and when the EC will declare sleep
+ * transition failure if the sleep signal is not
+ * asserted.
+ */
+ uint16_t sleep_timeout_ms;
+ } suspend_params;
+
+ /* No parameters for non-suspend messages. */
+ };
+} __packed;
+
+/* A timeout occurred when this bit is set */
+#define EC_HOST_RESUME_SLEEP_TIMEOUT 0x80000000
+
+/*
+ * The mask defining which bits correspond to the number of sleep transitions,
+ * as well as the maximum number of suspend line transitions that will be
+ * reported back to the host.
+ */
+#define EC_HOST_RESUME_SLEEP_TRANSITIONS_MASK 0x7FFFFFFF
+
+struct ec_response_host_sleep_event_v1 {
+ union {
+ /* Response fields that apply for resume messages. */
+ struct {
+ /*
+ * The number of sleep power signal transitions that
+ * occurred since the suspend message. The high bit
+ * indicates a timeout occurred.
+ */
+ uint32_t sleep_transitions;
+ } resume_response;
+
+ /* No response fields for non-resume messages. */
+ };
+} __packed;
+
/*****************************************************************************/
/* Smart battery pass-through */
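
The v1 sleep-event handshake added above is symmetric: on suspend the host sends the sleep type plus an optional timeout, and on resume it reads back a transition count whose top bit flags a timeout. A hedged sketch of the encode/decode halves, using only the structures and masks from this header; the actual EC transport call is elided.

#include <linux/mfd/cros_ec_commands.h>
#include <linux/types.h>

/* Build a v1 suspend notification using the default timeout. */
static void fill_suspend_event(struct ec_params_host_sleep_event_v1 *req,
                               u8 sleep_event)
{
        req->sleep_event = sleep_event;
        req->reserved = 0;
        req->suspend_params.sleep_timeout_ms = EC_HOST_SLEEP_TIMEOUT_DEFAULT;
}

/* Decode the resume response into a transition count and a timeout flag. */
static void parse_resume_event(const struct ec_response_host_sleep_event_v1 *rsp,
                               u32 *transitions, bool *timed_out)
{
        u32 val = rsp->resume_response.sleep_transitions;

        *transitions = val & EC_HOST_RESUME_SLEEP_TRANSITIONS_MASK;
        *timed_out = !!(val & EC_HOST_RESUME_SLEEP_TIMEOUT);
}
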
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
index 71b09154e2db..5cd06ab26352 100644
--- a/include/linux/mfd/da9063/core.h
+++ b/include/linux/mfd/da9063/core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Definitions for DA9063 MFD driver
*
@@ -5,12 +6,6 @@
*
* Author: Michal Hajduk, Dialog Semiconductor
* Author: Krystian Garbaciak, Dialog Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __MFD_DA9063_CORE_H__
diff --git a/include/linux/mfd/da9063/registers.h b/include/linux/mfd/da9063/registers.h
index 5d42859cb441..ba706b0e28c2 100644
--- a/include/linux/mfd/da9063/registers.h
+++ b/include/linux/mfd/da9063/registers.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Registers definition for DA9063 modules
*
@@ -5,12 +6,6 @@
*
* Author: Michal Hajduk, Dialog Semiconductor
* Author: Krystian Garbaciak, Dialog Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef _DA9063_REG_H
@@ -215,9 +210,9 @@
/* DA9063 Configuration registers */
/* OTP */
-#define DA9063_REG_OPT_COUNT 0x101
-#define DA9063_REG_OPT_ADDR 0x102
-#define DA9063_REG_OPT_DATA 0x103
+#define DA9063_REG_OTP_CONT 0x101
+#define DA9063_REG_OTP_ADDR 0x102
+#define DA9063_REG_OTP_DATA 0x103
/* Customer Trim and Configuration */
#define DA9063_REG_T_OFFSET 0x104
diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h
index ad2a9a852aea..82407fe85ca2 100644
--- a/include/linux/mfd/max77620.h
+++ b/include/linux/mfd/max77620.h
@@ -136,8 +136,8 @@
#define MAX77620_FPS_PERIOD_MIN_US 40
#define MAX20024_FPS_PERIOD_MIN_US 20
-#define MAX77620_FPS_PERIOD_MAX_US 2560
-#define MAX20024_FPS_PERIOD_MAX_US 5120
+#define MAX20024_FPS_PERIOD_MAX_US 2560
+#define MAX77620_FPS_PERIOD_MAX_US 5120
#define MAX77620_REG_FPS_GPIO1 0x54
#define MAX77620_REG_FPS_GPIO2 0x55
@@ -324,6 +324,7 @@ enum max77620_fps_src {
enum max77620_chip_id {
MAX77620,
MAX20024,
+ MAX77663,
};
struct max77620_chip {
diff --git a/include/linux/mfd/max77650.h b/include/linux/mfd/max77650.h
new file mode 100644
index 000000000000..c809e211a8cd
--- /dev/null
+++ b/include/linux/mfd/max77650.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 BayLibre SAS
+ * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ *
+ * Common definitions for MAXIM 77650/77651 charger/power-supply.
+ */
+
+#ifndef MAX77650_H
+#define MAX77650_H
+
+#include <linux/bits.h>
+
+#define MAX77650_REG_INT_GLBL 0x00
+#define MAX77650_REG_INT_CHG 0x01
+#define MAX77650_REG_STAT_CHG_A 0x02
+#define MAX77650_REG_STAT_CHG_B 0x03
+#define MAX77650_REG_ERCFLAG 0x04
+#define MAX77650_REG_STAT_GLBL 0x05
+#define MAX77650_REG_INTM_GLBL 0x06
+#define MAX77650_REG_INTM_CHG 0x07
+#define MAX77650_REG_CNFG_GLBL 0x10
+#define MAX77650_REG_CID 0x11
+#define MAX77650_REG_CNFG_GPIO 0x12
+#define MAX77650_REG_CNFG_CHG_A 0x18
+#define MAX77650_REG_CNFG_CHG_B 0x19
+#define MAX77650_REG_CNFG_CHG_C 0x1a
+#define MAX77650_REG_CNFG_CHG_D 0x1b
+#define MAX77650_REG_CNFG_CHG_E 0x1c
+#define MAX77650_REG_CNFG_CHG_F 0x1d
+#define MAX77650_REG_CNFG_CHG_G 0x1e
+#define MAX77650_REG_CNFG_CHG_H 0x1f
+#define MAX77650_REG_CNFG_CHG_I 0x20
+#define MAX77650_REG_CNFG_SBB_TOP 0x28
+#define MAX77650_REG_CNFG_SBB0_A 0x29
+#define MAX77650_REG_CNFG_SBB0_B 0x2a
+#define MAX77650_REG_CNFG_SBB1_A 0x2b
+#define MAX77650_REG_CNFG_SBB1_B 0x2c
+#define MAX77650_REG_CNFG_SBB2_A 0x2d
+#define MAX77650_REG_CNFG_SBB2_B 0x2e
+#define MAX77650_REG_CNFG_LDO_A 0x38
+#define MAX77650_REG_CNFG_LDO_B 0x39
+#define MAX77650_REG_CNFG_LED0_A 0x40
+#define MAX77650_REG_CNFG_LED1_A 0x41
+#define MAX77650_REG_CNFG_LED2_A 0x42
+#define MAX77650_REG_CNFG_LED0_B 0x43
+#define MAX77650_REG_CNFG_LED1_B 0x44
+#define MAX77650_REG_CNFG_LED2_B 0x45
+#define MAX77650_REG_CNFG_LED_TOP 0x46
+
+#define MAX77650_CID_MASK GENMASK(3, 0)
+#define MAX77650_CID_BITS(_reg) (_reg & MAX77650_CID_MASK)
+
+#define MAX77650_CID_77650A 0x03
+#define MAX77650_CID_77650C 0x0a
+#define MAX77650_CID_77651A 0x06
+#define MAX77650_CID_77651B 0x08
+
+#endif /* MAX77650_H */
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index 75e5c8ff85fc..c34d5f0d34d7 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -553,7 +553,6 @@ struct palmas_pmic {
struct palmas *palmas;
struct device *dev;
struct regulator_desc desc[PALMAS_NUM_REGS];
- struct regulator_dev *rdev[PALMAS_NUM_REGS];
struct mutex mutex;
int smps123;
diff --git a/include/linux/mfd/stmfx.h b/include/linux/mfd/stmfx.h
new file mode 100644
index 000000000000..d890595b89b6
--- /dev/null
+++ b/include/linux/mfd/stmfx.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 STMicroelectronics
+ * Author(s): Amelie Delaunay <amelie.delaunay@st.com>.
+ */
+
+#ifndef MFD_STMFX_H
+#define MFD_STMFX_H
+
+#include <linux/regmap.h>
+
+/* General */
+#define STMFX_REG_CHIP_ID 0x00 /* R */
+#define STMFX_REG_FW_VERSION_MSB 0x01 /* R */
+#define STMFX_REG_FW_VERSION_LSB 0x02 /* R */
+#define STMFX_REG_SYS_CTRL 0x40 /* RW */
+/* IRQ output management */
+#define STMFX_REG_IRQ_OUT_PIN 0x41 /* RW */
+#define STMFX_REG_IRQ_SRC_EN 0x42 /* RW */
+#define STMFX_REG_IRQ_PENDING 0x08 /* R */
+#define STMFX_REG_IRQ_ACK 0x44 /* RW */
+/* GPIO management */
+#define STMFX_REG_IRQ_GPI_PENDING1 0x0C /* R */
+#define STMFX_REG_IRQ_GPI_PENDING2 0x0D /* R */
+#define STMFX_REG_IRQ_GPI_PENDING3 0x0E /* R */
+#define STMFX_REG_GPIO_STATE1 0x10 /* R */
+#define STMFX_REG_GPIO_STATE2 0x11 /* R */
+#define STMFX_REG_GPIO_STATE3 0x12 /* R */
+#define STMFX_REG_IRQ_GPI_SRC1 0x48 /* RW */
+#define STMFX_REG_IRQ_GPI_SRC2 0x49 /* RW */
+#define STMFX_REG_IRQ_GPI_SRC3 0x4A /* RW */
+#define STMFX_REG_IRQ_GPI_EVT1 0x4C /* RW */
+#define STMFX_REG_IRQ_GPI_EVT2 0x4D /* RW */
+#define STMFX_REG_IRQ_GPI_EVT3 0x4E /* RW */
+#define STMFX_REG_IRQ_GPI_TYPE1 0x50 /* RW */
+#define STMFX_REG_IRQ_GPI_TYPE2 0x51 /* RW */
+#define STMFX_REG_IRQ_GPI_TYPE3 0x52 /* RW */
+#define STMFX_REG_IRQ_GPI_ACK1 0x54 /* RW */
+#define STMFX_REG_IRQ_GPI_ACK2 0x55 /* RW */
+#define STMFX_REG_IRQ_GPI_ACK3 0x56 /* RW */
+#define STMFX_REG_GPIO_DIR1 0x60 /* RW */
+#define STMFX_REG_GPIO_DIR2 0x61 /* RW */
+#define STMFX_REG_GPIO_DIR3 0x62 /* RW */
+#define STMFX_REG_GPIO_TYPE1 0x64 /* RW */
+#define STMFX_REG_GPIO_TYPE2 0x65 /* RW */
+#define STMFX_REG_GPIO_TYPE3 0x66 /* RW */
+#define STMFX_REG_GPIO_PUPD1 0x68 /* RW */
+#define STMFX_REG_GPIO_PUPD2 0x69 /* RW */
+#define STMFX_REG_GPIO_PUPD3 0x6A /* RW */
+#define STMFX_REG_GPO_SET1 0x6C /* RW */
+#define STMFX_REG_GPO_SET2 0x6D /* RW */
+#define STMFX_REG_GPO_SET3 0x6E /* RW */
+#define STMFX_REG_GPO_CLR1 0x70 /* RW */
+#define STMFX_REG_GPO_CLR2 0x71 /* RW */
+#define STMFX_REG_GPO_CLR3 0x72 /* RW */
+
+#define STMFX_REG_MAX 0xB0
+
+/* MFX boot time is around 10ms, so after reset, we have to wait this delay */
+#define STMFX_BOOT_TIME_MS 10
+
+/* STMFX_REG_CHIP_ID bitfields */
+#define STMFX_REG_CHIP_ID_MASK GENMASK(7, 0)
+
+/* STMFX_REG_SYS_CTRL bitfields */
+#define STMFX_REG_SYS_CTRL_GPIO_EN BIT(0)
+#define STMFX_REG_SYS_CTRL_TS_EN BIT(1)
+#define STMFX_REG_SYS_CTRL_IDD_EN BIT(2)
+#define STMFX_REG_SYS_CTRL_ALTGPIO_EN BIT(3)
+#define STMFX_REG_SYS_CTRL_SWRST BIT(7)
+
+/* STMFX_REG_IRQ_OUT_PIN bitfields */
+#define STMFX_REG_IRQ_OUT_PIN_TYPE BIT(0) /* 0-OD 1-PP */
+#define STMFX_REG_IRQ_OUT_PIN_POL BIT(1) /* 0-active LOW 1-active HIGH */
+
+/* STMFX_REG_IRQ_(SRC_EN/PENDING/ACK) bit shift */
+enum stmfx_irqs {
+ STMFX_REG_IRQ_SRC_EN_GPIO = 0,
+ STMFX_REG_IRQ_SRC_EN_IDD,
+ STMFX_REG_IRQ_SRC_EN_ERROR,
+ STMFX_REG_IRQ_SRC_EN_TS_DET,
+ STMFX_REG_IRQ_SRC_EN_TS_NE,
+ STMFX_REG_IRQ_SRC_EN_TS_TH,
+ STMFX_REG_IRQ_SRC_EN_TS_FULL,
+ STMFX_REG_IRQ_SRC_EN_TS_OVF,
+ STMFX_REG_IRQ_SRC_MAX,
+};
+
+enum stmfx_functions {
+ STMFX_FUNC_GPIO = BIT(0), /* GPIO[15:0] */
+ STMFX_FUNC_ALTGPIO_LOW = BIT(1), /* aGPIO[3:0] */
+ STMFX_FUNC_ALTGPIO_HIGH = BIT(2), /* aGPIO[7:4] */
+ STMFX_FUNC_TS = BIT(3),
+ STMFX_FUNC_IDD = BIT(4),
+};
+
+/**
+ * struct stmfx - STMFX MFD structure
+ * @dev: device reference used for logs
+ * @map: register map
+ * @vdd: STMFX power supply
+ * @irq_domain: IRQ domain
+ * @lock: IRQ bus lock
+ * @irq_src: cache of IRQ_SRC_EN register for bus_lock
+ * @bkp_sysctrl: backup of SYS_CTRL register for suspend/resume
+ * @bkp_irqoutpin: backup of IRQ_OUT_PIN register for suspend/resume
+ */
+struct stmfx {
+ struct device *dev;
+ struct regmap *map;
+ struct regulator *vdd;
+ struct irq_domain *irq_domain;
+ struct mutex lock; /* IRQ bus lock */
+ u8 irq_src;
+#ifdef CONFIG_PM
+ u8 bkp_sysctrl;
+ u8 bkp_irqoutpin;
+#endif
+};
+
+int stmfx_function_enable(struct stmfx *stmfx, u32 func);
+int stmfx_function_disable(struct stmfx *stmfx, u32 func);
+#endif
diff --git a/include/linux/mfd/syscon/atmel-matrix.h b/include/linux/mfd/syscon/atmel-matrix.h
index 8293c3e2a82a..f61cd127a852 100644
--- a/include/linux/mfd/syscon/atmel-matrix.h
+++ b/include/linux/mfd/syscon/atmel-matrix.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2014 Atmel Corporation.
*
* Memory Controllers (MATRIX, EBI) - System peripherals registers.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _LINUX_MFD_SYSCON_ATMEL_MATRIX_H
diff --git a/include/linux/mfd/syscon/atmel-mc.h b/include/linux/mfd/syscon/atmel-mc.h
index afd9b8f1e363..99c56205c410 100644
--- a/include/linux/mfd/syscon/atmel-mc.h
+++ b/include/linux/mfd/syscon/atmel-mc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2005 Ivan Kokshaysky
* Copyright (C) SAN People
@@ -5,11 +6,6 @@
* Memory Controllers (MC, EBI, SMC, SDRAMC, BFC) - System peripherals
* registers.
* Based on AT91RM9200 datasheet revision E.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _LINUX_MFD_SYSCON_ATMEL_MC_H_
diff --git a/include/linux/mfd/syscon/atmel-smc.h b/include/linux/mfd/syscon/atmel-smc.h
index 7a367f34b66a..e9e24f4c4578 100644
--- a/include/linux/mfd/syscon/atmel-smc.h
+++ b/include/linux/mfd/syscon/atmel-smc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Atmel SMC (Static Memory Controller) register offsets and bit definitions.
*
@@ -5,10 +6,6 @@
* Copyright (C) 2014 Free Electrons
*
* Author: Boris Brezillon <boris.brezillon@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _LINUX_MFD_SYSCON_ATMEL_SMC_H_
diff --git a/include/linux/mfd/syscon/atmel-st.h b/include/linux/mfd/syscon/atmel-st.h
index 8acf1ec1fa32..5b6013d0c440 100644
--- a/include/linux/mfd/syscon/atmel-st.h
+++ b/include/linux/mfd/syscon/atmel-st.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2005 Ivan Kokshaysky
* Copyright (C) SAN People
*
* System Timer (ST) - System peripherals registers.
* Based on AT91RM9200 datasheet revision E.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef _LINUX_MFD_SYSCON_ATMEL_ST_H
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index c1b25f5e386d..f232c8130d00 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -410,6 +410,15 @@
#define IMX6SX_GPR1_FEC_CLOCK_PAD_DIR_MASK (0x3 << 17)
#define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_EXT (0x3 << 13)
+#define IMX6SX_GPR2_MQS_OVERSAMPLE_MASK (0x1 << 26)
+#define IMX6SX_GPR2_MQS_OVERSAMPLE_SHIFT (26)
+#define IMX6SX_GPR2_MQS_EN_MASK (0x1 << 25)
+#define IMX6SX_GPR2_MQS_EN_SHIFT (25)
+#define IMX6SX_GPR2_MQS_SW_RST_MASK (0x1 << 24)
+#define IMX6SX_GPR2_MQS_SW_RST_SHIFT (24)
+#define IMX6SX_GPR2_MQS_CLK_DIV_MASK (0xFF << 16)
+#define IMX6SX_GPR2_MQS_CLK_DIV_SHIFT (16)
+
#define IMX6SX_GPR4_FEC_ENET1_STOP_REQ (0x1 << 3)
#define IMX6SX_GPR4_FEC_ENET2_STOP_REQ (0x1 << 4)
diff --git a/include/linux/mfd/ti-lmu-register.h b/include/linux/mfd/ti-lmu-register.h
index 2125c7c02818..f09510561a55 100644
--- a/include/linux/mfd/ti-lmu-register.h
+++ b/include/linux/mfd/ti-lmu-register.h
@@ -15,50 +15,6 @@
#include <linux/bitops.h>
-/* LM3532 */
-#define LM3532_REG_OUTPUT_CFG 0x10
-#define LM3532_ILED1_CFG_MASK 0x03
-#define LM3532_ILED2_CFG_MASK 0x0C
-#define LM3532_ILED3_CFG_MASK 0x30
-#define LM3532_ILED1_CFG_SHIFT 0
-#define LM3532_ILED2_CFG_SHIFT 2
-#define LM3532_ILED3_CFG_SHIFT 4
-
-#define LM3532_REG_RAMPUP 0x12
-#define LM3532_REG_RAMPDN LM3532_REG_RAMPUP
-#define LM3532_RAMPUP_MASK 0x07
-#define LM3532_RAMPUP_SHIFT 0
-#define LM3532_RAMPDN_MASK 0x38
-#define LM3532_RAMPDN_SHIFT 3
-
-#define LM3532_REG_ENABLE 0x1D
-
-#define LM3532_REG_PWM_A_CFG 0x13
-#define LM3532_PWM_A_MASK 0x05 /* zone 0 */
-#define LM3532_PWM_ZONE_0 BIT(2)
-
-#define LM3532_REG_PWM_B_CFG 0x14
-#define LM3532_PWM_B_MASK 0x09 /* zone 1 */
-#define LM3532_PWM_ZONE_1 BIT(3)
-
-#define LM3532_REG_PWM_C_CFG 0x15
-#define LM3532_PWM_C_MASK 0x11 /* zone 2 */
-#define LM3532_PWM_ZONE_2 BIT(4)
-
-#define LM3532_REG_ZONE_CFG_A 0x16
-#define LM3532_REG_ZONE_CFG_B 0x18
-#define LM3532_REG_ZONE_CFG_C 0x1A
-#define LM3532_ZONE_MASK (BIT(2) | BIT(3) | BIT(4))
-#define LM3532_ZONE_0 0
-#define LM3532_ZONE_1 BIT(2)
-#define LM3532_ZONE_2 BIT(3)
-
-#define LM3532_REG_BRT_A 0x70 /* zone 0 */
-#define LM3532_REG_BRT_B 0x76 /* zone 1 */
-#define LM3532_REG_BRT_C 0x7C /* zone 2 */
-
-#define LM3532_MAX_REG 0x7E
-
/* LM3631 */
#define LM3631_REG_DEVCTRL 0x00
#define LM3631_LCD_EN_MASK BIT(1)
diff --git a/include/linux/mfd/ti-lmu.h b/include/linux/mfd/ti-lmu.h
index 1ef51ed36be5..7762c1bce55d 100644
--- a/include/linux/mfd/ti-lmu.h
+++ b/include/linux/mfd/ti-lmu.h
@@ -22,7 +22,6 @@
#define LMU_EVENT_MONITOR_DONE 0x01
enum ti_lmu_id {
- LM3532,
LM3631,
LM3632,
LM3633,
diff --git a/include/linux/mfd/wm831x/regulator.h b/include/linux/mfd/wm831x/regulator.h
index 955d30fc6a27..30c587a0624c 100644
--- a/include/linux/mfd/wm831x/regulator.h
+++ b/include/linux/mfd/wm831x/regulator.h
@@ -1213,6 +1213,6 @@
#define WM831X_LDO1_OK_WIDTH 1 /* LDO1_OK */
#define WM831X_ISINK_MAX_ISEL 55
-extern int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1];
+extern const unsigned int wm831x_isinkv_values[WM831X_ISINK_MAX_ISEL + 1];
#endif
diff --git a/include/linux/mfd/wm8400-private.h b/include/linux/mfd/wm8400-private.h
index 4ee908f5b834..43d0d307e2e3 100644
--- a/include/linux/mfd/wm8400-private.h
+++ b/include/linux/mfd/wm8400-private.h
@@ -923,12 +923,4 @@ struct wm8400 {
#define WM8400_LINE_CMP_VTHD_SHIFT 0 /* LINE_CMP_VTHD - [3:0] */
#define WM8400_LINE_CMP_VTHD_WIDTH 4 /* LINE_CMP_VTHD - [3:0] */
-int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data);
-
-static inline int wm8400_set_bits(struct wm8400 *wm8400, u8 reg,
- u16 mask, u16 val)
-{
- return regmap_update_bits(wm8400->regmap, reg, mask, val);
-}
-
#endif
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 6fee8b1a4400..5cd824c1c0ca 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -469,7 +469,7 @@ static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising)
if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
advertising))
lcl_adv |= ADVERTISE_PAUSE_CAP;
- if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
advertising))
lcl_adv |= ADVERTISE_PAUSE_ASYM;
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
index 612c8c2f2466..769326ea1d9b 100644
--- a/include/linux/mlx5/cq.h
+++ b/include/linux/mlx5/cq.h
@@ -170,7 +170,7 @@ static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
doorbell[1] = cpu_to_be32(cq->cqn);
- mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, NULL);
+ mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL);
}
static inline void mlx5_cq_hold(struct mlx5_core_cq *cq)
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index f93a5598b942..fc2b6e807f06 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -361,6 +361,7 @@ enum {
enum {
MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
+ MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT = 0x5,
};
enum {
@@ -1001,7 +1002,8 @@ enum {
MLX5_MATCH_OUTER_HEADERS = 1 << 0,
MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
MLX5_MATCH_INNER_HEADERS = 1 << 2,
-
+ MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3,
+ MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4,
};
enum {
@@ -1045,6 +1047,7 @@ enum mlx5_mpls_supported_fields {
};
enum mlx5_flex_parser_protos {
+ MLX5_FLEX_PROTO_GENEVE = 1 << 3,
MLX5_FLEX_PROTO_CW_MPLS_GRE = 1 << 4,
MLX5_FLEX_PROTO_CW_MPLS_UDP = 1 << 5,
};
@@ -1166,6 +1169,12 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
+#define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)
+
+#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap)
+
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
diff --git a/include/linux/mlx5/doorbell.h b/include/linux/mlx5/doorbell.h
index 0787de28f2fc..5c267707e1df 100644
--- a/include/linux/mlx5/doorbell.h
+++ b/include/linux/mlx5/doorbell.h
@@ -36,46 +36,25 @@
#define MLX5_BF_OFFSET 0x800
#define MLX5_CQ_DOORBELL 0x20
-#if BITS_PER_LONG == 64
/* Assume that we can just write a 64-bit doorbell atomically. s390
* actually doesn't have writeq() but S/390 systems don't even have
* PCI so we won't worry about it.
+ *
+ * Note that the write is not atomic on 32-bit systems! In contrast to 64-bit
+ * ones, it requires proper locking. mlx5_write64 doesn't do any locking itself,
+ * so on 32-bit systems the caller must protect it with a lock of its own.
+ *
+ * TODO: use write{q,l}_relaxed()
*/
-#define MLX5_DECLARE_DOORBELL_LOCK(name)
-#define MLX5_INIT_DOORBELL_LOCK(ptr) do { } while (0)
-#define MLX5_GET_DOORBELL_LOCK(ptr) (NULL)
-
-static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
- spinlock_t *doorbell_lock)
+static inline void mlx5_write64(__be32 val[2], void __iomem *dest)
{
+#if BITS_PER_LONG == 64
__raw_writeq(*(u64 *)val, dest);
-}
-
#else
-
-/* Just fall back to a spinlock to protect the doorbell if
- * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit
- * MMIO writes.
- */
-
-#define MLX5_DECLARE_DOORBELL_LOCK(name) spinlock_t name;
-#define MLX5_INIT_DOORBELL_LOCK(ptr) spin_lock_init(ptr)
-#define MLX5_GET_DOORBELL_LOCK(ptr) (ptr)
-
-static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
- spinlock_t *doorbell_lock)
-{
- unsigned long flags;
-
- if (doorbell_lock)
- spin_lock_irqsave(doorbell_lock, flags);
__raw_writel((__force u32) val[0], dest);
__raw_writel((__force u32) val[1], dest + 4);
- if (doorbell_lock)
- spin_unlock_irqrestore(doorbell_lock, flags);
-}
-
#endif
+}
#endif /* MLX5_DOORBELL_H */
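
With the MLX5_*_DOORBELL_LOCK() helpers removed, a 32-bit caller now has to supply its own serialization around mlx5_write64(). A hedged sketch of such a wrapper; the my_queue structure and its spinlock are invented for the example, and only the new mlx5_write64() signature is assumed.

#include <linux/mlx5/doorbell.h>
#include <linux/spinlock.h>

/* Hypothetical per-queue state owning the doorbell and its lock. */
struct my_queue {
        void __iomem *db;
        spinlock_t db_lock;     /* only needed when BITS_PER_LONG == 32 */
};

static void my_ring_doorbell(struct my_queue *q, __be32 val[2])
{
        unsigned long flags;

        if (IS_ENABLED(CONFIG_64BIT)) {
                /* 64-bit: mlx5_write64() is a single atomic writeq. */
                mlx5_write64(val, q->db);
                return;
        }

        /* 32-bit: two 32-bit stores, so serialize them ourselves. */
        spin_lock_irqsave(&q->db_lock, flags);
        mlx5_write64(val, q->db);
        spin_unlock_irqrestore(&q->db_lock, flags);
}
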
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 022541dc5dbf..5a27246db883 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -56,7 +56,6 @@
enum {
MLX5_BOARD_ID_LEN = 64,
- MLX5_MAX_NAME_LEN = 16,
};
enum {
@@ -133,6 +132,7 @@ enum {
MLX5_REG_MTRC_CONF = 0x9041,
MLX5_REG_MTRC_STDB = 0x9042,
MLX5_REG_MTRC_CTRL = 0x9043,
+ MLX5_REG_MPEIN = 0x9050,
MLX5_REG_MPCNT = 0x9051,
MLX5_REG_MTPPS = 0x9053,
MLX5_REG_MTPPSE = 0x9054,
@@ -512,8 +512,13 @@ struct mlx5_rl_table {
struct mlx5_rl_entry *rl_entry;
};
+struct mlx5_core_roce {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_handle *allow_rule;
+};
+
struct mlx5_priv {
- char name[MLX5_MAX_NAME_LEN];
struct mlx5_eq_table *eq_table;
/* pages stuff */
@@ -566,6 +571,7 @@ struct mlx5_priv {
struct mlx5_lag *lag;
struct mlx5_devcom *devcom;
unsigned long pci_dev_data;
+ struct mlx5_core_roce roce;
struct mlx5_fc_stats fc_stats;
struct mlx5_rl_table rl_table;
@@ -594,6 +600,8 @@ enum mlx5_pagefault_type_flags {
};
struct mlx5_td {
+ /* protects tirs_list changes while tirs are refreshed */
+ struct mutex list_lock;
struct list_head tirs_list;
u32 tdn;
};
@@ -640,6 +648,7 @@ struct mlx5_fw_tracer;
struct mlx5_vxlan;
struct mlx5_core_dev {
+ struct device *device;
struct pci_dev *pdev;
/* sync pci state */
struct mutex pci_status_mutex;
@@ -660,6 +669,7 @@ struct mlx5_core_dev {
u64 sys_image_guid;
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
+ phys_addr_t bar_addr;
enum mlx5_device_state state;
/* sync interface state */
struct mutex intf_state_mutex;
@@ -679,7 +689,6 @@ struct mlx5_core_dev {
#endif
struct mlx5_clock clock;
struct mlx5_ib_clock_info *clock_info;
- struct page *clock_info_page;
struct mlx5_fw_tracer *tracer;
};
@@ -885,6 +894,7 @@ void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
+void mlx5_health_flush(struct mlx5_core_dev *dev);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index 96d8435421de..0ca77dd1429c 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -35,7 +35,7 @@ struct mlx5_eswitch_rep_if {
void (*unload)(struct mlx5_eswitch_rep *rep);
void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep);
void *priv;
- u8 state;
+ atomic_t state;
};
struct mlx5_eswitch_rep {
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 9df51da04621..e690ba0f965c 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -73,6 +73,13 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_SNIFFER_RX,
MLX5_FLOW_NAMESPACE_SNIFFER_TX,
MLX5_FLOW_NAMESPACE_EGRESS,
+ MLX5_FLOW_NAMESPACE_RDMA_RX,
+};
+
+enum {
+ FDB_BYPASS_PATH,
+ FDB_FAST_PATH,
+ FDB_SLOW_PATH,
};
struct mlx5_flow_table;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 3b83288749c6..82612741b29e 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -81,6 +81,19 @@ enum {
};
enum {
+ MLX5_OBJ_TYPE_SW_ICM = 0x0008,
+};
+
+enum {
+ MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM),
+ MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11),
+};
+
+enum {
+ MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b,
+};
+
+enum {
MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
MLX5_CMD_OP_INIT_HCA = 0x102,
@@ -299,7 +312,11 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 outer_gre_protocol[0x1];
u8 outer_gre_key[0x1];
u8 outer_vxlan_vni[0x1];
- u8 reserved_at_1a[0x5];
+ u8 outer_geneve_vni[0x1];
+ u8 outer_geneve_oam[0x1];
+ u8 outer_geneve_protocol_type[0x1];
+ u8 outer_geneve_opt_len[0x1];
+ u8 reserved_at_1e[0x1];
u8 source_eswitch_port[0x1];
u8 inner_dmac[0x1];
@@ -327,7 +344,8 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 inner_tcp_flags[0x1];
u8 reserved_at_37[0x9];
- u8 reserved_at_40[0x5];
+ u8 geneve_tlv_option_0_data[0x1];
+ u8 reserved_at_41[0x4];
u8 outer_first_mpls_over_udp[0x4];
u8 outer_first_mpls_over_gre[0x4];
u8 inner_first_mpls[0x4];
@@ -357,11 +375,14 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 pop_vlan_2[0x1];
u8 push_vlan_2[0x1];
u8 reformat_and_vlan_action[0x1];
- u8 reserved_at_10[0x2];
+ u8 reserved_at_10[0x1];
+ u8 sw_owner[0x1];
u8 reformat_l3_tunnel_to_l2[0x1];
u8 reformat_l2_to_l3_tunnel[0x1];
u8 reformat_and_modify_action[0x1];
- u8 reserved_at_15[0xb];
+ u8 reserved_at_15[0x2];
+ u8 table_miss_action_domain[0x1];
+ u8 reserved_at_18[0x8];
u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6];
u8 log_max_modify_header_context[0x8];
@@ -469,7 +490,9 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 vxlan_vni[0x18];
u8 reserved_at_b8[0x8];
- u8 reserved_at_c0[0x20];
+ u8 geneve_vni[0x18];
+ u8 reserved_at_d8[0x7];
+ u8 geneve_oam[0x1];
u8 reserved_at_e0[0xc];
u8 outer_ipv6_flow_label[0x14];
@@ -477,7 +500,11 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 reserved_at_100[0xc];
u8 inner_ipv6_flow_label[0x14];
- u8 reserved_at_120[0x28];
+ u8 reserved_at_120[0xa];
+ u8 geneve_opt_len[0x6];
+ u8 geneve_protocol_type[0x10];
+
+ u8 reserved_at_140[0x8];
u8 bth_dst_qp[0x18];
u8 reserved_at_160[0x20];
u8 outer_esp_spi[0x20];
@@ -507,6 +534,12 @@ struct mlx5_ifc_fte_match_set_misc2_bits {
u8 reserved_at_1a0[0x60];
};
+struct mlx5_ifc_fte_match_set_misc3_bits {
+ u8 reserved_at_0[0x120];
+ u8 geneve_tlv_option_0_data[0x20];
+ u8 reserved_at_140[0xc0];
+};
+
struct mlx5_ifc_cmd_pas_bits {
u8 pa_h[0x20];
@@ -589,7 +622,7 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
- u8 reserved_at_400[0x200];
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_rdma;
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer;
@@ -770,7 +803,19 @@ struct mlx5_ifc_device_mem_cap_bits {
u8 max_memic_size[0x20];
- u8 reserved_at_c0[0x740];
+ u8 steering_sw_icm_start_address[0x40];
+
+ u8 reserved_at_100[0x8];
+ u8 log_header_modify_sw_icm_size[0x8];
+ u8 reserved_at_110[0x2];
+ u8 log_sw_icm_alloc_granularity[0x6];
+ u8 log_steering_sw_icm_size[0x8];
+
+ u8 reserved_at_120[0x20];
+
+ u8 header_modify_sw_icm_start_address[0x40];
+
+ u8 reserved_at_180[0x680];
};
enum {
@@ -919,6 +964,7 @@ enum {
enum {
MLX5_UCTX_CAP_RAW_TX = 1UL << 0,
+ MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1,
};
struct mlx5_ifc_cmd_hca_cap_bits {
@@ -929,7 +975,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_srq_sz[0x8];
u8 log_max_qp_sz[0x8];
- u8 reserved_at_90[0xb];
+ u8 reserved_at_90[0x8];
+ u8 prio_tag_required[0x1];
+ u8 reserved_at_99[0x2];
u8 log_max_qp[0x5];
u8 reserved_at_a0[0xb];
@@ -1211,7 +1259,11 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 num_of_uars_per_page[0x20];
u8 flex_parser_protocols[0x20];
- u8 reserved_at_560[0x20];
+
+ u8 max_geneve_tlv_options[0x8];
+ u8 reserved_at_568[0x3];
+ u8 max_geneve_tlv_option_data_len[0x5];
+ u8 reserved_at_570[0x10];
u8 reserved_at_580[0x3c];
u8 mini_cqe_resp_stride_index[0x1];
@@ -1247,7 +1299,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 uctx_cap[0x20];
- u8 reserved_at_6c0[0x140];
+ u8 reserved_at_6c0[0x4];
+ u8 flex_parser_id_geneve_tlv_option_0[0x4];
+ u8 reserved_at_6c8[0x138];
};
enum mlx5_flow_destination_type {
@@ -1260,6 +1314,12 @@ enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM = 0x101,
};
+enum mlx5_flow_table_miss_action {
+ MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ MLX5_FLOW_TABLE_MISS_ACTION_FWD,
+ MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
+};
+
struct mlx5_ifc_dest_format_struct_bits {
u8 destination_type[0x8];
u8 destination_id[0x18];
@@ -1299,7 +1359,9 @@ struct mlx5_ifc_fte_match_param_bits {
struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2;
- u8 reserved_at_800[0x800];
+ struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3;
+
+ u8 reserved_at_a00[0x600];
};
enum {
@@ -2920,6 +2982,7 @@ enum {
MLX5_MKC_ACCESS_MODE_MTT = 0x1,
MLX5_MKC_ACCESS_MODE_KLMS = 0x2,
MLX5_MKC_ACCESS_MODE_KSM = 0x3,
+ MLX5_MKC_ACCESS_MODE_SW_ICM = 0x4,
MLX5_MKC_ACCESS_MODE_MEMIC = 0x5,
};
@@ -4807,6 +4870,7 @@ enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
+ MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_3 = 0x4,
};
struct mlx5_ifc_query_flow_group_out_bits {
@@ -5110,6 +5174,7 @@ enum {
MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0 = 0x14,
MLX5_ACTION_IN_FIELD_OUT_SIPV4 = 0x15,
MLX5_ACTION_IN_FIELD_OUT_DIPV4 = 0x16,
+ MLX5_ACTION_IN_FIELD_OUT_FIRST_VID = 0x17,
MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT = 0x47,
};
@@ -6874,14 +6939,14 @@ struct mlx5_ifc_create_tis_in_bits {
struct mlx5_ifc_create_tir_out_bits {
u8 status[0x8];
- u8 reserved_at_8[0x18];
+ u8 icm_address_63_40[0x18];
u8 syndrome[0x20];
- u8 reserved_at_40[0x8];
+ u8 icm_address_39_32[0x8];
u8 tirn[0x18];
- u8 reserved_at_60[0x20];
+ u8 icm_address_31_0[0x20];
};
struct mlx5_ifc_create_tir_in_bits {
@@ -8026,6 +8091,52 @@ struct mlx5_ifc_ppcnt_reg_bits {
union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
};
+struct mlx5_ifc_mpein_reg_bits {
+ u8 reserved_at_0[0x2];
+ u8 depth[0x6];
+ u8 pcie_index[0x8];
+ u8 node[0x8];
+ u8 reserved_at_18[0x8];
+
+ u8 capability_mask[0x20];
+
+ u8 reserved_at_40[0x8];
+ u8 link_width_enabled[0x8];
+ u8 link_speed_enabled[0x10];
+
+ u8 lane0_physical_position[0x8];
+ u8 link_width_active[0x8];
+ u8 link_speed_active[0x10];
+
+ u8 num_of_pfs[0x10];
+ u8 num_of_vfs[0x10];
+
+ u8 bdf0[0x10];
+ u8 reserved_at_b0[0x10];
+
+ u8 max_read_request_size[0x4];
+ u8 max_payload_size[0x4];
+ u8 reserved_at_c8[0x5];
+ u8 pwr_status[0x3];
+ u8 port_type[0x4];
+ u8 reserved_at_d4[0xb];
+ u8 lane_reversal[0x1];
+
+ u8 reserved_at_e0[0x14];
+ u8 pci_power[0xc];
+
+ u8 reserved_at_100[0x20];
+
+ u8 device_status[0x10];
+ u8 port_state[0x8];
+ u8 reserved_at_138[0x8];
+
+ u8 reserved_at_140[0x10];
+ u8 receiver_detect_result[0x10];
+
+ u8 reserved_at_160[0x20];
+};
+
struct mlx5_ifc_mpcnt_reg_bits {
u8 reserved_at_0[0x8];
u8 pcie_index[0x8];
@@ -8345,7 +8456,9 @@ struct mlx5_ifc_pcam_reg_bits {
};
struct mlx5_ifc_mcam_enhanced_features_bits {
- u8 reserved_at_0[0x74];
+ u8 reserved_at_0[0x6e];
+ u8 pci_status_and_power[0x1];
+ u8 reserved_at_6f[0x5];
u8 mark_tx_action_cnp[0x1];
u8 mark_tx_action_cqe[0x1];
u8 dynamic_tx_overflow[0x1];
@@ -8953,6 +9066,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
struct mlx5_ifc_ppad_reg_bits ppad_reg;
struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
+ struct mlx5_ifc_mpein_reg_bits mpein_reg;
struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg;
struct mlx5_ifc_pplm_reg_bits pplm_reg;
struct mlx5_ifc_pplr_reg_bits pplr_reg;
@@ -9442,6 +9556,33 @@ struct mlx5_ifc_uctx_bits {
u8 reserved_at_20[0x160];
};
+struct mlx5_ifc_sw_icm_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x18];
+ u8 log_sw_icm_size[0x8];
+
+ u8 reserved_at_60[0x20];
+
+ u8 sw_icm_start_addr[0x40];
+
+ u8 reserved_at_c0[0x140];
+};
+
+struct mlx5_ifc_geneve_tlv_option_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x18];
+ u8 geneve_option_fte_index[0x8];
+
+ u8 option_class[0x10];
+ u8 option_type[0x8];
+ u8 reserved_at_78[0x3];
+ u8 option_data_length[0x5];
+
+ u8 reserved_at_80[0x180];
+};
+
struct mlx5_ifc_create_umem_in_bits {
u8 opcode[0x10];
u8 uid[0x10];
@@ -9479,6 +9620,16 @@ struct mlx5_ifc_destroy_uctx_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_create_sw_icm_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_sw_icm_bits sw_icm;
+};
+
+struct mlx5_ifc_create_geneve_tlv_option_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_geneve_tlv_option_bits geneve_tlv_opt;
+};
+
struct mlx5_ifc_mtrc_string_db_param_bits {
u8 string_db_base_address[0x20];
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index 64e78394fc9c..de9a272c9f3d 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -60,6 +60,7 @@ enum mlx5_an_status {
#define MLX5_I2C_ADDR_LOW 0x50
#define MLX5_I2C_ADDR_HIGH 0x51
#define MLX5_EEPROM_PAGE_LENGTH 256
+#define MLX5_EEPROM_HIGH_PAGE_LENGTH 128
enum mlx5e_link_mode {
MLX5E_1000BASE_CX_SGMII = 0,
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index b26ea9077384..3ba4edbd17a6 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -395,6 +395,7 @@ struct mlx5_wqe_signature_seg {
struct mlx5_wqe_inline_seg {
__be32 byte_count;
+ __be32 data[0];
};
enum mlx5_sig_type {
@@ -557,7 +558,8 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev,
int mlx5_core_create_dct(struct mlx5_core_dev *dev,
struct mlx5_core_dct *qp,
- u32 *in, int inlen);
+ u32 *in, int inlen,
+ u32 *out, int outlen);
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
u32 *in,
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
index a261d5528ff7..dc6b1e7cb8c4 100644
--- a/include/linux/mlx5/transobj.h
+++ b/include/linux/mlx5/transobj.h
@@ -50,6 +50,9 @@ int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out);
int mlx5_core_query_sq_state(struct mlx5_core_dev *dev, u32 sqn, u8 *state);
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn);
+int mlx5_core_create_tir_out(struct mlx5_core_dev *dev,
+ u32 *in, int inlen,
+ u32 *out, int outlen);
int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
int inlen);
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 0eef548b9946..3d1c6cdbbba7 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -118,10 +118,6 @@ int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
int promisc_uc,
int promisc_mc,
int promisc_all);
-int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
- u16 vport,
- u16 vlans[],
- int *size);
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
u16 vlans[],
int list_size);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 76769749b5a5..912614fbbef3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -124,10 +124,45 @@ extern int mmap_rnd_compat_bits __read_mostly;
/*
* On some architectures it is expensive to call memset() for small sizes.
- * Those architectures should provide their own implementation of "struct page"
- * zeroing by defining this macro in <asm/pgtable.h>.
+ * If an architecture decides to implement its own version of
+ * mm_zero_struct_page, it should wrap the defines below in an #ifndef and
+ * define its own version of this macro in <asm/pgtable.h>.
*/
-#ifndef mm_zero_struct_page
+#if BITS_PER_LONG == 64
+/* This function must be updated when the size of struct page grows above 80
+ * or shrinks below 56. The idea is that the compiler optimizes out the
+ * switch() statement and only leaves move/store instructions. Also, the
+ * compiler can combine write statements if they are both assignments and can
+ * be reordered; this can result in several of the writes here being dropped.
+ */
+#define mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
+static inline void __mm_zero_struct_page(struct page *page)
+{
+ unsigned long *_pp = (void *)page;
+
+ /* Check that struct page is either 56, 64, 72, or 80 bytes */
+ BUILD_BUG_ON(sizeof(struct page) & 7);
+ BUILD_BUG_ON(sizeof(struct page) < 56);
+ BUILD_BUG_ON(sizeof(struct page) > 80);
+
+ switch (sizeof(struct page)) {
+ case 80:
+ _pp[9] = 0; /* fallthrough */
+ case 72:
+ _pp[8] = 0; /* fallthrough */
+ case 64:
+ _pp[7] = 0; /* fallthrough */
+ case 56:
+ _pp[6] = 0;
+ _pp[5] = 0;
+ _pp[4] = 0;
+ _pp[3] = 0;
+ _pp[2] = 0;
+ _pp[1] = 0;
+ _pp[0] = 0;
+ }
+}
+#else
#define mm_zero_struct_page(pp) ((void)memset((pp), 0, sizeof(struct page)))
#endif
@@ -966,6 +1001,10 @@ static inline bool is_pci_p2pdma_page(const struct page *page)
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */
+/* 127: arbitrary random number, small enough to assemble well */
+#define page_ref_zero_or_close_to_overflow(page) \
+ ((unsigned int) page_ref_count(page) + 127u <= 127u)
+
static inline void get_page(struct page *page)
{
page = compound_head(page);
@@ -973,8 +1012,17 @@ static inline void get_page(struct page *page)
* Getting a normal page or the head of a compound page
* requires to already have an elevated page->_refcount.
*/
- VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
+ VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page);
+ page_ref_inc(page);
+}
+
+static inline __must_check bool try_get_page(struct page *page)
+{
+ page = compound_head(page);
+ if (WARN_ON_ONCE(page_ref_count(page) <= 0))
+ return false;
page_ref_inc(page);
+ return true;
}
static inline void put_page(struct page *page)
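A minimal sketch of where the new helper is useful: code that takes a speculative reference and cannot guarantee the refcount is already elevated should prefer try_get_page() over get_page(), which now BUGs on a zero or near-overflow count. The wrapper below is hypothetical, not part of this patch:

/* Hedged sketch: take a reference only if the page is safely referenceable. */
static struct page *grab_page_reference(struct page *page)
{
	/* get_page() would BUG on a zero or near-overflow count;
	 * try_get_page() fails gracefully instead. */
	if (!try_get_page(page))
		return NULL;

	return page;
}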
@@ -994,6 +1042,30 @@ static inline void put_page(struct page *page)
__put_page(page);
}
+/**
+ * put_user_page() - release a gup-pinned page
+ * @page: pointer to page to be released
+ *
+ * Pages that were pinned via get_user_pages*() must be released via
+ * either put_user_page(), or one of the put_user_pages*() routines
+ * below. This is so that eventually, pages that are pinned via
+ * get_user_pages*() can be separately tracked and uniquely handled. In
+ * particular, interactions with RDMA and filesystems need special
+ * handling.
+ *
+ * put_user_page() and put_page() are not interchangeable, despite this early
+ * implementation that makes them look the same. put_user_page() calls must
+ * be perfectly matched up with get_user_page() calls.
+ */
+static inline void put_user_page(struct page *page)
+{
+ put_page(page);
+}
+
+void put_user_pages_dirty(struct page **pages, unsigned long npages);
+void put_user_pages_dirty_lock(struct page **pages, unsigned long npages);
+void put_user_pages(struct page **pages, unsigned long npages);
+
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif
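A hedged sketch of the intended pairing: pages pinned through the get_user_pages*() family are released through the new put_user_pages*() helpers rather than put_page(). It assumes the gup_flags-based get_user_pages_fast() prototype introduced later in this patch; the function name and the reduced error handling are illustrative only:

static int pin_and_fill(unsigned long uaddr, int npages, struct page **pages)
{
	int pinned;

	pinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
	if (pinned < 0)
		return pinned;

	/* ... DMA or copy into the pinned pages ... */

	/* Mark them dirty and drop the GUP references in one call. */
	put_user_pages_dirty_lock(pages, pinned);

	return pinned == npages ? 0 : -EFAULT;
}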
@@ -1492,21 +1564,8 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
-#if defined(CONFIG_FS_DAX) || defined(CONFIG_CMA)
-long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
- unsigned int gup_flags, struct page **pages,
- struct vm_area_struct **vmas);
-#else
-static inline long get_user_pages_longterm(unsigned long start,
- unsigned long nr_pages, unsigned int gup_flags,
- struct page **pages, struct vm_area_struct **vmas)
-{
- return get_user_pages(start, nr_pages, gup_flags, pages, vmas);
-}
-#endif /* CONFIG_FS_DAX */
-
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
- struct page **pages);
+int get_user_pages_fast(unsigned long start, int nr_pages,
+ unsigned int gup_flags, struct page **pages);
/* Container for pinned pfns / pages */
struct frame_vector {
@@ -2520,6 +2579,10 @@ struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t);
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
+int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
+ unsigned long num);
+int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
+ unsigned long num);
vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
@@ -2570,6 +2633,34 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
#define FOLL_COW 0x4000 /* internal GUP flag */
#define FOLL_ANON 0x8000 /* don't do file mappings */
+#define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite: see below */
+
+/*
+ * NOTE on FOLL_LONGTERM:
+ *
+ * FOLL_LONGTERM indicates that the page will be held for an indefinite time
+ * period, _often_ under userspace control. This is contrasted with
+ * iov_iter_get_pages(), whose usages are transient.
+ *
+ * FIXME: For pages which are part of a filesystem, mappings are subject to the
+ * lifetime enforced by the filesystem and we need guarantees that longterm
+ * users like RDMA and V4L2 only establish mappings which coordinate usage with
+ * the filesystem. Ideas for this coordination include revoking the longterm
+ * pin, delaying writeback, bounce buffer page writeback, etc. Because FS DAX
+ * was added after the problem with filesystems was found, FS DAX VMAs are
+ * specifically failed. Filesystem pages are still subject to bugs and use of
+ * FOLL_LONGTERM should be avoided on those pages.
+ *
+ * FIXME: Also NOTE that FOLL_LONGTERM is not supported in every GUP call.
+ * Currently only get_user_pages() and get_user_pages_fast() support this flag
+ * and calls to get_user_pages_[un]locked are specifically not allowed. This
+ * is due to an incompatibility with the FS DAX check and
+ * FAULT_FLAG_ALLOW_RETRY.
+ *
+ * In the CMA case: longterm pins in a CMA region would unnecessarily fragment
+ * that region. And so CMA attempts to migrate the page before pinning when
+ * FOLL_LONGTERM is specified.
+ */
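A hedged sketch of a long-lived pin as an RDMA-style driver might take it, using FOLL_LONGTERM with get_user_pages() under mmap_sem. The helper name is hypothetical and error unwinding is omitted; per the note above, the _locked/_unlocked variants must not be used with this flag:

static long pin_longterm(unsigned long uaddr, unsigned long npages,
			 struct page **pages)
{
	long pinned;

	down_read(&current->mm->mmap_sem);
	pinned = get_user_pages(uaddr, npages,
				FOLL_WRITE | FOLL_LONGTERM, pages, NULL);
	up_read(&current->mm->mmap_sem);

	return pinned;	/* released later via put_user_pages() */
}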
static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
@@ -2597,37 +2688,31 @@ static inline void kernel_poison_pages(struct page *page, int numpages,
int enable) { }
#endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
extern bool _debug_pagealloc_enabled;
-extern void __kernel_map_pages(struct page *page, int numpages, int enable);
static inline bool debug_pagealloc_enabled(void)
{
- return _debug_pagealloc_enabled;
+ return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) && _debug_pagealloc_enabled;
}
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
+extern void __kernel_map_pages(struct page *page, int numpages, int enable);
+
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
- if (!debug_pagealloc_enabled())
- return;
-
__kernel_map_pages(page, numpages, enable);
}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */
-#else /* CONFIG_DEBUG_PAGEALLOC */
+#else /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
static inline void
kernel_map_pages(struct page *page, int numpages, int enable) {}
#ifdef CONFIG_HIBERNATION
static inline bool kernel_page_present(struct page *page) { return true; }
#endif /* CONFIG_HIBERNATION */
-static inline bool debug_pagealloc_enabled(void)
-{
- return false;
-}
-#endif /* CONFIG_DEBUG_PAGEALLOC */
+#endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_ARCH_HAS_SET_DIRECT_MAP */
#ifdef __HAVE_ARCH_GATE_AREA
extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 04ec454d44ce..6f2fef7b0784 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -29,7 +29,7 @@ static __always_inline void __update_lru_size(struct lruvec *lruvec,
{
struct pglist_data *pgdat = lruvec_pgdat(lruvec);
- __mod_node_page_state(pgdat, NR_LRU_BASE + lru, nr_pages);
+ __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
__mod_zone_page_state(&pgdat->node_zones[zid],
NR_ZONE_LRU_BASE + lru, nr_pages);
}
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 7eade9132f02..e1f42a07d8f0 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -103,7 +103,7 @@ struct page {
};
struct { /* slab, slob and slub */
union {
- struct list_head slab_list; /* uses lru */
+ struct list_head slab_list;
struct { /* Partial pages */
struct page *next;
#ifdef CONFIG_64BIT
@@ -671,7 +671,7 @@ enum vm_fault_reason {
/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
-#define VM_FAULT_GET_HINDEX(x) (((x) >> 16) & 0xf)
+#define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)
#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | \
VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON | \
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index 97ca105347a6..5685805533b5 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -111,6 +111,18 @@ struct sdio_driver {
extern int sdio_register_driver(struct sdio_driver *);
extern void sdio_unregister_driver(struct sdio_driver *);
+/**
+ * module_sdio_driver() - Helper macro for registering an SDIO driver
+ * @__sdio_driver: sdio_driver struct
+ *
+ * Helper macro for SDIO drivers which do not do anything special in module
+ * init/exit. This eliminates a lot of boilerplate. Each module may only
+ * use this macro once, and calling it replaces module_init() and module_exit().
+ */
+#define module_sdio_driver(__sdio_driver) \
+ module_driver(__sdio_driver, sdio_register_driver, \
+ sdio_unregister_driver)
+
/*
* SDIO I/O operations
*/
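A hedged sketch of the boilerplate the new macro removes: a minimal SDIO driver skeleton. The driver name, device ID and callbacks are hypothetical; the MediaTek vendor ID is the one added to sdio_ids.h in this series:

static int foo_sdio_probe(struct sdio_func *func,
			  const struct sdio_device_id *id);
static void foo_sdio_remove(struct sdio_func *func);

static const struct sdio_device_id foo_sdio_ids[] = {
	{ SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7663) },	/* device ID made up */
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(sdio, foo_sdio_ids);

static struct sdio_driver foo_sdio_driver = {
	.name		= "foo_sdio",
	.id_table	= foo_sdio_ids,
	.probe		= foo_sdio_probe,
	.remove		= foo_sdio_remove,
};

/* Replaces the usual module_init()/module_exit() boilerplate. */
module_sdio_driver(foo_sdio_driver);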
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 4332199c71c2..d1a5d5df02f5 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -59,6 +59,8 @@
#define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128
#define SDIO_DEVICE_ID_MARVELL_8887WLAN 0x9134
+#define SDIO_VENDOR_ID_MEDIATEK 0x037a
+
#define SDIO_VENDOR_ID_SIANO 0x039a
#define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201
#define SDIO_DEVICE_ID_SIANO_NICE 0x0202
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 4050ec1c3b45..b6c004bd9f6a 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -10,6 +10,36 @@
struct mmu_notifier;
struct mmu_notifier_ops;
+/**
+ * enum mmu_notifier_event - reason for the mmu notifier callback
+ * @MMU_NOTIFY_UNMAP: either a munmap() that unmaps the range or a mremap()
+ * that moves the range
+ *
+ * @MMU_NOTIFY_CLEAR: clear page table entry (there are many reasons for this,
+ * such as madvise() or replacing a page with another one, ...).
+ *
+ * @MMU_NOTIFY_PROTECTION_VMA: update is due to a protection change for the
+ * range, i.e. using the vma access permission (vm_page_prot) to update the
+ * whole range is enough; there is no need to inspect changes to the CPU page
+ * table (mprotect() syscall)
+ *
+ * @MMU_NOTIFY_PROTECTION_PAGE: update is due to a change in the read/write
+ * flag for pages in the range, so to mirror those changes the user must
+ * inspect the CPU page table (from the end callback).
+ *
+ * @MMU_NOTIFY_SOFT_DIRTY: soft dirty accounting (still the same page and the
+ * same access flags). The user should soft-dirty the page in the end callback
+ * to make sure that anyone relying on soft dirtiness catches pages that might
+ * be written through non-CPU mappings.
+ */
+enum mmu_notifier_event {
+ MMU_NOTIFY_UNMAP = 0,
+ MMU_NOTIFY_CLEAR,
+ MMU_NOTIFY_PROTECTION_VMA,
+ MMU_NOTIFY_PROTECTION_PAGE,
+ MMU_NOTIFY_SOFT_DIRTY,
+};
+
#ifdef CONFIG_MMU_NOTIFIER
/*
@@ -25,11 +55,15 @@ struct mmu_notifier_mm {
spinlock_t lock;
};
+#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
+
struct mmu_notifier_range {
+ struct vm_area_struct *vma;
struct mm_struct *mm;
unsigned long start;
unsigned long end;
- bool blockable;
+ unsigned flags;
+ enum mmu_notifier_event event;
};
struct mmu_notifier_ops {
@@ -225,6 +259,14 @@ extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
bool only_end);
extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
unsigned long start, unsigned long end);
+extern bool
+mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
+
+static inline bool
+mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
+{
+ return (range->flags & MMU_NOTIFIER_RANGE_BLOCKABLE);
+}
static inline void mmu_notifier_release(struct mm_struct *mm)
{
@@ -269,7 +311,7 @@ static inline void
mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
{
if (mm_has_notifiers(range->mm)) {
- range->blockable = true;
+ range->flags |= MMU_NOTIFIER_RANGE_BLOCKABLE;
__mmu_notifier_invalidate_range_start(range);
}
}
@@ -278,7 +320,7 @@ static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
if (mm_has_notifiers(range->mm)) {
- range->blockable = false;
+ range->flags &= ~MMU_NOTIFIER_RANGE_BLOCKABLE;
return __mmu_notifier_invalidate_range_start(range);
}
return 0;
@@ -318,13 +360,19 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
+ enum mmu_notifier_event event,
+ unsigned flags,
+ struct vm_area_struct *vma,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
+ range->vma = vma;
+ range->event = event;
range->mm = mm;
range->start = start;
range->end = end;
+ range->flags = flags;
}
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
@@ -452,9 +500,14 @@ static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
range->end = end;
}
-#define mmu_notifier_range_init(range, mm, start, end) \
+#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \
_mmu_notifier_range_init(range, start, end)
+static inline bool
+mmu_notifier_range_blockable(const struct mmu_notifier_range *range)
+{
+ return true;
+}
static inline int mm_has_notifiers(struct mm_struct *mm)
{
@@ -517,6 +570,8 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
}
+#define mmu_notifier_range_update_to_read_only(r) false
+
#define ptep_clear_flush_young_notify ptep_clear_flush_young
#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
#define ptep_clear_young_notify ptep_test_and_clear_young
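A hedged sketch of both sides of the extended API: a caller filling in the new event/flags/vma arguments of mmu_notifier_range_init(), and a notifier implementation using mmu_notifier_range_blockable() instead of the removed ->blockable field. Both functions are illustrative, not taken from this patch:

static void example_clear_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma,
				vma->vm_mm, start, end);
	mmu_notifier_invalidate_range_start(&range);
	/* ... clear the page table entries for [start, end) ... */
	mmu_notifier_invalidate_range_end(&range);
}

/* In a device driver's ->invalidate_range_start() callback: */
static int example_invalidate_start(struct mmu_notifier *mn,
				    const struct mmu_notifier_range *range)
{
	if (!mmu_notifier_range_blockable(range))
		return -EAGAIN;	/* cannot sleep here, ask the caller to retry */

	/* ... sleepably tear down mirrored mappings ... */
	return 0;
}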
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index fba7741533be..5a4aedc160bd 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -247,11 +247,6 @@ struct lruvec {
#endif
};
-/* Mask used at gathering information at once (see memcontrol.c) */
-#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
-#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
-#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
-
/* Isolate unmapped file */
#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
diff --git a/include/linux/module.h b/include/linux/module.h
index 5bf5dcd91009..8f75277d4cef 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -253,6 +253,7 @@ extern typeof(name) __mod_##type##__##name##_device_table \
#define MODULE_VERSION(_version) MODULE_INFO(version, _version)
#else
#define MODULE_VERSION(_version) \
+ MODULE_INFO(version, _version); \
static struct module_version_attribute ___modver_attr = { \
.mattr = { \
.attr = { \
@@ -676,6 +677,7 @@ static inline bool is_livepatch_module(struct module *mod)
#endif /* CONFIG_LIVEPATCH */
bool is_module_sig_enforced(void);
+void set_module_sig_enforced(void);
#else /* !CONFIG_MODULES... */
@@ -709,6 +711,12 @@ static inline bool is_module_text_address(unsigned long addr)
return false;
}
+static inline bool within_module_core(unsigned long addr,
+ const struct module *mod)
+{
+ return false;
+}
+
/* Get/put a kernel symbol (calls should be symmetric) */
#define symbol_get(x) ({ extern typeof(x) x __attribute__((weak)); &(x); })
#define symbol_put(x) do { } while (0)
@@ -796,6 +804,10 @@ static inline bool is_module_sig_enforced(void)
return false;
}
+static inline void set_module_sig_enforced(void)
+{
+}
+
/* Dereference module function descriptor */
static inline
void *dereference_module_function_descriptor(struct module *mod, void *ptr)
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index ba36506db4fb..5ba250d9172a 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -10,23 +10,21 @@
module name. */
#ifdef MODULE
#define MODULE_PARAM_PREFIX /* empty */
+#define __MODULE_INFO_PREFIX /* empty */
#else
#define MODULE_PARAM_PREFIX KBUILD_MODNAME "."
+/* We cannot use MODULE_PARAM_PREFIX because some modules override it. */
+#define __MODULE_INFO_PREFIX KBUILD_MODNAME "."
#endif
/* Chosen so that structs with an unsigned long line up. */
#define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))
-#ifdef MODULE
#define __MODULE_INFO(tag, name, info) \
static const char __UNIQUE_ID(name)[] \
__used __attribute__((section(".modinfo"), unused, aligned(1))) \
- = __stringify(tag) "=" info
-#else /* !MODULE */
-/* This struct is here for syntactic coherency, it is not used */
-#define __MODULE_INFO(tag, name, info) \
- struct __UNIQUE_ID(name) {}
-#endif
+ = __MODULE_INFO_PREFIX __stringify(tag) "=" info
+
#define __MODULE_PARM_TYPE(name, _type) \
__MODULE_INFO(parmtype, name##type, #name ":" _type)
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 9197ddbf35fb..bf8cc4108b8f 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -87,6 +87,8 @@ extern bool mnt_may_suid(struct vfsmount *mnt);
struct path;
extern struct vfsmount *clone_private_mount(const struct path *path);
+extern int __mnt_want_write(struct vfsmount *);
+extern void __mnt_drop_write(struct vfsmount *);
struct file_system_type;
extern struct vfsmount *fc_mount(struct fs_context *fc);
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 7e9b81c3b50d..052f04fcf953 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -148,24 +148,6 @@ u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);
-/* Conversion helpers. Should be removed after merging */
-static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
-{
- __pci_write_msi_msg(entry, msg);
-}
-static inline void write_msi_msg(int irq, struct msi_msg *msg)
-{
- pci_write_msi_msg(irq, msg);
-}
-static inline void mask_msi_irq(struct irq_data *data)
-{
- pci_msi_mask_irq(data);
-}
-static inline void unmask_msi_irq(struct irq_data *data)
-{
- pci_msi_unmask_irq(data);
-}
-
/*
* The arch hooks to setup up msi irqs. Those functions are
* implemented as weak symbols so that they /can/ be overriden by
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 3102bd754d18..010bc5544c54 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -93,10 +93,7 @@ struct nand_bbt_descr {
#define NAND_BBT_WRITE 0x00002000
/* Read and write back block contents when writing bbt */
#define NAND_BBT_SAVECONTENT 0x00004000
-/* Search good / bad pattern on the first and the second page */
-#define NAND_BBT_SCAN2NDPAGE 0x00008000
-/* Search good / bad pattern on the last page of the eraseblock */
-#define NAND_BBT_SCANLASTPAGE 0x00010000
+
/*
* Use a flash based bad block table. By default, OOB identifier is saved in
* OOB area. This option is passed to the default bad block table function.
@@ -124,13 +121,6 @@ struct nand_bbt_descr {
#define NAND_BBT_SCAN_MAXBLOCKS 4
/*
- * Constants for oob configuration
- */
-#define NAND_SMALL_BADBLOCK_POS 5
-#define NAND_LARGE_BADBLOCK_POS 0
-#define ONENAND_BADBLOCK_POS 0
-
-/*
* Bad block scanning errors
*/
#define ONENAND_BBT_READ_ERROR 1
@@ -140,7 +130,6 @@ struct nand_bbt_descr {
/**
* struct bbm_info - [GENERIC] Bad Block Table data structure
* @bbt_erase_shift: [INTERN] number of address bits in a bbt entry
- * @badblockpos: [INTERN] position of the bad block marker in the oob area
* @options: options for this descriptor
* @bbt: [INTERN] bad block table pointer
* @isbad_bbt: function to determine if a block is bad
@@ -150,7 +139,6 @@ struct nand_bbt_descr {
*/
struct bbm_info {
int bbt_erase_shift;
- int badblockpos;
int options;
uint8_t *bbt;
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 7f53ece2c039..cebc38b6d6f5 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -19,6 +19,7 @@
* @oobsize: OOB area size
* @pages_per_eraseblock: number of pages per eraseblock
* @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
+ * @max_bad_eraseblocks_per_lun: maximum number of bad eraseblocks per LUN
* @planes_per_lun: number of planes per LUN
* @luns_per_target: number of LUN per target (target is a synonym for die)
* @ntargets: total number of targets exposed by the NAND device
@@ -29,18 +30,20 @@ struct nand_memory_organization {
unsigned int oobsize;
unsigned int pages_per_eraseblock;
unsigned int eraseblocks_per_lun;
+ unsigned int max_bad_eraseblocks_per_lun;
unsigned int planes_per_lun;
unsigned int luns_per_target;
unsigned int ntargets;
};
-#define NAND_MEMORG(bpc, ps, os, ppe, epl, ppl, lpt, nt) \
+#define NAND_MEMORG(bpc, ps, os, ppe, epl, mbb, ppl, lpt, nt) \
{ \
.bits_per_cell = (bpc), \
.pagesize = (ps), \
.oobsize = (os), \
.pages_per_eraseblock = (ppe), \
.eraseblocks_per_lun = (epl), \
+ .max_bad_eraseblocks_per_lun = (mbb), \
.planes_per_lun = (ppl), \
.luns_per_target = (lpt), \
.ntargets = (nt), \
@@ -269,6 +272,20 @@ nanddev_pages_per_eraseblock(const struct nand_device *nand)
}
/**
+ * nanddev_pages_per_target() - Get the number of pages per target
+ * @nand: NAND device
+ *
+ * Return: the number of pages per target.
+ */
+static inline unsigned int
+nanddev_pages_per_target(const struct nand_device *nand)
+{
+ return nand->memorg.pages_per_eraseblock *
+ nand->memorg.eraseblocks_per_lun *
+ nand->memorg.luns_per_target;
+}
+
+/**
* nanddev_per_page_oobsize() - Get NAND erase block size
* @nand: NAND device
*
@@ -292,6 +309,18 @@ nanddev_eraseblocks_per_lun(const struct nand_device *nand)
}
/**
+ * nanddev_eraseblocks_per_target() - Get the number of eraseblocks per target
+ * @nand: NAND device
+ *
+ * Return: the number of eraseblocks per target.
+ */
+static inline unsigned int
+nanddev_eraseblocks_per_target(const struct nand_device *nand)
+{
+ return nand->memorg.eraseblocks_per_lun * nand->memorg.luns_per_target;
+}
+
+/**
* nanddev_target_size() - Get the total size provided by a single target/die
* @nand: NAND device
*
@@ -729,5 +758,6 @@ static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
/* MTD -> NAND helper functions. */
int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
+int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len);
#endif /* __LINUX_MTD_NAND_H */
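A hedged sketch of the extended NAND_MEMORG() macro in use, with the new max_bad_eraseblocks_per_lun slot (sixth argument) and the per-target helpers added above. The device geometry and names are made up for illustration:

/* bpc, pagesize, oobsize, pages/eb, eb/LUN, max bad eb/LUN, planes, LUNs, targets */
static const struct nand_memory_organization foo_memorg =
	NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1);

static void foo_print_geometry(const struct nand_device *nand)
{
	pr_info("pages/target: %u, eraseblocks/target: %u\n",
		nanddev_pages_per_target(nand),
		nanddev_eraseblocks_per_target(nand));
}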
diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h
index b8106651f807..a8a6909b594e 100644
--- a/include/linux/mtd/nand_bch.h
+++ b/include/linux/mtd/nand_bch.h
@@ -15,7 +15,7 @@ struct mtd_info;
struct nand_chip;
struct nand_bch_control;
-#if defined(CONFIG_MTD_NAND_ECC_BCH)
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)
static inline int mtd_nand_has_bch(void) { return 1; }
@@ -39,7 +39,7 @@ struct nand_bch_control *nand_bch_init(struct mtd_info *mtd);
*/
void nand_bch_free(struct nand_bch_control *nbc);
-#else /* !CONFIG_MTD_NAND_ECC_BCH */
+#else /* !CONFIG_MTD_NAND_ECC_SW_BCH */
static inline int mtd_nand_has_bch(void) { return 0; }
@@ -64,6 +64,6 @@ static inline struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
static inline void nand_bch_free(struct nand_bch_control *nbc) {}
-#endif /* CONFIG_MTD_NAND_ECC_BCH */
+#endif /* CONFIG_MTD_NAND_ECC_SW_BCH */
#endif /* __MTD_NAND_BCH_H__ */
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 0aaa98b219a4..bfe9e10fae04 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -94,6 +94,7 @@ struct onenand_chip {
unsigned int technology;
unsigned int density_mask;
unsigned int options;
+ unsigned int badblockpos;
unsigned int erase_shift;
unsigned int page_shift;
@@ -188,6 +189,8 @@ struct onenand_chip {
/* Check byte access in OneNAND */
#define ONENAND_CHECK_BYTE_ACCESS(addr) (addr & 0x1)
+#define ONENAND_BADBLOCK_POS 0
+
/*
* Options bits
*/
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index b7445a44a814..dbfffa5bec7b 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -20,6 +20,7 @@
#include <linux/mtd/flashchip.h>
#include <linux/mtd/bbm.h>
#include <linux/mtd/jedec.h>
+#include <linux/mtd/nand.h>
#include <linux/mtd/onfi.h>
#include <linux/mutex.h>
#include <linux/of.h>
@@ -168,6 +169,21 @@ enum nand_ecc_algo {
/* Macros to identify the above */
#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
+/*
+ * There are different places where the manufacturer stores the factory bad
+ * block markers.
+ *
+ * Position within the block: Each of these pages needs to be checked for a
+ * bad block marking pattern.
+ */
+#define NAND_BBM_FIRSTPAGE 0x01000000
+#define NAND_BBM_SECONDPAGE 0x02000000
+#define NAND_BBM_LASTPAGE 0x04000000
+
+/* Position within the OOB data of the page */
+#define NAND_BBM_POS_SMALL 5
+#define NAND_BBM_POS_LARGE 0
+
/* Non chip related options */
/* This option skips the bbt scan during initialization. */
#define NAND_SKIP_BBTSCAN 0x00010000
@@ -805,7 +821,7 @@ struct nand_op_parser_pattern {
#define NAND_OP_PARSER_PATTERN(_exec, ...) \
{ \
.exec = _exec, \
- .elems = (struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }, \
+ .elems = (const struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }, \
.nelems = sizeof((struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }) / \
sizeof(struct nand_op_parser_pattern_elem), \
}
@@ -831,7 +847,7 @@ struct nand_op_parser {
#define NAND_OP_PARSER(...) \
{ \
- .patterns = (struct nand_op_parser_pattern[]) { __VA_ARGS__ }, \
+ .patterns = (const struct nand_op_parser_pattern[]) { __VA_ARGS__ }, \
.npatterns = sizeof((struct nand_op_parser_pattern[]) { __VA_ARGS__ }) / \
sizeof(struct nand_op_parser_pattern), \
}
@@ -860,6 +876,7 @@ struct nand_operation {
int nand_op_parser_exec_op(struct nand_chip *chip,
const struct nand_op_parser *parser,
const struct nand_operation *op, bool check_only);
+
/**
* struct nand_controller_ops - Controller operations
*
@@ -962,7 +979,7 @@ struct nand_legacy {
/**
* struct nand_chip - NAND Private Flash Chip Data
- * @mtd: MTD device registered to the MTD framework
+ * @base: Inherit from the generic NAND device
* @legacy: All legacy fields/hooks. If you develop a new driver,
* don't even try to use any of these fields/hooks, and if
* you're modifying an existing driver that is using those
@@ -990,37 +1007,26 @@ struct nand_legacy {
* @badblockbits: [INTERN] minimum number of set bits in a good block's
* bad block marker position; i.e., BBM == 11110111b is
* not bad when badblockbits == 7
- * @bits_per_cell: [INTERN] number of bits per cell. i.e., 1 means SLC.
- * @ecc_strength_ds: [INTERN] ECC correctability from the datasheet.
- * Minimum amount of bit errors per @ecc_step_ds guaranteed
- * to be correctable. If unknown, set to zero.
- * @ecc_step_ds: [INTERN] ECC step required by the @ecc_strength_ds,
- * also from the datasheet. It is the recommended ECC step
- * size, if known; if unknown, set to zero.
* @onfi_timing_mode_default: [INTERN] default ONFI timing mode. This field is
* set to the actually used ONFI mode if the chip is
* ONFI compliant or deduced from the datasheet if
* the NAND chip is not ONFI compliant.
- * @numchips: [INTERN] number of physical chips
- * @chipsize: [INTERN] the size of one chip for multichip arrays
* @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
* @data_buf: [INTERN] buffer for data, size is (page size + oobsize).
- * @pagebuf: [INTERN] holds the pagenumber which is currently in
- * data_buf.
- * @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is
- * currently in data_buf.
+ * @pagecache: Structure containing page cache related fields
+ * @pagecache.bitflips: Number of bitflips of the cached page
+ * @pagecache.page: Page number currently in the cache. -1 means no page is
+ * currently cached
* @subpagesize: [INTERN] holds the subpagesize
* @id: [INTERN] holds NAND ID
* @parameters: [INTERN] holds generic parameters under an easily
* readable form.
- * @max_bb_per_die: [INTERN] the max number of bad blocks each die of a
- * this nand device will encounter their life times.
- * @blocks_per_die: [INTERN] The number of PEBs in a die
* @data_interface: [INTERN] NAND interface timing information
* @cur_cs: currently selected target. -1 means no target selected,
* otherwise we should always have cur_cs >= 0 &&
- * cur_cs < numchips. NAND Controller drivers should not
- * modify this value, but they're allowed to read it.
+ * cur_cs < nanddev_ntargets(). NAND Controller drivers
+ * should not modify this value, but they're allowed to
+ * read it.
* @read_retries: [INTERN] the number of read retry modes supported
* @lock: lock protecting the suspended field. Also used to
* serialize accesses to the NAND device.
@@ -1041,7 +1047,7 @@ struct nand_legacy {
*/
struct nand_chip {
- struct mtd_info mtd;
+ struct nand_device base;
struct nand_legacy legacy;
@@ -1054,24 +1060,21 @@ struct nand_chip {
int phys_erase_shift;
int bbt_erase_shift;
int chip_shift;
- int numchips;
- uint64_t chipsize;
int pagemask;
u8 *data_buf;
- int pagebuf;
- unsigned int pagebuf_bitflips;
+
+ struct {
+ unsigned int bitflips;
+ int page;
+ } pagecache;
+
int subpagesize;
- uint8_t bits_per_cell;
- uint16_t ecc_strength_ds;
- uint16_t ecc_step_ds;
int onfi_timing_mode_default;
- int badblockpos;
+ unsigned int badblockpos;
int badblockbits;
struct nand_id id;
struct nand_parameters parameters;
- u16 max_bb_per_die;
- u32 blocks_per_die;
struct nand_data_interface data_interface;
@@ -1105,25 +1108,14 @@ struct nand_chip {
extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
-static inline void nand_set_flash_node(struct nand_chip *chip,
- struct device_node *np)
-{
- mtd_set_of_node(&chip->mtd, np);
-}
-
-static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
-{
- return mtd_get_of_node(&chip->mtd);
-}
-
static inline struct nand_chip *mtd_to_nand(struct mtd_info *mtd)
{
- return container_of(mtd, struct nand_chip, mtd);
+ return container_of(mtd, struct nand_chip, base.mtd);
}
static inline struct mtd_info *nand_to_mtd(struct nand_chip *chip)
{
- return &chip->mtd;
+ return &chip->base.mtd;
}
static inline void *nand_get_controller_data(struct nand_chip *chip)
@@ -1147,6 +1139,17 @@ static inline void *nand_get_manufacturer_data(struct nand_chip *chip)
return chip->manufacturer.priv;
}
+static inline void nand_set_flash_node(struct nand_chip *chip,
+ struct device_node *np)
+{
+ mtd_set_of_node(nand_to_mtd(chip), np);
+}
+
+static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
+{
+ return mtd_get_of_node(nand_to_mtd(chip));
+}
+
/*
* A helper for defining older NAND chips where the second ID byte fully
* defined the chip, including the geometry (chip size, eraseblock size, page
@@ -1180,9 +1183,9 @@ static inline void *nand_get_manufacturer_data(struct nand_chip *chip)
* @name: a human-readable name of the NAND chip
* @dev_id: the device ID (the second byte of the full chip ID array)
* @mfr_id: manufacturer ID part of the full chip ID array (refers the same
- * memory address as @id[0])
+ * memory address as ``id[0]``)
* @dev_id: device ID part of the full chip ID array (refers the same memory
- * address as @id[1])
+ * address as ``id[1]``)
* @id: full device ID array
* @pagesize: size of the NAND page in bytes; if 0, then the real page size (as
* well as the eraseblock size) is determined from the extended NAND
@@ -1235,9 +1238,9 @@ int nand_create_bbt(struct nand_chip *chip);
*/
static inline bool nand_is_slc(struct nand_chip *chip)
{
- WARN(chip->bits_per_cell == 0,
+ WARN(nanddev_bits_per_cell(&chip->base) == 0,
"chip->bits_per_cell is used uninitialized\n");
- return chip->bits_per_cell == 1;
+ return nanddev_bits_per_cell(&chip->base) == 1;
}
/**
@@ -1348,4 +1351,25 @@ int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
void nand_select_target(struct nand_chip *chip, unsigned int cs);
void nand_deselect_target(struct nand_chip *chip);
+/**
+ * nand_get_data_buf() - Get the internal page buffer
+ * @chip: NAND chip object
+ *
+ * Returns the pre-allocated page buffer after invalidating the cache. This
+ * function should be used by drivers that do not want to allocate their own
+ * bounce buffer and still need such a buffer for specific operations (most
+ * commonly when reading OOB data only).
+ *
+ * Be careful to never call this function in the write/write_oob path, because
+ * the core may have placed the data to be written out in this buffer.
+ *
+ * Return: pointer to the page cache buffer
+ */
+static inline void *nand_get_data_buf(struct nand_chip *chip)
+{
+ chip->pagecache.page = -1;
+
+ return chip->data_buf;
+}
+
#endif /* __LINUX_MTD_RAWNAND_H */
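A hedged sketch of the usage pattern the kerneldoc above describes: a controller driver borrowing the core's page buffer for an OOB-only read instead of allocating its own bounce buffer. foo_read_oob() and foo_read_page_raw() are hypothetical names, not part of this patch:

static int foo_read_oob(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	void *buf = nand_get_data_buf(chip);	/* also invalidates the page cache */

	/* Read the whole page into the borrowed buffer, then keep only the OOB. */
	return foo_read_page_raw(chip, buf, chip->oob_poi, mtd->writesize,
				 mtd->oobsize, page);
}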
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index b92e2aa955b6..507f7e289bd1 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -302,6 +302,11 @@ struct spinand_info {
__VA_ARGS__ \
}
+struct spinand_dirmap {
+ struct spi_mem_dirmap_desc *wdesc;
+ struct spi_mem_dirmap_desc *rdesc;
+};
+
/**
* struct spinand_device - SPI NAND device instance
* @base: NAND device instance
@@ -341,6 +346,8 @@ struct spinand_device {
const struct spi_mem_op *update_cache;
} op_templates;
+ struct spinand_dirmap *dirmaps;
+
int (*select_target)(struct spinand_device *spinand,
unsigned int target);
unsigned int cur_target;
diff --git a/include/linux/net.h b/include/linux/net.h
index 651fca72286c..50bf5206ead6 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -83,6 +83,12 @@ enum sock_type {
#endif /* ARCH_HAS_SOCKET_TYPES */
+/**
+ * enum sock_shutdown_cmd - Shutdown types
+ * @SHUT_RD: shutdown receptions
+ * @SHUT_WR: shutdown transmissions
+ * @SHUT_RDWR: shutdown receptions/transmissions
+ */
enum sock_shutdown_cmd {
SHUT_RD,
SHUT_WR,
@@ -155,6 +161,8 @@ struct proto_ops {
int (*compat_ioctl) (struct socket *sock, unsigned int cmd,
unsigned long arg);
#endif
+ int (*gettstamp) (struct socket *sock, void __user *userstamp,
+ bool timeval, bool time32);
int (*listen) (struct socket *sock, int len);
int (*shutdown) (struct socket *sock, int flags);
int (*setsockopt)(struct socket *sock, int level,
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 26f69cf763f4..44b47e9df94a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -194,8 +194,8 @@ struct net_device_stats {
#ifdef CONFIG_RPS
#include <linux/static_key.h>
-extern struct static_key rps_needed;
-extern struct static_key rfs_needed;
+extern struct static_key_false rps_needed;
+extern struct static_key_false rfs_needed;
#endif
struct neighbour;
@@ -914,34 +914,13 @@ struct xfrmdev_ops {
};
#endif
-#if IS_ENABLED(CONFIG_TLS_DEVICE)
-enum tls_offload_ctx_dir {
- TLS_OFFLOAD_CTX_DIR_RX,
- TLS_OFFLOAD_CTX_DIR_TX,
-};
-
-struct tls_crypto_info;
-struct tls_context;
-
-struct tlsdev_ops {
- int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
- enum tls_offload_ctx_dir direction,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn);
- void (*tls_dev_del)(struct net_device *netdev,
- struct tls_context *ctx,
- enum tls_offload_ctx_dir direction);
- void (*tls_dev_resync_rx)(struct net_device *netdev,
- struct sock *sk, u32 seq, u64 rcd_sn);
-};
-#endif
-
struct dev_ifalias {
struct rcu_head rcuhead;
char ifalias[];
};
struct devlink;
+struct tlsdev_ops;
/*
* This structure defines the management hooks for network devices.
@@ -986,8 +965,7 @@ struct devlink;
* those the driver believes to be appropriate.
*
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
- * struct net_device *sb_dev,
- * select_queue_fallback_t fallback);
+ * struct net_device *sb_dev);
* Called to decide which queue to use when device supports multiple
* transmit queues.
*
@@ -1251,8 +1229,8 @@ struct devlink;
* that got dropped are freed/returned via xdp_return_frame().
* Returns negative number, means general error invoking ndo, meaning
* no frames were xmit'ed and core-caller will free all frames.
- * struct devlink *(*ndo_get_devlink)(struct net_device *dev);
- * Get devlink instance associated with a given netdev.
+ * struct devlink_port *(*ndo_get_devlink_port)(struct net_device *dev);
+ * Get devlink port instance associated with a given netdev.
* Called with a reference on the netdevice and devlink locks only,
* rtnl_lock is not held.
*/
@@ -1268,8 +1246,7 @@ struct net_device_ops {
netdev_features_t features);
u16 (*ndo_select_queue)(struct net_device *dev,
struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback);
+ struct net_device *sb_dev);
void (*ndo_change_rx_flags)(struct net_device *dev,
int flags);
void (*ndo_set_rx_mode)(struct net_device *dev);
@@ -1453,7 +1430,7 @@ struct net_device_ops {
u32 flags);
int (*ndo_xsk_async_xmit)(struct net_device *dev,
u32 queue_id);
- struct devlink * (*ndo_get_devlink)(struct net_device *dev);
+ struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev);
};
/**
@@ -1500,6 +1477,7 @@ struct net_device_ops {
* @IFF_FAILOVER: device is a failover master device
* @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
* @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
+ * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,
@@ -1532,6 +1510,7 @@ enum netdev_priv_flags {
IFF_FAILOVER = 1<<27,
IFF_FAILOVER_SLAVE = 1<<28,
IFF_L3MDEV_RX_HANDLER = 1<<29,
+ IFF_LIVE_RENAME_OK = 1<<30,
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1563,6 +1542,7 @@ enum netdev_priv_flags {
#define IFF_FAILOVER IFF_FAILOVER
#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
+#define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK
/**
* struct net_device - The DEVICE structure.
@@ -2152,9 +2132,11 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
&qdisc_xmit_lock_key); \
}
-struct netdev_queue *netdev_pick_tx(struct net_device *dev,
- struct sk_buff *skb,
- struct net_device *sb_dev);
+u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev);
+struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
+ struct sk_buff *skb,
+ struct net_device *sb_dev);
/* returns the headroom that the master device needs to take in account
* when forwarding to this dev
@@ -2639,11 +2621,9 @@ void dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback);
+ struct net_device *sb_dev);
u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback);
+ struct net_device *sb_dev);
int dev_queue_xmit(struct sk_buff *skb);
int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
@@ -2661,14 +2641,6 @@ void netdev_freemem(struct net_device *dev);
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
-DECLARE_PER_CPU(int, xmit_recursion);
-#define XMIT_RECURSION_LIMIT 10
-
-static inline int dev_recursion_level(void)
-{
- return this_cpu_read(xmit_recursion);
-}
-
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
@@ -3017,6 +2989,11 @@ struct softnet_data {
#ifdef CONFIG_XFRM_OFFLOAD
struct sk_buff_head xfrm_backlog;
#endif
+ /* written and read only by owning cpu: */
+ struct {
+ u16 recursion;
+ u8 more;
+ } xmit;
#ifdef CONFIG_RPS
/* input_queue_head should be written by cpu owning this struct,
* and only read by other cpus. Worth using a cache line.
@@ -3052,6 +3029,28 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd,
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
+static inline int dev_recursion_level(void)
+{
+ return this_cpu_read(softnet_data.xmit.recursion);
+}
+
+#define XMIT_RECURSION_LIMIT 10
+static inline bool dev_xmit_recursion(void)
+{
+ return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
+ XMIT_RECURSION_LIMIT);
+}
+
+static inline void dev_xmit_recursion_inc(void)
+{
+ __this_cpu_inc(softnet_data.xmit.recursion);
+}
+
+static inline void dev_xmit_recursion_dec(void)
+{
+ __this_cpu_dec(softnet_data.xmit.recursion);
+}
+
void __netif_schedule(struct Qdisc *q);
void netif_schedule_queue(struct netdev_queue *txq);
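A hedged sketch of the recursion guard these helpers enable, replacing the old xmit_recursion per-CPU variable with the counter now embedded in softnet_data. The function below is illustrative only:

static int example_xmit(struct sk_buff *skb)
{
	int ret;

	if (dev_xmit_recursion()) {
		/* Nested too deeply: drop instead of risking stack overflow. */
		kfree_skb(skb);
		return -ENETDOWN;
	}

	dev_xmit_recursion_inc();
	ret = dev_queue_xmit(skb);
	dev_xmit_recursion_dec();

	return ret;
}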
@@ -4407,10 +4406,15 @@ static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
struct sk_buff *skb, struct net_device *dev,
bool more)
{
- skb->xmit_more = more ? 1 : 0;
+ __this_cpu_write(softnet_data.xmit.more, more);
return ops->ndo_start_xmit(skb, dev);
}
+static inline bool netdev_xmit_more(void)
+{
+ return __this_cpu_read(softnet_data.xmit.more);
+}
+
static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, bool more)
{
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 72cb19c3db6a..996bc247ef6e 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -24,20 +24,36 @@ static inline int NF_DROP_GETERR(int verdict)
static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
const union nf_inet_addr *a2)
{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ const unsigned long *ul1 = (const unsigned long *)a1;
+ const unsigned long *ul2 = (const unsigned long *)a2;
+
+ return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL;
+#else
return a1->all[0] == a2->all[0] &&
a1->all[1] == a2->all[1] &&
a1->all[2] == a2->all[2] &&
a1->all[3] == a2->all[3];
+#endif
}
static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
union nf_inet_addr *result,
const union nf_inet_addr *mask)
{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ const unsigned long *ua = (const unsigned long *)a1;
+ unsigned long *ur = (unsigned long *)result;
+ const unsigned long *um = (const unsigned long *)mask;
+
+ ur[0] = ua[0] & um[0];
+ ur[1] = ua[1] & um[1];
+#else
result->all[0] = a1->all[0] & mask->all[0];
result->all[1] = a1->all[1] & mask->all[1];
result->all[2] = a1->all[2] & mask->all[2];
result->all[3] = a1->all[3] & mask->all[3];
+#endif
}
int netfilter_init(void);
@@ -360,7 +376,7 @@ extern struct nf_nat_hook __rcu *nf_nat_hook;
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
-#ifdef CONFIG_NF_NAT_NEEDED
+#if IS_ENABLED(CONFIG_NF_NAT)
struct nf_nat_hook *nat_hook;
rcu_read_lock();
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index f2e1e6b13ca4..e499d170f12d 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -401,33 +401,30 @@ ip_set_get_h16(const struct nlattr *attr)
return ntohs(nla_get_be16(attr));
}
-#define ipset_nest_start(skb, attr) nla_nest_start(skb, attr | NLA_F_NESTED)
-#define ipset_nest_end(skb, start) nla_nest_end(skb, start)
-
static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr)
{
- struct nlattr *__nested = ipset_nest_start(skb, type);
+ struct nlattr *__nested = nla_nest_start(skb, type);
int ret;
if (!__nested)
return -EMSGSIZE;
ret = nla_put_in_addr(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);
if (!ret)
- ipset_nest_end(skb, __nested);
+ nla_nest_end(skb, __nested);
return ret;
}
static inline int nla_put_ipaddr6(struct sk_buff *skb, int type,
const struct in6_addr *ipaddrptr)
{
- struct nlattr *__nested = ipset_nest_start(skb, type);
+ struct nlattr *__nested = nla_nest_start(skb, type);
int ret;
if (!__nested)
return -EMSGSIZE;
ret = nla_put_in6_addr(skb, IPSET_ATTR_IPADDR_IPV6, ipaddrptr);
if (!ret)
- ipset_nest_end(skb, __nested);
+ nla_nest_end(skb, __nested);
return ret;
}
diff --git a/include/linux/netfilter/nfnetlink_osf.h b/include/linux/netfilter/nfnetlink_osf.h
index c6000046c966..788613f36935 100644
--- a/include/linux/netfilter/nfnetlink_osf.h
+++ b/include/linux/netfilter/nfnetlink_osf.h
@@ -21,13 +21,18 @@ struct nf_osf_finger {
struct nf_osf_user_finger finger;
};
+struct nf_osf_data {
+ const char *genre;
+ const char *version;
+};
+
bool nf_osf_match(const struct sk_buff *skb, u_int8_t family,
int hooknum, struct net_device *in, struct net_device *out,
const struct nf_osf_info *info, struct net *net,
const struct list_head *nf_osf_fingers);
-const char *nf_osf_find(const struct sk_buff *skb,
- const struct list_head *nf_osf_fingers,
- const int ttl_check);
+bool nf_osf_find(const struct sk_buff *skb,
+ const struct list_head *nf_osf_fingers,
+ const int ttl_check, struct nf_osf_data *data);
#endif /* _NFOSF_H */
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index bf384b3eedb8..1f852ef7b098 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -317,7 +317,6 @@ struct xt_table_info *xt_replace_table(struct xt_table *table,
int *error);
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
-struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 471e9467105b..12113e502656 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -87,6 +87,21 @@ static inline int nf_ip6_route(struct net *net, struct dst_entry **dst,
}
int ip6_route_me_harder(struct net *net, struct sk_buff *skb);
+
+static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb)
+{
+#if IS_MODULE(CONFIG_IPV6)
+ const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
+
+ if (!v6_ops)
+ return -EHOSTUNREACH;
+
+ return v6_ops->route_me_harder(net, skb);
+#else
+ return ip6_route_me_harder(net, skb);
+#endif
+}
+
__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 40e30376130b..d363d5765cdf 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -76,7 +76,6 @@ struct nfs_open_context {
fmode_t mode;
unsigned long flags;
-#define NFS_CONTEXT_ERROR_WRITE (0)
#define NFS_CONTEXT_RESEND_WRITES (1)
#define NFS_CONTEXT_BAD (2)
#define NFS_CONTEXT_UNLOCK (3)
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index c827d31298cc..1e78032a174b 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -139,6 +139,16 @@ struct nfs_server {
struct nfs_iostats __percpu *io_stats; /* I/O statistics */
atomic_long_t writeback; /* number of writeback pages */
int flags; /* various flags */
+
+/* The following are for internal use only. Also see uapi/linux/nfs_mount.h */
+#define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000
+#define NFS_MOUNT_LOOKUP_CACHE_NONE 0x20000
+#define NFS_MOUNT_NORESVPORT 0x40000
+#define NFS_MOUNT_LEGACY_INTERFACE 0x80000
+#define NFS_MOUNT_LOCAL_FLOCK 0x100000
+#define NFS_MOUNT_LOCAL_FCNTL 0x200000
+#define NFS_MOUNT_SOFTERR 0x400000
+
unsigned int caps; /* server capabilities */
unsigned int rsize; /* read size */
unsigned int rpages; /* read size (in pages) */
@@ -231,6 +241,9 @@ struct nfs_server {
/* XDR related information */
unsigned int read_hdrsize;
+
+ /* User namespace info */
+ const struct cred *cred;
};
/* Server capabilities */
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index ad69430fd0eb..0bbd587fac6a 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -42,7 +42,6 @@ struct nfs_inode;
struct nfs_page {
struct list_head wb_list; /* Defines state of page: */
struct page *wb_page; /* page to read in/write out */
- struct nfs_open_context *wb_context; /* File state context info */
struct nfs_lock_context *wb_lock_context; /* lock context info */
pgoff_t wb_index; /* Offset >> PAGE_SHIFT */
unsigned int wb_offset, /* Offset & ~PAGE_MASK */
@@ -53,6 +52,7 @@ struct nfs_page {
struct nfs_write_verifier wb_verf; /* Commit cookie */
struct nfs_page *wb_this_page; /* list of reqs for this page */
struct nfs_page *wb_head; /* head pointer for req list */
+ unsigned short wb_nio; /* Number of I/O attempts */
};
struct nfs_pageio_descriptor;
@@ -87,7 +87,6 @@ struct nfs_pgio_mirror {
};
struct nfs_pageio_descriptor {
- unsigned char pg_moreio : 1;
struct inode *pg_inode;
const struct nfs_pageio_ops *pg_ops;
const struct nfs_rw_ops *pg_rw_ops;
@@ -105,6 +104,8 @@ struct nfs_pageio_descriptor {
struct nfs_pgio_mirror pg_mirrors_static[1];
struct nfs_pgio_mirror *pg_mirrors_dynamic;
u32 pg_mirror_idx; /* current mirror */
+ unsigned short pg_maxretrans;
+ unsigned char pg_moreio : 1;
};
/* arbitrarily selected limit to number of mirrors */
@@ -114,7 +115,6 @@ struct nfs_pageio_descriptor {
extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx,
struct page *page,
- struct nfs_page *last,
unsigned int offset,
unsigned int count);
extern void nfs_release_request(struct nfs_page *);
@@ -199,4 +199,10 @@ loff_t req_offset(struct nfs_page *req)
return (((loff_t)req->wb_index) << PAGE_SHIFT) + req->wb_offset;
}
+static inline struct nfs_open_context *
+nfs_req_openctx(struct nfs_page *req)
+{
+ return req->wb_lock_context->open_context;
+}
+
#endif /* _LINUX_NFS_PAGE_H */
diff --git a/include/linux/node.h b/include/linux/node.h
index 257bb3d6d014..1a557c589ecb 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -17,14 +17,81 @@
#include <linux/device.h>
#include <linux/cpumask.h>
+#include <linux/list.h>
#include <linux/workqueue.h>
+/**
+ * struct node_hmem_attrs - heterogeneous memory performance attributes
+ *
+ * @read_bandwidth: Read bandwidth in MB/s
+ * @write_bandwidth: Write bandwidth in MB/s
+ * @read_latency: Read latency in nanoseconds
+ * @write_latency: Write latency in nanoseconds
+ */
+struct node_hmem_attrs {
+ unsigned int read_bandwidth;
+ unsigned int write_bandwidth;
+ unsigned int read_latency;
+ unsigned int write_latency;
+};
+
+enum cache_indexing {
+ NODE_CACHE_DIRECT_MAP,
+ NODE_CACHE_INDEXED,
+ NODE_CACHE_OTHER,
+};
+
+enum cache_write_policy {
+ NODE_CACHE_WRITE_BACK,
+ NODE_CACHE_WRITE_THROUGH,
+ NODE_CACHE_WRITE_OTHER,
+};
+
+/**
+ * struct node_cache_attrs - system memory caching attributes
+ *
+ * @indexing: The ways memory blocks may be placed in cache
+ * @write_policy: Write back or write through policy
+ * @size: Total size of cache in bytes
+ * @line_size: Number of bytes fetched on a cache miss
+ * @level: The cache hierarchy level
+ */
+struct node_cache_attrs {
+ enum cache_indexing indexing;
+ enum cache_write_policy write_policy;
+ u64 size;
+ u16 line_size;
+ u8 level;
+};
+
+#ifdef CONFIG_HMEM_REPORTING
+void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs);
+void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
+ unsigned access);
+#else
+static inline void node_add_cache(unsigned int nid,
+ struct node_cache_attrs *cache_attrs)
+{
+}
+
+static inline void node_set_perf_attrs(unsigned int nid,
+ struct node_hmem_attrs *hmem_attrs,
+ unsigned access)
+{
+}
+#endif
+
struct node {
struct device dev;
+ struct list_head access_list;
#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
struct work_struct node_work;
#endif
+#ifdef CONFIG_HMEM_REPORTING
+ struct list_head cache_attrs;
+ struct device *cache_dev;
+#endif
};
struct memory_block;
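A hedged sketch of how a firmware parser (for example, HMAT handling code) might publish the new attributes for memory node 1 as accessed from CPU node 0. All numbers and the function name are made up for illustration:

static void example_report_node1(void)
{
	struct node_hmem_attrs perf = {
		.read_bandwidth		= 10200,	/* MB/s */
		.write_bandwidth	= 10200,	/* MB/s */
		.read_latency		= 120,		/* ns */
		.write_latency		= 180,		/* ns */
	};
	struct node_cache_attrs cache = {
		.indexing	= NODE_CACHE_DIRECT_MAP,
		.write_policy	= NODE_CACHE_WRITE_BACK,
		.size		= 16ULL << 30,		/* 16 GiB */
		.line_size	= 64,
		.level		= 1,
	};

	node_set_perf_attrs(1, &perf, 0);		/* access class 0 */
	node_add_cache(1, &cache);
	register_memory_node_under_compute_node(1, 0, 0);
}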
@@ -75,6 +142,10 @@ extern int register_mem_sect_under_node(struct memory_block *mem_blk,
extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
unsigned long phys_index);
+extern int register_memory_node_under_compute_node(unsigned int mem_nid,
+ unsigned int cpu_nid,
+ unsigned access);
+
#ifdef CONFIG_HUGETLBFS
extern void register_hugetlbfs_with_node(node_registration_func_t doregister,
node_registration_func_t unregister);
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 2bb349035431..c48e96436f56 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -17,12 +17,6 @@
-/* FC Port role bitmask - can merge with FC Port Roles in fc transport */
-#define FC_PORT_ROLE_NVME_INITIATOR 0x10
-#define FC_PORT_ROLE_NVME_TARGET 0x20
-#define FC_PORT_ROLE_NVME_DISCOVERY 0x40
-
-
/**
* struct nvme_fc_port_info - port-specific ids and FC connection-specific
* data element used during NVME Host role
diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h
index 3aa97b98dc89..3ec8e50efa16 100644
--- a/include/linux/nvme-rdma.h
+++ b/include/linux/nvme-rdma.h
@@ -77,7 +77,7 @@ struct nvme_rdma_cm_rep {
* struct nvme_rdma_cm_rej - rdma connect reject
*
* @recfmt: format of the RDMA Private Data
- * @fsts: error status for the associated connect request
+ * @sts: error status for the associated connect request
*/
struct nvme_rdma_cm_rej {
__le16 recfmt;
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index baa49e6a23cc..c40720cb59ac 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -967,8 +967,13 @@ struct nvme_get_log_page_command {
__le16 numdl;
__le16 numdu;
__u16 rsvd11;
- __le32 lpol;
- __le32 lpou;
+ union {
+ struct {
+ __le32 lpol;
+ __le32 lpou;
+ };
+ __le64 lpo;
+ };
__u32 rsvd14[2];
};
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index 312bfa5efd80..8f8be5b00060 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -61,6 +61,7 @@ void nvmem_cell_put(struct nvmem_cell *cell);
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len);
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len);
+int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val);
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val);
/* direct nvmem device read/write interface */
@@ -122,6 +123,12 @@ static inline int nvmem_cell_write(struct nvmem_cell *cell,
return -EOPNOTSUPP;
}
+static inline int nvmem_cell_read_u16(struct device *dev,
+ const char *cell_id, u16 *val)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int nvmem_cell_read_u32(struct device *dev,
const char *cell_id, u32 *val)
{
diff --git a/include/linux/of.h b/include/linux/of.h
index e240992e5cb6..0cf857012f11 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -234,8 +234,8 @@ extern struct device_node *of_find_all_nodes(struct device_node *prev);
static inline u64 of_read_number(const __be32 *cell, int size)
{
u64 r = 0;
- while (size--)
- r = (r << 32) | be32_to_cpu(*(cell++));
+ for (; size--; cell++)
+ r = (r << 32) | be32_to_cpu(*cell);
return r;
}
@@ -1449,7 +1449,8 @@ int of_overlay_notifier_unregister(struct notifier_block *nb);
#else
-static inline int of_overlay_fdt_apply(void *overlay_fdt, int *ovcs_id)
+static inline int of_overlay_fdt_apply(void *overlay_fdt, u32 overlay_fdt_size,
+ int *ovcs_id)
{
return -ENOTSUPP;
}
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index d2fa9ca42e9a..7f30446348c4 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -93,6 +93,24 @@ enum OID {
OID_authorityKeyIdentifier, /* 2.5.29.35 */
OID_extKeyUsage, /* 2.5.29.37 */
+ /* EC-RDSA */
+ OID_gostCPSignA, /* 1.2.643.2.2.35.1 */
+ OID_gostCPSignB, /* 1.2.643.2.2.35.2 */
+ OID_gostCPSignC, /* 1.2.643.2.2.35.3 */
+ OID_gost2012PKey256, /* 1.2.643.7.1.1.1.1 */
+ OID_gost2012PKey512, /* 1.2.643.7.1.1.1.2 */
+ OID_gost2012Digest256, /* 1.2.643.7.1.1.2.2 */
+ OID_gost2012Digest512, /* 1.2.643.7.1.1.2.3 */
+ OID_gost2012Signature256, /* 1.2.643.7.1.1.3.2 */
+ OID_gost2012Signature512, /* 1.2.643.7.1.1.3.3 */
+ OID_gostTC26Sign256A, /* 1.2.643.7.1.2.1.1.1 */
+ OID_gostTC26Sign256B, /* 1.2.643.7.1.2.1.1.2 */
+ OID_gostTC26Sign256C, /* 1.2.643.7.1.2.1.1.3 */
+ OID_gostTC26Sign256D, /* 1.2.643.7.1.2.1.1.4 */
+ OID_gostTC26Sign512A, /* 1.2.643.7.1.2.1.2.1 */
+ OID_gostTC26Sign512B, /* 1.2.643.7.1.2.1.2.2 */
+ OID_gostTC26Sign512C, /* 1.2.643.7.1.2.1.2.3 */
+
OID__NR
};
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index 40b48e2133cb..15eb85de9226 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -36,6 +36,12 @@
#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
#define type_min(T) ((T)((T)-type_max(T)-(T)1))
+/*
+ * Avoids triggering the -Wtype-limits compilation warning
+ * when using unsigned data types to check a < 0.
+ */
+#define is_non_negative(a) ((a) > 0 || (a) == 0)
+#define is_negative(a) (!(is_non_negative(a)))
#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
/*
@@ -227,10 +233,10 @@
typeof(d) _d = d; \
u64 _a_full = _a; \
unsigned int _to_shift = \
- _s >= 0 && _s < 8 * sizeof(*d) ? _s : 0; \
+ is_non_negative(_s) && _s < 8 * sizeof(*d) ? _s : 0; \
*_d = (_a_full << _to_shift); \
- (_to_shift != _s || *_d < 0 || _a < 0 || \
- (*_d >> _to_shift) != _a); \
+ (_to_shift != _s || is_negative(*_d) || is_negative(_a) || \
+ (*_d >> _to_shift) != _a); \
})
/**
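
The helpers above only silence -Wtype-limits inside the shift check; the caller-facing check_shl_overflow() convention is unchanged. A hedged sketch of a caller (names hypothetical):

#include <linux/errno.h>
#include <linux/overflow.h>

/* Convert a block count to bytes, refusing to silently wrap or over-shift */
static int foo_blocks_to_bytes(u32 nblocks, unsigned int shift, u64 *bytes)
{
        if (check_shl_overflow((u64)nblocks, shift, bytes))
                return -ERANGE;

        return 0;
}
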
diff --git a/include/linux/packing.h b/include/linux/packing.h
new file mode 100644
index 000000000000..54667735cc67
--- /dev/null
+++ b/include/linux/packing.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016-2018, NXP Semiconductors
+ * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ */
+#ifndef _LINUX_PACKING_H
+#define _LINUX_PACKING_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#define QUIRK_MSB_ON_THE_RIGHT BIT(0)
+#define QUIRK_LITTLE_ENDIAN BIT(1)
+#define QUIRK_LSW32_IS_FIRST BIT(2)
+
+enum packing_op {
+ PACK,
+ UNPACK,
+};
+
+/**
+ * packing - Convert numbers (currently u64) between a packed and an unpacked
+ * format. Unpacked means laid out in memory in the CPU's native
+ * understanding of integers, while packed means anything else that
+ * requires translation.
+ *
+ * @pbuf: Pointer to a buffer holding the packed value.
+ * @uval: Pointer to an u64 holding the unpacked value.
+ * @startbit: The index (in logical notation, compensated for quirks) where
+ * the packed value starts within pbuf. Must be larger than, or
+ * equal to, endbit.
+ * @endbit: The index (in logical notation, compensated for quirks) where
+ * the packed value ends within pbuf. Must be smaller than, or equal
+ * to, startbit.
+ * @op: If PACK, then uval will be treated as const pointer and copied (packed)
+ * into pbuf, between startbit and endbit.
+ * If UNPACK, then pbuf will be treated as const pointer and the logical
+ * value between startbit and endbit will be copied (unpacked) to uval.
+ * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and
+ * QUIRK_MSB_ON_THE_RIGHT.
+ *
+ * Return: 0 on success, EINVAL or ERANGE if called incorrectly. Assuming
+ * correct usage, return code may be discarded.
+ * If op is PACK, pbuf is modified.
+ * If op is UNPACK, uval is modified.
+ */
+int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen,
+ enum packing_op op, u8 quirks);
+
+#endif
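
An illustrative pair of wrappers for the new API (the field placement and names are made up); startbit/endbit follow the logical notation described in the kernel-doc above:

#include <linux/packing.h>

/* Pack a value into bits 43..32 of a register image held in regbuf */
static int foo_pack_field(void *regbuf, size_t buflen, u64 field)
{
        return packing(regbuf, &field, 43, 32, buflen, PACK, 0);
}

/* Unpack the same bits back into *field */
static int foo_unpack_field(void *regbuf, size_t buflen, u64 *field)
{
        return packing(regbuf, field, 43, 32, buflen, UNPACK, 0);
}
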
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 4eb26d278046..280ae96dc4c3 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -41,16 +41,6 @@ int move_freepages_block(struct zone *zone, struct page *page,
/*
* Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
- * If specified range includes migrate types other than MOVABLE or CMA,
- * this will fail with -EBUSY.
- *
- * For isolating all pages in the range finally, the caller have to
- * free all pages in the range. test_page_isolated() can be used for
- * test it.
- *
- * The following flags are allowed (they can be combined in a bit mask)
- * SKIP_HWPOISON - ignore hwpoison pages
- * REPORT_FAILURE - report details about the failure to isolate the range
*/
int
start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index bcf909d0de5f..9ec3544baee2 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -333,6 +333,19 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
mapping_gfp_mask(mapping));
}
+static inline struct page *find_subpage(struct page *page, pgoff_t offset)
+{
+ unsigned long mask;
+
+ if (PageHuge(page))
+ return page;
+
+ VM_BUG_ON_PAGE(PageTail(page), page);
+
+ mask = (1UL << compound_order(page)) - 1;
+ return page + (offset & mask);
+}
+
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
@@ -360,9 +373,6 @@ static inline unsigned find_get_pages_tag(struct address_space *mapping,
return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
nr_pages, pages);
}
-unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
- xa_mark_t tag, unsigned int nr_entries,
- struct page **entries, pgoff_t *indices);
struct page *grab_cache_page_write_begin(struct address_space *mapping,
pgoff_t index, unsigned flags);
@@ -527,15 +537,7 @@ static inline int wait_on_page_locked_killable(struct page *page)
extern void put_and_wait_on_page_locked(struct page *page);
-/*
- * Wait for a page to complete writeback
- */
-static inline void wait_on_page_writeback(struct page *page)
-{
- if (PageWriteback(page))
- wait_on_page_bit(page, PG_writeback);
-}
-
+void wait_on_page_writeback(struct page *page);
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);
diff --git a/include/linux/parport.h b/include/linux/parport.h
index f41f1d041e2c..397607a0c0eb 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -460,7 +460,6 @@ extern size_t parport_ieee1284_epp_read_addr (struct parport *,
void *, size_t, int);
/* IEEE1284.3 functions */
-#define daisy_dev_name "Device ID probe"
extern int parport_daisy_init (struct parport *port);
extern void parport_daisy_fini (struct parport *port);
extern struct pardevice *parport_open (int devnum, const char *name);
@@ -469,18 +468,6 @@ extern ssize_t parport_device_id (int devnum, char *buffer, size_t len);
extern void parport_daisy_deselect_all (struct parport *port);
extern int parport_daisy_select (struct parport *port, int daisy, int mode);
-#ifdef CONFIG_PARPORT_1284
-extern int daisy_drv_init(void);
-extern void daisy_drv_exit(void);
-#else
-static inline int daisy_drv_init(void)
-{
- return 0;
-}
-
-static inline void daisy_drv_exit(void) {}
-#endif
-
/* Lowlevel drivers _can_ call this support function to handle irqs. */
static inline void parport_generic_irq(struct parport *port)
{
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index 29efa09d686b..a73164c85e78 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -56,6 +56,7 @@ extern struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */
extern struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */
extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */
extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
+extern struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */
#endif
#ifdef CONFIG_PCI_HOST_COMMON
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index c3ffa3917f88..f641badc2c61 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -109,6 +109,7 @@ struct pci_epc {
* @reserved_bar: bitmap to indicate reserved BAR unavailable to function driver
* @bar_fixed_64bit: bitmap to indicate fixed 64bit BARs
* @bar_fixed_size: Array specifying the size supported by each BAR
+ * @align: alignment size required for BAR buffer allocation
*/
struct pci_epc_features {
unsigned int linkup_notifier : 1;
@@ -117,6 +118,7 @@ struct pci_epc_features {
u8 reserved_bar;
u8 bar_fixed_64bit;
u64 bar_fixed_size[BAR_5 + 1];
+ size_t align;
};
#define to_pci_epc(device) container_of((device), struct pci_epc, dev)
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index ec02f58758c8..2d6f07556682 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -149,7 +149,8 @@ void pci_epf_destroy(struct pci_epf *epf);
int __pci_epf_register_driver(struct pci_epf_driver *driver,
struct module *owner);
void pci_epf_unregister_driver(struct pci_epf_driver *driver);
-void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar);
+void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar,
+ size_t align);
void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar);
int pci_epf_bind(struct pci_epf *epf);
void pci_epf_unbind(struct pci_epf *epf);
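
With the extra align parameter, endpoint function drivers are expected to pass the controller's alignment constraint through. A sketch (the wrapper name is hypothetical), assuming the controller exposes its constraints via pci_epc_get_features():

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

/* Allocate BAR_0 backing memory honouring the controller's alignment */
static void *foo_epf_alloc_bar0(struct pci_epf *epf, size_t size)
{
        const struct pci_epc_features *features;

        features = pci_epc_get_features(epf->epc, epf->func_no);
        if (!features)
                return NULL;

        return pci_epf_alloc_space(epf, size, BAR_0, features->align);
}
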
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 77448215ef5b..4a5a84d7bdd4 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -348,6 +348,8 @@ struct pci_dev {
unsigned int hotplug_user_indicators:1; /* SlotCtl indicators
controlled exclusively by
user sysfs */
+ unsigned int clear_retrain_link:1; /* Need to clear Retrain Link
+ bit manually */
unsigned int d3_delay; /* D3->D0 transition time in ms */
unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
@@ -490,6 +492,7 @@ struct pci_host_bridge {
void *sysdata;
int busnr;
struct list_head windows; /* resource_entry */
+ struct list_head dma_ranges; /* dma ranges resource list */
u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */
int (*map_irq)(const struct pci_dev *, u8, u8);
void (*release_fn)(struct pci_host_bridge *);
@@ -596,6 +599,11 @@ struct pci_bus {
#define to_pci_bus(n) container_of(n, struct pci_bus, dev)
+static inline u16 pci_dev_id(struct pci_dev *dev)
+{
+ return PCI_DEVID(dev->bus->number, dev->devfn);
+}
+
/*
* Returns true if the PCI bus is root (behind host-PCI bridge),
* false otherwise
@@ -1233,7 +1241,6 @@ int __must_check pci_request_regions(struct pci_dev *, const char *);
int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
void pci_release_regions(struct pci_dev *);
int __must_check pci_request_region(struct pci_dev *, int, const char *);
-int __must_check pci_request_region_exclusive(struct pci_dev *, int, const char *);
void pci_release_region(struct pci_dev *, int);
int pci_request_selected_regions(struct pci_dev *, int, const char *);
int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);
@@ -1521,21 +1528,6 @@ static inline void pcie_ecrc_get_policy(char *str) { }
bool pci_ats_disabled(void);
-#ifdef CONFIG_PCI_ATS
-/* Address Translation Service */
-void pci_ats_init(struct pci_dev *dev);
-int pci_enable_ats(struct pci_dev *dev, int ps);
-void pci_disable_ats(struct pci_dev *dev);
-int pci_ats_queue_depth(struct pci_dev *dev);
-int pci_ats_page_aligned(struct pci_dev *dev);
-#else
-static inline void pci_ats_init(struct pci_dev *d) { }
-static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
-static inline void pci_disable_ats(struct pci_dev *d) { }
-static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
-static inline int pci_ats_page_aligned(struct pci_dev *dev) { return 0; }
-#endif
-
#ifdef CONFIG_PCIE_PTM
int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
#else
@@ -1728,8 +1720,24 @@ static inline int pci_irqd_intx_xlate(struct irq_domain *d,
static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
struct pci_dev *dev)
{ return NULL; }
+static inline bool pci_ats_disabled(void) { return true; }
#endif /* CONFIG_PCI */
+#ifdef CONFIG_PCI_ATS
+/* Address Translation Service */
+void pci_ats_init(struct pci_dev *dev);
+int pci_enable_ats(struct pci_dev *dev, int ps);
+void pci_disable_ats(struct pci_dev *dev);
+int pci_ats_queue_depth(struct pci_dev *dev);
+int pci_ats_page_aligned(struct pci_dev *dev);
+#else
+static inline void pci_ats_init(struct pci_dev *d) { }
+static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
+static inline void pci_disable_ats(struct pci_dev *d) { }
+static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
+static inline int pci_ats_page_aligned(struct pci_dev *dev) { return 0; }
+#endif
+
/* Include architecture-dependent settings and functions */
#include <asm/pci.h>
@@ -2363,4 +2371,7 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg)
#define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg)
+#define pci_notice_ratelimited(pdev, fmt, arg...) \
+ dev_notice_ratelimited(&(pdev)->dev, fmt, ##arg)
+
#endif /* LINUX_PCI_H */
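
A short sketch combining the two additions; the wrapper name and message are illustrative only:

#include <linux/pci.h>

static void foo_warn_retrain(struct pci_dev *pdev)
{
        /* pci_dev_id() replaces open-coded PCI_DEVID(bus->number, devfn) */
        pci_notice_ratelimited(pdev, "retraining link of device %04x\n",
                               pci_dev_id(pdev));
}
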
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 7acc9f91e72b..f694eb2ca978 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -124,26 +124,72 @@ struct hpp_type2 {
u32 sec_unc_err_mask_or;
};
-struct hotplug_params {
- struct hpp_type0 *t0; /* Type0: NULL if not available */
- struct hpp_type1 *t1; /* Type1: NULL if not available */
- struct hpp_type2 *t2; /* Type2: NULL if not available */
- struct hpp_type0 type0_data;
- struct hpp_type1 type1_data;
- struct hpp_type2 type2_data;
+/*
+ * _HPX PCI Express Setting Record (Type 3)
+ */
+struct hpx_type3 {
+ u16 device_type;
+ u16 function_type;
+ u16 config_space_location;
+ u16 pci_exp_cap_id;
+ u16 pci_exp_cap_ver;
+ u16 pci_exp_vendor_id;
+ u16 dvsec_id;
+ u16 dvsec_rev;
+ u16 match_offset;
+ u32 match_mask_and;
+ u32 match_value;
+ u16 reg_offset;
+ u32 reg_mask_and;
+ u32 reg_mask_or;
+};
+
+struct hotplug_program_ops {
+ void (*program_type0)(struct pci_dev *dev, struct hpp_type0 *hpp);
+ void (*program_type1)(struct pci_dev *dev, struct hpp_type1 *hpp);
+ void (*program_type2)(struct pci_dev *dev, struct hpp_type2 *hpp);
+ void (*program_type3)(struct pci_dev *dev, struct hpx_type3 *hpp);
+};
+
+enum hpx_type3_dev_type {
+ HPX_TYPE_ENDPOINT = BIT(0),
+ HPX_TYPE_LEG_END = BIT(1),
+ HPX_TYPE_RC_END = BIT(2),
+ HPX_TYPE_RC_EC = BIT(3),
+ HPX_TYPE_ROOT_PORT = BIT(4),
+ HPX_TYPE_UPSTREAM = BIT(5),
+ HPX_TYPE_DOWNSTREAM = BIT(6),
+ HPX_TYPE_PCI_BRIDGE = BIT(7),
+ HPX_TYPE_PCIE_BRIDGE = BIT(8),
+};
+
+enum hpx_type3_fn_type {
+ HPX_FN_NORMAL = BIT(0),
+ HPX_FN_SRIOV_PHYS = BIT(1),
+ HPX_FN_SRIOV_VIRT = BIT(2),
+};
+
+enum hpx_type3_cfg_loc {
+ HPX_CFG_PCICFG = 0,
+ HPX_CFG_PCIE_CAP = 1,
+ HPX_CFG_PCIE_CAP_EXT = 2,
+ HPX_CFG_VEND_CAP = 3,
+ HPX_CFG_DVSEC = 4,
+ HPX_CFG_MAX,
};
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
-int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp);
+int pci_acpi_program_hp_params(struct pci_dev *dev,
+ const struct hotplug_program_ops *hp_ops);
bool pciehp_is_native(struct pci_dev *bridge);
int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge);
bool shpchp_is_native(struct pci_dev *bridge);
int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle);
int acpi_pci_detect_ejectable(acpi_handle handle);
#else
-static inline int pci_get_hp_params(struct pci_dev *dev,
- struct hotplug_params *hpp)
+static inline int pci_acpi_program_hp_params(struct pci_dev *dev,
+ const struct hotplug_program_ops *hp_ops)
{
return -ENODEV;
}
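
Callers now hand pci_acpi_program_hp_params() a table of programming callbacks instead of receiving a struct hotplug_params back. A skeletal sketch with empty, hypothetical callbacks:

#include <linux/pci.h>
#include <linux/pci_hotplug.h>

static void foo_program_type0(struct pci_dev *dev, struct hpp_type0 *hpp) { }
static void foo_program_type1(struct pci_dev *dev, struct hpp_type1 *hpp) { }
static void foo_program_type2(struct pci_dev *dev, struct hpp_type2 *hpp) { }
static void foo_program_type3(struct pci_dev *dev, struct hpx_type3 *hpx) { }

static const struct hotplug_program_ops foo_hp_ops = {
        .program_type0 = foo_program_type0,
        .program_type1 = foo_program_type1,
        .program_type2 = foo_program_type2,
        .program_type3 = foo_program_type3,
};

static void foo_configure_device(struct pci_dev *dev)
{
        /* Returns -ENODEV when CONFIG_ACPI is not set */
        pci_acpi_program_hp_params(dev, &foo_hp_ops);
}
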
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 70b7123f38c7..9909dc0e273a 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -26,16 +26,10 @@
#define PCPU_MIN_ALLOC_SHIFT 2
#define PCPU_MIN_ALLOC_SIZE (1 << PCPU_MIN_ALLOC_SHIFT)
-/* number of bits per page, used to trigger a scan if blocks are > PAGE_SIZE */
-#define PCPU_BITS_PER_PAGE (PAGE_SIZE >> PCPU_MIN_ALLOC_SHIFT)
-
/*
- * This determines the size of each metadata block. There are several subtle
- * constraints around this constant. The reserved region must be a multiple of
- * PCPU_BITMAP_BLOCK_SIZE. Additionally, PCPU_BITMAP_BLOCK_SIZE must be a
- * multiple of PAGE_SIZE or PAGE_SIZE must be a multiple of
- * PCPU_BITMAP_BLOCK_SIZE to align with the populated page map. The unit_size
- * also has to be a multiple of PCPU_BITMAP_BLOCK_SIZE to ensure full blocks.
+ * The PCPU_BITMAP_BLOCK_SIZE must be the same size as PAGE_SIZE as the
+ * updating of hints is used to manage the nr_empty_pop_pages in both
+ * the chunk and globally.
*/
#define PCPU_BITMAP_BLOCK_SIZE PAGE_SIZE
#define PCPU_BITMAP_BLOCK_BITS (PCPU_BITMAP_BLOCK_SIZE >> \
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index e47ef764f613..15a82ff0aefe 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -240,7 +240,6 @@ struct perf_event;
#define PERF_PMU_CAP_NO_INTERRUPT 0x01
#define PERF_PMU_CAP_NO_NMI 0x02
#define PERF_PMU_CAP_AUX_NO_SG 0x04
-#define PERF_PMU_CAP_AUX_SW_DOUBLEBUF 0x08
#define PERF_PMU_CAP_EXCLUSIVE 0x10
#define PERF_PMU_CAP_ITRACE 0x20
#define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40
@@ -464,7 +463,7 @@ enum perf_addr_filter_action_t {
/**
* struct perf_addr_filter - address range filter definition
* @entry: event's filter list linkage
- * @inode: object file's inode for file-based filters
+ * @path: object file's path for file-based filters
* @offset: filter range offset
* @size: filter range size (size==0 means single address trigger)
* @action: filter/start/stop
@@ -888,6 +887,9 @@ extern void perf_sched_cb_dec(struct pmu *pmu);
extern void perf_sched_cb_inc(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
+
+extern void perf_pmu_resched(struct pmu *pmu);
+
extern int perf_event_refresh(struct perf_event *event, int refresh);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
@@ -1055,12 +1057,18 @@ static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned lo
#endif
/*
- * Take a snapshot of the regs. Skip ip and frame pointer to
- * the nth caller. We only need a few of the regs:
+ * When generating a perf sample in-line, instead of from an interrupt /
+ * exception, we lack a pt_regs. This is typically used from software events
+ * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints.
+ *
+ * We typically don't need a full set, but (for x86) do require:
* - ip for PERF_SAMPLE_IP
* - cs for user_mode() tests
- * - bp for callchains
- * - eflags, for future purposes, just in case
+ * - sp for PERF_SAMPLE_CALLCHAIN
+ * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
+ *
+ * NOTE: assumes @regs is otherwise already 0 filled; this is important for
+ * things like PERF_SAMPLE_REGS_INTR.
*/
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 34084892a466..073fb151b5a9 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -308,13 +308,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
*
* HALTED: PHY is up, but no polling or interrupts are done. Or
* PHY is in an error state.
- *
- * - phy_start moves to RESUMING
- *
- * RESUMING: PHY was halted, but now wants to run again.
- * - If we are forcing, or aneg is done, timer moves to RUNNING
- * - If aneg is not done, timer moves to AN
- * - phy_stop moves to HALTED
+ * - phy_start moves to UP
*/
enum phy_state {
PHY_DOWN = 0,
@@ -324,7 +318,6 @@ enum phy_state {
PHY_RUNNING,
PHY_NOLINK,
PHY_FORCING,
- PHY_RESUMING
};
/**
@@ -345,6 +338,7 @@ struct phy_c45_device_ids {
* is_c45: Set to true if this phy uses clause 45 addressing.
* is_internal: Set to true if this phy is internal to a MAC.
* is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc.
+ * is_gigabit_capable: Set to true if PHY supports 1000Mbps
* has_fixups: Set to true if this phy has fixups/quirks.
* suspended: Set to true if this phy has been suspended successfully.
* sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
@@ -382,6 +376,7 @@ struct phy_device {
unsigned is_c45:1;
unsigned is_internal:1;
unsigned is_pseudo_fixed_link:1;
+ unsigned is_gigabit_capable:1;
unsigned has_fixups:1;
unsigned suspended:1;
unsigned sysfs_links:1;
@@ -390,6 +385,7 @@ struct phy_device {
unsigned autoneg:1;
/* The most recently read link state */
unsigned link:1;
+ unsigned autoneg_complete:1;
/* Interrupts are enabled */
unsigned interrupts:1;
@@ -1075,6 +1071,7 @@ void phy_attached_info(struct phy_device *phydev);
/* Clause 22 PHY */
int genphy_config_init(struct phy_device *phydev);
+int genphy_read_abilities(struct phy_device *phydev);
int genphy_setup_forced(struct phy_device *phydev);
int genphy_restart_aneg(struct phy_device *phydev);
int genphy_config_eee_advert(struct phy_device *phydev);
@@ -1150,6 +1147,7 @@ void phy_request_interrupt(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
int phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode);
+void phy_advertise_supported(struct phy_device *phydev);
void phy_support_sym_pause(struct phy_device *phydev);
void phy_support_asym_pause(struct phy_device *phydev);
void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx,
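
A hedged sketch of how the two new helpers might be combined in a PHY driver's feature-probe path (the function name is hypothetical):

#include <linux/phy.h>

static int foo_phy_probe_features(struct phy_device *phydev)
{
        int ret;

        /* Fill phydev->supported from the standard MII registers */
        ret = genphy_read_abilities(phydev);
        if (ret)
                return ret;

        /* Advertise everything the PHY reports it can do */
        phy_advertise_supported(phydev);
        return 0;
}
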
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 3f350e2749fe..ef13aea1d370 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -64,6 +64,7 @@ union phy_configure_opts {
* @set_mode: set the mode of the phy
* @reset: resetting the phy
* @calibrate: calibrate the phy
+ * @release: ops to be performed while the consumer relinquishes the PHY
* @owner: the module owner containing the ops
*/
struct phy_ops {
@@ -105,6 +106,7 @@ struct phy_ops {
union phy_configure_opts *opts);
int (*reset)(struct phy *phy);
int (*calibrate)(struct phy *phy);
+ void (*release)(struct phy *phy);
struct module *owner;
};
diff --git a/include/linux/pid.h b/include/linux/pid.h
index b6f4ba16065a..3c8ef5a199ca 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -66,6 +66,8 @@ struct pid
extern struct pid init_struct_pid;
+extern const struct file_operations pidfd_fops;
+
static inline struct pid *get_pid(struct pid *pid)
{
if (pid)
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 787d224ff43e..5c626fdc10db 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -101,18 +101,20 @@ struct pipe_buf_operations {
/*
* Get a reference to the pipe buffer.
*/
- void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
+ bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
};
/**
* pipe_buf_get - get a reference to a pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to get a reference to
+ *
+ * Return: %true if the reference was successfully obtained.
*/
-static inline void pipe_buf_get(struct pipe_inode_info *pipe,
+static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
- buf->ops->get(pipe, buf);
+ return buf->ops->get(pipe, buf);
}
/**
@@ -171,9 +173,10 @@ struct pipe_inode_info *alloc_pipe_info(void);
void free_pipe_info(struct pipe_inode_info *);
/* Generic pipe buffer ops functions */
-void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
+bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *);
void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
void pipe_buf_mark_unmergeable(struct pipe_buffer *buf);
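
Since pipe_buf_get() can now fail, callers have to check its result; a sketch (the recovery policy shown is illustrative):

#include <linux/errno.h>
#include <linux/pipe_fs_i.h>

static int foo_take_buf_ref(struct pipe_inode_info *pipe,
                            struct pipe_buffer *buf)
{
        if (!pipe_buf_get(pipe, buf))
                return -EAGAIN;

        /* ... use the buffer, then drop the reference ... */
        pipe_buf_release(pipe, buf);
        return 0;
}
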
diff --git a/include/linux/platform_data/ads7828.h b/include/linux/platform_data/ads7828.h
index 3245f45f9d77..a3370a007702 100644
--- a/include/linux/platform_data/ads7828.h
+++ b/include/linux/platform_data/ads7828.h
@@ -4,7 +4,7 @@
* Copyright (c) 2012 Savoir-faire Linux Inc.
* Vivien Didelot <vivien.didelot@savoirfairelinux.com>
*
- * For further information, see the Documentation/hwmon/ads7828 file.
+ * For further information, see the Documentation/hwmon/ads7828.rst file.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/include/linux/platform_data/ds620.h b/include/linux/platform_data/ds620.h
index 6ef58bb77e46..f0ce22a78bb8 100644
--- a/include/linux/platform_data/ds620.h
+++ b/include/linux/platform_data/ds620.h
@@ -14,7 +14,7 @@ struct ds620_platform_data {
* 1 = PO_LOW
* 2 = PO_HIGH
*
- * (see Documentation/hwmon/ds620)
+ * (see Documentation/hwmon/ds620.rst)
*/
int pomode;
};
diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h
index b8686c00f15f..fef4b081b736 100644
--- a/include/linux/platform_data/elm.h
+++ b/include/linux/platform_data/elm.h
@@ -60,6 +60,6 @@ static inline int elm_config(struct device *dev, enum bch_ecc bch_type,
{
return -ENOSYS;
}
-#endif /* CONFIG_MTD_NAND_ECC_BCH */
+#endif /* CONFIG_MTD_NAND_OMAP_BCH */
#endif /* __ELM_H */
diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h
index 6d07eebb3f75..7c36370c062e 100644
--- a/include/linux/platform_data/gpio-omap.h
+++ b/include/linux/platform_data/gpio-omap.h
@@ -200,8 +200,6 @@ struct omap_gpio_platform_data {
bool is_mpuio; /* whether the bank is of type MPUIO */
u32 non_wakeup_gpios;
- u32 quirks; /* Version specific quirks mask */
-
struct omap_gpio_reg_offs *regs;
/* Return context loss count due to PM states changing */
diff --git a/include/linux/platform_data/gpio/gpio-amd-fch.h b/include/linux/platform_data/gpio/gpio-amd-fch.h
index a867637e172d..9e46678edb2a 100644
--- a/include/linux/platform_data/gpio/gpio-amd-fch.h
+++ b/include/linux/platform_data/gpio/gpio-amd-fch.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL+ */
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* AMD FCH gpio driver platform-data
diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h
index 9f0aa1b48c78..dde59fd3590f 100644
--- a/include/linux/platform_data/ina2xx.h
+++ b/include/linux/platform_data/ina2xx.h
@@ -7,7 +7,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * For further information, see the Documentation/hwmon/ina2xx file.
+ * For further information, see the Documentation/hwmon/ina2xx.rst file.
*/
/**
diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h
index 7815d50c26ff..2bc51b822956 100644
--- a/include/linux/platform_data/macb.h
+++ b/include/linux/platform_data/macb.h
@@ -12,19 +12,10 @@
/**
* struct macb_platform_data - platform data for MACB Ethernet
- * @phy_mask: phy mask passed when register the MDIO bus
- * within the driver
- * @phy_irq_pin: PHY IRQ
- * @is_rmii: using RMII interface?
- * @rev_eth_addr: reverse Ethernet address byte order
* @pclk: platform clock
* @hclk: AHB clock
*/
struct macb_platform_data {
- u32 phy_mask;
- int phy_irq_pin;
- u8 is_rmii;
- u8 rev_eth_addr;
struct clk *pclk;
struct clk *hclk;
};
diff --git a/include/linux/platform_data/max197.h b/include/linux/platform_data/max197.h
index 8da8f94ee15c..2bbd0919bc89 100644
--- a/include/linux/platform_data/max197.h
+++ b/include/linux/platform_data/max197.h
@@ -8,7 +8,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * For further information, see the Documentation/hwmon/max197 file.
+ * For further information, see the Documentation/hwmon/max197.rst file.
*/
#ifndef _PDATA_MAX197_H
diff --git a/include/linux/platform_data/mv88e6xxx.h b/include/linux/platform_data/mv88e6xxx.h
index 963730b44aea..21452a9365e1 100644
--- a/include/linux/platform_data/mv88e6xxx.h
+++ b/include/linux/platform_data/mv88e6xxx.h
@@ -13,6 +13,7 @@ struct dsa_mv88e6xxx_pdata {
unsigned int enabled_ports;
struct net_device *netdev;
u32 eeprom_len;
+ int irq;
};
#endif
diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h
index ee03d429742b..5fa115d3ea4b 100644
--- a/include/linux/platform_data/ntc_thermistor.h
+++ b/include/linux/platform_data/ntc_thermistor.h
@@ -42,7 +42,7 @@ struct ntc_thermistor_platform_data {
* read_uV()
*
* How to setup pullup_ohm, pulldown_ohm, and connect is
- * described at Documentation/hwmon/ntc_thermistor
+ * described at Documentation/hwmon/ntc_thermistor.rst
*
* pullup/down_ohm: 0 for infinite / not-connected
*
diff --git a/include/linux/platform_data/pca954x.h b/include/linux/platform_data/pca954x.h
deleted file mode 100644
index 1712677d5904..000000000000
--- a/include/linux/platform_data/pca954x.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- *
- * pca954x.h - I2C multiplexer/switch support
- *
- * Copyright (c) 2008-2009 Rodolfo Giometti <giometti@linux.it>
- * Copyright (c) 2008-2009 Eurotech S.p.A. <info@eurotech.it>
- * Michael Lawnick <michael.lawnick.ext@nsn.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-
-#ifndef _LINUX_I2C_PCA954X_H
-#define _LINUX_I2C_PCA954X_H
-
-/* Platform data for the PCA954x I2C multiplexers */
-
-/* Per channel initialisation data:
- * @adap_id: bus number for the adapter. 0 = don't care
- * @deselect_on_exit: set this entry to 1, if your H/W needs deselection
- * of this channel after transaction.
- *
- */
-struct pca954x_platform_mode {
- int adap_id;
- unsigned int deselect_on_exit:1;
- unsigned int class;
-};
-
-/* Per mux/switch data, used with i2c_register_board_info */
-struct pca954x_platform_data {
- struct pca954x_platform_mode *modes;
- int num_modes;
-};
-
-#endif /* _LINUX_I2C_PCA954X_H */
diff --git a/include/linux/platform_data/spi-ep93xx.h b/include/linux/platform_data/spi-ep93xx.h
index eb16c6739ac2..b439f2a896e0 100644
--- a/include/linux/platform_data/spi-ep93xx.h
+++ b/include/linux/platform_data/spi-ep93xx.h
@@ -6,13 +6,9 @@ struct spi_device;
/**
* struct ep93xx_spi_info - EP93xx specific SPI descriptor
- * @chipselect: array of gpio numbers to use as chip selects
- * @num_chipselect: ARRAY_SIZE(chipselect)
* @use_dma: use DMA for the transfers
*/
struct ep93xx_spi_info {
- int *chipselect;
- int num_chipselect;
bool use_dma;
};
diff --git a/include/linux/platform_data/wilco-ec.h b/include/linux/platform_data/wilco-ec.h
index 446473a46b88..1ff224793c99 100644
--- a/include/linux/platform_data/wilco-ec.h
+++ b/include/linux/platform_data/wilco-ec.h
@@ -14,10 +14,6 @@
/* Message flags for using the mailbox() interface */
#define WILCO_EC_FLAG_NO_RESPONSE BIT(0) /* EC does not respond */
#define WILCO_EC_FLAG_EXTENDED_DATA BIT(1) /* EC returns 256 data bytes */
-#define WILCO_EC_FLAG_RAW_REQUEST BIT(2) /* Do not trim request data */
-#define WILCO_EC_FLAG_RAW_RESPONSE BIT(3) /* Do not trim response data */
-#define WILCO_EC_FLAG_RAW (WILCO_EC_FLAG_RAW_REQUEST | \
- WILCO_EC_FLAG_RAW_RESPONSE)
/* Normal commands have a maximum 32 bytes of data */
#define EC_MAILBOX_DATA_SIZE 32
@@ -56,10 +52,7 @@ struct wilco_ec_device {
* @mailbox_id: Mailbox identifier, specifies the command set.
* @mailbox_version: Mailbox interface version %EC_MAILBOX_VERSION
* @reserved: Set to zero.
- * @data_size: Length of request, data + last 2 bytes of the header.
- * @command: Mailbox command code, unique for each mailbox_id set.
- * @reserved_raw: Set to zero for most commands, but is used by
- * some command types and for raw commands.
+ * @data_size: Length of following data.
*/
struct wilco_ec_request {
u8 struct_version;
@@ -68,8 +61,6 @@ struct wilco_ec_request {
u8 mailbox_version;
u8 reserved;
u16 data_size;
- u8 command;
- u8 reserved_raw;
} __packed;
/**
@@ -79,8 +70,6 @@ struct wilco_ec_request {
* @result: Result code from the EC. Non-zero indicates an error.
* @data_size: Length of the response data buffer.
* @reserved: Set to zero.
- * @mbox0: EC returned data at offset 0 is unused (always 0) so this byte
- * is treated as part of the header instead of the data.
* @data: Response data buffer. Max size is %EC_MAILBOX_DATA_SIZE_EXTENDED.
*/
struct wilco_ec_response {
@@ -89,7 +78,6 @@ struct wilco_ec_response {
u16 result;
u16 data_size;
u8 reserved[2];
- u8 mbox0;
u8 data[0];
} __packed;
@@ -111,21 +99,15 @@ enum wilco_ec_msg_type {
* struct wilco_ec_message - Request and response message.
* @type: Mailbox message type.
* @flags: Message flags, e.g. %WILCO_EC_FLAG_NO_RESPONSE.
- * @command: Mailbox command code.
- * @result: Result code from the EC. Non-zero indicates an error.
* @request_size: Number of bytes to send to the EC.
* @request_data: Buffer containing the request data.
- * @response_size: Number of bytes expected from the EC.
- * This is 32 by default and 256 if the flag
- * is set for %WILCO_EC_FLAG_EXTENDED_DATA
+ * @response_size: Number of bytes to read from EC.
* @response_data: Buffer containing the response data, should be
* response_size bytes and allocated by caller.
*/
struct wilco_ec_message {
enum wilco_ec_msg_type type;
u8 flags;
- u8 command;
- u8 result;
size_t request_size;
void *request_data;
size_t response_size;
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index 53dfc2541960..bfba245636a7 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -67,6 +67,7 @@
/* Input */
#define ASUS_WMI_DEVID_TOUCHPAD 0x00100011
#define ASUS_WMI_DEVID_TOUCHPAD_LED 0x00100012
+#define ASUS_WMI_DEVID_FNLOCK 0x00100023
/* Fan, Thermal */
#define ASUS_WMI_DEVID_THERMAL_CTRL 0x00110011
diff --git a/include/linux/platform_data/x86/clk-pmc-atom.h b/include/linux/platform_data/x86/clk-pmc-atom.h
index 3ab892208343..7a37ac27d0fb 100644
--- a/include/linux/platform_data/x86/clk-pmc-atom.h
+++ b/include/linux/platform_data/x86/clk-pmc-atom.h
@@ -35,10 +35,13 @@ struct pmc_clk {
*
* @base: PMC clock register base offset
* @clks: pointer to set of registered clocks, typically 0..5
+ * @critical: flag to indicate if firmware enabled pmc_plt_clks
+ * should be marked as critical or not
*/
struct pmc_clk_data {
void __iomem *base;
const struct pmc_clk *clks;
+ bool critical;
};
#endif /* __PLATFORM_DATA_X86_CLK_PMC_ATOM_H */
diff --git a/include/linux/platform_data/xilinx-ll-temac.h b/include/linux/platform_data/xilinx-ll-temac.h
new file mode 100644
index 000000000000..368530f98176
--- /dev/null
+++ b/include/linux/platform_data/xilinx-ll-temac.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_XILINX_LL_TEMAC_H
+#define __LINUX_XILINX_LL_TEMAC_H
+
+#include <linux/if_ether.h>
+#include <linux/phy.h>
+
+struct ll_temac_platform_data {
+ bool txcsum; /* Enable/disable TX checksum */
+ bool rxcsum; /* Enable/disable RX checksum */
+ u8 mac_addr[ETH_ALEN]; /* MAC address (6 bytes) */
+ /* Clock frequency for input to MDIO clock generator */
+ u32 mdio_clk_freq;
+ unsigned long long mdio_bus_id; /* Unique id for MDIO bus */
+ int phy_addr; /* Address of the PHY to connect to */
+ phy_interface_t phy_interface; /* PHY interface mode */
+ bool reg_little_endian; /* Little endian TEMAC register access */
+ bool dma_little_endian; /* Little endian DMA register access */
+ /* Pre-initialized mutex to use for synchronizing indirect
+ * register access. When using both interfaces of a single
+ * TEMAC IP block, the same mutex should be passed here, as
+ * they share the same DCR bus bridge.
+ */
+ struct mutex *indirect_mutex;
+ /* DMA channel control setup */
+ u8 tx_irq_timeout; /* TX Interrupt Delay Time-out */
+ u8 tx_irq_count; /* TX Interrupt Coalescing Threshold Count */
+ u8 rx_irq_timeout; /* RX Interrupt Delay Time-out */
+ u8 rx_irq_count; /* RX Interrupt Coalescing Threshold Count */
+};
+
+#endif /* __LINUX_XILINX_LL_TEMAC_H */
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 1ed5874bcee0..0e8e356bed6a 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -16,6 +16,7 @@
#include <linux/of.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
+#include <linux/cpumask.h>
/*
* Flags to control the behaviour of a genpd.
@@ -42,11 +43,22 @@
* GENPD_FLAG_ACTIVE_WAKEUP: Instructs genpd to keep the PM domain powered
* on, in case any of its attached devices is used
* in the wakeup path to serve system wakeups.
+ *
+ * GENPD_FLAG_CPU_DOMAIN: Instructs genpd that it should expect to get
+ * devices attached, which may belong to CPUs or
+ * possibly have subdomains with CPUs attached.
+ * This flag enables the genpd backend driver to
+ * deploy idle power management support for CPUs
+ * and groups of CPUs. Note that the backend
+ * driver must then comply with the so-called
+ * last-man-standing algorithm for the CPUs in the
+ * PM domain.
*/
#define GENPD_FLAG_PM_CLK (1U << 0)
#define GENPD_FLAG_IRQ_SAFE (1U << 1)
#define GENPD_FLAG_ALWAYS_ON (1U << 2)
#define GENPD_FLAG_ACTIVE_WAKEUP (1U << 3)
+#define GENPD_FLAG_CPU_DOMAIN (1U << 4)
enum gpd_status {
GPD_STATE_ACTIVE = 0, /* PM domain is active */
@@ -69,6 +81,7 @@ struct genpd_power_state {
s64 residency_ns;
struct fwnode_handle *fwnode;
ktime_t idle_time;
+ void *data;
};
struct genpd_lock_ops;
@@ -93,6 +106,7 @@ struct generic_pm_domain {
unsigned int suspended_count; /* System suspend device counter */
unsigned int prepared_count; /* Suspend counter of prepared devices */
unsigned int performance_state; /* Aggregated max performance state */
+ cpumask_var_t cpus; /* A cpumask of the attached CPUs */
int (*power_off)(struct generic_pm_domain *domain);
int (*power_on)(struct generic_pm_domain *domain);
struct opp_table *opp_table; /* OPP table of the genpd */
@@ -104,15 +118,17 @@ struct generic_pm_domain {
s64 max_off_time_ns; /* Maximum allowed "suspended" time. */
bool max_off_time_changed;
bool cached_power_down_ok;
+ bool cached_power_down_state_idx;
int (*attach_dev)(struct generic_pm_domain *domain,
struct device *dev);
void (*detach_dev)(struct generic_pm_domain *domain,
struct device *dev);
unsigned int flags; /* Bit field of configs for genpd */
struct genpd_power_state *states;
+ void (*free_states)(struct genpd_power_state *states,
+ unsigned int state_count);
unsigned int state_count; /* number of states */
unsigned int state_idx; /* state that genpd will go to when off */
- void *free; /* Free the state that was allocated for default */
ktime_t on_time;
ktime_t accounting_time;
const struct genpd_lock_ops *lock_ops;
@@ -159,6 +175,7 @@ struct generic_pm_domain_data {
struct pm_domain_data base;
struct gpd_timing_data td;
struct notifier_block nb;
+ int cpu;
unsigned int performance_state;
void *data;
};
@@ -187,6 +204,9 @@ int dev_pm_genpd_set_performance_state(struct device *dev, unsigned int state);
extern struct dev_power_governor simple_qos_governor;
extern struct dev_power_governor pm_domain_always_on_gov;
+#ifdef CONFIG_CPU_IDLE
+extern struct dev_power_governor pm_domain_cpu_gov;
+#endif
#else
static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 24c757a32a7b..b150fe97ce5a 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -102,6 +102,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
unsigned long *freq);
+struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
+ unsigned long u_volt);
struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq);
@@ -207,6 +209,12 @@ static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
return ERR_PTR(-ENOTSUPP);
}
+static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil_by_volt(struct device *dev,
+ unsigned long u_volt)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
+
static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
unsigned long *freq)
{
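
A sketch of the new voltage-based lookup (the wrapper name is hypothetical): pick the fastest OPP whose voltage does not exceed a given cap, then drop the reference.

#include <linux/err.h>
#include <linux/pm_opp.h>

static unsigned long foo_max_freq_at_volt(struct device *dev,
                                          unsigned long u_volt)
{
        struct dev_pm_opp *opp;
        unsigned long freq;

        opp = dev_pm_opp_find_freq_ceil_by_volt(dev, u_volt);
        if (IS_ERR(opp))
                return 0;

        freq = dev_pm_opp_get_freq(opp);
        dev_pm_opp_put(opp);
        return freq;
}
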
diff --git a/include/linux/printk.h b/include/linux/printk.h
index d7c77ed1a4cb..84ea4d094af3 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -347,7 +347,7 @@ extern int kptr_restrict;
#ifdef CONFIG_PRINTK
#define printk_once(fmt, ...) \
({ \
- static bool __print_once __read_mostly; \
+ static bool __section(.data.once) __print_once; \
bool __ret_print_once = !__print_once; \
\
if (!__print_once) { \
@@ -358,7 +358,7 @@ extern int kptr_restrict;
})
#define printk_deferred_once(fmt, ...) \
({ \
- static bool __print_once __read_mostly; \
+ static bool __section(.data.once) __print_once; \
bool __ret_print_once = !__print_once; \
\
if (!__print_once) { \
diff --git a/include/linux/property.h b/include/linux/property.h
index 65d3420dd5d1..a29369c89e6e 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -13,6 +13,7 @@
#ifndef _LINUX_PROPERTY_H_
#define _LINUX_PROPERTY_H_
+#include <linux/bits.h>
#include <linux/fwnode.h>
#include <linux/types.h>
@@ -304,6 +305,23 @@ struct fwnode_handle *
fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port,
u32 endpoint);
+/*
+ * Fwnode lookup flags
+ *
+ * @FWNODE_GRAPH_ENDPOINT_NEXT: In the case of no exact match, look for the
+ * closest endpoint ID greater than the specified
+ * one.
+ * @FWNODE_GRAPH_DEVICE_DISABLED: The device to which the remote
+ * endpoint of the given endpoint belongs
+ * may be disabled.
+ */
+#define FWNODE_GRAPH_ENDPOINT_NEXT BIT(0)
+#define FWNODE_GRAPH_DEVICE_DISABLED BIT(1)
+
+struct fwnode_handle *
+fwnode_graph_get_endpoint_by_id(const struct fwnode_handle *fwnode,
+ u32 port, u32 endpoint, unsigned long flags);
+
#define fwnode_graph_for_each_endpoint(fwnode, child) \
for (child = NULL; \
(child = fwnode_graph_get_next_endpoint(fwnode, child)); )
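
A sketch of the new graph lookup using both flags (the port/endpoint numbers are placeholders):

#include <linux/property.h>

static struct fwnode_handle *foo_get_sensor_ep(struct fwnode_handle *fwnode)
{
        /* Accept the next higher endpoint id and still-disabled remote devices */
        return fwnode_graph_get_endpoint_by_id(fwnode, 0, 0,
                                               FWNODE_GRAPH_ENDPOINT_NEXT |
                                               FWNODE_GRAPH_DEVICE_DISABLED);
}
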
diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
index 827c601841c4..6f89fc8d4b8e 100644
--- a/include/linux/psp-sev.h
+++ b/include/linux/psp-sev.h
@@ -5,8 +5,7 @@
*
* Author: Brijesh Singh <brijesh.singh@amd.com>
*
- * SEV spec 0.14 is available at:
- * http://support.amd.com/TechDocs/55766_SEV-KM API_Specification.pdf
+ * SEV API spec is available at https://developer.amd.com/sev
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index edb9b040c94c..d5084ebd9f03 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -9,6 +9,13 @@
#include <linux/bug.h> /* For BUG_ON. */
#include <linux/pid_namespace.h> /* For task_active_pid_ns. */
#include <uapi/linux/ptrace.h>
+#include <linux/seccomp.h>
+
+/* Wraps seccomp_data and adds sp; seccomp_data is user API, so we don't modify it */
+struct syscall_info {
+ __u64 sp;
+ struct seccomp_data data;
+};
extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
@@ -407,9 +414,7 @@ static inline void user_single_step_report(struct pt_regs *regs)
#define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
#endif
-extern int task_current_syscall(struct task_struct *target, long *callno,
- unsigned long args[6], unsigned int maxargs,
- unsigned long *sp, unsigned long *pc);
+extern int task_current_syscall(struct task_struct *target, struct syscall_info *info);
extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);
#endif
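
The six output parameters of the old task_current_syscall() collapse into struct syscall_info. A sketch of a caller (the print format is illustrative):

#include <linux/printk.h>
#include <linux/ptrace.h>

static void foo_dump_syscall(struct task_struct *task)
{
        struct syscall_info info;

        if (task_current_syscall(task, &info))
                return;         /* task was running, no consistent snapshot */

        pr_info("nr=%d sp=%#llx ip=%#llx\n", info.data.nr,
                (unsigned long long)info.sp,
                (unsigned long long)info.data.instruction_pointer);
}
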
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index b628abfffacc..eaa5c6e3fc9f 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -596,7 +596,6 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
#ifdef CONFIG_PWM_SYSFS
void pwmchip_sysfs_export(struct pwm_chip *chip);
void pwmchip_sysfs_unexport(struct pwm_chip *chip);
-void pwmchip_sysfs_unexport_children(struct pwm_chip *chip);
#else
static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
{
@@ -605,10 +604,6 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
{
}
-
-static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
-{
-}
#endif /* CONFIG_PWM_SYSFS */
#endif /* __LINUX_PWM_H */
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index f6165d304b4d..48841e5dab90 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -1338,7 +1338,6 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
}
/* Let SB update */
- mmiowb();
return rc;
}
@@ -1374,7 +1373,6 @@ static inline void qed_sb_ack(struct qed_sb_info *sb_info,
/* Both segments (interrupts & acks) are written to same place address;
* Need to guarantee all commands will be received (in-order) by HW.
*/
- mmiowb();
barrier();
}
diff --git a/include/linux/random.h b/include/linux/random.h
index 445a0ea4ff49..13aeaf5a4bd4 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -36,6 +36,7 @@ extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
extern void get_random_bytes(void *buf, int nbytes);
extern int wait_for_random_bytes(void);
+extern int __init rand_initialize(void);
extern bool rng_is_initialized(void);
extern int add_random_ready_callback(struct random_ready_callback *rdy);
extern void del_random_ready_callback(struct random_ready_callback *rdy);
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 6cdb1db776cf..922bb6848813 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -878,9 +878,11 @@ static inline void rcu_head_init(struct rcu_head *rhp)
static inline bool
rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
{
- if (READ_ONCE(rhp->func) == f)
+ rcu_callback_t func = READ_ONCE(rhp->func);
+
+ if (func == f)
return true;
- WARN_ON_ONCE(READ_ONCE(rhp->func) != (rcu_callback_t)~0L);
+ WARN_ON_ONCE(func != (rcu_callback_t)~0L);
return false;
}
diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h
index 90bfa3279a01..563290fc194f 100644
--- a/include/linux/rcuwait.h
+++ b/include/linux/rcuwait.h
@@ -18,7 +18,7 @@
* awoken.
*/
struct rcuwait {
- struct task_struct *task;
+ struct task_struct __rcu *task;
};
#define __RCUWAIT_INITIALIZER(name) \
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index f3f76051e8b0..aaf3cee70439 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -478,6 +478,11 @@ static inline int regulator_is_supported_voltage(struct regulator *regulator,
return 0;
}
+static inline unsigned int regulator_get_linear_step(struct regulator *regulator)
+{
+ return 0;
+}
+
static inline int regulator_set_current_limit(struct regulator *regulator,
int min_uA, int max_uA)
{
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 2f0ffca35780..ee750765cc94 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -228,7 +228,8 @@ reservation_object_unlock(struct reservation_object *obj)
* @obj: the reservation object
*
* Returns the exclusive fence (if any). Does NOT take a
- * reference. The obj->lock must be held.
+ * reference. Writers must hold obj->lock, readers may only
+ * hold a RCU read side lock.
*
* RETURNS
* The exclusive fence or NULL
diff --git a/include/linux/reset.h b/include/linux/reset.h
index c1901b61ca30..95d555c2130a 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -14,23 +14,26 @@ int reset_control_reset(struct reset_control *rstc);
int reset_control_assert(struct reset_control *rstc);
int reset_control_deassert(struct reset_control *rstc);
int reset_control_status(struct reset_control *rstc);
+int reset_control_acquire(struct reset_control *rstc);
+void reset_control_release(struct reset_control *rstc);
struct reset_control *__of_reset_control_get(struct device_node *node,
const char *id, int index, bool shared,
- bool optional);
+ bool optional, bool acquired);
struct reset_control *__reset_control_get(struct device *dev, const char *id,
int index, bool shared,
- bool optional);
+ bool optional, bool acquired);
void reset_control_put(struct reset_control *rstc);
int __device_reset(struct device *dev, bool optional);
struct reset_control *__devm_reset_control_get(struct device *dev,
const char *id, int index, bool shared,
- bool optional);
+ bool optional, bool acquired);
struct reset_control *devm_reset_control_array_get(struct device *dev,
bool shared, bool optional);
struct reset_control *of_reset_control_array_get(struct device_node *np,
- bool shared, bool optional);
+ bool shared, bool optional,
+ bool acquired);
int reset_control_get_count(struct device *dev);
@@ -56,6 +59,15 @@ static inline int reset_control_status(struct reset_control *rstc)
return 0;
}
+static inline int reset_control_acquire(struct reset_control *rstc)
+{
+ return 0;
+}
+
+static inline void reset_control_release(struct reset_control *rstc)
+{
+}
+
static inline void reset_control_put(struct reset_control *rstc)
{
}
@@ -68,21 +80,23 @@ static inline int __device_reset(struct device *dev, bool optional)
static inline struct reset_control *__of_reset_control_get(
struct device_node *node,
const char *id, int index, bool shared,
- bool optional)
+ bool optional, bool acquired)
{
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
static inline struct reset_control *__reset_control_get(
struct device *dev, const char *id,
- int index, bool shared, bool optional)
+ int index, bool shared, bool optional,
+ bool acquired)
{
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
static inline struct reset_control *__devm_reset_control_get(
struct device *dev, const char *id,
- int index, bool shared, bool optional)
+ int index, bool shared, bool optional,
+ bool acquired)
{
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
@@ -94,7 +108,8 @@ devm_reset_control_array_get(struct device *dev, bool shared, bool optional)
}
static inline struct reset_control *
-of_reset_control_array_get(struct device_node *np, bool shared, bool optional)
+of_reset_control_array_get(struct device_node *np, bool shared, bool optional,
+ bool acquired)
{
return optional ? NULL : ERR_PTR(-ENOTSUPP);
}
@@ -134,7 +149,28 @@ static inline int device_reset_optional(struct device *dev)
static inline struct reset_control *
__must_check reset_control_get_exclusive(struct device *dev, const char *id)
{
- return __reset_control_get(dev, id, 0, false, false);
+ return __reset_control_get(dev, id, 0, false, false, true);
+}
+
+/**
+ * reset_control_get_exclusive_released - Lookup and obtain a temporarily
+ * exclusive reference to a reset
+ * controller.
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ * reset-controls returned by this function must be acquired via
+ * reset_control_acquire() before they can be used and should be released
+ * via reset_control_release() afterwards.
+ *
+ * Use of id names is optional.
+ */
+static inline struct reset_control *
+__must_check reset_control_get_exclusive_released(struct device *dev,
+ const char *id)
+{
+ return __reset_control_get(dev, id, 0, false, false, false);
}
/**
@@ -162,19 +198,19 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id)
static inline struct reset_control *reset_control_get_shared(
struct device *dev, const char *id)
{
- return __reset_control_get(dev, id, 0, true, false);
+ return __reset_control_get(dev, id, 0, true, false, false);
}
static inline struct reset_control *reset_control_get_optional_exclusive(
struct device *dev, const char *id)
{
- return __reset_control_get(dev, id, 0, false, true);
+ return __reset_control_get(dev, id, 0, false, true, true);
}
static inline struct reset_control *reset_control_get_optional_shared(
struct device *dev, const char *id)
{
- return __reset_control_get(dev, id, 0, true, true);
+ return __reset_control_get(dev, id, 0, true, true, false);
}
/**
@@ -190,7 +226,7 @@ static inline struct reset_control *reset_control_get_optional_shared(
static inline struct reset_control *of_reset_control_get_exclusive(
struct device_node *node, const char *id)
{
- return __of_reset_control_get(node, id, 0, false, false);
+ return __of_reset_control_get(node, id, 0, false, false, true);
}
/**
@@ -215,7 +251,7 @@ static inline struct reset_control *of_reset_control_get_exclusive(
static inline struct reset_control *of_reset_control_get_shared(
struct device_node *node, const char *id)
{
- return __of_reset_control_get(node, id, 0, true, false);
+ return __of_reset_control_get(node, id, 0, true, false, false);
}
/**
@@ -232,7 +268,7 @@ static inline struct reset_control *of_reset_control_get_shared(
static inline struct reset_control *of_reset_control_get_exclusive_by_index(
struct device_node *node, int index)
{
- return __of_reset_control_get(node, NULL, index, false, false);
+ return __of_reset_control_get(node, NULL, index, false, false, true);
}
/**
@@ -260,7 +296,7 @@ static inline struct reset_control *of_reset_control_get_exclusive_by_index(
static inline struct reset_control *of_reset_control_get_shared_by_index(
struct device_node *node, int index)
{
- return __of_reset_control_get(node, NULL, index, true, false);
+ return __of_reset_control_get(node, NULL, index, true, false, false);
}
/**
@@ -279,7 +315,26 @@ static inline struct reset_control *
__must_check devm_reset_control_get_exclusive(struct device *dev,
const char *id)
{
- return __devm_reset_control_get(dev, id, 0, false, false);
+ return __devm_reset_control_get(dev, id, 0, false, false, true);
+}
+
+/**
+ * devm_reset_control_get_exclusive_released - resource managed
+ * reset_control_get_exclusive_released()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get_exclusive_released(). For reset controllers
+ * returned from this function, reset_control_put() is called automatically on
+ * driver detach.
+ *
+ * See reset_control_get_exclusive_released() for more information.
+ */
+static inline struct reset_control *
+__must_check devm_reset_control_get_exclusive_released(struct device *dev,
+ const char *id)
+{
+ return __devm_reset_control_get(dev, id, 0, false, false, false);
}
/**
@@ -294,19 +349,19 @@ __must_check devm_reset_control_get_exclusive(struct device *dev,
static inline struct reset_control *devm_reset_control_get_shared(
struct device *dev, const char *id)
{
- return __devm_reset_control_get(dev, id, 0, true, false);
+ return __devm_reset_control_get(dev, id, 0, true, false, false);
}
static inline struct reset_control *devm_reset_control_get_optional_exclusive(
struct device *dev, const char *id)
{
- return __devm_reset_control_get(dev, id, 0, false, true);
+ return __devm_reset_control_get(dev, id, 0, false, true, true);
}
static inline struct reset_control *devm_reset_control_get_optional_shared(
struct device *dev, const char *id)
{
- return __devm_reset_control_get(dev, id, 0, true, true);
+ return __devm_reset_control_get(dev, id, 0, true, true, false);
}
/**
@@ -324,7 +379,7 @@ static inline struct reset_control *devm_reset_control_get_optional_shared(
static inline struct reset_control *
devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
{
- return __devm_reset_control_get(dev, NULL, index, false, false);
+ return __devm_reset_control_get(dev, NULL, index, false, false, true);
}
/**
@@ -340,7 +395,7 @@ devm_reset_control_get_exclusive_by_index(struct device *dev, int index)
static inline struct reset_control *
devm_reset_control_get_shared_by_index(struct device *dev, int index)
{
- return __devm_reset_control_get(dev, NULL, index, true, false);
+ return __devm_reset_control_get(dev, NULL, index, true, false, false);
}
/*
@@ -412,24 +467,30 @@ devm_reset_control_array_get_optional_shared(struct device *dev)
static inline struct reset_control *
of_reset_control_array_get_exclusive(struct device_node *node)
{
- return of_reset_control_array_get(node, false, false);
+ return of_reset_control_array_get(node, false, false, true);
+}
+
+static inline struct reset_control *
+of_reset_control_array_get_exclusive_released(struct device_node *node)
+{
+ return of_reset_control_array_get(node, false, false, false);
}
static inline struct reset_control *
of_reset_control_array_get_shared(struct device_node *node)
{
- return of_reset_control_array_get(node, true, false);
+ return of_reset_control_array_get(node, true, false, true);
}
static inline struct reset_control *
of_reset_control_array_get_optional_exclusive(struct device_node *node)
{
- return of_reset_control_array_get(node, false, true);
+ return of_reset_control_array_get(node, false, true, true);
}
static inline struct reset_control *
of_reset_control_array_get_optional_shared(struct device_node *node)
{
- return of_reset_control_array_get(node, true, true);
+ return of_reset_control_array_get(node, true, true, true);
}
#endif
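
A sketch of the temporarily-exclusive flow introduced here: the control is obtained in released state, acquired only around the actual reset, and released again so another exclusive user can take over (driver and function names hypothetical).

#include <linux/err.h>
#include <linux/reset.h>

static int foo_reset_cycle(struct device *dev)
{
        struct reset_control *rstc;
        int err;

        rstc = devm_reset_control_get_exclusive_released(dev, NULL);
        if (IS_ERR(rstc))
                return PTR_ERR(rstc);

        err = reset_control_acquire(rstc);
        if (err)
                return err;

        err = reset_control_reset(rstc);

        /* Hand the line back so another exclusive-released user may acquire it */
        reset_control_release(rstc);
        return err;
}
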
diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h
index 763d613ce2c2..57467cbf4c5b 100644
--- a/include/linux/rhashtable-types.h
+++ b/include/linux/rhashtable-types.h
@@ -48,7 +48,6 @@ typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
* @head_offset: Offset of rhash_head in struct to be hashed
* @max_size: Maximum size while expanding
* @min_size: Minimum size while shrinking
- * @locks_mul: Number of bucket locks to allocate per cpu (default: 32)
* @automatic_shrinking: Enable automatic shrinking of tables
* @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
* @obj_hashfn: Function to hash object
@@ -62,7 +61,6 @@ struct rhashtable_params {
unsigned int max_size;
u16 min_size;
bool automatic_shrinking;
- u8 locks_mul;
rht_hashfn_t hashfn;
rht_obj_hashfn_t obj_hashfn;
rht_obj_cmpfn_t obj_cmpfn;
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index ae9c0f71f311..f7714d3b46bd 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -24,12 +24,27 @@
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>
+#include <linux/bit_spinlock.h>
#include <linux/rhashtable-types.h>
/*
+ * Objects in an rhashtable have an embedded struct rhash_head
+ * which is linked into a hash chain in the hash table - or one
+ * of two or more hash tables when the rhashtable is being resized.
 * The end of the chain is marked with a special nulls marker which has
- * the least significant bit set.
+ * the least significant bit set but otherwise stores the address of
+ * the hash bucket. This allows us to be sure we've found the end
+ * of the right list.
+ * The value stored in the hash bucket has BIT(0) used as a lock bit.
+ * This bit must be atomically set before any changes are made to
+ * the chain. To avoid dereferencing this pointer without clearing
+ * the bit first, we use an opaque 'struct rhash_lock_head *' for the
+ * pointer stored in the bucket. This struct needs to be defined so
+ * that rcu_dereference() works on it, but it has no content so a
+ * cast is needed for it to be useful. This ensures it isn't
+ * used by mistake without clearing the lock bit first.
*/
+struct rhash_lock_head {};
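To make the opaque-struct point concrete, a minimal sketch of the decode step (not part of the header; the real helper is rht_ptr() further down, which also handles empty buckets):

	/* a bucket word may have BIT(0) set while locked, so mask it off
	 * before treating the value as a chain head */
	static inline struct rhash_head *example_decode(struct rhash_lock_head *val)
	{
		return (struct rhash_head *)((unsigned long)val & ~BIT(0));
	}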
/* Maximum chain length before rehash
*
@@ -52,8 +67,6 @@
* @nest: Number of bits of first-level nested table.
* @rehash: Current bucket being rehashed
* @hash_rnd: Random seed to fold into hash
- * @locks_mask: Mask to apply before accessing locks[]
- * @locks: Array of spinlocks protecting individual buckets
* @walkers: List of active walkers
* @rcu: RCU structure for freeing the table
* @future_tbl: Table under construction during rehashing
@@ -63,31 +76,34 @@
struct bucket_table {
unsigned int size;
unsigned int nest;
- unsigned int rehash;
u32 hash_rnd;
- unsigned int locks_mask;
- spinlock_t *locks;
struct list_head walkers;
struct rcu_head rcu;
struct bucket_table __rcu *future_tbl;
- struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
+ struct lockdep_map dep_map;
+
+ struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
/*
* NULLS_MARKER() expects a hash value with the low
 * bits most likely to be significant, and it discards
* the msb.
- * We git it an address, in which the bottom 2 bits are
+ * We give it an address, in which the bottom bit is
* always 0, and the msb might be significant.
* So we shift the address down one bit to align with
* expectations and avoid losing a significant bit.
+ *
+ * We never store the NULLS_MARKER in the hash table
+ * itself as we need the lsb for locking.
+ * Instead we store NULL.
*/
#define RHT_NULLS_MARKER(ptr) \
((void *)NULLS_MARKER(((unsigned long) (ptr)) >> 1))
#define INIT_RHT_NULLS_HEAD(ptr) \
- ((ptr) = RHT_NULLS_MARKER(&(ptr)))
+ ((ptr) = NULL)
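Assuming the usual NULLS_MARKER() definition from <linux/list_nulls.h> (value shifted left by one, low bit set), the marker for a bucket works out roughly as follows (a sketch, "bkt" being the bucket's address):

	unsigned long marker = ((unsigned long)bkt & ~1UL) | 1UL;
	/* bit 0 is set, so rht_is_a_nulls() treats it as end-of-chain, and
	 * the remaining bits identify the bucket, letting lookup confirm it
	 * walked off the end of the right chain. */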
static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
@@ -207,25 +223,6 @@ static inline bool rht_grow_above_max(const struct rhashtable *ht,
return atomic_read(&ht->nelems) >= ht->max_elems;
}
-/* The bucket lock is selected based on the hash and protects mutations
- * on a group of hash buckets.
- *
- * A maximum of tbl->size/2 bucket locks is allocated. This ensures that
- * a single lock always covers both buckets which may both contains
- * entries which link to the same bucket of the old table during resizing.
- * This allows to simplify the locking as locking the bucket in both
- * tables during resize always guarantee protection.
- *
- * IMPORTANT: When holding the bucket lock of both the old and new table
- * during expansions and shrinking, the old bucket lock must always be
- * acquired first.
- */
-static inline spinlock_t *rht_bucket_lock(const struct bucket_table *tbl,
- unsigned int hash)
-{
- return &tbl->locks[hash & tbl->locks_mask];
-}
-
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
@@ -264,11 +261,13 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
void *arg);
void rhashtable_destroy(struct rhashtable *ht);
-struct rhash_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
- unsigned int hash);
-struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
- struct bucket_table *tbl,
+struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
+ unsigned int hash);
+struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
unsigned int hash);
+struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
+ struct bucket_table *tbl,
+ unsigned int hash);
#define rht_dereference(p, ht) \
rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -285,37 +284,136 @@ struct rhash_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
#define rht_entry(tpos, pos, member) \
({ tpos = container_of(pos, typeof(*tpos), member); 1; })
-static inline struct rhash_head __rcu *const *rht_bucket(
+static inline struct rhash_lock_head __rcu *const *rht_bucket(
const struct bucket_table *tbl, unsigned int hash)
{
return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
&tbl->buckets[hash];
}
-static inline struct rhash_head __rcu **rht_bucket_var(
+static inline struct rhash_lock_head __rcu **rht_bucket_var(
struct bucket_table *tbl, unsigned int hash)
{
- return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
+ return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
&tbl->buckets[hash];
}
-static inline struct rhash_head __rcu **rht_bucket_insert(
+static inline struct rhash_lock_head __rcu **rht_bucket_insert(
struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
&tbl->buckets[hash];
}
+/*
+ * We lock a bucket by setting BIT(0) in the pointer - this is always
+ * zero in real pointers. The NULLS mark is never stored in the bucket,
+ * rather we store NULL if the bucket is empty.
+ * bit_spin_locks do not handle contention well, but the whole point
+ * of the hashtable design is to achieve minimum per-bucket contention.
+ * A nested hash table might not have a bucket pointer. In that case
+ * we cannot get a lock. For remove and replace, a missing bucket cannot
+ * contain the object of interest, so no locking is needed.
+ * For insert we allocate the bucket if this is the last bucket_table,
+ * and then take the lock.
+ * Sometimes we unlock a bucket by writing a new pointer there. In that
+ * case we don't need an explicit bit_spin_unlock(), but we do need to reset
+ * state such as local_bh. For that we have rht_assign_unlock(). As rcu_assign_pointer()
+ * provides the same release semantics that bit_spin_unlock() provides,
+ * this is safe.
+ * When we write to a bucket without unlocking, we use rht_assign_locked().
+ */
+
+static inline void rht_lock(struct bucket_table *tbl,
+ struct rhash_lock_head **bkt)
+{
+ local_bh_disable();
+ bit_spin_lock(0, (unsigned long *)bkt);
+ lock_map_acquire(&tbl->dep_map);
+}
+
+static inline void rht_lock_nested(struct bucket_table *tbl,
+ struct rhash_lock_head **bucket,
+ unsigned int subclass)
+{
+ local_bh_disable();
+ bit_spin_lock(0, (unsigned long *)bucket);
+ lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
+}
+
+static inline void rht_unlock(struct bucket_table *tbl,
+ struct rhash_lock_head **bkt)
+{
+ lock_map_release(&tbl->dep_map);
+ bit_spin_unlock(0, (unsigned long *)bkt);
+ local_bh_enable();
+}
+
+/*
+ * Where 'bkt' is a bucket and might be locked:
+ * rht_ptr() dereferences that pointer and clears the lock bit.
+ * rht_ptr_exclusive() dereferences in a context where exclusive
+ * access is guaranteed, such as when destroying the table.
+ */
+static inline struct rhash_head *rht_ptr(
+ struct rhash_lock_head __rcu * const *bkt,
+ struct bucket_table *tbl,
+ unsigned int hash)
+{
+ const struct rhash_lock_head *p =
+ rht_dereference_bucket_rcu(*bkt, tbl, hash);
+
+ if ((((unsigned long)p) & ~BIT(0)) == 0)
+ return RHT_NULLS_MARKER(bkt);
+ return (void *)(((unsigned long)p) & ~BIT(0));
+}
+
+static inline struct rhash_head *rht_ptr_exclusive(
+ struct rhash_lock_head __rcu * const *bkt)
+{
+ const struct rhash_lock_head *p =
+ rcu_dereference_protected(*bkt, 1);
+
+ if (!p)
+ return RHT_NULLS_MARKER(bkt);
+ return (void *)(((unsigned long)p) & ~BIT(0));
+}
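For instance, a teardown-style walk where exclusive access is already guaranteed could use rht_ptr_exclusive() directly (a sketch; "ht", "tbl" and "hash" are assumed valid, and the objects are assumed to have been kmalloc'ed):

	struct rhash_head *pos = rht_ptr_exclusive(&tbl->buckets[hash]);

	while (!rht_is_a_nulls(pos)) {
		struct rhash_head *next = rht_dereference(pos->next, ht);

		kfree(rht_obj(ht, pos));
		pos = next;
	}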
+
+static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
+ struct rhash_head *obj)
+{
+ struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
+
+ if (rht_is_a_nulls(obj))
+ obj = NULL;
+ rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(0)));
+}
+
+static inline void rht_assign_unlock(struct bucket_table *tbl,
+ struct rhash_lock_head __rcu **bkt,
+ struct rhash_head *obj)
+{
+ struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
+
+ if (rht_is_a_nulls(obj))
+ obj = NULL;
+ lock_map_release(&tbl->dep_map);
+ rcu_assign_pointer(*p, obj);
+ preempt_enable();
+ __release(bitlock);
+ local_bh_enable();
+}
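Taken together, the helpers support a write-side pattern roughly like the following (a sketch; "bkt", "tbl", "hash", "obj" and the new_head condition stand in for what the insert/remove paths below compute):

	rht_lock(tbl, bkt);
	/* ... walk the chain via rht_ptr(bkt, tbl, hash) / rht_for_each_from() ... */
	if (new_head)
		rht_assign_unlock(tbl, bkt, obj);	/* publish new head and unlock */
	else
		rht_unlock(tbl, bkt);			/* nothing written to *bkt */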
+
/**
- * rht_for_each_continue - continue iterating over hash chain
+ * rht_for_each_from - iterate over hash chain from given head
* @pos: the &struct rhash_head to use as a loop cursor.
- * @head: the previous &struct rhash_head to continue from
+ * @head: the &struct rhash_head to start from
* @tbl: the &struct bucket_table
* @hash: the hash value / bucket index
*/
-#define rht_for_each_continue(pos, head, tbl, hash) \
- for (pos = rht_dereference_bucket(head, tbl, hash); \
- !rht_is_a_nulls(pos); \
+#define rht_for_each_from(pos, head, tbl, hash) \
+ for (pos = head; \
+ !rht_is_a_nulls(pos); \
pos = rht_dereference_bucket((pos)->next, tbl, hash))
/**
@@ -325,19 +423,20 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
* @hash: the hash value / bucket index
*/
#define rht_for_each(pos, tbl, hash) \
- rht_for_each_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
+ rht_for_each_from(pos, rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
+ tbl, hash)
/**
- * rht_for_each_entry_continue - continue iterating over hash chain
+ * rht_for_each_entry_from - iterate over hash chain from given head
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct rhash_head to use as a loop cursor.
- * @head: the previous &struct rhash_head to continue from
+ * @head: the &struct rhash_head to start from
* @tbl: the &struct bucket_table
* @hash: the hash value / bucket index
* @member: name of the &struct rhash_head within the hashable struct.
*/
-#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
- for (pos = rht_dereference_bucket(head, tbl, hash); \
+#define rht_for_each_entry_from(tpos, pos, head, tbl, hash, member) \
+ for (pos = head; \
(!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
pos = rht_dereference_bucket((pos)->next, tbl, hash))
@@ -350,8 +449,9 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
* @member: name of the &struct rhash_head within the hashable struct.
*/
#define rht_for_each_entry(tpos, pos, tbl, hash, member) \
- rht_for_each_entry_continue(tpos, pos, *rht_bucket(tbl, hash), \
- tbl, hash, member)
+ rht_for_each_entry_from(tpos, pos, \
+ rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
+ tbl, hash, member)
/**
* rht_for_each_entry_safe - safely iterate over hash chain of given type
@@ -366,7 +466,7 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
* remove the loop cursor from the list.
*/
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member) \
- for (pos = rht_dereference_bucket(*rht_bucket(tbl, hash), tbl, hash), \
+ for (pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
next = !rht_is_a_nulls(pos) ? \
rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
(!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
@@ -375,9 +475,9 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
rht_dereference_bucket(pos->next, tbl, hash) : NULL)
/**
- * rht_for_each_rcu_continue - continue iterating over rcu hash chain
+ * rht_for_each_rcu_from - iterate over rcu hash chain from given head
* @pos: the &struct rhash_head to use as a loop cursor.
- * @head: the previous &struct rhash_head to continue from
+ * @head: the &struct rhash_head to start from
* @tbl: the &struct bucket_table
* @hash: the hash value / bucket index
*
@@ -385,9 +485,9 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
* the _rcu mutation primitives such as rhashtable_insert() as long as the
* traversal is guarded by rcu_read_lock().
*/
-#define rht_for_each_rcu_continue(pos, head, tbl, hash) \
+#define rht_for_each_rcu_from(pos, head, tbl, hash) \
for (({barrier(); }), \
- pos = rht_dereference_bucket_rcu(head, tbl, hash); \
+ pos = head; \
!rht_is_a_nulls(pos); \
pos = rcu_dereference_raw(pos->next))
@@ -401,14 +501,17 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
* the _rcu mutation primitives such as rhashtable_insert() as long as the
* traversal is guarded by rcu_read_lock().
*/
-#define rht_for_each_rcu(pos, tbl, hash) \
- rht_for_each_rcu_continue(pos, *rht_bucket(tbl, hash), tbl, hash)
+#define rht_for_each_rcu(pos, tbl, hash) \
+ for (({barrier(); }), \
+ pos = rht_ptr(rht_bucket(tbl, hash), tbl, hash); \
+ !rht_is_a_nulls(pos); \
+ pos = rcu_dereference_raw(pos->next))
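A read-side sketch using the rewritten iterator ("struct my_obj" and "key_hash" are illustrative; rht_obj() recovers the enclosing object):

	struct rhash_head *pos;
	struct bucket_table *tbl;

	rcu_read_lock();
	tbl = rht_dereference_rcu(ht->tbl, ht);
	rht_for_each_rcu(pos, tbl, key_hash) {
		struct my_obj *obj = rht_obj(ht, pos);
		/* ... inspect obj; no sleeping inside the RCU section ... */
	}
	rcu_read_unlock();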
/**
- * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
+ * rht_for_each_entry_rcu_from - iterate over rcu hash chain from given head
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct rhash_head to use as a loop cursor.
- * @head: the previous &struct rhash_head to continue from
+ * @head: the &struct rhash_head to start from
* @tbl: the &struct bucket_table
* @hash: the hash value / bucket index
* @member: name of the &struct rhash_head within the hashable struct.
@@ -417,9 +520,9 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
* the _rcu mutation primitives such as rhashtable_insert() as long as the
* traversal is guarded by rcu_read_lock().
*/
-#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
+#define rht_for_each_entry_rcu_from(tpos, pos, head, tbl, hash, member) \
for (({barrier(); }), \
- pos = rht_dereference_bucket_rcu(head, tbl, hash); \
+ pos = head; \
(!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member); \
pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))
@@ -436,8 +539,10 @@ static inline struct rhash_head __rcu **rht_bucket_insert(
* traversal is guarded by rcu_read_lock().
*/
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member) \
- rht_for_each_entry_rcu_continue(tpos, pos, *rht_bucket(tbl, hash), \
- tbl, hash, member)
+ rht_for_each_entry_rcu_from(tpos, pos, \
+ rht_ptr(rht_bucket(tbl, hash), \
+ tbl, hash), \
+ tbl, hash, member)
/**
* rhl_for_each_rcu - iterate over rcu hash table list
@@ -482,7 +587,7 @@ static inline struct rhash_head *__rhashtable_lookup(
.ht = ht,
.key = key,
};
- struct rhash_head __rcu * const *head;
+ struct rhash_lock_head __rcu * const *bkt;
struct bucket_table *tbl;
struct rhash_head *he;
unsigned int hash;
@@ -490,9 +595,9 @@ static inline struct rhash_head *__rhashtable_lookup(
tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
hash = rht_key_hashfn(ht, tbl, key, params);
- head = rht_bucket(tbl, hash);
+ bkt = rht_bucket(tbl, hash);
do {
- rht_for_each_rcu_continue(he, *head, tbl, hash) {
+ rht_for_each_rcu_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
if (params.obj_cmpfn ?
params.obj_cmpfn(&arg, rht_obj(ht, he)) :
rhashtable_compare(&arg, rht_obj(ht, he)))
@@ -502,7 +607,7 @@ restart:
/* An object might have been moved to a different hash chain,
* while we walk along it - better check and retry.
*/
- } while (he != RHT_NULLS_MARKER(head));
+ } while (he != RHT_NULLS_MARKER(bkt));
/* Ensure we see any new tables. */
smp_rmb();
@@ -598,10 +703,10 @@ static inline void *__rhashtable_insert_fast(
.ht = ht,
.key = key,
};
+ struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct bucket_table *tbl;
struct rhash_head *head;
- spinlock_t *lock;
unsigned int hash;
int elasticity;
void *data;
@@ -610,23 +715,22 @@ static inline void *__rhashtable_insert_fast(
tbl = rht_dereference_rcu(ht->tbl, ht);
hash = rht_head_hashfn(ht, tbl, obj, params);
- lock = rht_bucket_lock(tbl, hash);
- spin_lock_bh(lock);
+ elasticity = RHT_ELASTICITY;
+ bkt = rht_bucket_insert(ht, tbl, hash);
+ data = ERR_PTR(-ENOMEM);
+ if (!bkt)
+ goto out;
+ pprev = NULL;
+ rht_lock(tbl, bkt);
if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
slow_path:
- spin_unlock_bh(lock);
+ rht_unlock(tbl, bkt);
rcu_read_unlock();
return rhashtable_insert_slow(ht, key, obj);
}
- elasticity = RHT_ELASTICITY;
- pprev = rht_bucket_insert(ht, tbl, hash);
- data = ERR_PTR(-ENOMEM);
- if (!pprev)
- goto out;
-
- rht_for_each_continue(head, *pprev, tbl, hash) {
+ rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
struct rhlist_head *plist;
struct rhlist_head *list;
@@ -642,7 +746,7 @@ slow_path:
data = rht_obj(ht, head);
if (!rhlist)
- goto out;
+ goto out_unlock;
list = container_of(obj, struct rhlist_head, rhead);
@@ -651,9 +755,13 @@ slow_path:
RCU_INIT_POINTER(list->next, plist);
head = rht_dereference_bucket(head->next, tbl, hash);
RCU_INIT_POINTER(list->rhead.next, head);
- rcu_assign_pointer(*pprev, obj);
-
- goto good;
+ if (pprev) {
+ rcu_assign_pointer(*pprev, obj);
+ rht_unlock(tbl, bkt);
+ } else
+ rht_assign_unlock(tbl, bkt, obj);
+ data = NULL;
+ goto out;
}
if (elasticity <= 0)
@@ -661,12 +769,13 @@ slow_path:
data = ERR_PTR(-E2BIG);
if (unlikely(rht_grow_above_max(ht, tbl)))
- goto out;
+ goto out_unlock;
if (unlikely(rht_grow_above_100(ht, tbl)))
goto slow_path;
- head = rht_dereference_bucket(*pprev, tbl, hash);
+ /* Inserting at head of list makes unlocking free. */
+ head = rht_ptr(bkt, tbl, hash);
RCU_INIT_POINTER(obj->next, head);
if (rhlist) {
@@ -676,20 +785,21 @@ slow_path:
RCU_INIT_POINTER(list->next, NULL);
}
- rcu_assign_pointer(*pprev, obj);
-
atomic_inc(&ht->nelems);
+ rht_assign_unlock(tbl, bkt, obj);
+
if (rht_grow_above_75(ht, tbl))
schedule_work(&ht->run_work);
-good:
data = NULL;
-
out:
- spin_unlock_bh(lock);
rcu_read_unlock();
return data;
+
+out_unlock:
+ rht_unlock(tbl, bkt);
+ goto out;
}
/**
@@ -698,9 +808,9 @@ out:
* @obj: pointer to hash head inside object
* @params: hash table parameters
*
- * Will take a per bucket spinlock to protect against mutual mutations
+ * Will take the per bucket bitlock to protect against mutual mutations
* on the same bucket. Multiple insertions may occur in parallel unless
- * they map to the same bucket lock.
+ * they map to the same bucket.
*
* It is safe to call this function from atomic context.
*
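For readers new to the API, the caller-facing setup is unchanged; only the locking underneath differs (a sketch with a hypothetical "struct foo" keyed by a u32; "ht", "obj" and "err" are assumed to be declared elsewhere):

	struct foo {
		u32 key;
		struct rhash_head node;
	};

	static const struct rhashtable_params foo_params = {
		.key_offset		= offsetof(struct foo, key),
		.key_len		= sizeof(u32),
		.head_offset		= offsetof(struct foo, node),
		.automatic_shrinking	= true,
	};

	err = rhashtable_init(&ht, &foo_params);
	/* per object; safe from atomic context, as noted above */
	err = rhashtable_insert_fast(&ht, &obj->node, foo_params);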
@@ -727,9 +837,9 @@ static inline int rhashtable_insert_fast(
* @list: pointer to hash list head inside object
* @params: hash table parameters
*
- * Will take a per bucket spinlock to protect against mutual mutations
+ * Will take the per bucket bitlock to protect against mutual mutations
* on the same bucket. Multiple insertions may occur in parallel unless
- * they map to the same bucket lock.
+ * they map to the same bucket.
*
* It is safe to call this function from atomic context.
*
@@ -750,9 +860,9 @@ static inline int rhltable_insert_key(
* @list: pointer to hash list head inside object
* @params: hash table parameters
*
- * Will take a per bucket spinlock to protect against mutual mutations
+ * Will take the per bucket bitlock to protect against mutual mutations
* on the same bucket. Multiple insertions may occur in parallel unless
- * they map to the same bucket lock.
+ * they map to the same bucket.
*
* It is safe to call this function from atomic context.
*
@@ -776,12 +886,6 @@ static inline int rhltable_insert(
* @obj: pointer to hash head inside object
* @params: hash table parameters
*
- * Locks down the bucket chain in both the old and new table if a resize
- * is in progress to ensure that writers can't remove from the old table
- * and can't insert to the new table during the atomic operation of search
- * and insertion. Searches for duplicates in both the old and new table if
- * a resize is in progress.
- *
* This lookup function may only be used for fixed key hash table (key_len
* parameter set). It will BUG() if used inappropriately.
*
@@ -837,12 +941,6 @@ static inline void *rhashtable_lookup_get_insert_fast(
* @obj: pointer to hash head inside object
* @params: hash table parameters
*
- * Locks down the bucket chain in both the old and new table if a resize
- * is in progress to ensure that writers can't remove from the old table
- * and can't insert to the new table during the atomic operation of search
- * and insertion. Searches for duplicates in both the old and new table if
- * a resize is in progress.
- *
* Lookups may occur in parallel with hashtable mutations and resizing.
*
* Will trigger an automatic deferred table resizing if residency in the
@@ -891,19 +989,20 @@ static inline int __rhashtable_remove_fast_one(
struct rhash_head *obj, const struct rhashtable_params params,
bool rhlist)
{
+ struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct rhash_head *he;
- spinlock_t * lock;
unsigned int hash;
int err = -ENOENT;
hash = rht_head_hashfn(ht, tbl, obj, params);
- lock = rht_bucket_lock(tbl, hash);
-
- spin_lock_bh(lock);
+ bkt = rht_bucket_var(tbl, hash);
+ if (!bkt)
+ return -ENOENT;
+ pprev = NULL;
+ rht_lock(tbl, bkt);
- pprev = rht_bucket_var(tbl, hash);
- rht_for_each_continue(he, *pprev, tbl, hash) {
+ rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
struct rhlist_head *list;
list = container_of(he, struct rhlist_head, rhead);
@@ -943,12 +1042,17 @@ static inline int __rhashtable_remove_fast_one(
}
}
- rcu_assign_pointer(*pprev, obj);
- break;
+ if (pprev) {
+ rcu_assign_pointer(*pprev, obj);
+ rht_unlock(tbl, bkt);
+ } else {
+ rht_assign_unlock(tbl, bkt, obj);
+ }
+ goto unlocked;
}
- spin_unlock_bh(lock);
-
+ rht_unlock(tbl, bkt);
+unlocked:
if (err > 0) {
atomic_dec(&ht->nelems);
if (unlikely(ht->p.automatic_shrinking &&
@@ -1037,9 +1141,9 @@ static inline int __rhashtable_replace_fast(
struct rhash_head *obj_old, struct rhash_head *obj_new,
const struct rhashtable_params params)
{
+ struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct rhash_head *he;
- spinlock_t *lock;
unsigned int hash;
int err = -ENOENT;
@@ -1050,25 +1154,33 @@ static inline int __rhashtable_replace_fast(
if (hash != rht_head_hashfn(ht, tbl, obj_new, params))
return -EINVAL;
- lock = rht_bucket_lock(tbl, hash);
+ bkt = rht_bucket_var(tbl, hash);
+ if (!bkt)
+ return -ENOENT;
- spin_lock_bh(lock);
+ pprev = NULL;
+ rht_lock(tbl, bkt);
- pprev = rht_bucket_var(tbl, hash);
- rht_for_each_continue(he, *pprev, tbl, hash) {
+ rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
if (he != obj_old) {
pprev = &he->next;
continue;
}
rcu_assign_pointer(obj_new->next, obj_old->next);
- rcu_assign_pointer(*pprev, obj_new);
+ if (pprev) {
+ rcu_assign_pointer(*pprev, obj_new);
+ rht_unlock(tbl, bkt);
+ } else {
+ rht_assign_unlock(tbl, bkt, obj_new);
+ }
err = 0;
- break;
+ goto unlocked;
}
- spin_unlock_bh(lock);
+ rht_unlock(tbl, bkt);
+unlocked:
return err;
}
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index f89bfbb54902..df666cf29ef1 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -79,8 +79,6 @@ struct rtc_class_ops {
int (*read_alarm)(struct device *, struct rtc_wkalrm *);
int (*set_alarm)(struct device *, struct rtc_wkalrm *);
int (*proc)(struct device *, struct seq_file *);
- int (*set_mmss64)(struct device *, time64_t secs);
- int (*set_mmss)(struct device *, unsigned long secs);
int (*alarm_irq_enable)(struct device *, unsigned int enabled);
int (*read_offset)(struct device *, long *offset);
int (*set_offset)(struct device *, long offset);
@@ -162,9 +160,11 @@ struct rtc_device {
#define to_rtc_device(d) container_of(d, struct rtc_device, dev)
/* useful timestamps */
-#define RTC_TIMESTAMP_BEGIN_1900 -2208989361LL /* 1900-01-01 00:00:00 */
+#define RTC_TIMESTAMP_BEGIN_1900 -2208988800LL /* 1900-01-01 00:00:00 */
#define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */
+#define RTC_TIMESTAMP_END_2063 2966371199LL /* 2063-12-31 23:59:59 */
#define RTC_TIMESTAMP_END_2099 4102444799LL /* 2099-12-31 23:59:59 */
+#define RTC_TIMESTAMP_END_9999 253402300799LL /* 9999-12-31 23:59:59 */
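For reference, the 1900 value can be checked by hand: the 70 years from 1900-01-01 to 1970-01-01 contain 17 leap days (1904 through 1968; 1900 itself is not a leap year), so 70 * 365 + 17 = 25567 days, and 25567 * 86400 = 2208988800 seconds, giving -2208988800 for a timestamp that predates the Unix epoch.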
extern struct rtc_device *devm_rtc_device_register(struct device *dev,
const char *name,
diff --git a/include/linux/rtc/ds1685.h b/include/linux/rtc/ds1685.h
index e6337a56d741..a00b332c505f 100644
--- a/include/linux/rtc/ds1685.h
+++ b/include/linux/rtc/ds1685.h
@@ -48,8 +48,6 @@ struct ds1685_priv {
u32 regstep;
resource_size_t baseaddr;
size_t size;
- spinlock_t lock;
- struct work_struct work;
int irq_num;
bool bcd_mode;
bool no_irq;
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
deleted file mode 100644
index e47568363e5e..000000000000
--- a/include/linux/rwsem-spinlock.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* rwsem-spinlock.h: fallback C implementation
- *
- * Copyright (c) 2001 David Howells (dhowells@redhat.com).
- * - Derived partially from ideas by Andrea Arcangeli <andrea@suse.de>
- * - Derived also from comments by Linus
- */
-
-#ifndef _LINUX_RWSEM_SPINLOCK_H
-#define _LINUX_RWSEM_SPINLOCK_H
-
-#ifndef _LINUX_RWSEM_H
-#error "please don't include linux/rwsem-spinlock.h directly, use linux/rwsem.h instead"
-#endif
-
-#ifdef __KERNEL__
-/*
- * the rw-semaphore definition
- * - if count is 0 then there are no active readers or writers
- * - if count is +ve then that is the number of active readers
- * - if count is -1 then there is one active writer
- * - if wait_list is not empty, then there are processes waiting for the semaphore
- */
-struct rw_semaphore {
- __s32 count;
- raw_spinlock_t wait_lock;
- struct list_head wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif
-};
-
-#define RWSEM_UNLOCKED_VALUE 0x00000000
-
-extern void __down_read(struct rw_semaphore *sem);
-extern int __must_check __down_read_killable(struct rw_semaphore *sem);
-extern int __down_read_trylock(struct rw_semaphore *sem);
-extern void __down_write(struct rw_semaphore *sem);
-extern int __must_check __down_write_killable(struct rw_semaphore *sem);
-extern int __down_write_trylock(struct rw_semaphore *sem);
-extern void __up_read(struct rw_semaphore *sem);
-extern void __up_write(struct rw_semaphore *sem);
-extern void __downgrade_write(struct rw_semaphore *sem);
-extern int rwsem_is_locked(struct rw_semaphore *sem);
-
-#endif /* __KERNEL__ */
-#endif /* _LINUX_RWSEM_SPINLOCK_H */
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 67dbb57508b1..2ea18a3def04 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -20,25 +20,30 @@
#include <linux/osq_lock.h>
#endif
-struct rw_semaphore;
-
-#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
-#include <linux/rwsem-spinlock.h> /* use a generic implementation */
-#define __RWSEM_INIT_COUNT(name) .count = RWSEM_UNLOCKED_VALUE
-#else
-/* All arch specific implementations share the same struct */
+/*
+ * For an uncontended rwsem, count and owner are the only fields a task
+ * needs to touch when acquiring the rwsem. So they are put next to each
+ * other to increase the chance that they will share the same cacheline.
+ *
+ * In a contended rwsem, the owner is likely the most frequently accessed
+ * field in the structure as the optimistic waiter that holds the osq lock
+ * will spin on owner. For an embedded rwsem, other hot fields in the
+ * containing structure should be moved further away from the rwsem to
+ * reduce the chance that they will share the same cacheline and cause
+ * a cacheline bouncing problem.
+ */
struct rw_semaphore {
atomic_long_t count;
- struct list_head wait_list;
- raw_spinlock_t wait_lock;
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
- struct optimistic_spin_queue osq; /* spinner MCS lock */
/*
* Write owner. Used as a speculative check to see
* if the owner is running on the cpu.
*/
struct task_struct *owner;
+ struct optimistic_spin_queue osq; /* spinner MCS lock */
#endif
+ raw_spinlock_t wait_lock;
+ struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
@@ -50,24 +55,14 @@ struct rw_semaphore {
*/
#define RWSEM_OWNER_UNKNOWN ((struct task_struct *)-2L)
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-/* Include the arch specific part */
-#include <asm/rwsem.h>
-
/* In all implementations count != 0 means locked */
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
return atomic_long_read(&sem->count) != 0;
}
+#define RWSEM_UNLOCKED_VALUE 0L
#define __RWSEM_INIT_COUNT(name) .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
-#endif
/* Common initializer macros and functions */
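As a reminder, the caller-facing API is untouched by this reshuffle (the semaphore name below is illustrative):

	static DECLARE_RWSEM(foo_sem);

	down_read(&foo_sem);		/* shared: many readers may hold it */
	/* ... read shared state ... */
	up_read(&foo_sem);

	down_write(&foo_sem);		/* exclusive: single writer */
	/* ... modify shared state ... */
	up_write(&foo_sem);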
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index 14d558146aea..20f3e3f029b9 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -330,7 +330,7 @@ static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
/*
* This one is special, since it doesn't actually clear the bit, rather it
* sets the corresponding bit in the ->cleared mask instead. Paired with
- * the caller doing sbitmap_batch_clear() if a given index is full, which
+ * the caller doing sbitmap_deferred_clear() if a given index is full, which
* will clear the previously freed entries in the corresponding ->word.
*/
static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index b4be960c7e5d..30a9a55c28ba 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -340,11 +340,11 @@ int sg_alloc_table_chained(struct sg_table *table, int nents,
* sg page iterator
*
* Iterates over sg entries page-by-page. On each successful iteration, you
- * can call sg_page_iter_page(@piter) to get the current page and its dma
- * address. @piter->sg will point to the sg holding this page and
- * @piter->sg_pgoffset to the page's page offset within the sg. The iteration
- * will stop either when a maximum number of sg entries was reached or a
- * terminating sg (sg_last(sg) == true) was reached.
+ * can call sg_page_iter_page(@piter) to get the current page.
+ * @piter->sg will point to the sg holding this page and @piter->sg_pgoffset to
+ * the page's page offset within the sg. The iteration will stop either when a
+ * maximum number of sg entries was reached or a terminating sg
+ * (sg_last(sg) == true) was reached.
*/
struct sg_page_iter {
struct scatterlist *sg; /* sg holding the page */
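A sketch of the page-by-page walk described above ("sgt" assumed to be a populated struct sg_table; DMA addresses now come from the separate DMA page iterator instead):

	struct sg_page_iter piter;

	for_each_sg_page(sgt->sgl, &piter, sgt->nents, 0) {
		struct page *page = sg_page_iter_page(&piter);
		/* ... operate on page ... */
	}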
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1549584a1538..a2cd15855bad 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -726,6 +726,8 @@ struct task_struct {
#ifdef CONFIG_CGROUPS
/* disallow userland-initiated cgroup migration */
unsigned no_cgroup_migration:1;
+ /* task is frozen/stopped (used by the cgroup freezer) */
+ unsigned frozen:1;
#endif
#ifdef CONFIG_BLK_CGROUP
/* to be used once the psi infrastructure lands upstream. */
@@ -1057,7 +1059,6 @@ struct task_struct {
#ifdef CONFIG_RSEQ
struct rseq __user *rseq;
- u32 rseq_len;
u32 rseq_sig;
/*
* RmW on rseq_event_mask must be performed atomically
@@ -1855,12 +1856,10 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
if (clone_flags & CLONE_THREAD) {
t->rseq = NULL;
- t->rseq_len = 0;
t->rseq_sig = 0;
t->rseq_event_mask = 0;
} else {
t->rseq = current->rseq;
- t->rseq_len = current->rseq_len;
t->rseq_sig = current->rseq_sig;
t->rseq_event_mask = current->rseq_event_mask;
}
@@ -1869,7 +1868,6 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
static inline void rseq_execve(struct task_struct *t)
{
t->rseq = NULL;
- t->rseq_len = 0;
t->rseq_sig = 0;
t->rseq_event_mask = 0;
}
diff --git a/include/linux/sched/jobctl.h b/include/linux/sched/jobctl.h
index 98228bd48aee..fa067de9f1a9 100644
--- a/include/linux/sched/jobctl.h
+++ b/include/linux/sched/jobctl.h
@@ -18,6 +18,7 @@ struct task_struct;
#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */
#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
+#define JOBCTL_TRAP_FREEZE_BIT 23 /* trap for cgroup freezer */
#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT)
@@ -26,6 +27,7 @@ struct task_struct;
#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT)
#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT)
#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT)
+#define JOBCTL_TRAP_FREEZE (1UL << JOBCTL_TRAP_FREEZE_BIT)
#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 0cd9f10423fb..a3fda9f024c3 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -49,6 +49,27 @@ static inline void mmdrop(struct mm_struct *mm)
__mmdrop(mm);
}
+/*
+ * This has to be called after a get_task_mm()/mmget_not_zero()
+ * followed by taking the mmap_sem for writing before modifying the
+ * vmas or anything the coredump pretends not to change from under it.
+ *
+ * NOTE: find_extend_vma() called from GUP context is the only place
+ * that can modify the "mm" (notably the vm_start/end) under mmap_sem
+ * for reading and outside the context of the process, so it is also
+ * the only case that holds the mmap_sem for reading that must call
+ * this function. Generally if the mmap_sem is held for reading
+ * there's no need for this check after get_task_mm()/mmget_not_zero().
+ *
+ * This function can be obsoleted and the check removed once the
+ * coredump code holds the mmap_sem for writing before invoking
+ * the ->core_dump methods.
+ */
+static inline bool mmget_still_valid(struct mm_struct *mm)
+{
+ return likely(!mm->core_state);
+}
+
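The pattern the comment describes, as a sketch ("mm" assumed to come from get_task_mm() or similar):

	if (mmget_not_zero(mm)) {
		down_write(&mm->mmap_sem);
		if (mmget_still_valid(mm)) {
			/* no coredump in flight; safe to modify vmas */
		}
		up_write(&mm->mmap_sem);
		mmput(mm);
	}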
/**
* mmget() - Pin the address space associated with a &struct mm_struct.
* @mm: The address space to pin.
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index ae5655197698..e412c092c1e8 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -418,10 +418,20 @@ static inline void set_restore_sigmask(void)
set_thread_flag(TIF_RESTORE_SIGMASK);
WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}
+
+static inline void clear_tsk_restore_sigmask(struct task_struct *tsk)
+{
+ clear_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK);
+}
+
static inline void clear_restore_sigmask(void)
{
clear_thread_flag(TIF_RESTORE_SIGMASK);
}
+static inline bool test_tsk_restore_sigmask(struct task_struct *tsk)
+{
+ return test_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK);
+}
static inline bool test_restore_sigmask(void)
{
return test_thread_flag(TIF_RESTORE_SIGMASK);
@@ -439,6 +449,10 @@ static inline void set_restore_sigmask(void)
current->restore_sigmask = true;
WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}
+static inline void clear_tsk_restore_sigmask(struct task_struct *tsk)
+{
+ tsk->restore_sigmask = false;
+}
static inline void clear_restore_sigmask(void)
{
current->restore_sigmask = false;
@@ -447,6 +461,10 @@ static inline bool test_restore_sigmask(void)
{
return current->restore_sigmask;
}
+static inline bool test_tsk_restore_sigmask(struct task_struct *tsk)
+{
+ return tsk->restore_sigmask;
+}
static inline bool test_and_clear_restore_sigmask(void)
{
if (!current->restore_sigmask)
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 2e97a2227045..f1227f2c38a4 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -76,6 +76,7 @@ extern void exit_itimers(struct signal_struct *);
extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
+struct mm_struct *copy_init_mm(void);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 57c7ed3fe465..cfc0a89a7159 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -76,8 +76,8 @@ struct sched_domain_shared {
struct sched_domain {
/* These fields must be setup */
- struct sched_domain *parent; /* top domain must be null terminated */
- struct sched_domain *child; /* bottom domain must be null terminated */
+ struct sched_domain __rcu *parent; /* top domain must be null terminated */
+ struct sched_domain __rcu *child; /* bottom domain must be null terminated */
struct sched_group *groups; /* the balancing groups of the domain */
unsigned long min_interval; /* Minimum balance interval ms */
unsigned long max_interval; /* Maximum balance interval ms */
diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h
index c7b5f86b91a1..468d2565a9fe 100644
--- a/include/linux/sched/user.h
+++ b/include/linux/sched/user.h
@@ -31,6 +31,13 @@ struct user_struct {
atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */
#ifdef CONFIG_KEYS
+ /*
+ * These pointers can only change from NULL to a non-NULL value once.
+ * Writes are protected by key_user_keyring_mutex.
+ * Unlocked readers should use READ_ONCE() unless they know that
+ * install_user_keyrings() has been called successfully (which sets
+ * these members to non-NULL values, preventing further modifications).
+ */
struct key *uid_keyring; /* UID specific keyring */
struct key *session_keyring; /* UID's default session keyring */
#endif
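A sketch of the unlocked-reader case described above ("user" being an arbitrary struct user_struct pointer):

	struct key *uid_keyring = READ_ONCE(user->uid_keyring);

	if (uid_keyring) {
		/* keyrings are installed and can no longer change */
	}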
diff --git a/include/linux/security.h b/include/linux/security.h
index 49f2685324b0..659071c2e57c 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -51,6 +51,7 @@ struct fown_struct;
struct file_operations;
struct msg_msg;
struct xattr;
+struct kernfs_node;
struct xfrm_sec_ctx;
struct mm_struct;
struct fs_context;
@@ -250,6 +251,7 @@ int security_sb_clone_mnt_opts(const struct super_block *oldsb,
unsigned long *set_kern_flags);
int security_add_mnt_opt(const char *option, const char *val,
int len, void **mnt_opts);
+int security_move_mount(const struct path *from_path, const struct path *to_path);
int security_dentry_init_security(struct dentry *dentry, int mode,
const struct qstr *name, void **ctx,
u32 *ctxlen);
@@ -299,6 +301,8 @@ int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer
void security_inode_getsecid(struct inode *inode, u32 *secid);
int security_inode_copy_up(struct dentry *src, struct cred **new);
int security_inode_copy_up_xattr(const char *name);
+int security_kernfs_init_security(struct kernfs_node *kn_dir,
+ struct kernfs_node *kn);
int security_file_permission(struct file *file, int mask);
int security_file_alloc(struct file *file);
void security_file_free(struct file *file);
@@ -611,6 +615,12 @@ static inline int security_add_mnt_opt(const char *option, const char *val,
return 0;
}
+static inline int security_move_mount(const struct path *from_path,
+ const struct path *to_path)
+{
+ return 0;
+}
+
static inline int security_inode_alloc(struct inode *inode)
{
return 0;
@@ -801,6 +811,12 @@ static inline int security_inode_copy_up(struct dentry *src, struct cred **new)
return 0;
}
+static inline int security_kernfs_init_security(struct kernfs_node *kn_dir,
+ struct kernfs_node *kn)
+{
+ return 0;
+}
+
static inline int security_inode_copy_up_xattr(const char *name)
{
return -EOPNOTSUPP;
diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h
index 04b124fca51e..3e76b6d7d97f 100644
--- a/include/linux/sed-opal.h
+++ b/include/linux/sed-opal.h
@@ -1,18 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright © 2016 Intel Corporation
*
* Authors:
* Rafael Antognolli <rafael.antognolli@intel.com>
* Scott Bauer <scott.bauer@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef LINUX_OPAL_H
diff --git a/include/linux/selection.h b/include/linux/selection.h
index a8f5b97b216f..e2c1f96bf059 100644
--- a/include/linux/selection.h
+++ b/include/linux/selection.h
@@ -11,13 +11,14 @@
#include <linux/tiocl.h>
#include <linux/vt_buffer.h>
-struct tty_struct;
-
extern struct vc_data *sel_cons;
struct tty_struct;
extern void clear_selection(void);
-extern int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *tty);
+extern int set_selection_user(const struct tiocl_selection __user *sel,
+ struct tty_struct *tty);
+extern int set_selection_kernel(struct tiocl_selection *v,
+ struct tty_struct *tty);
extern int paste_selection(struct tty_struct *tty);
extern int sel_loadlut(char __user *p);
extern int mouse_reporting(void);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 5fe2b037e833..fea2216a893f 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -45,7 +45,7 @@ struct device;
/*
* This structure describes all the operations that can be done on the
- * physical hardware. See Documentation/serial/driver for details.
+ * physical hardware. See Documentation/serial/driver.rst for details.
*/
struct uart_ops {
unsigned int (*tx_empty)(struct uart_port *);
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index 2a986d282a97..b5071497b8cb 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -17,6 +17,17 @@ static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
#endif
+#ifndef CONFIG_ARCH_HAS_SET_DIRECT_MAP
+static inline int set_direct_map_invalid_noflush(struct page *page)
+{
+ return 0;
+}
+static inline int set_direct_map_default_noflush(struct page *page)
+{
+ return 0;
+}
+#endif
+
#ifndef set_mce_nospec
static inline int set_mce_nospec(unsigned long pfn)
{
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index f3fb1edb3526..20d815a33145 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -21,6 +21,7 @@ struct shmem_inode_info {
struct list_head swaplist; /* chain of maybes on swap */
struct shared_policy policy; /* NUMA memory alloc policy */
struct simple_xattrs xattrs; /* list of xattrs */
+ atomic_t stop_eviction; /* hold when working on inode */
struct inode vfs_inode;
};
diff --git a/include/linux/siphash.h b/include/linux/siphash.h
index fa7a6b9cedbf..bf21591a9e5e 100644
--- a/include/linux/siphash.h
+++ b/include/linux/siphash.h
@@ -21,6 +21,11 @@ typedef struct {
u64 key[2];
} siphash_key_t;
+static inline bool siphash_key_is_zero(const siphash_key_t *key)
+{
+ return !(key->key[0] | key->key[1]);
+}
+
u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 9027a8c4219f..6d58fa8a65fd 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -657,7 +657,6 @@ typedef unsigned char *sk_buff_data_t;
* @tc_index: Traffic control index
* @hash: the packet hash
* @queue_mapping: Queue mapping for multiqueue devices
- * @xmit_more: More SKBs are pending for this queue
* @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
* @active_extensions: active extensions (skb_ext_id types)
* @ndisc_nodetype: router type (from link layer)
@@ -764,7 +763,6 @@ struct sk_buff {
fclone:2,
peeked:1,
head_frag:1,
- xmit_more:1,
pfmemalloc:1;
#ifdef CONFIG_SKB_EXTENSIONS
__u8 active_extensions;
@@ -1044,6 +1042,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
int node);
struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
+struct sk_buff *build_skb_around(struct sk_buff *skb,
+ void *data, unsigned int frag_size);
/**
* alloc_skb - allocate a network buffer
@@ -1258,11 +1258,19 @@ void skb_flow_dissector_init(struct flow_dissector *flow_dissector,
unsigned int key_count);
#ifdef CONFIG_NET
+int skb_flow_dissector_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
struct bpf_prog *prog);
int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr);
#else
+static inline int skb_flow_dissector_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int skb_flow_dissector_bpf_prog_attach(const union bpf_attr *attr,
struct bpf_prog *prog)
{
@@ -1275,12 +1283,12 @@ static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
}
#endif
-struct bpf_flow_keys;
-bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
- const struct sk_buff *skb,
- struct flow_dissector *flow_dissector,
- struct bpf_flow_keys *flow_keys);
-bool __skb_flow_dissect(const struct sk_buff *skb,
+struct bpf_flow_dissector;
+bool bpf_flow_dissect(struct bpf_prog *prog, struct bpf_flow_dissector *ctx,
+ __be16 proto, int nhoff, int hlen);
+
+bool __skb_flow_dissect(const struct net *net,
+ const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container,
void *data, __be16 proto, int nhoff, int hlen,
@@ -1290,8 +1298,8 @@ static inline bool skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector *flow_dissector,
void *target_container, unsigned int flags)
{
- return __skb_flow_dissect(skb, flow_dissector, target_container,
- NULL, 0, 0, 0, flags);
+ return __skb_flow_dissect(NULL, skb, flow_dissector,
+ target_container, NULL, 0, 0, 0, flags);
}
static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
@@ -1299,18 +1307,19 @@ static inline bool skb_flow_dissect_flow_keys(const struct sk_buff *skb,
unsigned int flags)
{
memset(flow, 0, sizeof(*flow));
- return __skb_flow_dissect(skb, &flow_keys_dissector, flow,
- NULL, 0, 0, 0, flags);
+ return __skb_flow_dissect(NULL, skb, &flow_keys_dissector,
+ flow, NULL, 0, 0, 0, flags);
}
static inline bool
-skb_flow_dissect_flow_keys_basic(const struct sk_buff *skb,
+skb_flow_dissect_flow_keys_basic(const struct net *net,
+ const struct sk_buff *skb,
struct flow_keys_basic *flow, void *data,
__be16 proto, int nhoff, int hlen,
unsigned int flags)
{
memset(flow, 0, sizeof(*flow));
- return __skb_flow_dissect(skb, &flow_keys_basic_dissector, flow,
+ return __skb_flow_dissect(net, skb, &flow_keys_basic_dissector, flow,
data, proto, nhoff, hlen, flags);
}
@@ -2102,8 +2111,6 @@ void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
unsigned int truesize);
-#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
-#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
#ifdef NET_SKBUFF_DATA_USES_OFFSET
@@ -2490,7 +2497,8 @@ static inline void skb_probe_transport_header(struct sk_buff *skb)
if (skb_transport_header_was_set(skb))
return;
- if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
+ if (skb_flow_dissect_flow_keys_basic(NULL, skb, &keys,
+ NULL, 0, 0, 0, 0))
skb_set_transport_header(skb, keys.control.thoff);
}
@@ -3372,17 +3380,17 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
unsigned int flags,
void (*destructor)(struct sock *sk,
struct sk_buff *skb),
- int *peeked, int *off, int *err,
+ int *off, int *err,
struct sk_buff **last);
struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
void (*destructor)(struct sock *sk,
struct sk_buff *skb),
- int *peeked, int *off, int *err,
+ int *off, int *err,
struct sk_buff **last);
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
void (*destructor)(struct sock *sk,
struct sk_buff *skb),
- int *peeked, int *off, int *err);
+ int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int *err);
__poll_t datagram_poll(struct file *file, struct socket *sock,
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 11b45f7ae405..9449b19c5f10 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -32,6 +32,8 @@
#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)
+/* Use GFP_DMA32 memory */
+#define SLAB_CACHE_DMA32 ((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index d0884b525001..9d1bc65d226c 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -29,7 +29,7 @@ struct smpboot_thread_data;
* @thread_comm: The base name of the thread
*/
struct smp_hotplug_thread {
- struct task_struct __percpu **store;
+ struct task_struct * __percpu *store;
struct list_head list;
int (*thread_should_run)(unsigned int cpu);
void (*thread_fn)(unsigned int cpu);
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 6016daeecee4..b57cd8bf96e2 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -26,7 +26,7 @@ typedef __kernel_sa_family_t sa_family_t;
/*
* 1003.1g requires sa_family_t and that sa_data is char.
*/
-
+
struct sockaddr {
sa_family_t sa_family; /* address family, AF_xxx */
char sa_data[14]; /* 14 bytes of protocol address */
@@ -44,7 +44,7 @@ struct linger {
* system, not 4.3. Thus msg_accrights(len) are now missing. They
* belong in an obscure libc emulation or the bin.
*/
-
+
struct msghdr {
void *msg_name; /* ptr to socket address structure */
int msg_namelen; /* size of socket address structure */
@@ -54,7 +54,7 @@ struct msghdr {
unsigned int msg_flags; /* flags on received message */
struct kiocb *msg_iocb; /* ptr to iocb for async requests */
};
-
+
struct user_msghdr {
void __user *msg_name; /* ptr to socket address structure */
int msg_namelen; /* size of socket address structure */
@@ -122,7 +122,7 @@ struct cmsghdr {
* inside range, given by msg->msg_controllen before using
* ancillary object DATA. --ANK (980731)
*/
-
+
static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size,
struct cmsghdr *__cmsg)
{
@@ -264,10 +264,10 @@ struct ucred {
/* Maximum queue length specifiable by listen. */
#define SOMAXCONN 128
-/* Flags we can use with send/ and recv.
+/* Flags we can use with send/ and recv.
Added those for 1003.1g not all are supported yet
*/
-
+
#define MSG_OOB 1
#define MSG_PEEK 2
#define MSG_DONTROUTE 4
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index df313913e856..35662d9c2c62 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -1,5 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
-// Copyright(c) 2015-17 Intel Corporation.
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* Copyright(c) 2015-17 Intel Corporation. */
#ifndef __SOUNDWIRE_H
#define __SOUNDWIRE_H
@@ -36,7 +36,7 @@ struct sdw_slave;
#define SDW_FRAME_CTRL_BITS 48
#define SDW_MAX_DEVICES 11
-#define SDW_VALID_PORT_RANGE(n) (n <= 14 && n >= 1)
+#define SDW_VALID_PORT_RANGE(n) ((n) <= 14 && (n) >= 1)
#define SDW_DAI_ID_RANGE_START 100
#define SDW_DAI_ID_RANGE_END 200
@@ -470,14 +470,14 @@ struct sdw_bus_params {
struct sdw_slave_ops {
int (*read_prop)(struct sdw_slave *sdw);
int (*interrupt_callback)(struct sdw_slave *slave,
- struct sdw_slave_intr_status *status);
+ struct sdw_slave_intr_status *status);
int (*update_status)(struct sdw_slave *slave,
- enum sdw_slave_status status);
+ enum sdw_slave_status status);
int (*bus_config)(struct sdw_slave *slave,
- struct sdw_bus_params *params);
+ struct sdw_bus_params *params);
int (*port_prep)(struct sdw_slave *slave,
- struct sdw_prepare_ch *prepare_ch,
- enum sdw_port_prep_ops pre_ops);
+ struct sdw_prepare_ch *prepare_ch,
+ enum sdw_port_prep_ops pre_ops);
};
/**
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index 2b9573b8aedd..4d70da45363d 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -1,5 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
-// Copyright(c) 2015-17 Intel Corporation.
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* Copyright(c) 2015-17 Intel Corporation. */
#ifndef __SDW_INTEL_H
#define __SDW_INTEL_H
@@ -11,7 +11,7 @@
*/
struct sdw_intel_ops {
int (*config_stream)(void *arg, void *substream,
- void *dai, void *hw_params, int stream_num);
+ void *dai, void *hw_params, int stream_num);
};
/**
diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h
index df472b1ab410..a686f7988156 100644
--- a/include/linux/soundwire/sdw_registers.h
+++ b/include/linux/soundwire/sdw_registers.h
@@ -1,5 +1,5 @@
-// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
-// Copyright(c) 2015-17 Intel Corporation.
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* Copyright(c) 2015-17 Intel Corporation. */
#ifndef __SDW_REGISTERS_H
#define __SDW_REGISTERS_H
@@ -73,7 +73,6 @@
#define SDW_SCP_INTSTAT2_SCP3_CASCADE BIT(7)
#define SDW_SCP_INTSTAT2_PORT4_10 GENMASK(6, 0)
-
#define SDW_SCP_INTSTAT3 0x43
#define SDW_SCP_INTSTAT3_PORT11_14 GENMASK(3, 0)
diff --git a/include/linux/soundwire/sdw_type.h b/include/linux/soundwire/sdw_type.h
index 9fd553e553e9..9c756b5a0dfe 100644
--- a/include/linux/soundwire/sdw_type.h
+++ b/include/linux/soundwire/sdw_type.h
@@ -1,5 +1,5 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright(c) 2015-17 Intel Corporation.
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2015-17 Intel Corporation. */
#ifndef __SOUNDWIRE_TYPES_H
#define __SOUNDWIRE_TYPES_H
@@ -11,7 +11,7 @@ extern struct bus_type sdw_bus_type;
#define sdw_register_driver(drv) \
__sdw_register_driver(drv, THIS_MODULE)
-int __sdw_register_driver(struct sdw_driver *drv, struct module *);
+int __sdw_register_driver(struct sdw_driver *drv, struct module *owner);
void sdw_unregister_driver(struct sdw_driver *drv);
int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size);
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index c1c59473cef9..6005f0126631 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -25,6 +25,7 @@ struct dma_chan;
struct pxa2xx_spi_controller {
u16 num_chipselect;
u8 enable_dma;
+ u8 dma_burst_size;
bool is_slave;
/* DMA engine specific config */
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index 3703d0dcac2e..af9ff2f0f1b2 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -295,6 +295,10 @@ int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
const struct spi_mem_op *op,
struct sg_table *sg);
+
+bool spi_mem_default_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op);
+
#else
static inline int
spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
@@ -310,6 +314,14 @@ spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
struct sg_table *sg)
{
}
+
+static inline
+bool spi_mem_default_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ return false;
+}
+
#endif /* CONFIG_SPI_MEM */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op);
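The newly exported spi_mem_default_supports_op() lets a controller driver reuse the core's generic capability checks before layering its own constraints on top. A hedged sketch of that pattern follows; the foo_* names and FOO_MAX_DATA_LEN limit are illustrative, not part of any real driver.

#include <linux/spi/spi-mem.h>

#define FOO_MAX_DATA_LEN	64	/* hypothetical FIFO limit */

static bool foo_mem_supports_op(struct spi_mem *mem,
				const struct spi_mem_op *op)
{
	/* Let the core reject anything it does not support ... */
	if (!spi_mem_default_supports_op(mem, op))
		return false;

	/* ... then apply a controller-specific data length limit. */
	return op->data.nbytes <= FOO_MAX_DATA_LEN;
}

static const struct spi_controller_mem_ops foo_mem_ops = {
	.supports_op	= foo_mem_supports_op,
};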
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 662b336aa2e4..053abd22ad31 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -143,7 +143,7 @@ struct spi_device {
u32 max_speed_hz;
u8 chip_select;
u8 bits_per_word;
- u16 mode;
+ u32 mode;
#define SPI_CPHA 0x01 /* clock phase */
#define SPI_CPOL 0x02 /* clock polarity */
#define SPI_MODE_0 (0|0) /* (original MicroWire) */
@@ -330,6 +330,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* must fail if an unrecognized or unsupported mode is requested.
* It's always safe to call this unless transfers are pending on
* the device whose settings are being modified.
+ * @set_cs_timing: optional hook for SPI devices to request SPI master
+ * controller for configuring specific CS setup time, hold time and inactive
+ * delay in terms of clock counts
* @transfer: adds a message to the controller's transfer queue.
* @cleanup: frees controller-specific state
* @can_dma: determine whether this controller supports DMA
@@ -363,6 +366,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @unprepare_transfer_hardware: there are currently no more messages on the
* queue so the subsystem notifies the driver that it may relax the
* hardware by issuing this call
+ *
* @set_cs: set the logic level of the chip select line. May be called
* from interrupt context.
* @prepare_message: set up the controller to transfer a single message,
@@ -439,13 +443,12 @@ struct spi_controller {
u16 dma_alignment;
/* spi_device.mode flags understood by this controller driver */
- u16 mode_bits;
+ u32 mode_bits;
/* bitmask of supported bits_per_word for transfers */
u32 bits_per_word_mask;
#define SPI_BPW_MASK(bits) BIT((bits) - 1)
-#define SPI_BIT_MASK(bits) (((bits) == 32) ? ~0U : (BIT(bits) - 1))
-#define SPI_BPW_RANGE_MASK(min, max) (SPI_BIT_MASK(max) - SPI_BIT_MASK(min - 1))
+#define SPI_BPW_RANGE_MASK(min, max) GENMASK((max) - 1, (min) - 1)
/* limits on transfer speed */
u32 min_speed_hz;
@@ -489,6 +492,17 @@ struct spi_controller {
*/
int (*setup)(struct spi_device *spi);
+ /*
+ * set_cs_timing() method is for SPI controllers that support
+ * configuring CS timing.
+ *
+ * This hook allows SPI client drivers to request SPI controllers
+ * to configure specific CS timing through spi_set_cs_timing() after
+ * spi_setup().
+ */
+ void (*set_cs_timing)(struct spi_device *spi, u8 setup_clk_cycles,
+ u8 hold_clk_cycles, u8 inactive_clk_cycles);
+
/* bidirectional bulk transfers
*
* + The transfer() method may not sleep; its main role is
@@ -1277,7 +1291,7 @@ struct spi_board_info {
/* mode becomes spi_device.mode, and is essential for chips
* where the default of SPI_CS_HIGH = 0 is wrong.
*/
- u16 mode;
+ u32 mode;
/* ... may need additional spi_device chip config data here.
* avoid stuff protocol drivers can set; but include stuff
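The new set_cs_timing hook is paired with the spi_set_cs_timing() core helper that the kerneldoc above refers to; a client driver needing non-default chip-select timing would call it once after spi_setup(). The cycle counts below are made up and error handling is trimmed — this is an illustrative fragment, not a recipe from the patch.

	/* Hypothetical client: 2 clocks of CS setup and hold, 4 idle
	 * clocks between transfers, requested after the usual setup. */
	ret = spi_setup(spi);
	if (ret)
		return ret;
	spi_set_cs_timing(spi, 2, 2, 4);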
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index b7e021b274dc..4444c2a992cb 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -44,6 +44,7 @@ extern int spi_bitbang_setup_transfer(struct spi_device *spi,
/* start or stop queue processing */
extern int spi_bitbang_start(struct spi_bitbang *spi);
+extern int spi_bitbang_init(struct spi_bitbang *spi);
extern void spi_bitbang_stop(struct spi_bitbang *spi);
#endif /* __SPI_BITBANG_H */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index e089157dcf97..ed7c4d6b8235 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -57,6 +57,7 @@
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <asm/barrier.h>
+#include <asm/mmiowb.h>
/*
@@ -178,6 +179,7 @@ static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
__acquire(lock);
arch_spin_lock(&lock->raw_lock);
+ mmiowb_spin_lock();
}
#ifndef arch_spin_lock_flags
@@ -189,15 +191,22 @@ do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lo
{
__acquire(lock);
arch_spin_lock_flags(&lock->raw_lock, *flags);
+ mmiowb_spin_lock();
}
static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
- return arch_spin_trylock(&(lock)->raw_lock);
+ int ret = arch_spin_trylock(&(lock)->raw_lock);
+
+ if (ret)
+ mmiowb_spin_lock();
+
+ return ret;
}
static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
+ mmiowb_spin_unlock();
arch_spin_unlock(&lock->raw_lock);
__release(lock);
}
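With mmiowb_spin_lock()/mmiowb_spin_unlock() folded into the raw spinlock paths above, a driver that orders MMIO writes with a spinlock no longer needs an explicit mmiowb() before dropping the lock. A hedged before/after sketch (foo_* names are illustrative):

	spin_lock_irqsave(&foo->lock, flags);
	writel(val, foo->regs + FOO_DOORBELL);
	/* mmiowb();	previously required on some architectures;
	 *		now implied by the unlock path below. */
	spin_unlock_irqrestore(&foo->lock, flags);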
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index c495b2d51569..e432cc92c73d 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -56,45 +56,11 @@ struct srcu_struct { };
void call_srcu(struct srcu_struct *ssp, struct rcu_head *head,
void (*func)(struct rcu_head *head));
-void _cleanup_srcu_struct(struct srcu_struct *ssp, bool quiesced);
+void cleanup_srcu_struct(struct srcu_struct *ssp);
int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp);
void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp);
void synchronize_srcu(struct srcu_struct *ssp);
-/**
- * cleanup_srcu_struct - deconstruct a sleep-RCU structure
- * @ssp: structure to clean up.
- *
- * Must invoke this after you are finished using a given srcu_struct that
- * was initialized via init_srcu_struct(), else you leak memory.
- */
-static inline void cleanup_srcu_struct(struct srcu_struct *ssp)
-{
- _cleanup_srcu_struct(ssp, false);
-}
-
-/**
- * cleanup_srcu_struct_quiesced - deconstruct a quiesced sleep-RCU structure
- * @ssp: structure to clean up.
- *
- * Must invoke this after you are finished using a given srcu_struct that
- * was initialized via init_srcu_struct(), else you leak memory. Also,
- * all grace-period processing must have completed.
- *
- * "Completed" means that the last synchronize_srcu() and
- * synchronize_srcu_expedited() calls must have returned before the call
- * to cleanup_srcu_struct_quiesced(). It also means that the callback
- * from the last call_srcu() must have been invoked before the call to
- * cleanup_srcu_struct_quiesced(), but you can use srcu_barrier() to help
- * with this last. Violating these rules will get you a WARN_ON() splat
- * (with high probability, anyway), and will also cause the srcu_struct
- * to be leaked.
- */
-static inline void cleanup_srcu_struct_quiesced(struct srcu_struct *ssp)
-{
- _cleanup_srcu_struct(ssp, true);
-}
-
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
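With the quiesced variant removed, cleanup_srcu_struct() is again the single teardown path; callers just have to make sure all SRCU callbacks and grace periods have completed first. A minimal lifecycle sketch (example_srcu is illustrative):

static struct srcu_struct example_srcu;

static int example_init(void)
{
	return init_srcu_struct(&example_srcu);
}

static void example_exit(void)
{
	/* Wait for any pending call_srcu() callbacks, then tear down. */
	srcu_barrier(&example_srcu);
	cleanup_srcu_struct(&example_srcu);
}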
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index 7978b3e2c1e1..0805dee1b6b8 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -23,10 +23,10 @@
typedef u32 depot_stack_handle_t;
-struct stack_trace;
+depot_stack_handle_t stack_depot_save(unsigned long *entries,
+ unsigned int nr_entries, gfp_t gfp_flags);
-depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags);
-
-void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace);
+unsigned int stack_depot_fetch(depot_stack_handle_t handle,
+ unsigned long **entries);
#endif
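The stack depot now works directly on an array of entries instead of struct stack_trace. A hedged usage sketch, pairing it with stack_trace_save() from the reworked <linux/stacktrace.h> further down in this diff:

	unsigned long entries[16];
	unsigned long *saved;
	depot_stack_handle_t handle;
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	handle = stack_depot_save(entries, nr, GFP_KERNEL);

	/* later, e.g. when reporting: */
	nr = stack_depot_fetch(handle, &saved);
	stack_trace_print(saved, nr, 0);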
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index ba29a0613e66..f0cfd12cb45e 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -3,11 +3,64 @@
#define __LINUX_STACKTRACE_H
#include <linux/types.h>
+#include <asm/errno.h>
struct task_struct;
struct pt_regs;
#ifdef CONFIG_STACKTRACE
+void stack_trace_print(unsigned long *trace, unsigned int nr_entries,
+ int spaces);
+int stack_trace_snprint(char *buf, size_t size, unsigned long *entries,
+ unsigned int nr_entries, int spaces);
+unsigned int stack_trace_save(unsigned long *store, unsigned int size,
+ unsigned int skipnr);
+unsigned int stack_trace_save_tsk(struct task_struct *task,
+ unsigned long *store, unsigned int size,
+ unsigned int skipnr);
+unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
+ unsigned int size, unsigned int skipnr);
+unsigned int stack_trace_save_user(unsigned long *store, unsigned int size);
+
+/* Internal interfaces. Do not use in generic code */
+#ifdef CONFIG_ARCH_STACKWALK
+
+/**
+ * stack_trace_consume_fn - Callback for arch_stack_walk()
+ * @cookie: Caller supplied pointer handed back by arch_stack_walk()
+ * @addr: The stack entry address to consume
+ * @reliable: True when the stack entry is reliable. Required by
+ * some printk based consumers.
+ *
+ * Return: True, if the entry was consumed or skipped
+ * False, if there is no space left to store
+ */
+typedef bool (*stack_trace_consume_fn)(void *cookie, unsigned long addr,
+ bool reliable);
+/**
+ * arch_stack_walk - Architecture specific function to walk the stack
+ * @consume_entry: Callback which is invoked by the architecture code for
+ * each entry.
+ * @cookie: Caller supplied pointer which is handed back to
+ * @consume_entry
+ * @task: Pointer to a task struct, can be NULL
+ * @regs: Pointer to registers, can be NULL
+ *
+ * ============ ======= ============================================
+ * task regs
+ * ============ ======= ============================================
+ * task NULL Stack trace from task (can be current)
+ * current regs Stack trace starting on regs->stackpointer
+ * ============ ======= ============================================
+ */
+void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ struct task_struct *task, struct pt_regs *regs);
+int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry, void *cookie,
+ struct task_struct *task);
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+ const struct pt_regs *regs);
+
+#else /* CONFIG_ARCH_STACKWALK */
struct stack_trace {
unsigned int nr_entries, max_entries;
unsigned long *entries;
@@ -21,24 +74,20 @@ extern void save_stack_trace_tsk(struct task_struct *tsk,
struct stack_trace *trace);
extern int save_stack_trace_tsk_reliable(struct task_struct *tsk,
struct stack_trace *trace);
-
-extern void print_stack_trace(struct stack_trace *trace, int spaces);
-extern int snprint_stack_trace(char *buf, size_t size,
- struct stack_trace *trace, int spaces);
-
-#ifdef CONFIG_USER_STACKTRACE_SUPPORT
extern void save_stack_trace_user(struct stack_trace *trace);
+#endif /* !CONFIG_ARCH_STACKWALK */
+#endif /* CONFIG_STACKTRACE */
+
+#if defined(CONFIG_STACKTRACE) && defined(CONFIG_HAVE_RELIABLE_STACKTRACE)
+int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
+ unsigned int size);
#else
-# define save_stack_trace_user(trace) do { } while (0)
+static inline int stack_trace_save_tsk_reliable(struct task_struct *tsk,
+ unsigned long *store,
+ unsigned int size)
+{
+ return -ENOSYS;
+}
#endif
-#else /* !CONFIG_STACKTRACE */
-# define save_stack_trace(trace) do { } while (0)
-# define save_stack_trace_tsk(tsk, trace) do { } while (0)
-# define save_stack_trace_user(trace) do { } while (0)
-# define print_stack_trace(trace, spaces) do { } while (0)
-# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0)
-# define save_stack_trace_tsk_reliable(tsk, trace) ({ -ENOSYS; })
-#endif /* CONFIG_STACKTRACE */
-
#endif /* __LINUX_STACKTRACE_H */
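On architectures that select CONFIG_ARCH_STACKWALK, the generic code drives the walk through a stack_trace_consume_fn callback. A hedged sketch of such a consumer that fills a fixed-size buffer and stops when it runs out of space (example_* names are illustrative):

struct example_trace {
	unsigned long	entries[32];
	unsigned int	nr;
};

static bool example_consume(void *cookie, unsigned long addr, bool reliable)
{
	struct example_trace *t = cookie;

	if (t->nr >= ARRAY_SIZE(t->entries))
		return false;		/* no space left, stop the walk */

	t->entries[t->nr++] = addr;
	return true;			/* entry consumed, keep walking */
}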
diff --git a/include/linux/string.h b/include/linux/string.h
index 7927b875f80c..4deb11f7976b 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -31,6 +31,10 @@ size_t strlcpy(char *, const char *, size_t);
#ifndef __HAVE_ARCH_STRSCPY
ssize_t strscpy(char *, const char *, size_t);
#endif
+
+/* Wraps calls to strscpy()/memset(), no arch specific code required */
+ssize_t strscpy_pad(char *dest, const char *src, size_t count);
+
#ifndef __HAVE_ARCH_STRCAT
extern char * strcat(char *, const char *);
#endif
@@ -150,6 +154,9 @@ extern void * memscan(void *,int,__kernel_size_t);
#ifndef __HAVE_ARCH_MEMCMP
extern int memcmp(const void *,const void *,__kernel_size_t);
#endif
+#ifndef __HAVE_ARCH_BCMP
+extern int bcmp(const void *,const void *,__kernel_size_t);
+#endif
#ifndef __HAVE_ARCH_MEMCHR
extern void * memchr(const void *,int,__kernel_size_t);
#endif
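strscpy_pad() behaves like strscpy() but zero-fills the remainder of the destination, which matters when the buffer is later copied to user space or to hardware. A small hedged example (the struct and helper are illustrative):

struct example_record {
	char	name[16];
};

static int example_set_name(struct example_record *rec, const char *src)
{
	ssize_t len;

	/* Copies at most 15 bytes plus NUL and zeroes the rest of name[],
	 * so no stale data leaks if the record is copied out later. */
	len = strscpy_pad(rec->name, src, sizeof(rec->name));
	return len < 0 ? -E2BIG : 0;	/* negative return means truncation */
}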
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 98bc9883b230..6e8073140a5d 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -50,6 +50,7 @@ struct rpc_clnt {
struct rpc_iostats * cl_metrics; /* per-client statistics */
unsigned int cl_softrtry : 1,/* soft timeouts */
+ cl_softerr : 1,/* Timeouts return errors */
cl_discrtry : 1,/* disconnect before retry */
cl_noretranstimeo: 1,/* No retransmit timeouts */
cl_autobind : 1,/* use getport() */
@@ -71,6 +72,7 @@ struct rpc_clnt {
struct dentry *cl_debugfs; /* debugfs directory */
#endif
struct rpc_xprt_iter cl_xpi;
+ const struct cred *cl_cred;
};
/*
@@ -125,6 +127,7 @@ struct rpc_create_args {
unsigned long flags;
char *client_name;
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
+ const struct cred *cred;
};
struct rpc_add_xprt_test {
@@ -144,6 +147,7 @@ struct rpc_add_xprt_test {
#define RPC_CLNT_CREATE_INFINITE_SLOTS (1UL << 7)
#define RPC_CLNT_CREATE_NO_IDLE_TIMEOUT (1UL << 8)
#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
+#define RPC_CLNT_CREATE_SOFTERR (1UL << 10)
struct rpc_clnt *rpc_create(struct rpc_create_args *args);
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index ec861cd0cfe8..d0e451868f02 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -35,7 +35,6 @@ struct rpc_wait {
struct list_head list; /* wait queue links */
struct list_head links; /* Links to related tasks */
struct list_head timer_list; /* Timer list */
- unsigned long expires;
};
/*
@@ -62,6 +61,8 @@ struct rpc_task {
struct rpc_wait tk_wait; /* RPC wait */
} u;
+ int tk_rpc_status; /* Result of last RPC operation */
+
/*
* RPC call state
*/
@@ -125,7 +126,6 @@ struct rpc_task_setup {
#define RPC_CALL_MAJORSEEN 0x0020 /* major timeout seen */
#define RPC_TASK_ROOTCREDS 0x0040 /* force root creds */
#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
-#define RPC_TASK_KILLED 0x0100 /* task was killed */
#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
#define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */
#define RPC_TASK_SENT 0x0800 /* message was sent */
@@ -135,7 +135,6 @@ struct rpc_task_setup {
#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
-#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED)
#define RPC_IS_SOFT(t) ((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT))
#define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN)
#define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT)
@@ -146,6 +145,7 @@ struct rpc_task_setup {
#define RPC_TASK_NEED_XMIT 3
#define RPC_TASK_NEED_RECV 4
#define RPC_TASK_MSG_PIN_WAIT 5
+#define RPC_TASK_SIGNALLED 6
#define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
@@ -169,6 +169,8 @@ struct rpc_task_setup {
#define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
+#define RPC_SIGNALLED(t) test_bit(RPC_TASK_SIGNALLED, &(t)->tk_runstate)
+
/*
* Task priorities.
* Note: if you change these, you must also change
@@ -183,7 +185,6 @@ struct rpc_task_setup {
struct rpc_timer {
struct timer_list timer;
struct list_head list;
- unsigned long expires;
};
/*
@@ -217,6 +218,7 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req);
void rpc_put_task(struct rpc_task *);
void rpc_put_task_async(struct rpc_task *);
+void rpc_signal_task(struct rpc_task *);
void rpc_exit_task(struct rpc_task *);
void rpc_exit(struct rpc_task *, int);
void rpc_release_calldata(const struct rpc_call_ops *, void *);
@@ -225,11 +227,19 @@ void rpc_execute(struct rpc_task *);
void rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
void rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
void rpc_destroy_wait_queue(struct rpc_wait_queue *);
+unsigned long rpc_task_timeout(const struct rpc_task *task);
+void rpc_sleep_on_timeout(struct rpc_wait_queue *queue,
+ struct rpc_task *task,
+ rpc_action action,
+ unsigned long timeout);
void rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *,
rpc_action action);
+void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *queue,
+ struct rpc_task *task,
+ unsigned long timeout,
+ int priority);
void rpc_sleep_on_priority(struct rpc_wait_queue *,
struct rpc_task *,
- rpc_action action,
int priority);
void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
struct rpc_wait_queue *queue,
@@ -304,12 +314,4 @@ rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
}
#endif /* CONFIG_SUNRPC_SWAP */
-static inline bool
-rpc_task_need_resched(const struct rpc_task *task)
-{
- if (RPC_IS_QUEUED(task) || task->tk_callback)
- return true;
- return false;
-}
-
#endif /* _LINUX_SUNRPC_SCHED_H_ */
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 3a391544299e..a6d9fce7f20e 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -143,7 +143,7 @@ struct rpc_xprt_ops {
void (*buf_free)(struct rpc_task *task);
void (*prepare_request)(struct rpc_rqst *req);
int (*send_request)(struct rpc_rqst *req);
- void (*set_retrans_timeout)(struct rpc_task *task);
+ void (*wait_for_reply_request)(struct rpc_task *task);
void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
void (*release_request)(struct rpc_task *task);
void (*close)(struct rpc_xprt *xprt);
@@ -378,8 +378,8 @@ xprt_disable_swap(struct rpc_xprt *xprt)
int xprt_register_transport(struct xprt_class *type);
int xprt_unregister_transport(struct xprt_class *type);
int xprt_load_transport(const char *);
-void xprt_set_retrans_timeout_def(struct rpc_task *task);
-void xprt_set_retrans_timeout_rtt(struct rpc_task *task);
+void xprt_wait_for_reply_request_def(struct rpc_task *task);
+void xprt_wait_for_reply_request_rtt(struct rpc_task *task);
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt);
bool xprt_write_space(struct rpc_xprt *xprt);
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 3f529ad9a9d2..6b3ea9ea6a9e 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -425,6 +425,7 @@ void restore_processor_state(void);
/* kernel/power/main.c */
extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);
+extern void ksys_sync_helper(void);
#define pm_notifier(fn, pri) { \
static struct notifier_block fn##_nb = \
@@ -462,6 +463,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
return 0;
}
+static inline void ksys_sync_helper(void) {}
+
#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
static inline bool pm_wakeup_pending(void) { return false; }
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
index 52a079b3a9a6..0cfc34ac37fb 100644
--- a/include/linux/switchtec.h
+++ b/include/linux/switchtec.h
@@ -20,7 +20,7 @@
#include <linux/cdev.h>
#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024
-#define SWITCHTEC_MAX_PFF_CSR 48
+#define SWITCHTEC_MAX_PFF_CSR 255
#define SWITCHTEC_EVENT_OCCURRED BIT(0)
#define SWITCHTEC_EVENT_CLEAR BIT(0)
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index e446806a561f..e2870fe1be5b 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -985,6 +985,15 @@ asmlinkage long sys_statx(int dfd, const char __user *path, unsigned flags,
unsigned mask, struct statx __user *buffer);
asmlinkage long sys_rseq(struct rseq __user *rseq, uint32_t rseq_len,
int flags, uint32_t sig);
+asmlinkage long sys_open_tree(int dfd, const char __user *path, unsigned flags);
+asmlinkage long sys_move_mount(int from_dfd, const char __user *from_path,
+ int to_dfd, const char __user *to_path,
+ unsigned int ms_flags);
+asmlinkage long sys_fsopen(const char __user *fs_name, unsigned int flags);
+asmlinkage long sys_fsconfig(int fs_fd, unsigned int cmd, const char __user *key,
+ const void __user *value, int aux);
+asmlinkage long sys_fsmount(int fs_fd, unsigned int flags, unsigned int ms_flags);
+asmlinkage long sys_fspick(int dfd, const char __user *path, unsigned int flags);
asmlinkage long sys_pidfd_send_signal(int pidfd, int sig,
siginfo_t __user *info,
unsigned int flags);
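The six new declarations are the fd-based mount API. A hedged userspace sketch of the usual fsopen → fsconfig → fsmount → move_mount sequence follows; it assumes a libc that exposes the SYS_* numbers and the FSCONFIG_*/MOVE_MOUNT_* constants from <linux/mount.h>, and it omits error handling for brevity.

#include <unistd.h>
#include <fcntl.h>
#include <sys/syscall.h>
#include <linux/mount.h>

static int example_mount_tmpfs(const char *target)
{
	int fsfd, mntfd;

	fsfd = syscall(SYS_fsopen, "tmpfs", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "size", "16M", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);

	mntfd = syscall(SYS_fsmount, fsfd, 0, 0);
	syscall(SYS_move_mount, mntfd, "", AT_FDCWD, target,
		MOVE_MOUNT_F_EMPTY_PATH);

	close(fsfd);
	close(mntfd);
	return 0;
}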
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index bf6ec83e60ee..2d7e012db03f 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -181,6 +181,8 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
* @device_name: Name of the device (or %NULL if not known)
* @is_unplugged: The XDomain is unplugged
* @resume: The XDomain is being resumed
+ * @needs_uuid: If the XDomain does not have @remote_uuid it will be
+ * queried first
* @transmit_path: HopID which the remote end expects us to transmit
* @transmit_ring: Local ring (hop) where outgoing packets are pushed
* @receive_path: HopID which we expect the remote end to transmit
@@ -189,6 +191,9 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
* @properties: Properties exported by the remote domain
* @property_block_gen: Generation of @properties
* @properties_lock: Lock protecting @properties.
+ * @get_uuid_work: Work used to retrieve @remote_uuid
+ * @uuid_retries: Number of times left @remote_uuid is requested before
+ * giving up
* @get_properties_work: Work used to get remote domain properties
* @properties_retries: Number of times left to read properties
* @properties_changed_work: Work used to notify the remote domain that
@@ -220,6 +225,7 @@ struct tb_xdomain {
const char *device_name;
bool is_unplugged;
bool resume;
+ bool needs_uuid;
u16 transmit_path;
u16 transmit_ring;
u16 receive_path;
@@ -227,6 +233,8 @@ struct tb_xdomain {
struct ida service_ids;
struct tb_property_dir *properties;
u32 property_block_gen;
+ struct delayed_work get_uuid_work;
+ int uuid_retries;
struct delayed_work get_properties_work;
int properties_retries;
struct delayed_work properties_changed_work;
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 55388ab45fd4..f92a10b5e112 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -68,6 +68,12 @@ extern void tick_broadcast_control(enum tick_broadcast_mode mode);
static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { }
#endif /* BROADCAST */
+#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_HOTPLUG_CPU)
+extern void tick_offline_cpu(unsigned int cpu);
+#else
+static inline void tick_offline_cpu(unsigned int cpu) { }
+#endif
+
#ifdef CONFIG_GENERIC_CLOCKEVENTS
extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state);
#else
@@ -122,6 +128,7 @@ extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern bool tick_nohz_idle_got_tick(void);
+extern ktime_t tick_nohz_get_next_hrtimer(void);
extern ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next);
extern unsigned long tick_nohz_get_idle_calls(void);
extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
@@ -145,7 +152,11 @@ static inline void tick_nohz_idle_restart_tick(void) { }
static inline void tick_nohz_idle_enter(void) { }
static inline void tick_nohz_idle_exit(void) { }
static inline bool tick_nohz_idle_got_tick(void) { return false; }
-
+static inline ktime_t tick_nohz_get_next_hrtimer(void)
+{
+ /* Next wake up is the tick period, assume it starts now */
+ return ktime_add(ktime_get(), TICK_NSEC);
+}
static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
{
*delta_next = TICK_NSEC;
diff --git a/include/linux/time64.h b/include/linux/time64.h
index f38d382ffec1..a620ee610b9f 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -33,6 +33,17 @@ struct itimerspec64 {
#define KTIME_MAX ((s64)~((u64)1 << 63))
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
+/*
+ * Limits for settimeofday():
+ *
+ * To prevent setting the time close to the wraparound point, time setting
+ * is limited so a reasonable uptime can be accommodated. Uptime of 30 years
+ * should be really sufficient, which means the cutoff is 2232. At that
+ * point the cutoff is just a small part of the larger problem.
+ */
+#define TIME_UPTIME_SEC_MAX (30LL * 365 * 24 *3600)
+#define TIME_SETTOD_SEC_MAX (KTIME_SEC_MAX - TIME_UPTIME_SEC_MAX)
+
static inline int timespec64_equal(const struct timespec64 *a,
const struct timespec64 *b)
{
@@ -100,6 +111,16 @@ static inline bool timespec64_valid_strict(const struct timespec64 *ts)
return true;
}
+static inline bool timespec64_valid_settod(const struct timespec64 *ts)
+{
+ if (!timespec64_valid(ts))
+ return false;
+ /* Disallow values which cause overflow issues vs. CLOCK_REALTIME */
+ if ((unsigned long long)ts->tv_sec >= TIME_SETTOD_SEC_MAX)
+ return false;
+ return true;
+}
+
/**
* timespec64_to_ns - Convert timespec64 to nanoseconds
* @ts: pointer to the timespec64 variable to be converted
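timespec64_valid_settod() is meant for settimeofday()-style paths: it rejects values that would push CLOCK_REALTIME too close to the KTIME_MAX wraparound once a long uptime is added. A minimal hedged sketch of a caller (example_settime() is illustrative, not a kernel function):

static int example_settime(const struct timespec64 *ts)
{
	if (!timespec64_valid_settod(ts))
		return -EINVAL;

	/* ... program the new wall-clock time ... */
	return 0;
}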
diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h
index 49ba9cde7e4b..b29950a19205 100644
--- a/include/linux/tracepoint-defs.h
+++ b/include/linux/tracepoint-defs.h
@@ -45,6 +45,7 @@ struct bpf_raw_event_map {
struct tracepoint *tp;
void *bpf_func;
u32 num_args;
+ u32 writable_size;
} __aligned(32);
#endif
diff --git a/include/linux/types.h b/include/linux/types.h
index cc0dbbe551d5..231114ae38f4 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -127,13 +127,8 @@ typedef s64 int64_t;
*
* blkcnt_t is the type of the inode's block count.
*/
-#ifdef CONFIG_LBDAF
typedef u64 sector_t;
typedef u64 blkcnt_t;
-#else
-typedef unsigned long sector_t;
-typedef unsigned long blkcnt_t;
-#endif
/*
* The type of an index into the pagecache.
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 37b226e8df13..2b70130af585 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -268,6 +268,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
#define user_access_end() do { } while (0)
#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
+static inline unsigned long user_access_save(void) { return 0UL; }
+static inline void user_access_restore(unsigned long flags) { }
#endif
#ifdef CONFIG_HARDENED_USERCOPY
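The new user_access_save()/user_access_restore() stubs mirror the arch helpers that temporarily park an open user-access window, e.g. around a report path that must not run with user-space access enabled. A hedged usage sketch:

	unsigned long ua_flags;

	ua_flags = user_access_save();	/* close any open user-access window */
	/* ... e.g. emit a diagnostic report safely ... */
	user_access_restore(ua_flags);	/* put the previous state back */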
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 87477e1640f9..2d0131ad4604 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -23,14 +23,23 @@ struct kvec {
};
enum iter_type {
- ITER_IOVEC = 0,
- ITER_KVEC = 2,
- ITER_BVEC = 4,
- ITER_PIPE = 8,
- ITER_DISCARD = 16,
+ /* set if ITER_BVEC doesn't hold a bv_page ref */
+ ITER_BVEC_FLAG_NO_REF = 2,
+
+ /* iter types */
+ ITER_IOVEC = 4,
+ ITER_KVEC = 8,
+ ITER_BVEC = 16,
+ ITER_PIPE = 32,
+ ITER_DISCARD = 64,
};
struct iov_iter {
+ /*
+ * Bit 0 is the read/write bit, set if we're writing.
+ * Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and
+ * the caller isn't expecting to drop a page reference when done.
+ */
unsigned int type;
size_t iov_offset;
size_t count;
@@ -51,7 +60,7 @@ struct iov_iter {
static inline enum iter_type iov_iter_type(const struct iov_iter *i)
{
- return i->type & ~(READ | WRITE);
+ return i->type & ~(READ | WRITE | ITER_BVEC_FLAG_NO_REF);
}
static inline bool iter_is_iovec(const struct iov_iter *i)
@@ -84,6 +93,11 @@ static inline unsigned char iov_iter_rw(const struct iov_iter *i)
return i->type & (READ | WRITE);
}
+static inline bool iov_iter_bvec_no_ref(const struct iov_iter *i)
+{
+ return (i->type & ITER_BVEC_FLAG_NO_REF) != 0;
+}
+
/*
* Total number of bytes covered by an iovec.
*
diff --git a/include/linux/unicode.h b/include/linux/unicode.h
new file mode 100644
index 000000000000..aec2c6d800aa
--- /dev/null
+++ b/include/linux/unicode.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_UNICODE_H
+#define _LINUX_UNICODE_H
+
+#include <linux/init.h>
+#include <linux/dcache.h>
+
+struct unicode_map {
+ const char *charset;
+ int version;
+};
+
+int utf8_validate(const struct unicode_map *um, const struct qstr *str);
+
+int utf8_strncmp(const struct unicode_map *um,
+ const struct qstr *s1, const struct qstr *s2);
+
+int utf8_strncasecmp(const struct unicode_map *um,
+ const struct qstr *s1, const struct qstr *s2);
+
+int utf8_normalize(const struct unicode_map *um, const struct qstr *str,
+ unsigned char *dest, size_t dlen);
+
+int utf8_casefold(const struct unicode_map *um, const struct qstr *str,
+ unsigned char *dest, size_t dlen);
+
+struct unicode_map *utf8_load(const char *version);
+void utf8_unload(struct unicode_map *um);
+
+#endif /* _LINUX_UNICODE_H */
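The new <linux/unicode.h> API is small enough to show end to end: load a table for a given Unicode version, compare strings through it, and drop it again. The version string, the ERR_PTR handling and the helper name below are illustrative assumptions, not requirements stated by the header.

#include <linux/unicode.h>

static int example_name_match(const struct qstr *a, const struct qstr *b)
{
	struct unicode_map *um;
	int ret;

	um = utf8_load("12.1.0");	/* illustrative version string */
	if (IS_ERR(um))
		return PTR_ERR(um);

	/* 0 means the names match under case folding; negative on
	 * invalid UTF-8 input. */
	ret = utf8_strncasecmp(um, a, b);

	utf8_unload(um);
	return ret;
}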
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 103a48a48872..12bf0b68ed92 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -115,6 +115,7 @@ struct uprobes_state {
struct xol_area *xol_area;
};
+extern void __init uprobes_init(void);
extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
extern bool is_swbp_insn(uprobe_opcode_t *insn);
@@ -154,6 +155,10 @@ extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
struct uprobes_state {
};
+static inline void uprobes_init(void)
+{
+}
+
#define uprobe_get_trap_addr(regs) instruction_pointer(regs)
static inline int
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 5e49e82c4368..ae82d9d1112b 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -200,7 +200,6 @@ usb_find_last_int_out_endpoint(struct usb_host_interface *alt,
* @dev: driver model's view of this device
* @usb_dev: if an interface is bound to the USB major, this will point
* to the sysfs representation for that device.
- * @pm_usage_cnt: PM usage counter for this interface
* @reset_ws: Used for scheduling resets from atomic context.
* @resetting_device: USB core reset the device, so use alt setting 0 as
* current; needs bandwidth alloc after reset.
@@ -257,7 +256,6 @@ struct usb_interface {
struct device dev; /* interface specific device info */
struct device *usb_dev;
- atomic_t pm_usage_cnt; /* usage counter for autosuspend */
struct work_struct reset_ws; /* for resets in atomic context */
};
#define to_usb_interface(d) container_of(d, struct usb_interface, dev)
@@ -1545,10 +1543,10 @@ typedef void (*usb_complete_t)(struct urb *);
struct urb {
/* private: usb core and host controller only fields in the urb */
struct kref kref; /* reference count of the URB */
+ int unlinked; /* unlink error code */
void *hcpriv; /* private data for host controller */
atomic_t use_count; /* concurrent submissions counter */
atomic_t reject; /* submissions will fail */
- int unlinked; /* unlink error code */
/* public: documented fields in the urb that can be used by drivers */
struct list_head urb_list; /* list head for use by the urb's
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index 523aa088f6ab..da82606be605 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -37,6 +37,14 @@
#include <uapi/linux/usb/ch9.h>
/**
+ * usb_ep_type_string() - Returns human readable-name of the endpoint type.
+ * @ep_type: The endpoint type to return human-readable name for. If it's not
+ * any of the types: USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT},
+ * usually obtained from usb_endpoint_type(), the string 'unknown' will be returned.
+ */
+extern const char *usb_ep_type_string(int ep_type);
+
+/**
* usb_speed_string() - Returns human readable-name of the speed.
* @speed: The speed to return human-readable name for. If it's not
* any of the speeds defined in usb_device_speed enum, string for
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 695931b03684..bb57b5af4700 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -98,6 +98,7 @@ struct usb_hcd {
#ifdef CONFIG_PM
struct work_struct wakeup_work; /* for remote wakeup */
#endif
+ struct work_struct died_work; /* for when the device dies */
/*
* hardware info/state
@@ -652,11 +653,16 @@ extern wait_queue_head_t usb_kill_urb_queue;
#define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN))
#ifdef CONFIG_PM
+extern unsigned usb_wakeup_enabled_descendants(struct usb_device *udev);
extern void usb_root_hub_lost_power(struct usb_device *rhdev);
extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg);
extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg);
extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd);
#else
+static inline unsigned usb_wakeup_enabled_descendants(struct usb_device *udev)
+{
+ return 0;
+}
static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd)
{
return;
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 1c19f77ed541..14cac4a1ae8f 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -23,11 +23,9 @@
/* The maximum number of ports one device can grab at once */
#define MAX_NUM_PORTS 16
-/* parity check flag */
-#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
-
/* USB serial flags */
#define USB_SERIAL_WRITE_BUSY 0
+#define USB_SERIAL_THROTTLED 1
/**
* usb_serial_port: structure for the specific ports of a device.
@@ -67,8 +65,6 @@
* @flags: usb serial port flags
* @write_wait: a wait_queue_head_t used by the port.
* @work: work queue entry for the line discipline waking up.
- * @throttled: nonzero if the read urb is inactive to throttle the device
- * @throttle_req: nonzero if the tty wants to throttle us
* @dev: pointer to the serial device
*
* This structure is used by the usb-serial core and drivers for the specific
@@ -115,8 +111,6 @@ struct usb_serial_port {
unsigned long flags;
wait_queue_head_t write_wait;
struct work_struct work;
- char throttled;
- char throttle_req;
unsigned long sysrq; /* sysrq timeout */
struct device dev;
};
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index 0c532ca3f079..36a15dcadc53 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -121,10 +121,10 @@ struct tcpc_config {
* with partner.
* @set_pd_rx: Called to enable or disable reception of PD messages
* @set_roles: Called to set power and data roles
- * @start_drp_toggling:
- * Optional; if supported by hardware, called to start DRP
- * toggling. DRP toggling is stopped automatically if
- * a connection is established.
+ * @start_toggling:
+ * Optional; if supported by hardware, called to start dual-role
+ * toggling or single-role connection detection. Toggling stops
+ * automatically if a connection is established.
* @try_role: Optional; called to set a preferred role
* @pd_transmit:Called to transmit PD message
* @mux: Pointer to multiplexer data
@@ -147,8 +147,9 @@ struct tcpc_dev {
int (*set_pd_rx)(struct tcpc_dev *dev, bool on);
int (*set_roles)(struct tcpc_dev *dev, bool attached,
enum typec_role role, enum typec_data_role data);
- int (*start_drp_toggling)(struct tcpc_dev *dev,
- enum typec_cc_status cc);
+ int (*start_toggling)(struct tcpc_dev *dev,
+ enum typec_port_type port_type,
+ enum typec_cc_status cc);
int (*try_role)(struct tcpc_dev *dev, int role);
int (*pd_transmit)(struct tcpc_dev *dev, enum tcpm_transmit_type type,
const struct pd_message *msg);
diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h
index 7fa12ef8d09a..fc4c7edb2e8a 100644
--- a/include/linux/usb/typec_dp.h
+++ b/include/linux/usb/typec_dp.h
@@ -5,6 +5,11 @@
#include <linux/usb/typec_altmode.h>
#define USB_TYPEC_DP_SID 0xff01
+/* USB IF has not assigned a Standard ID (SID) for VirtualLink,
+ * so the manufacturers of VirtualLink adapters use their Vendor
+ * IDs as the SVID.
+ */
+#define USB_TYPEC_NVIDIA_VLINK_SID 0x955 /* NVIDIA VirtualLink */
#define USB_TYPEC_DP_MODE 1
/*
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index 37c9eba75c98..ac9d71e24b81 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -28,6 +28,8 @@
#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
+extern int sysctl_unprivileged_userfaultfd;
+
extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);
extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h
index a240ed2a0372..ff56c443180c 100644
--- a/include/linux/vbox_utils.h
+++ b/include/linux/vbox_utils.h
@@ -24,15 +24,17 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...);
#define vbg_debug pr_debug
#endif
-int vbg_hgcm_connect(struct vbg_dev *gdev,
+int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
struct vmmdev_hgcm_service_location *loc,
u32 *client_id, int *vbox_status);
-int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status);
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
+ u32 client_id, int *vbox_status);
-int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
- u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
- u32 parm_count, int *vbox_status);
+int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
+ u32 function, u32 timeout_ms,
+ struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
+ int *vbox_status);
/**
* Convert a VirtualBox status code to a standard Linux kernel return value.
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index fab02133a919..3dc70adfe5f5 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -63,7 +63,7 @@ struct virtqueue;
/*
* Creates a virtqueue and allocates the descriptor ring. If
* may_reduce_num is set, then this may allocate a smaller ring than
- * expected. The caller should query virtqueue_get_ring_size to learn
+ * expected. The caller should query virtqueue_get_vring_size to learn
* the actual size of the ring.
*/
struct virtqueue *vring_create_virtqueue(unsigned int index,
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 398e9c95cd61..c6eebb839552 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -21,6 +21,11 @@ struct notifier_block; /* in notifier.h */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
#define VM_NO_GUARD 0x00000040 /* don't add guard page */
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
+/*
+ * Memory with VM_FLUSH_RESET_PERMS cannot be freed in an interrupt or with
+ * vfree_atomic().
+ */
+#define VM_FLUSH_RESET_PERMS 0x00000100 /* Reset direct map and flush TLB on unmap */
/* bits [20..32] reserved for arch specific ioremap internals */
/*
@@ -142,6 +147,13 @@ extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
pgprot_t prot, struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
+static inline void set_vm_flush_reset_perms(void *addr)
+{
+ struct vm_struct *vm = find_vm_area(addr);
+
+ if (vm)
+ vm->flags |= VM_FLUSH_RESET_PERMS;
+}
#else
static inline int
map_kernel_range_noflush(unsigned long start, unsigned long size,
@@ -157,6 +169,9 @@ static inline void
unmap_kernel_range(unsigned long addr, unsigned long size)
{
}
+static inline void set_vm_flush_reset_perms(void *addr)
+{
+}
#endif
/* Allocate/destroy a 'vmalloc' VM area. */
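set_vm_flush_reset_perms() tags a vmalloc area so that freeing it also resets the direct map and flushes the TLB, the pattern used for memory whose permissions change after allocation. A hedged sketch, assuming a page-aligned size and the set_memory_*() helpers from <asm/set_memory.h>:

#include <linux/vmalloc.h>
#include <asm/set_memory.h>

static void *example_alloc_exec(unsigned long size)
{
	void *buf = vmalloc(size);

	if (!buf)
		return NULL;

	/* Must be set before changing permissions; per the comment above,
	 * the area can then only be freed from non-atomic context. */
	set_vm_flush_reset_perms(buf);
	set_memory_ro((unsigned long)buf, size >> PAGE_SHIFT);
	set_memory_x((unsigned long)buf, size >> PAGE_SHIFT);

	return buf;
}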
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 2db8d60981fe..bdeda4b079fe 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -26,7 +26,7 @@ struct reclaim_stat {
unsigned nr_congested;
unsigned nr_writeback;
unsigned nr_immediate;
- unsigned nr_activate;
+ unsigned nr_activate[2];
unsigned nr_ref_keep;
unsigned nr_unmap_fail;
};
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index eaa1e762bf06..0c06178e4985 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -17,6 +17,7 @@
#define _VMW_VMCI_DEF_H_
#include <linux/atomic.h>
+#include <linux/bits.h>
/* Register offsets. */
#define VMCI_STATUS_ADDR 0x00
@@ -33,27 +34,27 @@
#define VMCI_MAX_DEVICES 1
/* Status register bits. */
-#define VMCI_STATUS_INT_ON 0x1
+#define VMCI_STATUS_INT_ON BIT(0)
/* Control register bits. */
-#define VMCI_CONTROL_RESET 0x1
-#define VMCI_CONTROL_INT_ENABLE 0x2
-#define VMCI_CONTROL_INT_DISABLE 0x4
+#define VMCI_CONTROL_RESET BIT(0)
+#define VMCI_CONTROL_INT_ENABLE BIT(1)
+#define VMCI_CONTROL_INT_DISABLE BIT(2)
/* Capabilities register bits. */
-#define VMCI_CAPS_HYPERCALL 0x1
-#define VMCI_CAPS_GUESTCALL 0x2
-#define VMCI_CAPS_DATAGRAM 0x4
-#define VMCI_CAPS_NOTIFICATIONS 0x8
-#define VMCI_CAPS_PPN64 0x10
+#define VMCI_CAPS_HYPERCALL BIT(0)
+#define VMCI_CAPS_GUESTCALL BIT(1)
+#define VMCI_CAPS_DATAGRAM BIT(2)
+#define VMCI_CAPS_NOTIFICATIONS BIT(3)
+#define VMCI_CAPS_PPN64 BIT(4)
/* Interrupt Cause register bits. */
-#define VMCI_ICR_DATAGRAM 0x1
-#define VMCI_ICR_NOTIFICATION 0x2
+#define VMCI_ICR_DATAGRAM BIT(0)
+#define VMCI_ICR_NOTIFICATION BIT(1)
/* Interrupt Mask register bits. */
-#define VMCI_IMR_DATAGRAM 0x1
-#define VMCI_IMR_NOTIFICATION 0x2
+#define VMCI_IMR_DATAGRAM BIT(0)
+#define VMCI_IMR_NOTIFICATION BIT(1)
/* Maximum MSI/MSI-X interrupt vectors in the device. */
#define VMCI_MAX_INTRS 2
@@ -463,9 +464,9 @@ struct vmci_datagram {
* datagram callback is invoked in a delayed context (not interrupt context).
*/
#define VMCI_FLAG_DG_NONE 0
-#define VMCI_FLAG_WELLKNOWN_DG_HND 0x1
-#define VMCI_FLAG_ANYCID_DG_HND 0x2
-#define VMCI_FLAG_DG_DELAYED_CB 0x4
+#define VMCI_FLAG_WELLKNOWN_DG_HND BIT(0)
+#define VMCI_FLAG_ANYCID_DG_HND BIT(1)
+#define VMCI_FLAG_DG_DELAYED_CB BIT(2)
/*
* Maximum supported size of a VMCI datagram for routable datagrams.
@@ -694,7 +695,7 @@ struct vmci_qp_detach_msg {
};
/* VMCI Doorbell API. */
-#define VMCI_FLAG_DELAYED_CB 0x01
+#define VMCI_FLAG_DELAYED_CB BIT(0)
typedef void (*vmci_callback) (void *client_data);
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 5f3efabc36f4..b6f77cf60dd7 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -101,7 +101,7 @@ init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t f
* lead to sporadic and non-obvious failure.
*
* Use either while holding wait_queue_head::lock or when used for wakeups
- * with an extra smp_mb() like:
+ * with an extra smp_mb() like::
*
* CPU0 - waker CPU1 - waiter
*
diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h
index 814eeef35a5c..57b3a9f6ea1d 100644
--- a/include/media/cec-notifier.h
+++ b/include/media/cec-notifier.h
@@ -9,7 +9,7 @@
#ifndef LINUX_CEC_NOTIFIER_H
#define LINUX_CEC_NOTIFIER_H
-#include <linux/types.h>
+#include <linux/err.h>
#include <media/cec.h>
struct device;
@@ -87,6 +87,17 @@ void cec_notifier_unregister(struct cec_notifier *n);
void cec_register_cec_notifier(struct cec_adapter *adap,
struct cec_notifier *notifier);
+/**
+ * cec_notifier_parse_hdmi_phandle - find the hdmi device from "hdmi-phandle"
+ * @dev: the device with the "hdmi-phandle" device tree property
+ *
+ * Returns the device pointer referenced by the "hdmi-phandle" property.
+ * Note that the refcount of the returned device is not incremented.
+ * This device pointer is only used as a key value in the notifier
+ * list, but it is never accessed by the CEC driver.
+ */
+struct device *cec_notifier_parse_hdmi_phandle(struct device *dev);
+
#else
static inline struct cec_notifier *cec_notifier_get_conn(struct device *dev,
const char *conn)
@@ -122,6 +133,12 @@ static inline void cec_register_cec_notifier(struct cec_adapter *adap,
struct cec_notifier *notifier)
{
}
+
+static inline struct device *cec_notifier_parse_hdmi_phandle(struct device *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+
#endif
/**
diff --git a/include/media/dvb-usb-ids.h b/include/media/dvb-usb-ids.h
index f9e73b4a6e89..52875e3eee71 100644
--- a/include/media/dvb-usb-ids.h
+++ b/include/media/dvb-usb-ids.h
@@ -258,6 +258,7 @@
#define USB_PID_AVERMEDIA_A867 0xa867
#define USB_PID_AVERMEDIA_H335 0x0335
#define USB_PID_AVERMEDIA_TD110 0xa110
+#define USB_PID_AVERMEDIA_TD310 0x1871
#define USB_PID_AVERMEDIA_TWINSTAR 0x0825
#define USB_PID_TECHNOTREND_CONNECT_S2400 0x3006
#define USB_PID_TECHNOTREND_CONNECT_S2400_8KEEPROM 0x3009
diff --git a/include/media/fwht-ctrls.h b/include/media/fwht-ctrls.h
new file mode 100644
index 000000000000..615027410e47
--- /dev/null
+++ b/include/media/fwht-ctrls.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * These are the FWHT state controls for use with stateless FWHT
+ * codec drivers.
+ *
+ * It turns out that these structs are not stable yet and will undergo
+ * more changes. So keep them private until they are stable and ready to
+ * become part of the official public API.
+ */
+
+#ifndef _FWHT_CTRLS_H_
+#define _FWHT_CTRLS_H_
+
+#define V4L2_CTRL_TYPE_FWHT_PARAMS 0x0105
+
+#define V4L2_CID_MPEG_VIDEO_FWHT_PARAMS (V4L2_CID_MPEG_BASE + 292)
+
+struct v4l2_ctrl_fwht_params {
+ __u64 backward_ref_ts;
+ __u32 version;
+ __u32 width;
+ __u32 height;
+ __u32 flags;
+ __u32 colorspace;
+ __u32 xfer_func;
+ __u32 ycbcr_enc;
+ __u32 quantization;
+};
+
+
+#endif
diff --git a/include/media/media-dev-allocator.h b/include/media/media-dev-allocator.h
new file mode 100644
index 000000000000..b35ea6062596
--- /dev/null
+++ b/include/media/media-dev-allocator.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * media-dev-allocator.h - Media Controller Device Allocator API
+ *
+ * Copyright (c) 2019 Shuah Khan <shuah@kernel.org>
+ *
+ * Credits: Suggested by Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+/*
+ * This file adds a global ref-counted Media Controller Device Instance API.
+ * A system wide global media device list is managed and each media device
+ * includes a kref count. The last put on the media device releases the media
+ * device instance.
+ */
+
+#ifndef _MEDIA_DEV_ALLOCATOR_H
+#define _MEDIA_DEV_ALLOCATOR_H
+
+struct usb_device;
+
+#if defined(CONFIG_MEDIA_CONTROLLER) && defined(CONFIG_USB)
+/**
+ * media_device_usb_allocate() - Allocate and return struct &media device
+ *
+ * @udev: struct &usb_device pointer
+ * @module_name: should be filled with %KBUILD_MODNAME
+ * @owner: struct module pointer %THIS_MODULE for the driver.
+ * %THIS_MODULE is null for a built-in driver.
+ * It is safe even when %THIS_MODULE is null.
+ *
+ * This interface should be called to allocate a Media Device when multiple
+ * drivers share usb_device and the media device. This interface allocates
+ * &media_device structure and calls media_device_usb_init() to initialize
+ * it.
+ *
+ */
+struct media_device *media_device_usb_allocate(struct usb_device *udev,
+ const char *module_name,
+ struct module *owner);
+/**
+ * media_device_delete() - Release media device. Calls kref_put().
+ *
+ * @mdev: struct &media_device pointer
+ * @module_name: should be filled with %KBUILD_MODNAME
+ * @owner: struct module pointer %THIS_MODULE for the driver.
+ * %THIS_MODULE is null for a built-in driver.
+ * It is safe even when %THIS_MODULE is null.
+ *
+ * This interface should be called to put Media Device Instance kref.
+ */
+void media_device_delete(struct media_device *mdev, const char *module_name,
+ struct module *owner);
+#else
+static inline struct media_device *media_device_usb_allocate(
+ struct usb_device *udev, const char *module_name,
+ struct module *owner)
+ { return NULL; }
+static inline void media_device_delete(
+ struct media_device *mdev, const char *module_name,
+ struct module *owner) { }
+#endif /* CONFIG_MEDIA_CONTROLLER && CONFIG_USB */
+#endif /* _MEDIA_DEV_ALLOCATOR_H */
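The allocator lets several USB drivers bound to the same physical device share one ref-counted media_device. A hedged probe/disconnect sketch; the NULL check mirrors the stub above and error handling is trimmed.

	/* probe: get (or create) the shared media_device for this udev */
	struct media_device *mdev;

	mdev = media_device_usb_allocate(udev, KBUILD_MODNAME, THIS_MODULE);
	if (!mdev)
		return -ENOMEM;
	/* ... register this driver's entities against mdev ... */

	/* disconnect: drop this driver's reference */
	media_device_delete(mdev, KBUILD_MODNAME, THIS_MODULE);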
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index e5f6960d92f6..3cbad42e3693 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -865,19 +865,6 @@ struct media_link *media_entity_find_link(struct media_pad *source,
struct media_pad *media_entity_remote_pad(const struct media_pad *pad);
/**
- * media_entity_get - Get a reference to the parent module
- *
- * @entity: The entity
- *
- * Get a reference to the parent media device module.
- *
- * The function will return immediately if @entity is %NULL.
- *
- * Return: returns a pointer to the entity on success or %NULL on failure.
- */
-struct media_entity *media_entity_get(struct media_entity *entity);
-
-/**
* media_entity_get_fwnode_pad - Get pad number from fwnode
*
* @entity: The entity
@@ -917,17 +904,6 @@ __must_check int media_graph_walk_init(
void media_graph_walk_cleanup(struct media_graph *graph);
/**
- * media_entity_put - Release the reference to the parent module
- *
- * @entity: The entity
- *
- * Release the reference count acquired by media_entity_get().
- *
- * The function will return immediately if @entity is %NULL.
- */
-void media_entity_put(struct media_entity *entity);
-
-/**
* media_graph_walk_start - Start walking the media graph at a
* given entity
*
diff --git a/include/media/media-request.h b/include/media/media-request.h
index bd36d7431698..3cd25a2717ce 100644
--- a/include/media/media-request.h
+++ b/include/media/media-request.h
@@ -198,7 +198,7 @@ void media_request_put(struct media_request *req);
* Get the request represented by @request_fd that is owned
* by the media device.
*
- * Return a -EACCES error pointer if requests are not supported
+ * Return a -EBADR error pointer if requests are not supported
* by this driver. Return -EINVAL if the request was not found.
* Return the pointer to the request if found: the caller will
* have to call @media_request_put when it finished using the
@@ -231,7 +231,7 @@ static inline void media_request_put(struct media_request *req)
static inline struct media_request *
media_request_get_by_fd(struct media_device *mdev, int request_fd)
{
- return ERR_PTR(-EACCES);
+ return ERR_PTR(-EBADR);
}
#endif
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index 5e684bb0d64c..367d983188f7 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -40,6 +40,7 @@
#define RC_PROTO_BIT_RCMM12 BIT_ULL(RC_PROTO_RCMM12)
#define RC_PROTO_BIT_RCMM24 BIT_ULL(RC_PROTO_RCMM24)
#define RC_PROTO_BIT_RCMM32 BIT_ULL(RC_PROTO_RCMM32)
+#define RC_PROTO_BIT_XBOX_DVD BIT_ULL(RC_PROTO_XBOX_DVD)
#define RC_PROTO_BIT_ALL \
(RC_PROTO_BIT_UNKNOWN | RC_PROTO_BIT_OTHER | \
@@ -55,7 +56,8 @@
RC_PROTO_BIT_RC6_MCE | RC_PROTO_BIT_SHARP | \
RC_PROTO_BIT_XMP | RC_PROTO_BIT_CEC | \
RC_PROTO_BIT_IMON | RC_PROTO_BIT_RCMM12 | \
- RC_PROTO_BIT_RCMM24 | RC_PROTO_BIT_RCMM32)
+ RC_PROTO_BIT_RCMM24 | RC_PROTO_BIT_RCMM32 | \
+ RC_PROTO_BIT_XBOX_DVD)
/* All rc protocols for which we have decoders */
#define RC_PROTO_BIT_ALL_IR_DECODER \
(RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC5X_20 | \
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 2b93cb281fa5..0a41bbecf3d3 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -392,4 +392,37 @@ int v4l2_s_parm_cap(struct video_device *vdev,
((u64)(a).numerator * (b).denominator OP \
(u64)(b).numerator * (a).denominator)
+/* ------------------------------------------------------------------------- */
+
+/* Pixel format and FourCC helpers */
+
+/**
+ * struct v4l2_format_info - information about a V4L2 format
+ * @format: 4CC format identifier (V4L2_PIX_FMT_*)
+ * @mem_planes: Number of memory planes, which includes the alpha plane (1 to 4).
+ * @comp_planes: Number of component planes, which includes the alpha plane (1 to 4).
+ * @bpp: Array of per-plane bytes per pixel
+ * @hdiv: Horizontal chroma subsampling factor
+ * @vdiv: Vertical chroma subsampling factor
+ * @block_w: Per-plane macroblock pixel width (optional)
+ * @block_h: Per-plane macroblock pixel height (optional)
+ */
+struct v4l2_format_info {
+ u32 format;
+ u8 mem_planes;
+ u8 comp_planes;
+ u8 bpp[4];
+ u8 hdiv;
+ u8 vdiv;
+ u8 block_w[4];
+ u8 block_h[4];
+};
+
+const struct v4l2_format_info *v4l2_format_info(u32 format);
+
+int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, int pixelformat,
+ int width, int height);
+int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, int pixelformat,
+ int width, int height);
+
#endif /* V4L2_COMMON_H_ */
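v4l2_format_info() and the two fill helpers take the per-plane bookkeeping out of drivers. A hedged sketch that derives bytesperline/sizeimage for a 1280x720 NV12 buffer (resolution and format are illustrative):

	const struct v4l2_format_info *info;
	struct v4l2_pix_format pix;
	int ret;

	info = v4l2_format_info(V4L2_PIX_FMT_NV12);
	if (!info)
		return -EINVAL;		/* unknown fourcc */

	ret = v4l2_fill_pixfmt(&pix, V4L2_PIX_FMT_NV12, 1280, 720);
	if (ret)
		return ret;
	/* pix.bytesperline and pix.sizeimage are now derived from 'info' */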
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index e5cae37ced2d..ee026387f513 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -23,10 +23,11 @@
#include <media/media-request.h>
/*
- * Include the mpeg2 stateless codec compound control definitions.
+ * Include the mpeg2 and fwht stateless codec compound control definitions.
* This will move to the public headers once this API is fully stable.
*/
#include <media/mpeg2-ctrls.h>
+#include <media/fwht-ctrls.h>
/* forward references */
struct file;
@@ -49,6 +50,7 @@ struct poll_table_struct;
* @p_char: Pointer to a string.
* @p_mpeg2_slice_params: Pointer to a MPEG2 slice parameters structure.
* @p_mpeg2_quantization: Pointer to a MPEG2 quantization data structure.
+ * @p_fwht_params: Pointer to a FWHT stateless parameters structure.
* @p: Pointer to a compound value.
*/
union v4l2_ctrl_ptr {
@@ -60,6 +62,7 @@ union v4l2_ctrl_ptr {
char *p_char;
struct v4l2_ctrl_mpeg2_slice_params *p_mpeg2_slice_params;
struct v4l2_ctrl_mpeg2_quantization *p_mpeg2_quantization;
+ struct v4l2_ctrl_fwht_params *p_fwht_params;
void *p;
};
@@ -934,7 +937,7 @@ s32 v4l2_ctrl_g_ctrl(struct v4l2_ctrl *ctrl);
* __v4l2_ctrl_s_ctrl() - Unlocked variant of v4l2_ctrl_s_ctrl().
*
* @ctrl: The control.
- * @val: TheControls name new value.
+ * @val: The new value.
*
* This sets the control's new value safely by going through the control
* framework. This function assumes the control's handler is already locked,
@@ -1039,7 +1042,7 @@ int __v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s);
*
* @ctrl: The control.
* @s: The new string.
- *Controls name
+ *
* This sets the control's new string safely by going through the control
* framework. This function will lock the control's handler, so it cannot be
* used from within the &v4l2_ctrl_ops functions.
@@ -1127,7 +1130,7 @@ __poll_t v4l2_ctrl_poll(struct file *file, struct poll_table_struct *wait);
* applying control values in a request is only applicable to memory-to-memory
* devices.
*/
-void v4l2_ctrl_request_setup(struct media_request *req,
+int v4l2_ctrl_request_setup(struct media_request *req,
struct v4l2_ctrl_handler *parent);
/**
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 349e1c18cf48..a7fa5b80915a 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -755,7 +755,17 @@ struct v4l2_subdev_ops {
*
* @open: called when the subdev device node is opened by an application.
*
- * @close: called when the subdev device node is closed.
+ * @close: called when the subdev device node is closed. Please note that
+ * it is possible for @close to be called after @unregistered!
+ *
+ * @release: called when the last user of the subdev device is gone. This
+ * happens after the @unregistered callback and when the last open
+ * filehandle to the v4l-subdevX device node was closed. If no device
+ * node was created for this sub-device, then the @release callback
+ * is called right after the @unregistered callback.
+ * The @release callback is typically used to free the memory containing
+ * the v4l2_subdev structure. It is almost certainly required for any
+ * sub-device that sets the V4L2_SUBDEV_FL_HAS_DEVNODE flag.
*
* .. note::
* Never call this from drivers, only the v4l2 framework can call
@@ -766,6 +776,7 @@ struct v4l2_subdev_internal_ops {
void (*unregistered)(struct v4l2_subdev *sd);
int (*open)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
int (*close)(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh);
+ void (*release)(struct v4l2_subdev *sd);
};
#define V4L2_SUBDEV_NAME_SIZE 32
@@ -899,9 +910,11 @@ struct v4l2_subdev {
*
* @vfh: pointer to &struct v4l2_fh
* @pad: pointer to &struct v4l2_subdev_pad_config
+ * @owner: module pointer to the owner of this file handle
*/
struct v4l2_subdev_fh {
struct v4l2_fh vfh;
+ struct module *owner;
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
struct v4l2_subdev_pad_config *pad;
#endif
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 910f3d469005..22f3ff76a8b5 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -207,7 +207,6 @@ enum vb2_io_modes {
* @VB2_BUF_STATE_IN_REQUEST: buffer is queued in media request.
* @VB2_BUF_STATE_PREPARING: buffer is being prepared in videobuf.
* @VB2_BUF_STATE_QUEUED: buffer queued in videobuf, but not in driver.
- * @VB2_BUF_STATE_REQUEUEING: re-queue a buffer to the driver.
* @VB2_BUF_STATE_ACTIVE: buffer queued in driver and possibly used
* in a hardware operation.
* @VB2_BUF_STATE_DONE: buffer returned from driver to videobuf, but
@@ -221,7 +220,6 @@ enum vb2_buffer_state {
VB2_BUF_STATE_IN_REQUEST,
VB2_BUF_STATE_PREPARING,
VB2_BUF_STATE_QUEUED,
- VB2_BUF_STATE_REQUEUEING,
VB2_BUF_STATE_ACTIVE,
VB2_BUF_STATE_DONE,
VB2_BUF_STATE_ERROR,
@@ -384,10 +382,10 @@ struct vb2_buffer {
* driver can return an error if hardware fails, in that
* case all buffers that have been already given by
* the @buf_queue callback are to be returned by the driver
- * by calling vb2_buffer_done() with %VB2_BUF_STATE_QUEUED
- * or %VB2_BUF_STATE_REQUEUEING. If you need a minimum
- * number of buffers before you can start streaming, then
- * set &vb2_queue->min_buffers_needed. If that is non-zero
+ * by calling vb2_buffer_done() with %VB2_BUF_STATE_QUEUED.
+ * If you need a minimum number of buffers before you can
+ * start streaming, then set
+ * &vb2_queue->min_buffers_needed. If that is non-zero
* then @start_streaming won't be called until at least
* that many buffers have been queued up by userspace.
* @stop_streaming: called when 'streaming' state must be disabled; driver
@@ -484,6 +482,8 @@ struct vb2_buf_ops {
* has not been called. This is a vb1 idiom that has been adopted
* also by vb2.
* @supports_requests: this queue supports the Request API.
+ * @requires_requests: this queue requires the Request API. If this is set to 1,
+ * then supports_requests must be set to 1 as well.
* @uses_qbuf: qbuf was used directly for this queue. Set to 1 the first
* time this is called. Set to 0 when the queue is canceled.
* If this is 1, then you cannot queue buffers from a request.
@@ -558,6 +558,7 @@ struct vb2_queue {
unsigned allow_zero_bytesused:1;
unsigned quirk_poll_must_check_waiting_for_buffers:1;
unsigned supports_requests:1;
+ unsigned requires_requests:1;
unsigned uses_qbuf:1;
unsigned uses_requests:1;
@@ -595,6 +596,7 @@ struct vb2_queue {
unsigned int start_streaming_called:1;
unsigned int error:1;
unsigned int waiting_for_buffers:1;
+ unsigned int waiting_in_dqbuf:1;
unsigned int is_multiplanar:1;
unsigned int is_output:1;
unsigned int copy_timestamp:1;
@@ -648,9 +650,7 @@ void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no);
* @state: state of the buffer, as defined by &enum vb2_buffer_state.
* Either %VB2_BUF_STATE_DONE if the operation finished
* successfully, %VB2_BUF_STATE_ERROR if the operation finished
- * with an error or any of %VB2_BUF_STATE_QUEUED or
- * %VB2_BUF_STATE_REQUEUEING if the driver wants to
- * requeue buffers (see below).
+ * with an error or %VB2_BUF_STATE_QUEUED.
*
* This function should be called by the driver after a hardware operation on
* a buffer is finished and the buffer may be returned to userspace. The driver
@@ -661,12 +661,7 @@ void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no);
* While streaming a buffer can only be returned in state DONE or ERROR.
* The &vb2_ops->start_streaming op can also return them in case the DMA engine
* cannot be started for some reason. In that case the buffers should be
- * returned with state QUEUED or REQUEUEING to put them back into the queue.
- *
- * %VB2_BUF_STATE_REQUEUEING is like %VB2_BUF_STATE_QUEUED, but it also calls
- * &vb2_ops->buf_queue to queue buffers back to the driver. Note that calling
- * vb2_buffer_done(..., VB2_BUF_STATE_REQUEUEING) from interrupt context will
- * result in &vb2_ops->buf_queue being called in interrupt context as well.
+ * returned with state QUEUED to put them back into the queue.
*/
void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state);
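A hedged sketch of a driver's start_streaming error path now that VB2_BUF_STATE_REQUEUEING is gone; foo_hw_start, struct foo_queue and struct foo_buffer are hypothetical:

struct foo_buffer {
	struct vb2_v4l2_buffer vb;
	struct list_head list;
};

static int foo_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct foo_queue *foo = vb2_get_drv_priv(q);
	struct foo_buffer *buf, *tmp;
	int ret;

	ret = foo_hw_start(foo);	/* hypothetical hardware start */
	if (ret) {
		/* Return every buffer already handed over via .buf_queue
		 * back to vb2 in the QUEUED state.
		 */
		list_for_each_entry_safe(buf, tmp, &foo->buf_list, list) {
			list_del(&buf->list);
			vb2_buffer_done(&buf->vb.vb2_buf,
					VB2_BUF_STATE_QUEUED);
		}
	}
	return ret;
}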
diff --git a/include/media/vsp1.h b/include/media/vsp1.h
index 1cf868360701..cc1b0d42ce95 100644
--- a/include/media/vsp1.h
+++ b/include/media/vsp1.h
@@ -17,6 +17,9 @@ struct device;
int vsp1_du_init(struct device *dev);
+#define VSP1_DU_STATUS_COMPLETE BIT(0)
+#define VSP1_DU_STATUS_WRITEBACK BIT(1)
+
/**
* struct vsp1_du_lif_config - VSP LIF configuration
* @width: output frame width
@@ -32,7 +35,7 @@ struct vsp1_du_lif_config {
unsigned int height;
bool interlaced;
- void (*callback)(void *data, bool completed, u32 crc);
+ void (*callback)(void *data, unsigned int status, u32 crc);
void *callback_data;
};
@@ -82,11 +85,25 @@ struct vsp1_du_crc_config {
};
/**
+ * struct vsp1_du_writeback_config - VSP writeback configuration parameters
+ * @pixelformat: plane pixel format (V4L2 4CC)
+ * @pitch: line pitch in bytes for the first plane
+ * @mem: DMA memory address for each plane of the frame buffer
+ */
+struct vsp1_du_writeback_config {
+ u32 pixelformat;
+ unsigned int pitch;
+ dma_addr_t mem[3];
+};
+
+/**
* struct vsp1_du_atomic_pipe_config - VSP atomic pipe configuration parameters
* @crc: CRC computation configuration
+ * @writeback: writeback configuration
*/
struct vsp1_du_atomic_pipe_config {
struct vsp1_du_crc_config crc;
+ struct vsp1_du_writeback_config writeback;
};
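As a rough illustration of the new status bitmask (replacing the old completed boolean), a DU frame-end callback could look like this; struct foo_crtc and the foo_crtc_* helpers are hypothetical:

struct foo_crtc {
	struct drm_crtc base;
	/* ... */
};

static void foo_du_complete(void *data, unsigned int status, u32 crc)
{
	struct foo_crtc *crtc = data;

	if (status & VSP1_DU_STATUS_COMPLETE)
		foo_crtc_finish_page_flip(crtc);	/* hypothetical */

	if (status & VSP1_DU_STATUS_WRITEBACK)
		foo_crtc_writeback_complete(crtc);	/* hypothetical */

	drm_crtc_add_crc_entry(&crtc->base, false, 0, &crc);
}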
void vsp1_du_atomic_begin(struct device *dev, unsigned int pipe_index);
diff --git a/include/misc/charlcd.h b/include/misc/charlcd.h
index 23f61850f363..1832402324ce 100644
--- a/include/misc/charlcd.h
+++ b/include/misc/charlcd.h
@@ -35,6 +35,7 @@ struct charlcd_ops {
};
struct charlcd *charlcd_alloc(unsigned int drvdata_size);
+void charlcd_free(struct charlcd *lcd);
int charlcd_register(struct charlcd *lcd);
int charlcd_unregister(struct charlcd *lcd);
diff --git a/include/misc/ocxl.h b/include/misc/ocxl.h
index 9ff6ddc28e22..5c4b4916e6be 100644
--- a/include/misc/ocxl.h
+++ b/include/misc/ocxl.h
@@ -16,11 +16,7 @@
#define OCXL_AFU_NAME_SZ (24+1) /* add 1 for NULL termination */
-/*
- * The following 2 structures are a fairly generic way of representing
- * the configuration data for a function and AFU, as read from the
- * configuration space.
- */
+
struct ocxl_afu_config {
u8 idx;
int dvsec_afu_control_pos; /* offset of AFU control DVSEC */
@@ -49,37 +45,314 @@ struct ocxl_fn_config {
s8 max_afu_index;
};
-/*
- * Read the configuration space of a function and fill in a
- * ocxl_fn_config structure with all the function details
+enum ocxl_endian {
+ OCXL_BIG_ENDIAN = 0, /**< AFU data is big-endian */
+ OCXL_LITTLE_ENDIAN = 1, /**< AFU data is little-endian */
+ OCXL_HOST_ENDIAN = 2, /**< AFU data is the same endianness as the host */
+};
+
+// These are opaque outside the ocxl driver
+struct ocxl_afu;
+struct ocxl_fn;
+struct ocxl_context;
+
+// Device detection & initialisation
+
+/**
+ * Open an OpenCAPI function on an OpenCAPI device
+ *
+ * @dev: The PCI device that contains the function
+ *
+ * Returns an opaque pointer to the function, or an error pointer (check with IS_ERR)
*/
-extern int ocxl_config_read_function(struct pci_dev *dev,
- struct ocxl_fn_config *fn);
+struct ocxl_fn *ocxl_function_open(struct pci_dev *dev);
-/*
- * Check if an AFU index is valid for the given function.
+/**
+ * Get the list of AFUs associated with a PCI function device
+ *
+ * Returns a list of struct ocxl_afu *
+ *
+ * @fn: The OpenCAPI function containing the AFUs
+ */
+struct list_head *ocxl_function_afu_list(struct ocxl_fn *fn);
+
+/**
+ * Fetch an AFU instance from an OpenCAPI function
+ *
+ * @fn: The OpenCAPI function to get the AFU from
+ * @afu_idx: The index of the AFU to get
+ *
+ * If successful, the AFU should be released with ocxl_afu_put()
+ *
+ * Returns a pointer to the AFU, or NULL on error
+ */
+struct ocxl_afu *ocxl_function_fetch_afu(struct ocxl_fn *fn, u8 afu_idx);
+
+/**
+ * Take a reference to an AFU
+ *
+ * @afu: The AFU to increment the reference count on
+ */
+void ocxl_afu_get(struct ocxl_afu *afu);
+
+/**
+ * Release a reference to an AFU
+ *
+ * @afu: The AFU to decrement the reference count on
+ */
+void ocxl_afu_put(struct ocxl_afu *afu);
+
+
+/**
+ * Get the configuration information for an OpenCAPI function
+ *
+ * @fn: The OpenCAPI function to get the config for
+ *
+ * Returns the function config, or NULL on error
+ */
+const struct ocxl_fn_config *ocxl_function_config(struct ocxl_fn *fn);
+
+/**
+ * Close an OpenCAPI function
+ *
+ * This will free any AFUs previously retrieved from the function, and
+ * detach any associated contexts. The contexts must be freed by the caller.
+ *
+ * @fn: The OpenCAPI function to close
+ *
+ */
+void ocxl_function_close(struct ocxl_fn *fn);
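A minimal, hedged sketch of how an external driver might use this part of the interface from its PCI probe routine; foo_probe and the choice of AFU index 0 are illustrative only:

static int foo_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct ocxl_fn *fn;
	struct ocxl_afu *afu;

	fn = ocxl_function_open(dev);
	if (IS_ERR(fn))
		return PTR_ERR(fn);

	/* Fetch AFU index 0; drop the reference with ocxl_afu_put() later. */
	afu = ocxl_function_fetch_afu(fn, 0);
	if (!afu) {
		ocxl_function_close(fn);
		return -ENODEV;
	}

	pci_set_drvdata(dev, afu);
	return 0;
}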
+
+// Context allocation
+
+/**
+ * Allocate an OpenCAPI context
+ *
+ * @context: The OpenCAPI context to allocate, must be freed with ocxl_context_free
+ * @afu: The AFU the context belongs to
+ * @mapping: The mapping to unmap when the context is closed (may be NULL)
+ */
+int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu,
+ struct address_space *mapping);
+
+/**
+ * Free an OpenCAPI context
+ *
+ * @ctx: The OpenCAPI context to free
+ */
+void ocxl_context_free(struct ocxl_context *ctx);
+
+/**
+ * Grant access to an MM to an OpenCAPI context
+ * @ctx: The OpenCAPI context to attach
+ * @amr: The value of the AMR register to restrict access
+ * @mm: The mm to attach to the context
+ *
+ * Returns 0 on success, negative on failure
+ */
+int ocxl_context_attach(struct ocxl_context *ctx, u64 amr,
+ struct mm_struct *mm);
+
+/**
+ * Detach an MM from an OpenCAPI context
+ * @ctx: The OpenCAPI context to detach
+ *
+ * Returns 0 on success, negative on failure
+ */
+int ocxl_context_detach(struct ocxl_context *ctx);
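A hedged example of allocating a context and attaching the current task's address space to it; foo_open_context is illustrative, as is the amr value of 0 (no access restriction):

static int foo_open_context(struct ocxl_afu *afu, struct ocxl_context **out)
{
	struct ocxl_context *ctx;
	int rc;

	/* No device node mapping to invalidate on close. */
	rc = ocxl_context_alloc(&ctx, afu, NULL);
	if (rc)
		return rc;

	rc = ocxl_context_attach(ctx, 0 /* amr */, current->mm);
	if (rc) {
		ocxl_context_free(ctx);
		return rc;
	}

	*out = ctx;
	return 0;
}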
+
+// AFU IRQs
+
+/**
+ * Allocate an IRQ associated with an AFU context
+ * @ctx: the AFU context
+ * @irq_id: out, the IRQ ID
+ *
+ * Returns 0 on success, negative on failure
+ */
+extern int ocxl_afu_irq_alloc(struct ocxl_context *ctx, int *irq_id);
+
+/**
+ * Frees an IRQ associated with an AFU context
+ * @ctx: the AFU context
+ * @irq_id: the IRQ ID
+ *
+ * Returns 0 on success, negative on failure
+ */
+extern int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id);
+
+/**
+ * Gets the address of the trigger page for an IRQ
+ * This can then be provided to an AFU which will write to that
+ * page to trigger the IRQ.
+ * @ctx: The AFU context that the IRQ is associated with
+ * @irq_id: The IRQ ID
*
- * AFU indexes can be sparse, so a driver should check all indexes up
- * to the maximum found in the function description
+ * Returns the trigger page address, or 0 if the IRQ is not valid
*/
-extern int ocxl_config_check_afu_index(struct pci_dev *dev,
- struct ocxl_fn_config *fn, int afu_idx);
+extern u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, int irq_id);
+
+/**
+ * Provide a callback to be called when an IRQ is triggered
+ * @ctx: The AFU context that the IRQ is associated with
+ * @irq_id: The IRQ ID
+ * @handler: the callback to be called when the IRQ is triggered
+ * @free_private: the callback to be called when the IRQ is freed (may be NULL)
+ * @private: Private data to be passed to the callbacks
+ *
+ * Returns 0 on success, negative on failure
+ */
+int ocxl_irq_set_handler(struct ocxl_context *ctx, int irq_id,
+ irqreturn_t (*handler)(void *private),
+ void (*free_private)(void *private),
+ void *private);
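A sketch, assuming a hypothetical struct foo_ctx with a completion, of allocating an AFU interrupt, hooking up a handler and fetching the trigger page address that is later handed to the AFU:

struct foo_ctx {
	struct completion done;
	u64 trigger_addr;
};

static irqreturn_t foo_irq_handler(void *private)
{
	struct foo_ctx *foo = private;

	complete(&foo->done);
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct ocxl_context *ctx, struct foo_ctx *foo)
{
	int irq_id, rc;

	rc = ocxl_afu_irq_alloc(ctx, &irq_id);
	if (rc)
		return rc;

	rc = ocxl_irq_set_handler(ctx, irq_id, foo_irq_handler, NULL, foo);
	if (rc) {
		ocxl_afu_irq_free(ctx, irq_id);
		return rc;
	}

	/* The AFU triggers the interrupt by writing to this address. */
	foo->trigger_addr = ocxl_afu_irq_get_addr(ctx, irq_id);
	return 0;
}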
+
+// AFU Metadata
+
+/**
+ * Get a pointer to the config for an AFU
+ *
+ * @afu: a pointer to the AFU to get the config for
+ *
+ * Returns a pointer to the AFU config
+ */
+struct ocxl_afu_config *ocxl_afu_config(struct ocxl_afu *afu);
+
+/**
+ * Assign opaque hardware specific information to an OpenCAPI AFU.
+ *
+ * @afu: The AFU to assign the private data to
+ * @private: the opaque hardware specific information to assign to the AFU
+ */
+void ocxl_afu_set_private(struct ocxl_afu *afu, void *private);
+
+/**
+ * Fetch the hardware specific information associated with an external OpenCAPI
+ * AFU. This may be consumed by an external OpenCAPI driver.
+ *
+ * @afu: The AFU
+ *
+ * Returns the opaque pointer associated with the device, or NULL if not set
+ */
+void *ocxl_afu_get_private(struct ocxl_afu *dev);
+
+// Global MMIO
+/**
+ * Read a 32 bit value from global MMIO
+ *
+ * @afu: The AFU
+ * @offset: The Offset from the start of MMIO
+ * @endian: the endianness that the MMIO data is in
+ * @val: returns the value
+ *
+ * Returns 0 for success, negative on error
+ */
+int ocxl_global_mmio_read32(struct ocxl_afu *afu, size_t offset,
+ enum ocxl_endian endian, u32 *val);
+
+/**
+ * Read a 64 bit value from global MMIO
+ *
+ * @afu: The AFU
+ * @offset: The Offset from the start of MMIO
+ * @endian: the endianness that the MMIO data is in
+ * @val: returns the value
+ *
+ * Returns 0 for success, negative on error
+ */
+int ocxl_global_mmio_read64(struct ocxl_afu *afu, size_t offset,
+ enum ocxl_endian endian, u64 *val);
+
+/**
+ * Write a 32 bit value to global MMIO
+ *
+ * @afu: The AFU
+ * @offset: The Offset from the start of MMIO
+ * @endian: the endianness that the MMIO data is in
+ * @val: The value to write
+ *
+ * Returns 0 for success, negative on error
+ */
+int ocxl_global_mmio_write32(struct ocxl_afu *afu, size_t offset,
+ enum ocxl_endian endian, u32 val);
+
+/**
+ * Write a 64 bit value to global MMIO
+ *
+ * @afu: The AFU
+ * @offset: The Offset from the start of MMIO
+ * @endian: the endianness that the MMIO data is in
+ * @val: The value to write
+ *
+ * Returns 0 for success, negative on error
+ */
+int ocxl_global_mmio_write64(struct ocxl_afu *afu, size_t offset,
+ enum ocxl_endian endian, u64 val);
+
+/**
+ * Set bits in a 32 bit global MMIO register
+ *
+ * @afu: The AFU
+ * @offset: The Offset from the start of MMIO
+ * @endian: the endianness that the MMIO data is in
+ * @mask: a mask of the bits to set
+ *
+ * Returns 0 for success, negative on error
+ */
+int ocxl_global_mmio_set32(struct ocxl_afu *afu, size_t offset,
+ enum ocxl_endian endian, u32 mask);
+
+/**
+ * Set bits in a 64 bit global MMIO register
+ *
+ * @afu: The AFU
+ * @offset: The Offset from the start of MMIO
+ * @endian: the endianness that the MMIO data is in
+ * @mask: a mask of the bits to set
+ *
+ * Returns 0 for success, negative on error
+ */
+int ocxl_global_mmio_set64(struct ocxl_afu *afu, size_t offset,
+ enum ocxl_endian endian, u64 mask);
+
+/**
+ * Clear bits in a 32 bit global MMIO register
+ *
+ * @afu: The AFU
+ * @offset: The Offset from the start of MMIO
+ * @endian: the endianness that the MMIO data is in
+ * @mask: a mask of the bits to clear
+ *
+ * Returns 0 for success, negative on error
+ */
+int ocxl_global_mmio_clear32(struct ocxl_afu *afu, size_t offset,
+ enum ocxl_endian endian, u32 mask);
+
+/**
+ * Clear bits in a 64 bit global MMIO register
+ *
+ * @afu: The AFU
+ * @offset: The Offset from the start of MMIO
+ * @endian: the endianness that the MMIO data is in
+ * @mask: a mask of the bits to clear
+ *
+ * Returns 0 for success, negative on error
+ */
+int ocxl_global_mmio_clear64(struct ocxl_afu *afu, size_t offset,
+ enum ocxl_endian endian, u64 mask);
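Hedged usage example of the global MMIO accessors; FOO_GLOBAL_CTRL and its enable bit are hypothetical register definitions for a little-endian AFU:

#define FOO_GLOBAL_CTRL		0x08	/* hypothetical register offset */
#define FOO_GLOBAL_CTRL_ENABLE	BIT(0)

static int foo_enable(struct ocxl_afu *afu)
{
	u64 ctrl;
	int rc;

	rc = ocxl_global_mmio_read64(afu, FOO_GLOBAL_CTRL,
				     OCXL_LITTLE_ENDIAN, &ctrl);
	if (rc)
		return rc;

	if (ctrl & FOO_GLOBAL_CTRL_ENABLE)
		return 0;	/* already enabled */

	return ocxl_global_mmio_set64(afu, FOO_GLOBAL_CTRL,
				      OCXL_LITTLE_ENDIAN,
				      FOO_GLOBAL_CTRL_ENABLE);
}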
+
+// Functions left here are for compatibility with the cxlflash driver
/*
* Read the configuration space of a function for the AFU specified by
 * the index 'afu_idx'. Fills in an ocxl_afu_config structure
*/
-extern int ocxl_config_read_afu(struct pci_dev *dev,
+int ocxl_config_read_afu(struct pci_dev *dev,
struct ocxl_fn_config *fn,
struct ocxl_afu_config *afu,
u8 afu_idx);
/*
- * Get the max PASID value that can be used by the function
- */
-extern int ocxl_config_get_pasid_info(struct pci_dev *dev, int *count);
-
-/*
* Tell an AFU, by writing in the configuration space, the PASIDs that
* it can use. Range starts at 'pasid_base' and its size is a multiple
* of 2
@@ -87,7 +360,7 @@ extern int ocxl_config_get_pasid_info(struct pci_dev *dev, int *count);
* 'afu_control_offset' is the offset of the AFU control DVSEC which
* can be found in the function configuration
*/
-extern void ocxl_config_set_afu_pasid(struct pci_dev *dev,
+void ocxl_config_set_afu_pasid(struct pci_dev *dev,
int afu_control_offset,
int pasid_base, u32 pasid_count_log);
@@ -98,7 +371,7 @@ extern void ocxl_config_set_afu_pasid(struct pci_dev *dev,
* 'supported' is the total number of actags desired by all the AFUs
* of the function.
*/
-extern int ocxl_config_get_actag_info(struct pci_dev *dev,
+int ocxl_config_get_actag_info(struct pci_dev *dev,
u16 *base, u16 *enabled, u16 *supported);
/*
@@ -108,7 +381,7 @@ extern int ocxl_config_get_actag_info(struct pci_dev *dev,
* 'func_offset' is the offset of the Function DVSEC that can found in
* the function configuration
*/
-extern void ocxl_config_set_actag(struct pci_dev *dev, int func_offset,
+void ocxl_config_set_actag(struct pci_dev *dev, int func_offset,
u32 actag_base, u32 actag_count);
/*
@@ -118,7 +391,7 @@ extern void ocxl_config_set_actag(struct pci_dev *dev, int func_offset,
* 'afu_control_offset' is the offset of the AFU control DVSEC for the
* desired AFU. It can be found in the AFU configuration
*/
-extern void ocxl_config_set_afu_actag(struct pci_dev *dev,
+void ocxl_config_set_afu_actag(struct pci_dev *dev,
int afu_control_offset,
int actag_base, int actag_count);
@@ -128,7 +401,7 @@ extern void ocxl_config_set_afu_actag(struct pci_dev *dev,
* 'afu_control_offset' is the offset of the AFU control DVSEC for the
* desired AFU. It can be found in the AFU configuration
*/
-extern void ocxl_config_set_afu_state(struct pci_dev *dev,
+void ocxl_config_set_afu_state(struct pci_dev *dev,
int afu_control_offset, int enable);
/*
@@ -139,7 +412,7 @@ extern void ocxl_config_set_afu_state(struct pci_dev *dev,
* between the host and device, and set the Transaction Layer on both
* accordingly.
*/
-extern int ocxl_config_set_TL(struct pci_dev *dev, int tl_dvsec);
+int ocxl_config_set_TL(struct pci_dev *dev, int tl_dvsec);
/*
* Request an AFU to terminate a PASID.
@@ -152,10 +425,17 @@ extern int ocxl_config_set_TL(struct pci_dev *dev, int tl_dvsec);
* 'afu_control_offset' is the offset of the AFU control DVSEC for the
* desired AFU. It can be found in the AFU configuration
*/
-extern int ocxl_config_terminate_pasid(struct pci_dev *dev,
+int ocxl_config_terminate_pasid(struct pci_dev *dev,
int afu_control_offset, int pasid);
/*
+ * Read the configuration space of a function and fill in an
+ * ocxl_fn_config structure with all the function details
+ */
+int ocxl_config_read_function(struct pci_dev *dev,
+ struct ocxl_fn_config *fn);
+
+/*
* Set up the opencapi link for the function.
*
* When called for the first time for a link, it sets up the Shared
@@ -165,13 +445,13 @@ extern int ocxl_config_terminate_pasid(struct pci_dev *dev,
* Returns a 'link handle' that should be used for further calls for
* the link
*/
-extern int ocxl_link_setup(struct pci_dev *dev, int PE_mask,
+int ocxl_link_setup(struct pci_dev *dev, int PE_mask,
void **link_handle);
/*
* Remove the association between the function and its link.
*/
-extern void ocxl_link_release(struct pci_dev *dev, void *link_handle);
+void ocxl_link_release(struct pci_dev *dev, void *link_handle);
/*
* Add a Process Element to the Shared Process Area for a link.
@@ -183,24 +463,15 @@ extern void ocxl_link_release(struct pci_dev *dev, void *link_handle);
* 'xsl_err_data' is an argument passed to the above callback, if
* defined
*/
-extern int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
+int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
u64 amr, struct mm_struct *mm,
void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
void *xsl_err_data);
-/**
- * Update values within a Process Element
- *
- * link_handle: the link handle associated with the process element
- * pasid: the PASID for the AFU context
- * tid: the new thread id for the process element
- */
-extern int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid);
-
/*
* Remove a Process Element from the Shared Process Area for a link
*/
-extern int ocxl_link_remove_pe(void *link_handle, int pasid);
+int ocxl_link_remove_pe(void *link_handle, int pasid);
/*
* Allocate an AFU interrupt associated to the link.
@@ -212,12 +483,12 @@ extern int ocxl_link_remove_pe(void *link_handle, int pasid);
* interrupt. It is an MMIO address which needs to be remapped (one
* page).
*/
-extern int ocxl_link_irq_alloc(void *link_handle, int *hw_irq,
+int ocxl_link_irq_alloc(void *link_handle, int *hw_irq,
u64 *obj_handle);
/*
* Free a previously allocated AFU interrupt
*/
-extern void ocxl_link_free_irq(void *link_handle, int hw_irq);
+void ocxl_link_free_irq(void *link_handle, int hw_irq);
#endif /* _MISC_OCXL_H_ */
diff --git a/include/net/act_api.h b/include/net/act_api.h
index c745e9ccfab2..c61a1bf4e3de 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -39,7 +39,7 @@ struct tc_action {
struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw;
struct gnet_stats_queue __percpu *cpu_qstats;
struct tc_cookie __rcu *act_cookie;
- struct tcf_chain *goto_chain;
+ struct tcf_chain __rcu *goto_chain;
};
#define tcf_index common.tcfa_index
#define tcf_refcnt common.tcfa_refcnt
@@ -90,7 +90,7 @@ struct tc_action_ops {
int (*lookup)(struct net *net, struct tc_action **a, u32 index);
int (*init)(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **act, int ovr,
- int bind, bool rtnl_held,
+ int bind, bool rtnl_held, struct tcf_proto *tp,
struct netlink_ext_ack *extack);
int (*walk)(struct net *, struct sk_buff *,
struct netlink_callback *, int,
@@ -181,6 +181,11 @@ int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
+int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
+ struct tcf_chain **handle,
+ struct netlink_ext_ack *newchain);
+struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
+ struct tcf_chain *newchain);
#endif /* CONFIG_NET_CLS_ACT */
static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 269ec27385e9..2f67ae854ff0 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -238,53 +238,6 @@ bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
void ipv6_mc_dad_complete(struct inet6_dev *idev);
-/* A stub used by vxlan module. This is ugly, ideally these
- * symbols should be built into the core kernel.
- */
-struct ipv6_stub {
- int (*ipv6_sock_mc_join)(struct sock *sk, int ifindex,
- const struct in6_addr *addr);
- int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
- const struct in6_addr *addr);
- int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
- struct dst_entry **dst, struct flowi6 *fl6);
- int (*ipv6_route_input)(struct sk_buff *skb);
-
- struct fib6_table *(*fib6_get_table)(struct net *net, u32 id);
- struct fib6_info *(*fib6_lookup)(struct net *net, int oif,
- struct flowi6 *fl6, int flags);
- struct fib6_info *(*fib6_table_lookup)(struct net *net,
- struct fib6_table *table,
- int oif, struct flowi6 *fl6,
- int flags);
- struct fib6_info *(*fib6_multipath_select)(const struct net *net,
- struct fib6_info *f6i,
- struct flowi6 *fl6, int oif,
- const struct sk_buff *skb,
- int strict);
- u32 (*ip6_mtu_from_fib6)(struct fib6_info *f6i, struct in6_addr *daddr,
- struct in6_addr *saddr);
-
- void (*udpv6_encap_enable)(void);
- void (*ndisc_send_na)(struct net_device *dev, const struct in6_addr *daddr,
- const struct in6_addr *solicited_addr,
- bool router, bool solicited, bool override, bool inc_opt);
- struct neigh_table *nd_tbl;
-};
-extern const struct ipv6_stub *ipv6_stub __read_mostly;
-
-/* A stub used by bpf helpers. Similarly ugly as ipv6_stub */
-struct ipv6_bpf_stub {
- int (*inet6_bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len,
- bool force_bind_address_no_port, bool with_lock);
- struct sock *(*udp6_lib_lookup)(struct net *net,
- const struct in6_addr *saddr, __be16 sport,
- const struct in6_addr *daddr, __be16 dport,
- int dif, int sdif, struct udp_table *tbl,
- struct sk_buff *skb);
-};
-extern const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
-
/*
* identify MLD packets for MLD filter exceptions
*/
@@ -425,6 +378,14 @@ static inline void in6_dev_hold(struct inet6_dev *idev)
refcount_inc(&idev->refcnt);
}
+/* called with rcu_read_lock held */
+static inline bool ip6_ignore_linkdown(const struct net_device *dev)
+{
+ const struct inet6_dev *idev = __in6_dev_get(dev);
+
+ return !!idev->cnf.ignore_routes_with_linkdown;
+}
+
void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp);
static inline void in6_ifa_put(struct inet6_ifaddr *ifp)
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 2bfb87eb98ce..78c856cba4f5 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -61,10 +61,12 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
rxrpc_user_attach_call_t, unsigned long, gfp_t,
unsigned int);
void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
-u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
+bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *,
+ u32 *);
void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
ktime_t *);
+bool rxrpc_kernel_call_is_complete(struct rxrpc_call *);
#endif /* _NET_RXRPC_H */
diff --git a/include/net/arp.h b/include/net/arp.h
index 977aabfcdc03..c8f580a0e6b1 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -18,6 +18,7 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32
return val * hash_rnd[0];
}
+#ifdef CONFIG_INET
static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
{
if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
@@ -25,6 +26,13 @@ static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev
return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
}
+#else
+static inline
+struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
+{
+ return NULL;
+}
+#endif
static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key)
{
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index fbba43e9bef5..9a5330eed794 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -282,6 +282,7 @@ enum {
HCI_FORCE_BREDR_SMP,
HCI_FORCE_STATIC_ADDR,
HCI_LL_RPA_RESOLUTION,
+ HCI_CMD_PENDING,
__HCI_NUM_FLAGS,
};
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 094e61e07030..05b1b96f4d9e 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -190,6 +190,9 @@ struct adv_info {
#define HCI_MAX_SHORT_NAME_LENGTH 10
+/* Min encryption key size to match with SMP */
+#define HCI_MIN_ENC_KEY_SIZE 7
+
/* Default LE RPA expiry time, 15 minutes */
#define HCI_DEFAULT_RPA_TIMEOUT (15 * 60)
diff --git a/include/net/bpf_sk_storage.h b/include/net/bpf_sk_storage.h
new file mode 100644
index 000000000000..b9dcb02e756b
--- /dev/null
+++ b/include/net/bpf_sk_storage.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019 Facebook */
+#ifndef _BPF_SK_STORAGE_H
+#define _BPF_SK_STORAGE_H
+
+struct sock;
+
+void bpf_sk_storage_free(struct sock *sk);
+
+extern const struct bpf_func_proto bpf_sk_storage_get_proto;
+extern const struct bpf_func_proto bpf_sk_storage_delete_proto;
+
+#endif /* _BPF_SK_STORAGE_H */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index bb307a11ee63..87dae868707e 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -485,6 +485,7 @@ struct vif_params {
* with the get_key() callback, must be in little endian,
* length given by @seq_len.
* @seq_len: length of @seq.
+ * @mode: key install mode (RX_TX, NO_TX or SET_TX)
*/
struct key_params {
const u8 *key;
@@ -492,6 +493,7 @@ struct key_params {
int key_len;
int seq_len;
u32 cipher;
+ enum nl80211_key_mode mode;
};
/**
@@ -973,6 +975,27 @@ enum station_parameters_apply_mask {
STATION_PARAM_APPLY_UAPSD = BIT(0),
STATION_PARAM_APPLY_CAPABILITY = BIT(1),
STATION_PARAM_APPLY_PLINK_STATE = BIT(2),
+ STATION_PARAM_APPLY_STA_TXPOWER = BIT(3),
+};
+
+/**
+ * struct sta_txpwr - station txpower configuration
+ *
+ * Used to configure txpower for station.
+ *
+ * @power: tx power (in dBm) to be used for sending data traffic. If tx power
+ * is not provided, the default per-interface tx power setting will be
+ * overriding. Driver should be picking up the lowest tx power, either tx
+ * power per-interface or per-station.
+ * @type: In particular if TPC %type is NL80211_TX_POWER_LIMITED then tx power
+ * will be less than or equal to specified from userspace, whereas if TPC
+ * %type is NL80211_TX_POWER_AUTOMATIC then it indicates default tx power.
+ * NL80211_TX_POWER_FIXED is not a valid configuration option for
+ * per peer TPC.
+ */
+struct sta_txpwr {
+ s16 power;
+ enum nl80211_tx_power_setting type;
};
/**
@@ -1047,6 +1070,7 @@ struct station_parameters {
const struct ieee80211_he_cap_elem *he_capa;
u8 he_capa_len;
u16 airtime_weight;
+ struct sta_txpwr txpwr;
};
/**
@@ -1327,6 +1351,7 @@ struct cfg80211_tid_stats {
* @fcs_err_count: number of packets (MPDUs) received from this station with
* an FCS error. This counter should be incremented only when TA of the
* received packet with an FCS error matches the peer MAC address.
+ * @airtime_link_metric: mesh airtime link metric.
*/
struct station_info {
u64 filled;
@@ -1381,6 +1406,8 @@ struct station_info {
u32 rx_mpdu_count;
u32 fcs_err_count;
+
+ u32 airtime_link_metric;
};
#if IS_ENABLED(CONFIG_CFG80211)
@@ -1832,11 +1859,19 @@ static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask)
* @bssid: BSSID to be matched; may be all-zero BSSID in case of SSID match
* or no match (RSSI only)
* @rssi_thold: don't report scan results below this threshold (in s32 dBm)
+ * @per_band_rssi_thold: Minimum rssi threshold for each band to be applied
+ * for filtering out received scan results. Drivers advertise support for
+ * band-specific rssi-based filtering through the feature capability
+ * %NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD. These band
+ * specific rssi thresholds take precedence over rssi_thold, if specified.
+ * If not specified for a band, that band is assigned the rssi_thold of the
+ * corresponding matchset.
*/
struct cfg80211_match_set {
struct cfg80211_ssid ssid;
u8 bssid[ETH_ALEN];
s32 rssi_thold;
+ s32 per_band_rssi_thold[NUM_NL80211_BANDS];
};
/**
@@ -3100,6 +3135,32 @@ struct cfg80211_pmsr_request {
};
/**
+ * struct cfg80211_update_owe_info - OWE Information
+ *
+ * This structure provides information needed for the drivers to offload OWE
+ * (Opportunistic Wireless Encryption) processing to user space.
+ *
+ * Commonly used across update_owe_info request and event interfaces.
+ *
+ * @peer: MAC address of the peer device for which the OWE processing
+ * has to be done.
+ * @status: status code, %WLAN_STATUS_SUCCESS for successful OWE info
+ * processing, use %WLAN_STATUS_UNSPECIFIED_FAILURE if user space
+ * cannot give you the real status code for failures. Used only for
+ * OWE update request command interface (user space to driver).
+ * @ie: IEs obtained from the peer or constructed by the user space. These are
+ * the IEs of the remote peer in the event from the host driver and
+ * the constructed IEs by the user space in the request interface.
+ * @ie_len: Length of IEs in octets.
+ */
+struct cfg80211_update_owe_info {
+ u8 peer[ETH_ALEN] __aligned(2);
+ u16 status;
+ const u8 *ie;
+ size_t ie_len;
+};
+
+/**
* struct cfg80211_ops - backend description for wireless configuration
*
* This struct is registered by fullmac card drivers and/or wireless stacks
@@ -3436,6 +3497,13 @@ struct cfg80211_pmsr_request {
* Statistics should be cumulative, currently no way to reset is provided.
* @start_pmsr: start peer measurement (e.g. FTM)
* @abort_pmsr: abort peer measurement
+ *
+ * @update_owe_info: Provide updated OWE info to the driver. A driver
+ * implementing SME but offloading OWE processing to user space will get
+ * the updated DH IE through this interface.
+ *
+ * @probe_mesh_link: Probe a direct mesh peer's link quality by sending a data
+ * frame, overruling the HWMP path selection algorithm.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -3750,6 +3818,10 @@ struct cfg80211_ops {
struct cfg80211_pmsr_request *request);
void (*abort_pmsr)(struct wiphy *wiphy, struct wireless_dev *wdev,
struct cfg80211_pmsr_request *request);
+ int (*update_owe_info)(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_update_owe_info *owe_info);
+ int (*probe_mesh_link)(struct wiphy *wiphy, struct net_device *dev,
+ const u8 *buf, size_t len);
};
/*
@@ -5492,6 +5564,28 @@ static inline void cfg80211_gen_new_bssid(const u8 *bssid, u8 max_bssid,
}
/**
+ * cfg80211_is_element_inherited - returns if element ID should be inherited
+ * @element: element to check
+ * @non_inherit_element: non inheritance element
+ */
+bool cfg80211_is_element_inherited(const struct element *element,
+ const struct element *non_inherit_element);
+
+/**
+ * cfg80211_merge_profile - merges a MBSSID profile if it is split between IEs
+ * @ie: ies
+ * @ielen: length of IEs
+ * @mbssid_elem: current MBSSID element
+ * @sub_elem: current MBSSID subelement (profile)
+ * @merged_ie: location of the merged profile
+ * @max_copy_len: max merged profile length
+ */
+size_t cfg80211_merge_profile(const u8 *ie, size_t ielen,
+ const struct element *mbssid_elem,
+ const struct element *sub_elem,
+ u8 *merged_ie, size_t max_copy_len);
+
+/**
* enum cfg80211_bss_frame_type - frame type that the BSS data came from
* @CFG80211_BSS_FTYPE_UNKNOWN: driver doesn't know whether the data is
* from a beacon or probe response
@@ -7183,6 +7277,11 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
#define wiphy_info(wiphy, format, args...) \
dev_info(&(wiphy)->dev, format, ##args)
+#define wiphy_err_ratelimited(wiphy, format, args...) \
+ dev_err_ratelimited(&(wiphy)->dev, format, ##args)
+#define wiphy_warn_ratelimited(wiphy, format, args...) \
+ dev_warn_ratelimited(&(wiphy)->dev, format, ##args)
+
#define wiphy_debug(wiphy, format, args...) \
wiphy_printk(KERN_DEBUG, wiphy, format, ##args)
@@ -7208,4 +7307,14 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
#define wiphy_WARN(wiphy, format, args...) \
WARN(1, "wiphy: %s\n" format, wiphy_name(wiphy), ##args);
+/**
+ * cfg80211_update_owe_info_event - Notify the peer's OWE info to user space
+ * @netdev: network device
+ * @owe_info: peer's owe info
+ * @gfp: allocation flags
+ */
+void cfg80211_update_owe_info_event(struct net_device *netdev,
+ struct cfg80211_update_owe_info *owe_info,
+ gfp_t gfp);
+
#endif /* __NET_CFG80211_H */
diff --git a/include/net/compat.h b/include/net/compat.h
index 4c6d75612b6c..f277653c7e17 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -30,9 +30,6 @@ struct compat_cmsghdr {
compat_int_t cmsg_type;
};
-int compat_sock_get_timestamp(struct sock *, struct timeval __user *);
-int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
-
#else /* defined(CONFIG_COMPAT) */
/*
* To avoid compiler warnings:
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 63de99e09f04..1c4adfb4195a 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -16,6 +16,7 @@
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/netdevice.h>
+#include <linux/spinlock.h>
#include <net/net_namespace.h>
#include <uapi/linux/devlink.h>
@@ -31,6 +32,7 @@ struct devlink {
struct list_head region_list;
u32 snapshot_id;
struct list_head reporter_list;
+ struct mutex reporters_lock; /* protects reporter_list */
struct devlink_dpipe_headers *dpipe_headers;
const struct devlink_ops *ops;
struct device *dev;
@@ -40,11 +42,13 @@ struct devlink {
};
struct devlink_port_attrs {
- bool set;
+ u8 set:1,
+ split:1,
+ switch_port:1;
enum devlink_port_flavour flavour;
u32 port_number; /* same value as "split group" */
- bool split;
u32 split_subport_number;
+ struct netdev_phys_item_id switch_id;
};
struct devlink_port {
@@ -53,6 +57,9 @@ struct devlink_port {
struct devlink *devlink;
unsigned index;
bool registered;
+ spinlock_t type_lock; /* Protects type and type_dev
+ * pointer consistency.
+ */
enum devlink_port_type type;
enum devlink_port_type desired_type;
void *type_dev;
@@ -485,13 +492,14 @@ struct devlink_ops {
struct devlink_sb_pool_info *pool_info);
int (*sb_pool_set)(struct devlink *devlink, unsigned int sb_index,
u16 pool_index, u32 size,
- enum devlink_sb_threshold_type threshold_type);
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack);
int (*sb_port_pool_get)(struct devlink_port *devlink_port,
unsigned int sb_index, u16 pool_index,
u32 *p_threshold);
int (*sb_port_pool_set)(struct devlink_port *devlink_port,
unsigned int sb_index, u16 pool_index,
- u32 threshold);
+ u32 threshold, struct netlink_ext_ack *extack);
int (*sb_tc_pool_bind_get)(struct devlink_port *devlink_port,
unsigned int sb_index,
u16 tc_index,
@@ -501,7 +509,8 @@ struct devlink_ops {
unsigned int sb_index,
u16 tc_index,
enum devlink_sb_pool_type pool_type,
- u16 pool_index, u32 threshold);
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack);
int (*sb_occ_snapshot)(struct devlink *devlink,
unsigned int sb_index);
int (*sb_occ_max_clear)(struct devlink *devlink,
@@ -543,19 +552,25 @@ static inline struct devlink *priv_to_devlink(void *priv)
return container_of(priv, struct devlink, priv);
}
+static inline struct devlink_port *
+netdev_to_devlink_port(struct net_device *dev)
+{
+ if (dev->netdev_ops->ndo_get_devlink_port)
+ return dev->netdev_ops->ndo_get_devlink_port(dev);
+ return NULL;
+}
+
static inline struct devlink *netdev_to_devlink(struct net_device *dev)
{
-#if IS_ENABLED(CONFIG_NET_DEVLINK)
- if (dev->netdev_ops->ndo_get_devlink)
- return dev->netdev_ops->ndo_get_devlink(dev);
-#endif
+ struct devlink_port *devlink_port = netdev_to_devlink_port(dev);
+
+ if (devlink_port)
+ return devlink_port->devlink;
return NULL;
}
struct ib_device;
-#if IS_ENABLED(CONFIG_NET_DEVLINK)
-
struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size);
int devlink_register(struct devlink *devlink, struct device *dev);
void devlink_unregister(struct devlink *devlink);
@@ -572,9 +587,9 @@ void devlink_port_type_clear(struct devlink_port *devlink_port);
void devlink_port_attrs_set(struct devlink_port *devlink_port,
enum devlink_port_flavour flavour,
u32 port_number, bool split,
- u32 split_subport_number);
-int devlink_port_get_phys_port_name(struct devlink_port *devlink_port,
- char *name, size_t len);
+ u32 split_subport_number,
+ const unsigned char *switch_id,
+ unsigned char switch_id_len);
int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
u32 size, u16 ingress_pools_count,
u16 egress_pools_count, u16 ingress_tc_count,
@@ -724,510 +739,43 @@ void
devlink_health_reporter_state_update(struct devlink_health_reporter *reporter,
enum devlink_health_reporter_state state);
+#if IS_ENABLED(CONFIG_NET_DEVLINK)
+
void devlink_compat_running_version(struct net_device *dev,
char *buf, size_t len);
int devlink_compat_flash_update(struct net_device *dev, const char *file_name);
+int devlink_compat_phys_port_name_get(struct net_device *dev,
+ char *name, size_t len);
+int devlink_compat_switch_id_get(struct net_device *dev,
+ struct netdev_phys_item_id *ppid);
#else
-static inline struct devlink *devlink_alloc(const struct devlink_ops *ops,
- size_t priv_size)
-{
- return kzalloc(sizeof(struct devlink) + priv_size, GFP_KERNEL);
-}
-
-static inline int devlink_register(struct devlink *devlink, struct device *dev)
-{
- return 0;
-}
-
-static inline void devlink_unregister(struct devlink *devlink)
-{
-}
-
-static inline void devlink_params_publish(struct devlink *devlink)
-{
-}
-
-static inline void devlink_params_unpublish(struct devlink *devlink)
-{
-}
-
-static inline void devlink_free(struct devlink *devlink)
-{
- kfree(devlink);
-}
-
-static inline int devlink_port_register(struct devlink *devlink,
- struct devlink_port *devlink_port,
- unsigned int port_index)
-{
- return 0;
-}
-
-static inline void devlink_port_unregister(struct devlink_port *devlink_port)
-{
-}
-
-static inline void devlink_port_type_eth_set(struct devlink_port *devlink_port,
- struct net_device *netdev)
-{
-}
-
-static inline void devlink_port_type_ib_set(struct devlink_port *devlink_port,
- struct ib_device *ibdev)
-{
-}
-
-static inline void devlink_port_type_clear(struct devlink_port *devlink_port)
-{
-}
-
-static inline void devlink_port_attrs_set(struct devlink_port *devlink_port,
- enum devlink_port_flavour flavour,
- u32 port_number, bool split,
- u32 split_subport_number)
-{
-}
-
-static inline int
-devlink_port_get_phys_port_name(struct devlink_port *devlink_port,
- char *name, size_t len)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int devlink_sb_register(struct devlink *devlink,
- unsigned int sb_index, u32 size,
- u16 ingress_pools_count,
- u16 egress_pools_count,
- u16 ingress_tc_count,
- u16 egress_tc_count)
-{
- return 0;
-}
-
-static inline void devlink_sb_unregister(struct devlink *devlink,
- unsigned int sb_index)
-{
-}
-
-static inline int
-devlink_dpipe_table_register(struct devlink *devlink,
- const char *table_name,
- struct devlink_dpipe_table_ops *table_ops,
- void *priv, bool counter_control_extern)
-{
- return 0;
-}
-
-static inline void devlink_dpipe_table_unregister(struct devlink *devlink,
- const char *table_name)
-{
-}
-
-static inline int devlink_dpipe_headers_register(struct devlink *devlink,
- struct devlink_dpipe_headers *
- dpipe_headers)
-{
- return 0;
-}
-
-static inline void devlink_dpipe_headers_unregister(struct devlink *devlink)
-{
-}
-
-static inline bool devlink_dpipe_table_counter_enabled(struct devlink *devlink,
- const char *table_name)
-{
- return false;
-}
-
-static inline int
-devlink_dpipe_entry_ctx_prepare(struct devlink_dpipe_dump_ctx *dump_ctx)
-{
- return 0;
-}
-
-static inline int
-devlink_dpipe_entry_ctx_append(struct devlink_dpipe_dump_ctx *dump_ctx,
- struct devlink_dpipe_entry *entry)
-{
- return 0;
-}
-
-static inline int
-devlink_dpipe_entry_ctx_close(struct devlink_dpipe_dump_ctx *dump_ctx)
-{
- return 0;
-}
-
-static inline void
-devlink_dpipe_entry_clear(struct devlink_dpipe_entry *entry)
-{
-}
-
-static inline int
-devlink_dpipe_action_put(struct sk_buff *skb,
- struct devlink_dpipe_action *action)
-{
- return 0;
-}
-
-static inline int
-devlink_dpipe_match_put(struct sk_buff *skb,
- struct devlink_dpipe_match *match)
-{
- return 0;
-}
-
-static inline int
-devlink_resource_register(struct devlink *devlink,
- const char *resource_name,
- u64 resource_size,
- u64 resource_id,
- u64 parent_resource_id,
- const struct devlink_resource_size_params *size_params)
-{
- return 0;
-}
-
static inline void
-devlink_resources_unregister(struct devlink *devlink,
- struct devlink_resource *resource)
-{
-}
-
-static inline int
-devlink_resource_size_get(struct devlink *devlink, u64 resource_id,
- u64 *p_resource_size)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int
-devlink_dpipe_table_resource_set(struct devlink *devlink,
- const char *table_name, u64 resource_id,
- u64 resource_units)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void
-devlink_resource_occ_get_register(struct devlink *devlink,
- u64 resource_id,
- devlink_resource_occ_get_t *occ_get,
- void *occ_get_priv)
-{
-}
-
-static inline void
-devlink_resource_occ_get_unregister(struct devlink *devlink,
- u64 resource_id)
-{
-}
-
-static inline int
-devlink_params_register(struct devlink *devlink,
- const struct devlink_param *params,
- size_t params_count)
-{
- return 0;
-}
-
-static inline void
-devlink_params_unregister(struct devlink *devlink,
- const struct devlink_param *params,
- size_t params_count)
-{
-
-}
-
-static inline int
-devlink_port_params_register(struct devlink_port *devlink_port,
- const struct devlink_param *params,
- size_t params_count)
-{
- return 0;
-}
-
-static inline void
-devlink_port_params_unregister(struct devlink_port *devlink_port,
- const struct devlink_param *params,
- size_t params_count)
-{
-}
-
-static inline int
-devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
- union devlink_param_value *init_val)
+devlink_compat_running_version(struct net_device *dev, char *buf, size_t len)
{
- return -EOPNOTSUPP;
}
static inline int
-devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
- union devlink_param_value init_val)
+devlink_compat_flash_update(struct net_device *dev, const char *file_name)
{
return -EOPNOTSUPP;
}
static inline int
-devlink_port_param_driverinit_value_get(struct devlink_port *devlink_port,
- u32 param_id,
- union devlink_param_value *init_val)
+devlink_compat_phys_port_name_get(struct net_device *dev,
+ char *name, size_t len)
{
return -EOPNOTSUPP;
}
static inline int
-devlink_port_param_driverinit_value_set(struct devlink_port *devlink_port,
- u32 param_id,
- union devlink_param_value init_val)
+devlink_compat_switch_id_get(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
return -EOPNOTSUPP;
}
-static inline void
-devlink_param_value_changed(struct devlink *devlink, u32 param_id)
-{
-}
-
-static inline void
-devlink_port_param_value_changed(struct devlink_port *devlink_port,
- u32 param_id)
-{
-}
-
-static inline void
-devlink_param_value_str_fill(union devlink_param_value *dst_val,
- const char *src)
-{
-}
-
-static inline struct devlink_region *
-devlink_region_create(struct devlink *devlink,
- const char *region_name,
- u32 region_max_snapshots,
- u64 region_size)
-{
- return NULL;
-}
-
-static inline void
-devlink_region_destroy(struct devlink_region *region)
-{
-}
-
-static inline u32
-devlink_region_shapshot_id_get(struct devlink *devlink)
-{
- return 0;
-}
-
-static inline int
-devlink_region_snapshot_create(struct devlink_region *region, u64 data_len,
- u8 *data, u32 snapshot_id,
- devlink_snapshot_data_dest_t *data_destructor)
-{
- return 0;
-}
-
-static inline int
-devlink_info_driver_name_put(struct devlink_info_req *req, const char *name)
-{
- return 0;
-}
-
-static inline int
-devlink_info_serial_number_put(struct devlink_info_req *req, const char *sn)
-{
- return 0;
-}
-
-static inline int
-devlink_info_version_fixed_put(struct devlink_info_req *req,
- const char *version_name,
- const char *version_value)
-{
- return 0;
-}
-
-static inline int
-devlink_info_version_stored_put(struct devlink_info_req *req,
- const char *version_name,
- const char *version_value)
-{
- return 0;
-}
-
-static inline int
-devlink_info_version_running_put(struct devlink_info_req *req,
- const char *version_name,
- const char *version_value)
-{
- return 0;
-}
-
-static inline int
-devlink_fmsg_obj_nest_start(struct devlink_fmsg *fmsg)
-{
- return 0;
-}
-
-static inline int
-devlink_fmsg_obj_nest_end(struct devlink_fmsg *fmsg)
-{
- return 0;
-}
-
-static inline int
-devlink_fmsg_pair_nest_start(struct devlink_fmsg *fmsg, const char *name)
-{
- return 0;
-}
-
-static inline int
-devlink_fmsg_pair_nest_end(struct devlink_fmsg *fmsg)
-{
- return 0;
-}
-
-static inline int
-devlink_fmsg_arr_pair_nest_start(struct devlink_fmsg *fmsg,
- const char *name)
-{
- return 0;
-}
-
-static inline int
-devlink_fmsg_arr_pair_nest_end(struct devlink_fmsg *fmsg)
-{
- return 0;
-}
-
-static inline int
-devlink_fmsg_bool_put(struct devlink_fmsg *fmsg, bool value)
-{
- return 0;
-}
-
-static inline int
-devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value)
-{
- return 0;
-}
-
-static inline int
-devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value)
-{
- return 0;
-}
-
-static inline int
-devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value)
-{
- return 0;
-}
-
-static inline int
-devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value)
-{
- return 0;
-}
-
-static inline int
-devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
- u16 value_len)
-{
- return 0;
-}
-
-static inline int
-devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
- bool value)
-{
- return 0;
-}
-
-static inline int
-devlink_fmsg_u8_pair_put(struct devlink_fmsg *fmsg, const char *name,
- u8 value)
-{
- return 0;
-}
-
-static inline int
-devlink_fmsg_u32_pair_put(struct devlink_fmsg *fmsg, const char *name,
- u32 value)
-{
- return 0;
-}
-
-static inline int
-devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name,
- u64 value)
-{
- return 0;
-}
-
-static inline int
-devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name,
- const char *value)
-{
- return 0;
-}
-
-static inline int
-devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
- const void *value, u16 value_len)
-{
- return 0;
-}
-
-static inline struct devlink_health_reporter *
-devlink_health_reporter_create(struct devlink *devlink,
- const struct devlink_health_reporter_ops *ops,
- u64 graceful_period, bool auto_recover,
- void *priv)
-{
- return NULL;
-}
-
-static inline void
-devlink_health_reporter_destroy(struct devlink_health_reporter *reporter)
-{
-}
-
-static inline void *
-devlink_health_reporter_priv(struct devlink_health_reporter *reporter)
-{
- return NULL;
-}
-
-static inline int
-devlink_health_report(struct devlink_health_reporter *reporter,
- const char *msg, void *priv_ctx)
-{
- return 0;
-}
-
-static inline void
-devlink_health_reporter_state_update(struct devlink_health_reporter *reporter,
- enum devlink_health_reporter_state state)
-{
-}
-
-static inline void
-devlink_compat_running_version(struct net_device *dev, char *buf, size_t len)
-{
-}
-
-static inline int
-devlink_compat_flash_update(struct net_device *dev, const char *file_name)
-{
- return -EOPNOTSUPP;
-}
#endif
#endif /* _NET_DEVLINK_H_ */
diff --git a/include/net/dsa.h b/include/net/dsa.h
index ae480bba11f5..685294817712 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -30,20 +30,36 @@ struct phy_device;
struct fixed_phy_status;
struct phylink_link_state;
+#define DSA_TAG_PROTO_NONE_VALUE 0
+#define DSA_TAG_PROTO_BRCM_VALUE 1
+#define DSA_TAG_PROTO_BRCM_PREPEND_VALUE 2
+#define DSA_TAG_PROTO_DSA_VALUE 3
+#define DSA_TAG_PROTO_EDSA_VALUE 4
+#define DSA_TAG_PROTO_GSWIP_VALUE 5
+#define DSA_TAG_PROTO_KSZ9477_VALUE 6
+#define DSA_TAG_PROTO_KSZ9893_VALUE 7
+#define DSA_TAG_PROTO_LAN9303_VALUE 8
+#define DSA_TAG_PROTO_MTK_VALUE 9
+#define DSA_TAG_PROTO_QCA_VALUE 10
+#define DSA_TAG_PROTO_TRAILER_VALUE 11
+#define DSA_TAG_PROTO_8021Q_VALUE 12
+#define DSA_TAG_PROTO_SJA1105_VALUE 13
+
enum dsa_tag_protocol {
- DSA_TAG_PROTO_NONE = 0,
- DSA_TAG_PROTO_BRCM,
- DSA_TAG_PROTO_BRCM_PREPEND,
- DSA_TAG_PROTO_DSA,
- DSA_TAG_PROTO_EDSA,
- DSA_TAG_PROTO_GSWIP,
- DSA_TAG_PROTO_KSZ9477,
- DSA_TAG_PROTO_KSZ9893,
- DSA_TAG_PROTO_LAN9303,
- DSA_TAG_PROTO_MTK,
- DSA_TAG_PROTO_QCA,
- DSA_TAG_PROTO_TRAILER,
- DSA_TAG_LAST, /* MUST BE LAST */
+ DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
+ DSA_TAG_PROTO_BRCM = DSA_TAG_PROTO_BRCM_VALUE,
+ DSA_TAG_PROTO_BRCM_PREPEND = DSA_TAG_PROTO_BRCM_PREPEND_VALUE,
+ DSA_TAG_PROTO_DSA = DSA_TAG_PROTO_DSA_VALUE,
+ DSA_TAG_PROTO_EDSA = DSA_TAG_PROTO_EDSA_VALUE,
+ DSA_TAG_PROTO_GSWIP = DSA_TAG_PROTO_GSWIP_VALUE,
+ DSA_TAG_PROTO_KSZ9477 = DSA_TAG_PROTO_KSZ9477_VALUE,
+ DSA_TAG_PROTO_KSZ9893 = DSA_TAG_PROTO_KSZ9893_VALUE,
+ DSA_TAG_PROTO_LAN9303 = DSA_TAG_PROTO_LAN9303_VALUE,
+ DSA_TAG_PROTO_MTK = DSA_TAG_PROTO_MTK_VALUE,
+ DSA_TAG_PROTO_QCA = DSA_TAG_PROTO_QCA_VALUE,
+ DSA_TAG_PROTO_TRAILER = DSA_TAG_PROTO_TRAILER_VALUE,
+ DSA_TAG_PROTO_8021Q = DSA_TAG_PROTO_8021Q_VALUE,
+ DSA_TAG_PROTO_SJA1105 = DSA_TAG_PROTO_SJA1105_VALUE,
};
struct packet_type;
@@ -55,9 +71,37 @@ struct dsa_device_ops {
struct packet_type *pt);
int (*flow_dissect)(const struct sk_buff *skb, __be16 *proto,
int *offset);
+ /* Used to determine which traffic should match the DSA filter in
+ * eth_type_trans, and which, if any, should bypass it and be processed
+ * as regular on the master net device.
+ */
+ bool (*filter)(const struct sk_buff *skb, struct net_device *dev);
unsigned int overhead;
+ const char *name;
+ enum dsa_tag_protocol proto;
+};
+
+#define DSA_TAG_DRIVER_ALIAS "dsa_tag-"
+#define MODULE_ALIAS_DSA_TAG_DRIVER(__proto) \
+ MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __stringify(__proto##_VALUE))
+
+struct dsa_skb_cb {
+ struct sk_buff *clone;
+ bool deferred_xmit;
+};
+
+struct __dsa_skb_cb {
+ struct dsa_skb_cb cb;
+ u8 priv[48 - sizeof(struct dsa_skb_cb)];
};
+#define __DSA_SKB_CB(skb) ((struct __dsa_skb_cb *)((skb)->cb))
+
+#define DSA_SKB_CB(skb) ((struct dsa_skb_cb *)((skb)->cb))
+
+#define DSA_SKB_CB_PRIV(skb) \
+ ((void *)(skb)->cb + offsetof(struct __dsa_skb_cb, priv))
+
struct dsa_switch_tree {
struct list_head list;
@@ -128,6 +172,7 @@ struct dsa_port {
struct dsa_switch_tree *dst;
struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt);
+ bool (*filter)(const struct sk_buff *skb, struct net_device *dev);
enum {
DSA_PORT_TYPE_UNUSED = 0,
@@ -140,12 +185,24 @@ struct dsa_port {
unsigned int index;
const char *name;
const struct dsa_port *cpu_dp;
+ const char *mac;
struct device_node *dn;
unsigned int ageing_time;
+ bool vlan_filtering;
u8 stp_state;
struct net_device *bridge_dev;
struct devlink_port devlink_port;
struct phylink *pl;
+
+ struct work_struct xmit_work;
+ struct sk_buff_head xmit_queue;
+
+ /*
+ * Give the switch driver somewhere to hang its per-port private data
+ * structures (accessible from the tagger).
+ */
+ void *priv;
+
/*
* Original copy of the master netdev ethtool_ops
*/
@@ -208,6 +265,16 @@ struct dsa_switch {
/* Number of switch port queues */
unsigned int num_tx_queues;
+ /* Disallow bridge core from requesting different VLAN awareness
+ * settings on ports if not hardware-supported
+ */
+ bool vlan_filtering_is_global;
+
+ /* In case vlan_filtering_is_global is set, the VLAN awareness state
+ * should be retrieved from here and not from the per-port settings.
+ */
+ bool vlan_filtering;
+
unsigned long *bitmap;
unsigned long _bitmap;
@@ -275,18 +342,19 @@ static inline unsigned int dsa_upstream_port(struct dsa_switch *ds, int port)
return dsa_towards_port(ds, cpu_dp->ds->index, cpu_dp->index);
}
+static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp)
+{
+ const struct dsa_switch *ds = dp->ds;
+
+ if (ds->vlan_filtering_is_global)
+ return ds->vlan_filtering;
+ else
+ return dp->vlan_filtering;
+}
+
typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
bool is_static, void *data);
struct dsa_switch_ops {
-#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
- /*
- * Legacy probing.
- */
- const char *(*probe)(struct device *dsa_dev,
- struct device *host_dev, int sw_addr,
- void **priv);
-#endif
-
enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds,
int port);
@@ -469,6 +537,12 @@ struct dsa_switch_ops {
struct sk_buff *clone, unsigned int type);
bool (*port_rxtstamp)(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type);
+
+ /*
+ * Deferred frame Tx
+ */
+ netdev_tx_t (*port_deferred_xmit)(struct dsa_switch *ds, int port,
+ struct sk_buff *skb);
};
struct dsa_switch_driver {
@@ -476,20 +550,6 @@ struct dsa_switch_driver {
const struct dsa_switch_ops *ops;
};
-#if IS_ENABLED(CONFIG_NET_DSA_LEGACY)
-/* Legacy driver registration */
-void register_switch_driver(struct dsa_switch_driver *type);
-void unregister_switch_driver(struct dsa_switch_driver *type);
-struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev);
-
-#else
-static inline void register_switch_driver(struct dsa_switch_driver *type) { }
-static inline void unregister_switch_driver(struct dsa_switch_driver *type) { }
-static inline struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev)
-{
- return NULL;
-}
-#endif
struct net_device *dsa_dev_to_net_device(struct device *dev);
/* Keep inline for faster access in hot path */
@@ -501,6 +561,15 @@ static inline bool netdev_uses_dsa(struct net_device *dev)
return false;
}
+static inline bool dsa_can_decode(const struct sk_buff *skb,
+ struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ return !dev->dsa_ptr->filter || dev->dsa_ptr->filter(skb, dev);
+#endif
+ return false;
+}
+
struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n);
void dsa_unregister_switch(struct dsa_switch *ds);
int dsa_register_switch(struct dsa_switch *ds);
@@ -569,9 +638,76 @@ static inline int call_dsa_notifiers(unsigned long val, struct net_device *dev,
#define BRCM_TAG_GET_QUEUE(v) ((v) & 0xff)
+netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev);
int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data);
int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data);
int dsa_port_get_phy_sset_count(struct dsa_port *dp);
void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up);
+struct dsa_tag_driver {
+ const struct dsa_device_ops *ops;
+ struct list_head list;
+ struct module *owner;
+};
+
+void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
+ unsigned int count,
+ struct module *owner);
+void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
+ unsigned int count);
+
+#define dsa_tag_driver_module_drivers(__dsa_tag_drivers_array, __count) \
+static int __init dsa_tag_driver_module_init(void) \
+{ \
+ dsa_tag_drivers_register(__dsa_tag_drivers_array, __count, \
+ THIS_MODULE); \
+ return 0; \
+} \
+module_init(dsa_tag_driver_module_init); \
+ \
+static void __exit dsa_tag_driver_module_exit(void) \
+{ \
+ dsa_tag_drivers_unregister(__dsa_tag_drivers_array, __count); \
+} \
+module_exit(dsa_tag_driver_module_exit)
+
+/**
+ * module_dsa_tag_drivers() - Helper macro for registering DSA tag
+ * drivers
+ * @__ops_array: Array of tag driver structures
+ *
+ * Helper macro for DSA tag drivers which do not do anything special
+ * in module init/exit. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit().
+ */
+#define module_dsa_tag_drivers(__ops_array) \
+dsa_tag_driver_module_drivers(__ops_array, ARRAY_SIZE(__ops_array))
+
+#define DSA_TAG_DRIVER_NAME(__ops) dsa_tag_driver ## _ ## __ops
+
+/* Create a static structure from which we can build a linked list of
+ * dsa_tag drivers
+ */
+#define DSA_TAG_DRIVER(__ops) \
+static struct dsa_tag_driver DSA_TAG_DRIVER_NAME(__ops) = { \
+ .ops = &__ops, \
+}
+
+/**
+ * module_dsa_tag_driver() - Helper macro for registering a single DSA tag
+ * driver
+ * @__ops: Single tag driver structure
+ *
+ * Helper macro for DSA tag drivers which do not do anything special
+ * in module init/exit. Each module may only use this macro once, and
+ * calling it replaces module_init() and module_exit().
+ */
+#define module_dsa_tag_driver(__ops) \
+DSA_TAG_DRIVER(__ops); \
+ \
+static struct dsa_tag_driver *dsa_tag_driver_array[] = { \
+ &DSA_TAG_DRIVER_NAME(__ops) \
+}; \
+module_dsa_tag_drivers(dsa_tag_driver_array)
#endif
+
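The tag driver macros above reduce a tag protocol module to an ops structure plus one registration line. A minimal sketch under that assumption; the "mytag" name, the callback bodies and the 4-byte overhead are invented for illustration and are not part of this patch:

#include <linux/module.h>
#include <net/dsa.h>

static struct sk_buff *mytag_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* a real tagger would push its switch tag in front of the frame */
	return skb;
}

static struct sk_buff *mytag_rcv(struct sk_buff *skb, struct net_device *dev,
				 struct packet_type *pt)
{
	/* a real tagger would strip the tag and set skb->dev to the user port */
	return skb;
}

static const struct dsa_device_ops mytag_netdev_ops = {
	.xmit	  = mytag_xmit,
	.rcv	  = mytag_rcv,
	.overhead = 4,	/* bytes the xmit hook adds to each frame */
};

/* expands to DSA_TAG_DRIVER(), a one-entry array and module_init/exit */
module_dsa_tag_driver(mytag_netdev_ops);

MODULE_LICENSE("GPL");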
diff --git a/include/net/dst.h b/include/net/dst.h
index 6cf0870414c7..12b31c602cb0 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -19,17 +19,6 @@
#include <net/neighbour.h>
#include <asm/processor.h>
-#define DST_GC_MIN (HZ/10)
-#define DST_GC_INC (HZ/2)
-#define DST_GC_MAX (120*HZ)
-
-/* Each dst_entry has reference count and sits in some parent list(s).
- * When it is removed from parent list, it is "freed" (dst_free).
- * After this it enters dead state (dst->obsolete > 0) and if its refcnt
- * is zero, it can be destroyed immediately, otherwise it is added
- * to gc list and garbage collector periodically checks the refcnt.
- */
-
struct sk_buff;
struct dst_entry {
diff --git a/include/net/fib_notifier.h b/include/net/fib_notifier.h
index c91ec732afd6..c49d7bfb5c30 100644
--- a/include/net/fib_notifier.h
+++ b/include/net/fib_notifier.h
@@ -2,10 +2,11 @@
#define __NET_FIB_NOTIFIER_H
#include <linux/types.h>
-#include <linux/module.h>
#include <linux/notifier.h>
#include <net/net_namespace.h>
+struct module;
+
struct fib_notifier_info {
struct net *net;
int family;
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 2b26979efb48..7c5a8d9a8d2a 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -305,4 +305,11 @@ static inline void *skb_flow_dissector_target(struct flow_dissector *flow_dissec
return ((char *)target_container) + flow_dissector->offset[key_id];
}
+struct bpf_flow_dissector {
+ struct bpf_flow_keys *flow_keys;
+ const struct sk_buff *skb;
+ void *data;
+ void *data_end;
+};
+
#endif
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index d035183c8d03..6200900434e1 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -118,6 +118,8 @@ enum flow_action_id {
FLOW_ACTION_MARK,
FLOW_ACTION_WAKE,
FLOW_ACTION_QUEUE,
+ FLOW_ACTION_SAMPLE,
+ FLOW_ACTION_POLICE,
};
/* This is mirroring enum pedit_header_type definition for easy mapping between
@@ -157,6 +159,16 @@ struct flow_action_entry {
u32 index;
u8 vf;
} queue;
+ struct { /* FLOW_ACTION_SAMPLE */
+ struct psample_group *psample_group;
+ u32 rate;
+ u32 trunc_size;
+ bool truncate;
+ } sample;
+ struct { /* FLOW_ACTION_POLICE */
+ s64 burst;
+ u64 rate_bytes_ps;
+ } police;
};
};
@@ -170,6 +182,17 @@ static inline bool flow_action_has_entries(const struct flow_action *action)
return action->num_entries;
}
+/**
+ * flow_offload_has_one_action() - check if exactly one action is present
+ * @action: tc filter flow offload action
+ *
+ * Returns true if exactly one action is present.
+ */
+static inline bool flow_offload_has_one_action(const struct flow_action *action)
+{
+ return action->num_entries == 1;
+}
+
#define flow_action_for_each(__i, __act, __actions) \
for (__i = 0, __act = &(__actions)->entries[0]; __i < (__actions)->num_entries; __act = &(__actions)->entries[++__i])
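For a driver consuming these entries, the two new action types slot into the usual flow_action_for_each() walk. A hedged sketch; my_set_policer() and my_set_sampler() stand in for hardware-programming code and are not real APIs:

#include <net/flow_offload.h>

/* hypothetical hardware-programming helpers */
void my_set_policer(u64 rate_bytes_ps, s64 burst);
void my_set_sampler(struct psample_group *group, u32 rate,
		    u32 trunc_size, bool truncate);

static int my_parse_actions(const struct flow_action *action)
{
	const struct flow_action_entry *act;
	int i;

	if (!flow_action_has_entries(action))
		return -EINVAL;

	flow_action_for_each(i, act, action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			my_set_policer(act->police.rate_bytes_ps,
				       act->police.burst);
			break;
		case FLOW_ACTION_SAMPLE:
			my_set_sampler(act->sample.psample_group,
				       act->sample.rate,
				       act->sample.trunc_size,
				       act->sample.truncate);
			break;
		default:
			return -EOPNOTSUPP;	/* not offloadable here */
		}
	}
	return 0;
}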
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
index be7c0fab3478..2caa86660ab0 100644
--- a/include/net/fq_impl.h
+++ b/include/net/fq_impl.h
@@ -107,21 +107,23 @@ begin:
return skb;
}
+static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb)
+{
+ u32 hash = skb_get_hash_perturb(skb, fq->perturbation);
+
+ return reciprocal_scale(hash, fq->flows_cnt);
+}
+
static struct fq_flow *fq_flow_classify(struct fq *fq,
- struct fq_tin *tin,
+ struct fq_tin *tin, u32 idx,
struct sk_buff *skb,
fq_flow_get_default_t get_default_func)
{
struct fq_flow *flow;
- u32 hash;
- u32 idx;
lockdep_assert_held(&fq->lock);
- hash = skb_get_hash_perturb(skb, fq->perturbation);
- idx = reciprocal_scale(hash, fq->flows_cnt);
flow = &fq->flows[idx];
-
if (flow->tin && flow->tin != tin) {
flow = get_default_func(fq, tin, idx, skb);
tin->collisions++;
@@ -153,7 +155,7 @@ static void fq_recalc_backlog(struct fq *fq,
}
static void fq_tin_enqueue(struct fq *fq,
- struct fq_tin *tin,
+ struct fq_tin *tin, u32 idx,
struct sk_buff *skb,
fq_skb_free_t free_func,
fq_flow_get_default_t get_default_func)
@@ -163,7 +165,7 @@ static void fq_tin_enqueue(struct fq *fq,
lockdep_assert_held(&fq->lock);
- flow = fq_flow_classify(fq, tin, skb, get_default_func);
+ flow = fq_flow_classify(fq, tin, idx, skb, get_default_func);
flow->tin = tin;
flow->backlog += skb->len;
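The index split means a caller can hash the skb once, use the index for its own per-flow bookkeeping, and then hand it to the enqueue path. A rough caller-side sketch, assuming free_func and get_default_func are the caller's existing fq callbacks:

static void my_enqueue(struct fq *fq, struct fq_tin *tin, struct sk_buff *skb,
		       fq_skb_free_t free_func,
		       fq_flow_get_default_t get_default_func)
{
	u32 idx = fq_flow_idx(fq, skb);	/* hash computed outside fq->lock */

	/* idx can be used here, e.g. to look up per-flow parameters */

	spin_lock_bh(&fq->lock);
	fq_tin_enqueue(fq, tin, idx, skb, free_func, get_default_func);
	spin_unlock_bh(&fq->lock);
}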
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index aa2e5888f18d..9292f1c588b7 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -26,6 +26,7 @@ struct genl_info;
* @name: name of family
* @version: protocol version
* @maxattr: maximum number of attributes supported
+ * @policy: netlink policy
* @netnsok: set to true if the family can handle network
* namespaces and should be presented in all of them
* @parallel_ops: operations can be called in parallel and aren't
@@ -56,6 +57,7 @@ struct genl_family {
unsigned int maxattr;
bool netnsok;
bool parallel_ops;
+ const struct nla_policy *policy;
int (*pre_doit)(const struct genl_ops *ops,
struct sk_buff *skb,
struct genl_info *info);
@@ -119,19 +121,23 @@ static inline int genl_err_attr(struct genl_info *info, int err,
return err;
}
+enum genl_validate_flags {
+ GENL_DONT_VALIDATE_STRICT = BIT(0),
+ GENL_DONT_VALIDATE_DUMP = BIT(1),
+ GENL_DONT_VALIDATE_DUMP_STRICT = BIT(2),
+};
+
/**
* struct genl_ops - generic netlink operations
* @cmd: command identifier
* @internal_flags: flags used by the family
* @flags: flags
- * @policy: attribute validation policy
* @doit: standard command callback
* @start: start callback for dumps
* @dumpit: callback for dumpers
* @done: completion callback for dumps
*/
struct genl_ops {
- const struct nla_policy *policy;
int (*doit)(struct sk_buff *skb,
struct genl_info *info);
int (*start)(struct netlink_callback *cb);
@@ -141,6 +147,7 @@ struct genl_ops {
u8 cmd;
u8 internal_flags;
u8 flags;
+ u8 validate;
};
int genl_register_family(struct genl_family *family);
@@ -165,6 +172,25 @@ static inline struct nlmsghdr *genlmsg_nlhdr(void *user_hdr)
}
/**
+ * genlmsg_parse_deprecated - parse attributes of a genetlink message
+ * @nlh: netlink message header
+ * @family: genetlink message family
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ * @extack: extended ACK report struct
+ */
+static inline int genlmsg_parse_deprecated(const struct nlmsghdr *nlh,
+ const struct genl_family *family,
+ struct nlattr *tb[], int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nlmsg_parse(nlh, family->hdrsize + GENL_HDRLEN, tb, maxtype,
+ policy, NL_VALIDATE_LIBERAL, extack);
+}
+
+/**
* genlmsg_parse - parse attributes of a genetlink message
* @nlh: netlink message header
* @family: genetlink message family
@@ -179,8 +205,8 @@ static inline int genlmsg_parse(const struct nlmsghdr *nlh,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
- return nlmsg_parse(nlh, family->hdrsize + GENL_HDRLEN, tb, maxtype,
- policy, extack);
+ return __nlmsg_parse(nlh, family->hdrsize + GENL_HDRLEN, tb, maxtype,
+ policy, NL_VALIDATE_STRICT, extack);
}
/**
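The net effect for a family: the policy is declared once on struct genl_family, and ops that cannot yet cope with strict checking opt out via the new validate field. A hedged sketch with invented names; the family and attribute definitions are not from this patch:

enum {
	MY_ATTR_UNSPEC,
	MY_ATTR_VAL,
	__MY_ATTR_MAX,
};
#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
	[MY_ATTR_VAL] = { .type = NLA_U32 },
};

static int my_cmd_get(struct sk_buff *skb, struct genl_info *info);

static const struct genl_ops my_ops[] = {
	{
		.cmd      = 1,
		.doit     = my_cmd_get,
		/* legacy command: keep the old, liberal parsing for now */
		.validate = GENL_DONT_VALIDATE_STRICT |
			    GENL_DONT_VALIDATE_DUMP,
	},
};

static struct genl_family my_family __ro_after_init = {
	.name    = "my_family",
	.version = 1,
	.maxattr = MY_ATTR_MAX,
	.policy  = my_policy,	/* used to live in struct genl_ops */
	.ops     = my_ops,
	.n_ops   = ARRAY_SIZE(my_ops),
	.module  = THIS_MODULE,
};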
diff --git a/include/net/geneve.h b/include/net/geneve.h
index fc6a7e0a874a..bced0b1d9fe4 100644
--- a/include/net/geneve.h
+++ b/include/net/geneve.h
@@ -4,6 +4,8 @@
#include <net/udp_tunnel.h>
+#define GENEVE_UDP_PORT 6081
+
/* Geneve Header:
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |Ver| Opt Len |O|C| Rsvd. | Protocol Type |
diff --git a/include/net/ife.h b/include/net/ife.h
index e117617e3c34..7e2538d8585b 100644
--- a/include/net/ife.h
+++ b/include/net/ife.h
@@ -4,7 +4,6 @@
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
-#include <linux/module.h>
#include <uapi/linux/ife.h>
#if IS_ENABLED(CONFIG_NET_IFE)
diff --git a/include/net/ip.h b/include/net/ip.h
index be3cad9c2e4c..2d3cce7c3e8a 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -38,6 +38,10 @@
#define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */
#define IPV4_MIN_MTU 68 /* RFC 791 */
+extern unsigned int sysctl_fib_sync_mem;
+extern unsigned int sysctl_fib_sync_mem_min;
+extern unsigned int sysctl_fib_sync_mem_max;
+
struct sock;
struct inet_skb_parm {
@@ -677,7 +681,7 @@ int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
unsigned char __user *data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
-int ip_options_rcv_srr(struct sk_buff *skb);
+int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
/*
* Functions provided by ip_sockglue.c
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 84097010237c..40105738e2f6 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -19,6 +19,7 @@
#include <linux/notifier.h>
#include <net/dst.h>
#include <net/flow.h>
+#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/inetpeer.h>
#include <net/fib_notifier.h>
@@ -50,7 +51,8 @@ struct fib6_config {
u32 fc_protocol;
u16 fc_type; /* only 8 bits are used */
u16 fc_delete_all_nh : 1,
- __unused : 15;
+ fc_ignore_dev_down:1,
+ __unused : 14;
struct in6_addr fc_dst;
struct in6_addr fc_src;
@@ -124,13 +126,11 @@ struct rt6_exception {
#define FIB6_MAX_DEPTH 5
struct fib6_nh {
- struct in6_addr nh_gw;
- struct net_device *nh_dev;
- struct lwtunnel_state *nh_lwtstate;
+ struct fib_nh_common nh_common;
- unsigned int nh_flags;
- atomic_t nh_upper_bound;
- int nh_weight;
+#ifdef CONFIG_IPV6_ROUTER_PREF
+ unsigned long last_probe;
+#endif
};
struct fib6_info {
@@ -146,7 +146,7 @@ struct fib6_info {
struct list_head fib6_siblings;
unsigned int fib6_nsiblings;
- atomic_t fib6_ref;
+ refcount_t fib6_ref;
unsigned long expires;
struct dst_metrics *fib6_metrics;
#define fib6_pmtu fib6_metrics->metrics[RTAX_MTU-1]
@@ -159,10 +159,6 @@ struct fib6_info {
struct rt6_info * __percpu *rt6i_pcpu;
struct rt6_exception_bucket __rcu *rt6i_exception_bucket;
-#ifdef CONFIG_IPV6_ROUTER_PREF
- unsigned long last_probe;
-#endif
-
u32 fib6_metric;
u8 fib6_protocol;
u8 fib6_type;
@@ -194,6 +190,14 @@ struct rt6_info {
unsigned short rt6i_nfheader_len;
};
+struct fib6_result {
+ struct fib6_nh *nh;
+ struct fib6_info *f6i;
+ u32 fib6_flags;
+ u8 fib6_type;
+ struct rt6_info *rt6;
+};
+
#define for_each_fib6_node_rt_rcu(fn) \
for (rt = rcu_dereference((fn)->leaf); rt; \
rt = rcu_dereference(rt->fib6_next))
@@ -281,17 +285,17 @@ void fib6_info_destroy_rcu(struct rcu_head *head);
static inline void fib6_info_hold(struct fib6_info *f6i)
{
- atomic_inc(&f6i->fib6_ref);
+ refcount_inc(&f6i->fib6_ref);
}
static inline bool fib6_info_hold_safe(struct fib6_info *f6i)
{
- return atomic_inc_not_zero(&f6i->fib6_ref);
+ return refcount_inc_not_zero(&f6i->fib6_ref);
}
static inline void fib6_info_release(struct fib6_info *f6i)
{
- if (f6i && atomic_dec_and_test(&f6i->fib6_ref))
+ if (f6i && refcount_dec_and_test(&f6i->fib6_ref))
call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
}
@@ -388,18 +392,17 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
/* called with rcu lock held; can return error pointer
* caller needs to select path
*/
-struct fib6_info *fib6_lookup(struct net *net, int oif, struct flowi6 *fl6,
- int flags);
+int fib6_lookup(struct net *net, int oif, struct flowi6 *fl6,
+ struct fib6_result *res, int flags);
/* called with rcu lock held; caller needs to select path */
-struct fib6_info *fib6_table_lookup(struct net *net, struct fib6_table *table,
- int oif, struct flowi6 *fl6, int strict);
-
-struct fib6_info *fib6_multipath_select(const struct net *net,
- struct fib6_info *match,
- struct flowi6 *fl6, int oif,
- const struct sk_buff *skb, int strict);
+int fib6_table_lookup(struct net *net, struct fib6_table *table,
+ int oif, struct flowi6 *fl6, struct fib6_result *res,
+ int strict);
+void fib6_select_path(const struct net *net, struct fib6_result *res,
+ struct flowi6 *fl6, int oif, bool have_oif_match,
+ const struct sk_buff *skb, int strict);
struct fib6_node *fib6_node_lookup(struct fib6_node *root,
const struct in6_addr *daddr,
const struct in6_addr *saddr);
@@ -440,14 +443,13 @@ void rt6_get_prefsrc(const struct rt6_info *rt, struct in6_addr *addr)
static inline struct net_device *fib6_info_nh_dev(const struct fib6_info *f6i)
{
- return f6i->fib6_nh.nh_dev;
+ return f6i->fib6_nh.fib_nh_dev;
}
-static inline
-struct lwtunnel_state *fib6_info_nh_lwt(const struct fib6_info *f6i)
-{
- return f6i->fib6_nh.nh_lwtstate;
-}
+int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
+ struct fib6_config *cfg, gfp_t gfp_flags,
+ struct netlink_ext_ack *extack);
+void fib6_nh_release(struct fib6_nh *fib6_nh);
void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
unsigned int flags);
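Callers of the reworked lookup functions now get both the matched fib6_info and the selected nexthop back through struct fib6_result. A rough sketch of the new calling convention, with error handling trimmed and ip6_mtu_from_fib6() used as the consumer:

static int my_route_mtu(struct net *net, int oif, struct flowi6 *fl6,
			const struct in6_addr *daddr,
			const struct in6_addr *saddr, u32 *mtu)
{
	struct fib6_result res = {};
	int err;

	rcu_read_lock();
	err = fib6_lookup(net, oif, fl6, &res, 0);
	/* real callers also reject the null entry and pick a multipath
	 * route via fib6_select_path(); omitted here for brevity
	 */
	if (!err && res.f6i && res.nh)
		*mtu = ip6_mtu_from_fib6(&res, daddr, saddr);
	rcu_read_unlock();

	return err;
}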
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 7ab119936e69..4790beaa86e0 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -68,8 +68,9 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i)
{
- return (f6i->fib6_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
- RTF_GATEWAY;
+ /* the RTF_ADDRCONF flag filters out RAs */
+ return !(f6i->fib6_flags & RTF_ADDRCONF) &&
+ f6i->fib6_nh.fib_nh_gw_family;
}
void ip6_route_input(struct sk_buff *skb);
@@ -181,7 +182,7 @@ int rt6_dump_route(struct fib6_info *f6i, void *p_arg);
void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
-void rt6_sync_up(struct net_device *dev, unsigned int nh_flags);
+void rt6_sync_up(struct net_device *dev, unsigned char nh_flags);
void rt6_disable_ip(struct net_device *dev, unsigned long event);
void rt6_sync_down_dev(struct net_device *dev, unsigned long event);
void rt6_multipath_rebalance(struct fib6_info *f6i);
@@ -274,9 +275,11 @@ static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt,
static inline bool rt6_duplicate_nexthop(struct fib6_info *a, struct fib6_info *b)
{
- return a->fib6_nh.nh_dev == b->fib6_nh.nh_dev &&
- ipv6_addr_equal(&a->fib6_nh.nh_gw, &b->fib6_nh.nh_gw) &&
- !lwtunnel_cmp_encap(a->fib6_nh.nh_lwtstate, b->fib6_nh.nh_lwtstate);
+ struct fib6_nh *nha = &a->fib6_nh, *nhb = &b->fib6_nh;
+
+ return nha->fib_nh_dev == nhb->fib_nh_dev &&
+ ipv6_addr_equal(&nha->fib_nh_gw6, &nhb->fib_nh_gw6) &&
+ !lwtunnel_cmp_encap(nha->fib_nh_lws, nhb->fib_nh_lws);
}
static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
@@ -300,8 +303,9 @@ static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
return mtu;
}
-u32 ip6_mtu_from_fib6(struct fib6_info *f6i, struct in6_addr *daddr,
- struct in6_addr *saddr);
+u32 ip6_mtu_from_fib6(const struct fib6_result *res,
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr);
struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
struct net_device *dev, struct sk_buff *skb,
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 9c8214d2116d..d0e28f4ab099 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -32,10 +32,14 @@ struct fib_config {
u8 fc_protocol;
u8 fc_scope;
u8 fc_type;
- /* 3 bytes unused */
+ u8 fc_gw_family;
+ /* 2 bytes unused */
u32 fc_table;
__be32 fc_dst;
- __be32 fc_gw;
+ union {
+ __be32 fc_gw4;
+ struct in6_addr fc_gw6;
+ };
int fc_oif;
u32 fc_flags;
u32 fc_priority;
@@ -76,27 +80,49 @@ struct fnhe_hash_bucket {
#define FNHE_HASH_SIZE (1 << FNHE_HASH_SHIFT)
#define FNHE_RECLAIM_DEPTH 5
+struct fib_nh_common {
+ struct net_device *nhc_dev;
+ int nhc_oif;
+ unsigned char nhc_scope;
+ u8 nhc_family;
+ u8 nhc_gw_family;
+ unsigned char nhc_flags;
+ struct lwtunnel_state *nhc_lwtstate;
+
+ union {
+ __be32 ipv4;
+ struct in6_addr ipv6;
+ } nhc_gw;
+
+ int nhc_weight;
+ atomic_t nhc_upper_bound;
+
+ /* v4 specific, but allows fib6_nh with v4 routes */
+ struct rtable __rcu * __percpu *nhc_pcpu_rth_output;
+ struct rtable __rcu *nhc_rth_input;
+ struct fnhe_hash_bucket __rcu *nhc_exceptions;
+};
+
struct fib_nh {
- struct net_device *nh_dev;
+ struct fib_nh_common nh_common;
struct hlist_node nh_hash;
struct fib_info *nh_parent;
- unsigned int nh_flags;
- unsigned char nh_scope;
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
- int nh_weight;
- atomic_t nh_upper_bound;
-#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
__u32 nh_tclassid;
#endif
- int nh_oif;
- __be32 nh_gw;
__be32 nh_saddr;
int nh_saddr_genid;
- struct rtable __rcu * __percpu *nh_pcpu_rth_output;
- struct rtable __rcu *nh_rth_input;
- struct fnhe_hash_bucket __rcu *nh_exceptions;
- struct lwtunnel_state *nh_lwtstate;
+#define fib_nh_family nh_common.nhc_family
+#define fib_nh_dev nh_common.nhc_dev
+#define fib_nh_oif nh_common.nhc_oif
+#define fib_nh_flags nh_common.nhc_flags
+#define fib_nh_lws nh_common.nhc_lwtstate
+#define fib_nh_scope nh_common.nhc_scope
+#define fib_nh_gw_family nh_common.nhc_gw_family
+#define fib_nh_gw4 nh_common.nhc_gw.ipv4
+#define fib_nh_gw6 nh_common.nhc_gw.ipv6
+#define fib_nh_weight nh_common.nhc_weight
+#define fib_nh_upper_bound nh_common.nhc_upper_bound
};
/*
@@ -123,9 +149,10 @@ struct fib_info {
#define fib_rtt fib_metrics->metrics[RTAX_RTT-1]
#define fib_advmss fib_metrics->metrics[RTAX_ADVMSS-1]
int fib_nhs;
+ bool fib_nh_is_v6;
struct rcu_head rcu;
struct fib_nh fib_nh[0];
-#define fib_dev fib_nh[0].nh_dev
+#define fib_dev fib_nh[0].fib_nh_dev
};
@@ -135,15 +162,16 @@ struct fib_rule;
struct fib_table;
struct fib_result {
- __be32 prefix;
- unsigned char prefixlen;
- unsigned char nh_sel;
- unsigned char type;
- unsigned char scope;
- u32 tclassid;
- struct fib_info *fi;
- struct fib_table *table;
- struct hlist_head *fa_head;
+ __be32 prefix;
+ unsigned char prefixlen;
+ unsigned char nh_sel;
+ unsigned char type;
+ unsigned char scope;
+ u32 tclassid;
+ struct fib_nh_common *nhc;
+ struct fib_info *fi;
+ struct fib_table *table;
+ struct hlist_head *fa_head;
};
struct fib_result_nl {
@@ -161,11 +189,10 @@ struct fib_result_nl {
int err;
};
-#ifdef CONFIG_IP_ROUTE_MULTIPATH
-#define FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel])
-#else /* CONFIG_IP_ROUTE_MULTIPATH */
-#define FIB_RES_NH(res) ((res).fi->fib_nh[0])
-#endif /* CONFIG_IP_ROUTE_MULTIPATH */
+static inline struct fib_nh_common *fib_info_nhc(struct fib_info *fi, int nhsel)
+{
+ return &fi->fib_nh[nhsel].nh_common;
+}
#ifdef CONFIG_IP_MULTIPLE_TABLES
#define FIB_TABLE_HASHSZ 256
@@ -174,18 +201,11 @@ struct fib_result_nl {
#endif
__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
+__be32 fib_result_prefsrc(struct net *net, struct fib_result *res);
-#define FIB_RES_SADDR(net, res) \
- ((FIB_RES_NH(res).nh_saddr_genid == \
- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
- FIB_RES_NH(res).nh_saddr : \
- fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
-#define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
-#define FIB_RES_DEV(res) (FIB_RES_NH(res).nh_dev)
-#define FIB_RES_OIF(res) (FIB_RES_NH(res).nh_oif)
-
-#define FIB_RES_PREFSRC(net, res) ((res).fi->fib_prefsrc ? : \
- FIB_RES_SADDR(net, res))
+#define FIB_RES_NHC(res) ((res).nhc)
+#define FIB_RES_DEV(res) (FIB_RES_NHC(res)->nhc_dev)
+#define FIB_RES_OIF(res) (FIB_RES_NHC(res)->nhc_oif)
struct fib_entry_notifier_info {
struct fib_notifier_info info; /* must be first */
@@ -383,6 +403,8 @@ static inline bool fib4_rules_early_flow_dissect(struct net *net,
/* Exported by fib_frontend.c */
extern const struct nla_policy rtm_ipv4_policy[];
void ip_fib_init(void);
+int fib_gw_from_via(struct fib_config *cfg, struct nlattr *nla,
+ struct netlink_ext_ack *extack);
__be32 fib_compute_spec_dst(struct sk_buff *skb);
bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev);
int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
@@ -405,7 +427,7 @@ int fib_unmerge(struct net *net);
int ip_fib_check_default(__be32 gw, struct net_device *dev);
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
int fib_sync_down_addr(struct net_device *dev, __be32 local);
-int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
+int fib_sync_up(struct net_device *dev, unsigned char nh_flags);
void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -416,6 +438,15 @@ void fib_select_multipath(struct fib_result *res, int hash);
void fib_select_path(struct net *net, struct fib_result *res,
struct flowi4 *fl4, const struct sk_buff *skb);
+int fib_nh_init(struct net *net, struct fib_nh *fib_nh,
+ struct fib_config *cfg, int nh_weight,
+ struct netlink_ext_ack *extack);
+void fib_nh_release(struct net *net, struct fib_nh *fib_nh);
+int fib_nh_common_init(struct fib_nh_common *nhc, struct nlattr *fc_encap,
+ u16 fc_encap_type, void *cfg, gfp_t gfp_flags,
+ struct netlink_ext_ack *extack);
+void fib_nh_common_release(struct fib_nh_common *nhc);
+
/* Exported by fib_trie.c */
void fib_trie_init(void);
struct fib_table *fib_trie_table(u32 id, struct fib_table *alias);
@@ -423,10 +454,12 @@ struct fib_table *fib_trie_table(u32 id, struct fib_table *alias);
static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
+ struct fib_nh_common *nhc = res->nhc;
+ struct fib_nh *nh = container_of(nhc, struct fib_nh, nh_common);
#ifdef CONFIG_IP_MULTIPLE_TABLES
u32 rtag;
#endif
- *itag = FIB_RES_NH(*res).nh_tclassid<<16;
+ *itag = nh->nh_tclassid << 16;
#ifdef CONFIG_IP_MULTIPLE_TABLES
rtag = res->tclassid;
if (*itag == 0)
@@ -467,4 +500,9 @@ u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr);
int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
struct fib_dump_filter *filter,
struct netlink_callback *cb);
+
+int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh,
+ unsigned char *flags, bool skip_oif);
+int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh,
+ int nh_weight);
#endif /* _NET_FIB_H */
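On the IPv4 side, code that used the removed FIB_RES_NH()/FIB_RES_GW() macros now goes through the generic nexthop. A hedged sketch of the new access pattern; the pr_info() calls are purely illustrative:

static void my_dump_route(struct net *net, struct flowi4 *fl4)
{
	struct fib_result res;

	rcu_read_lock();
	if (!fib_lookup(net, fl4, &res, 0)) {
		const struct fib_nh_common *nhc = FIB_RES_NHC(res);

		pr_info("oif %d dev %s\n", nhc->nhc_oif,
			nhc->nhc_dev ? nhc->nhc_dev->name : "none");
		if (nhc->nhc_gw_family == AF_INET)	/* was FIB_RES_GW() */
			pr_info("gw %pI4\n", &nhc->nhc_gw.ipv4);
	}
	rcu_read_unlock();
}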
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 047f9a5ccaad..2ac40135b576 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -600,6 +600,9 @@ struct ip_vs_dest_user_kern {
/* Address family of addr */
u16 af;
+
+ u16 tun_type; /* tunnel type */
+ __be16 tun_port; /* tunnel port */
};
@@ -660,6 +663,8 @@ struct ip_vs_dest {
atomic_t conn_flags; /* flags to copy to conn */
atomic_t weight; /* server weight */
atomic_t last_weight; /* server latest weight */
+ __u16 tun_type; /* tunnel type */
+ __be16 tun_port; /* tunnel port */
refcount_t refcnt; /* reference counter */
struct ip_vs_stats stats; /* statistics */
diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
index 28aa9b30aece..1f77fb4dc79d 100644
--- a/include/net/ipv6_frag.h
+++ b/include/net/ipv6_frag.h
@@ -94,7 +94,6 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
goto out;
head->dev = dev;
- skb_get(head);
spin_unlock(&fq->q.lock);
icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
new file mode 100644
index 000000000000..6c0c4fde16f8
--- /dev/null
+++ b/include/net/ipv6_stubs.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _IPV6_STUBS_H
+#define _IPV6_STUBS_H
+
+#include <linux/in6.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <net/dst.h>
+#include <net/flow.h>
+#include <net/neighbour.h>
+#include <net/sock.h>
+
+/* structs from net/ip6_fib.h */
+struct fib6_info;
+struct fib6_nh;
+struct fib6_config;
+struct fib6_result;
+
+/* This is ugly, ideally these symbols should be built
+ * into the core kernel.
+ */
+struct ipv6_stub {
+ int (*ipv6_sock_mc_join)(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+ int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+ int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
+ struct dst_entry **dst, struct flowi6 *fl6);
+ int (*ipv6_route_input)(struct sk_buff *skb);
+
+ struct fib6_table *(*fib6_get_table)(struct net *net, u32 id);
+ int (*fib6_lookup)(struct net *net, int oif, struct flowi6 *fl6,
+ struct fib6_result *res, int flags);
+ int (*fib6_table_lookup)(struct net *net, struct fib6_table *table,
+ int oif, struct flowi6 *fl6,
+ struct fib6_result *res, int flags);
+ void (*fib6_select_path)(const struct net *net, struct fib6_result *res,
+ struct flowi6 *fl6, int oif, bool oif_match,
+ const struct sk_buff *skb, int strict);
+ u32 (*ip6_mtu_from_fib6)(const struct fib6_result *res,
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr);
+
+ int (*fib6_nh_init)(struct net *net, struct fib6_nh *fib6_nh,
+ struct fib6_config *cfg, gfp_t gfp_flags,
+ struct netlink_ext_ack *extack);
+ void (*fib6_nh_release)(struct fib6_nh *fib6_nh);
+ void (*udpv6_encap_enable)(void);
+ void (*ndisc_send_na)(struct net_device *dev, const struct in6_addr *daddr,
+ const struct in6_addr *solicited_addr,
+ bool router, bool solicited, bool override, bool inc_opt);
+ struct neigh_table *nd_tbl;
+};
+extern const struct ipv6_stub *ipv6_stub __read_mostly;
+
+/* A stub used by bpf helpers. Similarly ugly as ipv6_stub */
+struct ipv6_bpf_stub {
+ int (*inet6_bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len,
+ bool force_bind_address_no_port, bool with_lock);
+ struct sock *(*udp6_lib_lookup)(struct net *net,
+ const struct in6_addr *saddr, __be16 sport,
+ const struct in6_addr *daddr, __be16 dport,
+ int dif, int sdif, struct udp_table *tbl,
+ struct sk_buff *skb);
+};
+extern const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
+
+#endif
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index 671113bcb2cc..5d6c5b1fc695 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -118,8 +118,8 @@ int lwtunnel_build_state(u16 encap_type,
unsigned int family, const void *cfg,
struct lwtunnel_state **lws,
struct netlink_ext_ack *extack);
-int lwtunnel_fill_encap(struct sk_buff *skb,
- struct lwtunnel_state *lwtstate);
+int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate,
+ int encap_attr, int encap_type_attr);
int lwtunnel_get_encap_size(struct lwtunnel_state *lwtstate);
struct lwtunnel_state *lwtunnel_state_alloc(int hdr_len);
int lwtunnel_cmp_encap(struct lwtunnel_state *a, struct lwtunnel_state *b);
@@ -219,7 +219,8 @@ static inline int lwtunnel_build_state(u16 encap_type,
}
static inline int lwtunnel_fill_encap(struct sk_buff *skb,
- struct lwtunnel_state *lwtstate)
+ struct lwtunnel_state *lwtstate,
+ int encap_attr, int encap_type_attr)
{
return 0;
}
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index ac2ed8ec662b..72080d9d617e 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -807,6 +807,7 @@ enum mac80211_tx_info_flags {
* @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information
* @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame
* @IEEE80211_TX_CTRL_FAST_XMIT: This frame is going through the fast_xmit path
+ * @IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP: This frame skips mesh path lookup
*
* These flags are used in tx_info->control.flags.
*/
@@ -816,6 +817,7 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_RATE_INJECT = BIT(2),
IEEE80211_TX_CTRL_AMSDU = BIT(3),
IEEE80211_TX_CTRL_FAST_XMIT = BIT(4),
+ IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP = BIT(5),
};
/*
@@ -1697,6 +1699,7 @@ struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);
* @IEEE80211_KEY_FLAG_PUT_MIC_SPACE: This flag should be set by the driver for
* a TKIP key if it only requires MIC space. Do not set together with
* @IEEE80211_KEY_FLAG_GENERATE_MMIC on the same key.
+ * @IEEE80211_KEY_FLAG_NO_AUTO_TX: Key needs explicit Tx activation.
*/
enum ieee80211_key_flags {
IEEE80211_KEY_FLAG_GENERATE_IV_MGMT = BIT(0),
@@ -1708,6 +1711,7 @@ enum ieee80211_key_flags {
IEEE80211_KEY_FLAG_RX_MGMT = BIT(6),
IEEE80211_KEY_FLAG_RESERVE_TAILROOM = BIT(7),
IEEE80211_KEY_FLAG_PUT_MIC_SPACE = BIT(8),
+ IEEE80211_KEY_FLAG_NO_AUTO_TX = BIT(9),
};
/**
@@ -1888,6 +1892,24 @@ struct ieee80211_sta_rates {
};
/**
+ * struct ieee80211_sta_txpwr - station txpower configuration
+ *
+ * Used to configure txpower for station.
+ *
+ * @power: indicates the tx power, in dBm, to be used when sending data frames
+ * to the STA.
+ * @type: In particular, if TPC %type is NL80211_TX_POWER_LIMITED then the tx
+ * power will be less than or equal to the value specified from userspace,
+ * whereas if TPC %type is NL80211_TX_POWER_AUTOMATIC then it indicates the
+ * default tx power. NL80211_TX_POWER_FIXED is not a valid configuration
+ * option for per-peer TPC.
+ */
+struct ieee80211_sta_txpwr {
+ s16 power;
+ enum nl80211_tx_power_setting type;
+};
+
+/**
* struct ieee80211_sta - station table entry
*
* A station table entry represents a station we are possibly
@@ -1973,6 +1995,7 @@ struct ieee80211_sta {
bool support_p2p_ps;
u16 max_rc_amsdu_len;
u16 max_tid_amsdu_len[IEEE80211_NUM_TIDS];
+ struct ieee80211_sta_txpwr txpwr;
struct ieee80211_txq *txq[IEEE80211_NUM_TIDS + 1];
@@ -2243,6 +2266,9 @@ struct ieee80211_txq {
* @IEEE80211_HW_SUPPORTS_ONLY_HE_MULTI_BSSID: Hardware supports multi BSSID
* only for HE APs. Applies if @IEEE80211_HW_SUPPORTS_MULTI_BSSID is set.
*
+ * @IEEE80211_HW_EXT_KEY_ID_NATIVE: Driver and hardware are supporting Extended
+ * Key ID and can handle two unicast keys per station for Rx and Tx.
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2294,6 +2320,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_TX_STATUS_NO_AMPDU_LEN,
IEEE80211_HW_SUPPORTS_MULTI_BSSID,
IEEE80211_HW_SUPPORTS_ONLY_HE_MULTI_BSSID,
+ IEEE80211_HW_EXT_KEY_ID_NATIVE,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -3794,6 +3821,9 @@ struct ieee80211_ops {
#endif
void (*sta_notify)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
enum sta_notify_cmd, struct ieee80211_sta *sta);
+ int (*sta_set_txpwr)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
int (*sta_state)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
enum ieee80211_sta_state old_state,
@@ -6231,8 +6261,6 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
* @hw: pointer as obtained from ieee80211_alloc_hw()
* @ac: AC number to return packets from.
*
- * Should only be called between calls to ieee80211_txq_schedule_start()
- * and ieee80211_txq_schedule_end().
* Returns the next txq if successful, %NULL if no queue is eligible. If a txq
* is returned, it should be returned with ieee80211_return_txq() after the
* driver has finished scheduling it.
@@ -6240,51 +6268,58 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac);
/**
- * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
- *
- * @hw: pointer as obtained from ieee80211_alloc_hw()
- * @txq: pointer obtained from station or virtual interface
- *
- * Should only be called between calls to ieee80211_txq_schedule_start()
- * and ieee80211_txq_schedule_end().
- */
-void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
-
-/**
- * ieee80211_txq_schedule_start - acquire locks for safe scheduling of an AC
+ * ieee80211_txq_schedule_start - start new scheduling round for TXQs
*
* @hw: pointer as obtained from ieee80211_alloc_hw()
* @ac: AC number to acquire locks for
*
- * Acquire locks needed to schedule TXQs from the given AC. Should be called
- * before ieee80211_next_txq() or ieee80211_return_txq().
+ * Should be called before ieee80211_next_txq() or ieee80211_return_txq().
+ * The driver must not call multiple TXQ scheduling rounds concurrently.
*/
-void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
- __acquires(txq_lock);
+void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac);
+
+/* (deprecated) */
+static inline void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
+{
+}
+
+void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq, bool force);
/**
- * ieee80211_txq_schedule_end - release locks for safe scheduling of an AC
+ * ieee80211_schedule_txq - schedule a TXQ for transmission
*
* @hw: pointer as obtained from ieee80211_alloc_hw()
- * @ac: AC number to acquire locks for
+ * @txq: pointer obtained from station or virtual interface
+ *
+ * Schedules a TXQ for transmission if it is not already scheduled,
+ * even if mac80211 does not have any packets buffered.
*
- * Release locks previously acquired by ieee80211_txq_schedule_end().
+ * The driver may call this function if it has buffered packets for
+ * this TXQ internally.
*/
-void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
- __releases(txq_lock);
+static inline void
+ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+{
+ __ieee80211_schedule_txq(hw, txq, true);
+}
/**
- * ieee80211_schedule_txq - schedule a TXQ for transmission
+ * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
*
* @hw: pointer as obtained from ieee80211_alloc_hw()
* @txq: pointer obtained from station or virtual interface
+ * @force: schedule txq even if mac80211 does not have any buffered packets.
*
- * Schedules a TXQ for transmission if it is not already scheduled. Takes a
- * lock, which means it must *not* be called between
- * ieee80211_txq_schedule_start() and ieee80211_txq_schedule_end()
+ * The driver may set force=true if it has buffered packets for this TXQ
+ * internally.
*/
-void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
- __acquires(txq_lock) __releases(txq_lock);
+static inline void
+ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
+ bool force)
+{
+ __ieee80211_schedule_txq(hw, txq, force);
+}
/**
* ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit
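From the driver side, the lock annotations disappear and the decision to keep a TXQ on the schedule moves into the force argument of ieee80211_return_txq(). A rough per-AC scheduling loop; my_drv_tx() and my_drv_has_backlog() are invented driver helpers:

/* hypothetical driver helpers */
void my_drv_tx(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
	       struct sk_buff *skb);
bool my_drv_has_backlog(struct ieee80211_txq *txq);

static void my_drv_kick_ac(struct ieee80211_hw *hw, u8 ac)
{
	struct ieee80211_txq *txq;

	ieee80211_txq_schedule_start(hw, ac);

	while ((txq = ieee80211_next_txq(hw, ac))) {
		struct sk_buff *skb;

		while ((skb = ieee80211_tx_dequeue(hw, txq)))
			my_drv_tx(hw, txq, skb);

		/* keep the TXQ scheduled only if frames are still queued
		 * inside the driver for it
		 */
		ieee80211_return_txq(hw, txq, my_drv_has_backlog(txq));
	}

	ieee80211_txq_schedule_end(hw, ac);
}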
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index ddfbb591e2c5..366150053043 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -2,6 +2,8 @@
#ifndef _NDISC_H
#define _NDISC_H
+#include <net/ipv6_stubs.h>
+
/*
* ICMP codes for neighbour discovery messages
*/
@@ -379,6 +381,14 @@ static inline struct neighbour *__ipv6_neigh_lookup_noref(struct net_device *dev
return ___neigh_lookup_noref(&nd_tbl, neigh_key_eq128, ndisc_hashfn, pkey, dev);
}
+static inline
+struct neighbour *__ipv6_neigh_lookup_noref_stub(struct net_device *dev,
+ const void *pkey)
+{
+ return ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128,
+ ndisc_hashfn, pkey, dev);
+}
+
static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, const void *pkey)
{
struct neighbour *n;
@@ -409,6 +419,36 @@ static inline void __ipv6_confirm_neigh(struct net_device *dev,
rcu_read_unlock_bh();
}
+static inline void __ipv6_confirm_neigh_stub(struct net_device *dev,
+ const void *pkey)
+{
+ struct neighbour *n;
+
+ rcu_read_lock_bh();
+ n = __ipv6_neigh_lookup_noref_stub(dev, pkey);
+ if (n) {
+ unsigned long now = jiffies;
+
+ /* avoid dirtying neighbour */
+ if (n->confirmed != now)
+ n->confirmed = now;
+ }
+ rcu_read_unlock_bh();
+}
+
+/* uses ipv6_stub and is meant for use outside of IPv6 core */
+static inline struct neighbour *ip_neigh_gw6(struct net_device *dev,
+ const void *addr)
+{
+ struct neighbour *neigh;
+
+ neigh = __ipv6_neigh_lookup_noref_stub(dev, addr);
+ if (unlikely(!neigh))
+ neigh = __neigh_create(ipv6_stub->nd_tbl, addr, dev, false);
+
+ return neigh;
+}
+
int ndisc_init(void);
int ndisc_late_init(void);
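ip_neigh_gw6() gives code outside the IPv6 core a way to resolve (or create) the neighbour entry for a v6 gateway through ipv6_stub. A minimal hedged sketch of using it on an output path; the surrounding function is invented:

static int my_output_via_gw6(struct net_device *dev, struct sk_buff *skb,
			     const struct in6_addr *gw)
{
	struct neighbour *neigh;
	int ret;

	rcu_read_lock_bh();
	neigh = ip_neigh_gw6(dev, gw);
	if (IS_ERR(neigh)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		return PTR_ERR(neigh);
	}
	/* third argument: do not skip the cached hh header */
	ret = neigh_output(neigh, skb, false);
	rcu_read_unlock_bh();

	return ret;
}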
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 7c1ab9edba03..50a67bd6a434 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -205,6 +205,8 @@ struct neigh_table {
int (*pconstructor)(struct pneigh_entry *);
void (*pdestructor)(struct pneigh_entry *);
void (*proxy_redo)(struct sk_buff *skb);
+ bool (*allow_add)(const struct net_device *dev,
+ struct netlink_ext_ack *extack);
char *id;
struct neigh_parms parms;
struct list_head parms_list;
@@ -498,11 +500,12 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
return dev_queue_xmit(skb);
}
-static inline int neigh_output(struct neighbour *n, struct sk_buff *skb)
+static inline int neigh_output(struct neighbour *n, struct sk_buff *skb,
+ bool skip_cache)
{
const struct hh_cache *hh = &n->hh;
- if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
+ if ((n->nud_state & NUD_CONNECTED) && hh->hh_len && !skip_cache)
return neigh_hh_output(hh, skb);
else
return n->output(n, skb);
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index a68ced28d8f4..12689ddfc24c 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -59,6 +59,7 @@ struct net {
*/
spinlock_t rules_mod_lock;
+ u32 hash_mix;
atomic64_t cookie_gen;
struct list_head list; /* list of network namespaces */
diff --git a/include/net/netfilter/ipv4/nf_nat_masquerade.h b/include/net/netfilter/ipv4/nf_nat_masquerade.h
deleted file mode 100644
index 13d55206bb9f..000000000000
--- a/include/net/netfilter/ipv4/nf_nat_masquerade.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NF_NAT_MASQUERADE_IPV4_H_
-#define _NF_NAT_MASQUERADE_IPV4_H_
-
-#include <net/netfilter/nf_nat.h>
-
-unsigned int
-nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
- const struct nf_nat_range2 *range,
- const struct net_device *out);
-
-int nf_nat_masquerade_ipv4_register_notifier(void);
-void nf_nat_masquerade_ipv4_unregister_notifier(void);
-
-#endif /*_NF_NAT_MASQUERADE_IPV4_H_ */
diff --git a/include/net/netfilter/ipv6/nf_nat_masquerade.h b/include/net/netfilter/ipv6/nf_nat_masquerade.h
deleted file mode 100644
index 2917bf95c437..000000000000
--- a/include/net/netfilter/ipv6/nf_nat_masquerade.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NF_NAT_MASQUERADE_IPV6_H_
-#define _NF_NAT_MASQUERADE_IPV6_H_
-
-unsigned int
-nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
- const struct net_device *out);
-int nf_nat_masquerade_ipv6_register_notifier(void);
-void nf_nat_masquerade_ipv6_unregister_notifier(void);
-
-#endif /* _NF_NAT_MASQUERADE_IPV6_H_ */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 5ee7b30b4917..d2bc733a2ef1 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -316,6 +316,8 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
gfp_t flags);
void nf_ct_tmpl_free(struct nf_conn *tmpl);
+u32 nf_ct_get_id(const struct nf_conn *ct);
+
static inline void
nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info)
{
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 006e430d1cdf..93ce6b0daaba 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -48,7 +48,7 @@ struct nf_conntrack_expect {
/* Expectation class */
unsigned int class;
-#ifdef CONFIG_NF_NAT_NEEDED
+#if IS_ENABLED(CONFIG_NF_NAT)
union nf_inet_addr saved_addr;
/* This is the original per-proto part, used to map the
* expected connection the way the recipient expects. */
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index ec52a8dc32fd..44b5a00a9c64 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -15,6 +15,11 @@
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_expect.h>
+#define NF_NAT_HELPER_PREFIX "ip_nat_"
+#define NF_NAT_HELPER_NAME(name) NF_NAT_HELPER_PREFIX name
+#define MODULE_ALIAS_NF_NAT_HELPER(name) \
+ MODULE_ALIAS(NF_NAT_HELPER_NAME(name))
+
struct module;
enum nf_ct_helper_flags {
@@ -54,6 +59,8 @@ struct nf_conntrack_helper {
unsigned int queue_num;
/* length of userspace private data stored in nf_conn_help->data */
u16 data_len;
+ /* name of NAT helper module */
+ char nat_mod_name[NF_CT_HELPER_NAME_LEN];
};
/* Must be kept in sync with the classes defined by helpers */
@@ -153,4 +160,21 @@ nf_ct_helper_expectfn_find_by_symbol(const void *symbol);
extern struct hlist_head *nf_ct_helper_hash;
extern unsigned int nf_ct_helper_hsize;
+struct nf_conntrack_nat_helper {
+ struct list_head list;
+ char mod_name[NF_CT_HELPER_NAME_LEN]; /* module name */
+ struct module *module; /* pointer to self */
+};
+
+#define NF_CT_NAT_HELPER_INIT(name) \
+ { \
+ .mod_name = NF_NAT_HELPER_NAME(name), \
+ .module = THIS_MODULE \
+ }
+
+void nf_nat_helper_register(struct nf_conntrack_nat_helper *nat);
+void nf_nat_helper_unregister(struct nf_conntrack_nat_helper *nat);
+int nf_nat_helper_try_module_get(const char *name, u16 l3num,
+ u8 protonum);
+void nf_nat_helper_put(struct nf_conntrack_helper *helper);
#endif /*_NF_CONNTRACK_HELPER_H*/
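A NAT helper module wires these up by declaring a struct nf_conntrack_nat_helper and registering it, so that nf_nat_helper_try_module_get() can autoload it by name. A sketch modelled on what such a module would add, using "ftp" purely as an example name:

static struct nf_conntrack_nat_helper nat_helper_ftp =
	NF_CT_NAT_HELPER_INIT("ftp");

static int __init my_nat_ftp_init(void)
{
	nf_nat_helper_register(&nat_helper_ftp);
	/* ... register the protocol-specific mangling as before ... */
	return 0;
}

static void __exit my_nat_ftp_exit(void)
{
	nf_nat_helper_unregister(&nat_helper_ftp);
}

module_init(my_nat_ftp_init);
module_exit(my_nat_ftp_exit);

/* lets the conntrack core modprobe "ip_nat_ftp" on demand */
MODULE_ALIAS_NF_NAT_HELPER("ftp");
MODULE_LICENSE("GPL");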
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 778087591983..a49edfdf47e8 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -75,6 +75,12 @@ bool nf_conntrack_invert_icmp_tuple(struct nf_conntrack_tuple *tuple,
bool nf_conntrack_invert_icmpv6_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig);
+int nf_conntrack_inet_error(struct nf_conn *tmpl, struct sk_buff *skb,
+ unsigned int dataoff,
+ const struct nf_hook_state *state,
+ u8 l4proto,
+ union nf_inet_addr *outer_daddr);
+
int nf_conntrack_icmpv4_error(struct nf_conn *tmpl,
struct sk_buff *skb,
unsigned int dataoff,
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index 3394d75e1c80..00a8fbb2d735 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -88,6 +88,9 @@ static inline unsigned int *nf_ct_timeout_lookup(const struct nf_conn *ct)
int nf_conntrack_timeout_init(void);
void nf_conntrack_timeout_fini(void);
void nf_ct_untimeout(struct net *net, struct nf_ct_timeout *timeout);
+int nf_ct_set_timeout(struct net *net, struct nf_conn *ct, u8 l3num, u8 l4num,
+ const char *timeout_name);
+void nf_ct_destroy_timeout(struct nf_conn *ct);
#else
static inline int nf_conntrack_timeout_init(void)
{
@@ -98,6 +101,18 @@ static inline void nf_conntrack_timeout_fini(void)
{
return;
}
+
+static inline int nf_ct_set_timeout(struct net *net, struct nf_conn *ct,
+ u8 l3num, u8 l4num,
+ const char *timeout_name)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void nf_ct_destroy_timeout(struct nf_conn *ct)
+{
+ return;
+}
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index cf332c4e0b32..423cda2c6542 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -69,9 +69,9 @@ static inline bool nf_nat_oif_changed(unsigned int hooknum,
#endif
}
-int nf_nat_register_fn(struct net *net, const struct nf_hook_ops *ops,
+int nf_nat_register_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
const struct nf_hook_ops *nat_ops, unsigned int ops_count);
-void nf_nat_unregister_fn(struct net *net, const struct nf_hook_ops *ops,
+void nf_nat_unregister_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
unsigned int ops_count);
unsigned int nf_nat_packet(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
@@ -98,6 +98,9 @@ void nf_nat_ipv4_unregister_fn(struct net *net, const struct nf_hook_ops *ops);
int nf_nat_ipv6_register_fn(struct net *net, const struct nf_hook_ops *ops);
void nf_nat_ipv6_unregister_fn(struct net *net, const struct nf_hook_ops *ops);
+int nf_nat_inet_register_fn(struct net *net, const struct nf_hook_ops *ops);
+void nf_nat_inet_unregister_fn(struct net *net, const struct nf_hook_ops *ops);
+
unsigned int
nf_nat_inet_fn(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state);
diff --git a/include/net/netfilter/nf_nat_masquerade.h b/include/net/netfilter/nf_nat_masquerade.h
new file mode 100644
index 000000000000..54a14d643c34
--- /dev/null
+++ b/include/net/netfilter/nf_nat_masquerade.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NF_NAT_MASQUERADE_H_
+#define _NF_NAT_MASQUERADE_H_
+
+#include <net/netfilter/nf_nat.h>
+
+unsigned int
+nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
+ const struct nf_nat_range2 *range,
+ const struct net_device *out);
+
+int nf_nat_masquerade_inet_register_notifiers(void);
+void nf_nat_masquerade_inet_unregister_notifiers(void);
+
+unsigned int
+nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
+ const struct net_device *out);
+
+#endif /*_NF_NAT_MASQUERADE_H_ */
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index a50a69f5334c..7239105d9d2e 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -119,4 +119,7 @@ nfqueue_hash(const struct sk_buff *skb, u16 queue, u16 queues_total, u8 family,
return queue;
}
+int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
+ const struct nf_hook_entries *entries, unsigned int index,
+ unsigned int verdict);
#endif /* _NF_QUEUE_H */
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 3e9ab643eedf..5b8624ae4a27 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -2,7 +2,6 @@
#ifndef _NET_NF_TABLES_H
#define _NET_NF_TABLES_H
-#include <linux/module.h>
#include <linux/list.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nfnetlink.h>
@@ -13,6 +12,8 @@
#include <net/netfilter/nf_flow_table.h>
#include <net/netlink.h>
+struct module;
+
#define NFT_JUMP_STACK_SIZE 16
struct nft_pktinfo {
@@ -475,8 +476,6 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
enum nft_trans_phase phase);
int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding);
-void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
- struct nft_set_binding *binding, bool commit);
void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
/**
@@ -808,23 +807,6 @@ void nft_expr_destroy(const struct nft_ctx *ctx, struct nft_expr *expr);
int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
const struct nft_expr *expr);
-static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
-{
- int err;
-
- if (src->ops->clone) {
- dst->ops = src->ops;
- err = src->ops->clone(dst, src);
- if (err < 0)
- return err;
- } else {
- memcpy(dst, src, src->ops->size);
- }
-
- __module_get(src->ops->type->owner);
- return 0;
-}
-
/**
* struct nft_rule - nf_tables rule
*
@@ -1411,4 +1393,6 @@ struct nft_trans_flowtable {
int __init nft_chain_filter_init(void);
void nft_chain_filter_fini(void);
+void __init nft_chain_route_init(void);
+void nft_chain_route_fini(void);
#endif /* _NET_NF_TABLES_H */
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 23f27b0b3cef..395b4406f4b0 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -183,6 +183,7 @@ enum {
NLA_REJECT,
NLA_EXACT_LEN,
NLA_EXACT_LEN_WARN,
+ NLA_MIN_LEN,
__NLA_TYPE_MAX,
};
@@ -212,6 +213,7 @@ enum nla_policy_validation {
* NLA_NUL_STRING Maximum length of string (excluding NUL)
* NLA_FLAG Unused
* NLA_BINARY Maximum length of attribute payload
+ * NLA_MIN_LEN Minimum length of attribute payload
* NLA_NESTED,
* NLA_NESTED_ARRAY Length verification is done by checking len of
* nested header (or empty); len field is used if
@@ -230,6 +232,7 @@ enum nla_policy_validation {
* it is rejected.
* NLA_EXACT_LEN_WARN Attribute should have exactly this length, a warning
* is logged if it is longer, shorter is rejected.
+ * NLA_MIN_LEN Minimum length of attribute payload
* All other Minimum length of attribute payload
*
* Meaning of `validation_data' field:
@@ -281,7 +284,7 @@ enum nla_policy_validation {
* static const struct nla_policy my_policy[ATTR_MAX+1] = {
* [ATTR_FOO] = { .type = NLA_U16 },
* [ATTR_BAR] = { .type = NLA_STRING, .len = BARSIZ },
- * [ATTR_BAZ] = { .len = sizeof(struct mystruct) },
+ * [ATTR_BAZ] = { .type = NLA_EXACT_LEN, .len = sizeof(struct mystruct) },
* [ATTR_GOO] = { .type = NLA_BITFIELD32, .validation_data = &myvalidflags },
* };
*/
@@ -296,12 +299,31 @@ struct nla_policy {
};
int (*validate)(const struct nlattr *attr,
struct netlink_ext_ack *extack);
+ /* This entry is special: it is used only for the attribute at
+ * index 0 and carries special data about the policy, namely the
+ * "boundary type" at which strict length validation starts for
+ * any attribute types >= this value; strict nesting validation
+ * also starts here.
+ *
+ * Additionally, it means that NLA_UNSPEC is actually NLA_REJECT
+ * for any types >= this, so NLA_MIN_LEN must be used to get the
+ * previous pure { .len = xyz } behaviour. The advantage of this
+ * is that types not specified in the policy will be rejected.
+ *
+ * For completely new families it should be set to 1 so that
+ * validation is enforced for all attributes. For existing ones
+ * it should be set at least when new attributes are added to
+ * the enum used by the policy, and raised to the new value to
+ * enforce strict validation from then on.
+ */
+ u16 strict_start_type;
};
};
#define NLA_POLICY_EXACT_LEN(_len) { .type = NLA_EXACT_LEN, .len = _len }
#define NLA_POLICY_EXACT_LEN_WARN(_len) { .type = NLA_EXACT_LEN_WARN, \
.len = _len }
+#define NLA_POLICY_MIN_LEN(_len) { .type = NLA_MIN_LEN, .len = _len }
#define NLA_POLICY_ETH_ADDR NLA_POLICY_EXACT_LEN(ETH_ALEN)
#define NLA_POLICY_ETH_ADDR_COMPAT NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN)
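Put together, a policy can pin a minimum payload length explicitly and declare from which attribute type onwards the strict rules apply. A hedged sketch with invented attribute names:

enum {
	MY_ATTR_UNSPEC,
	MY_ATTR_BLOB,	/* pre-existing attribute, length-checked only */
	MY_ATTR_FLAGS,	/* newly added attribute, strictly validated */
	__MY_ATTR_MAX,
};
#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
	/* strict validation kicks in for types >= MY_ATTR_FLAGS */
	[MY_ATTR_UNSPEC] = { .strict_start_type = MY_ATTR_FLAGS },
	/* explicit replacement for the old bare { .len = 16 } entry */
	[MY_ATTR_BLOB]	 = NLA_POLICY_MIN_LEN(16),
	[MY_ATTR_FLAGS]	 = { .type = NLA_U32 },
};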
@@ -365,21 +387,52 @@ struct nl_info {
bool skip_notify;
};
+/**
+ * enum netlink_validation - netlink message/attribute validation levels
+ * @NL_VALIDATE_LIBERAL: Old-style "be liberal" validation, not caring about
+ * extra data at the end of the message, attributes being longer than
+ * they should be, or unknown attributes being present.
+ * @NL_VALIDATE_TRAILING: Reject junk data encountered after attribute parsing.
+ * @NL_VALIDATE_MAXTYPE: Reject attributes > max type; together with _TRAILING
+ * this is equivalent to the old nla_parse_strict()/nlmsg_parse_strict().
+ * @NL_VALIDATE_UNSPEC: Reject attributes with NLA_UNSPEC in the policy.
+ * This can safely be set by the kernel when the given policy has no
+ * NLA_UNSPEC anymore, and can thus be used to ensure policy entries
+ * are enforced going forward.
+ * @NL_VALIDATE_STRICT_ATTRS: strict attribute policy parsing (e.g.
+ * U8, U16, U32 must have exact size, etc.)
+ * @NL_VALIDATE_NESTED: Check that NLA_F_NESTED is set for NLA_NESTED(_ARRAY)
+ * and unset for other policies.
+ */
+enum netlink_validation {
+ NL_VALIDATE_LIBERAL = 0,
+ NL_VALIDATE_TRAILING = BIT(0),
+ NL_VALIDATE_MAXTYPE = BIT(1),
+ NL_VALIDATE_UNSPEC = BIT(2),
+ NL_VALIDATE_STRICT_ATTRS = BIT(3),
+ NL_VALIDATE_NESTED = BIT(4),
+};
+
+#define NL_VALIDATE_DEPRECATED_STRICT (NL_VALIDATE_TRAILING |\
+ NL_VALIDATE_MAXTYPE)
+#define NL_VALIDATE_STRICT (NL_VALIDATE_TRAILING |\
+ NL_VALIDATE_MAXTYPE |\
+ NL_VALIDATE_UNSPEC |\
+ NL_VALIDATE_STRICT_ATTRS |\
+ NL_VALIDATE_NESTED)
+
int netlink_rcv_skb(struct sk_buff *skb,
int (*cb)(struct sk_buff *, struct nlmsghdr *,
struct netlink_ext_ack *));
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
unsigned int group, int report, gfp_t flags);
-int nla_validate(const struct nlattr *head, int len, int maxtype,
- const struct nla_policy *policy,
- struct netlink_ext_ack *extack);
-int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
- int len, const struct nla_policy *policy,
- struct netlink_ext_ack *extack);
-int nla_parse_strict(struct nlattr **tb, int maxtype, const struct nlattr *head,
- int len, const struct nla_policy *policy,
- struct netlink_ext_ack *extack);
+int __nla_validate(const struct nlattr *head, int len, int maxtype,
+ const struct nla_policy *policy, unsigned int validate,
+ struct netlink_ext_ack *extack);
+int __nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
+ int len, const struct nla_policy *policy, unsigned int validate,
+ struct netlink_ext_ack *extack);
int nla_policy_len(const struct nla_policy *, int);
struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype);
size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize);
@@ -508,42 +561,167 @@ nlmsg_next(const struct nlmsghdr *nlh, int *remaining)
}
/**
- * nlmsg_parse - parse attributes of a netlink message
+ * nla_parse - Parse a stream of attributes into a tb buffer
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @policy: validation policy
+ * @extack: extended ACK pointer
+ *
+ * Parses a stream of attributes and stores a pointer to each attribute in
+ * the tb array accessible via the attribute type. Attributes with a type
+ * exceeding maxtype will be rejected, the policy must be specified, and
+ * attributes will be validated in the strictest way possible.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static inline int nla_parse(struct nlattr **tb, int maxtype,
+ const struct nlattr *head, int len,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_parse(tb, maxtype, head, len, policy,
+ NL_VALIDATE_STRICT, extack);
+}
+
+/**
+ * nla_parse_deprecated - Parse a stream of attributes into a tb buffer
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @policy: validation policy
+ * @extack: extended ACK pointer
+ *
+ * Parses a stream of attributes and stores a pointer to each attribute in
+ * the tb array accessible via the attribute type. Attributes with a type
+ * exceeding maxtype will be ignored and attributes from the policy are not
+ * always strictly validated (strict checking applies only to new attributes).
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static inline int nla_parse_deprecated(struct nlattr **tb, int maxtype,
+ const struct nlattr *head, int len,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_parse(tb, maxtype, head, len, policy,
+ NL_VALIDATE_LIBERAL, extack);
+}
+
+/**
+ * nla_parse_deprecated_strict - Parse a stream of attributes into a tb buffer
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @policy: validation policy
+ * @extack: extended ACK pointer
+ *
+ * Parses a stream of attributes and stores a pointer to each attribute in
+ * the tb array accessible via the attribute type. Attributes with a type
+ * exceeding maxtype will be rejected, as will trailing data, but the
+ * policy is not validated with full strictness (only new attribute types
+ * are strictly validated).
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static inline int nla_parse_deprecated_strict(struct nlattr **tb, int maxtype,
+ const struct nlattr *head,
+ int len,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_parse(tb, maxtype, head, len, policy,
+ NL_VALIDATE_DEPRECATED_STRICT, extack);
+}
+
+/**
+ * __nlmsg_parse - parse attributes of a netlink message
* @nlh: netlink message header
* @hdrlen: length of family specific header
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
+ * @validate: validation strictness
* @extack: extended ACK report struct
*
* See nla_parse()
*/
-static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
- struct nlattr *tb[], int maxtype,
- const struct nla_policy *policy,
- struct netlink_ext_ack *extack)
+static inline int __nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
+ struct nlattr *tb[], int maxtype,
+ const struct nla_policy *policy,
+ unsigned int validate,
+ struct netlink_ext_ack *extack)
{
if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) {
NL_SET_ERR_MSG(extack, "Invalid header length");
return -EINVAL;
}
- return nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
- nlmsg_attrlen(nlh, hdrlen), policy, extack);
+ return __nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
+ nlmsg_attrlen(nlh, hdrlen), policy, validate,
+ extack);
}
-static inline int nlmsg_parse_strict(const struct nlmsghdr *nlh, int hdrlen,
- struct nlattr *tb[], int maxtype,
- const struct nla_policy *policy,
- struct netlink_ext_ack *extack)
+/**
+ * nlmsg_parse - parse attributes of a netlink message
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ * @extack: extended ACK report struct
+ *
+ * See nla_parse()
+ */
+static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
+ struct nlattr *tb[], int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
{
- if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) {
- NL_SET_ERR_MSG(extack, "Invalid header length");
- return -EINVAL;
- }
+ return __nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
+ nlmsg_attrlen(nlh, hdrlen), policy,
+ NL_VALIDATE_STRICT, extack);
+}
- return nla_parse_strict(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
- nlmsg_attrlen(nlh, hdrlen), policy, extack);
+/**
+ * nlmsg_parse_deprecated - parse attributes of a netlink message
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ * @extack: extended ACK report struct
+ *
+ * See nla_parse_deprecated()
+ */
+static inline int nlmsg_parse_deprecated(const struct nlmsghdr *nlh, int hdrlen,
+ struct nlattr *tb[], int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy,
+ NL_VALIDATE_LIBERAL, extack);
+}
+
+/**
+ * nlmsg_parse_deprecated_strict - parse attributes of a netlink message
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ * @extack: extended ACK report struct
+ *
+ * See nla_parse_deprecated_strict()
+ */
+static inline int
+nlmsg_parse_deprecated_strict(const struct nlmsghdr *nlh, int hdrlen,
+ struct nlattr *tb[], int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nlmsg_parse(nlh, hdrlen, tb, maxtype, policy,
+ NL_VALIDATE_DEPRECATED_STRICT, extack);
}
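For a message handler of the kind passed to netlink_rcv_skb(), a converted caller that wants to keep its historical, liberal behaviour would pick the _deprecated variant; a sketch reusing the hypothetical myfam names from above (struct myfam_hdr is likewise assumed):

static int myfam_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MYFAM_ATTR_MAX + 1];
	int err;

	/* Liberal mode: attribute types above MYFAM_ATTR_MAX and
	 * trailing data are ignored, as with the old nlmsg_parse().
	 */
	err = nlmsg_parse_deprecated(nlh, sizeof(struct myfam_hdr), tb,
				     MYFAM_ATTR_MAX, myfam_policy, extack);
	if (err < 0)
		return err;

	/* ... act on tb[] ... */
	return 0;
}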
/**
@@ -562,26 +740,75 @@ static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
}
/**
- * nlmsg_validate - validate a netlink message including attributes
+ * nla_validate_deprecated - Validate a stream of attributes
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ * @extack: extended ACK report struct
+ *
+ * Validates all attributes in the specified attribute stream against the
+ * specified policy. Validation is done in liberal mode.
+ * See documentation of struct nla_policy for more details.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static inline int nla_validate_deprecated(const struct nlattr *head, int len,
+ int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_validate(head, len, maxtype, policy, NL_VALIDATE_LIBERAL,
+ extack);
+}
+
+/**
+ * nla_validate - Validate a stream of attributes
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ * @extack: extended ACK report struct
+ *
+ * Validates all attributes in the specified attribute stream against the
+ * specified policy. Validation is done in strict mode.
+ * See documentation of struct nla_policy for more details.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static inline int nla_validate(const struct nlattr *head, int len, int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_validate(head, len, maxtype, policy, NL_VALIDATE_STRICT,
+ extack);
+}
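A short sketch of validating a stream without building a tb[] array, again with the hypothetical myfam policy from earlier:

static int myfam_check_attrs(const struct nlattr *head, int len,
			     struct netlink_ext_ack *extack)
{
	/* Strict validation only; the caller walks the stream itself,
	 * e.g. with nla_for_each_attr(), once this succeeds.
	 */
	return nla_validate(head, len, MYFAM_ATTR_MAX, myfam_policy, extack);
}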
+
+/**
+ * nlmsg_validate_deprecated - validate a netlink message including attributes
 * @nlh: netlink message header
 * @hdrlen: length of family specific header
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
* @extack: extended ACK report struct
*/
-static inline int nlmsg_validate(const struct nlmsghdr *nlh,
- int hdrlen, int maxtype,
- const struct nla_policy *policy,
- struct netlink_ext_ack *extack)
+static inline int nlmsg_validate_deprecated(const struct nlmsghdr *nlh,
+ int hdrlen, int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
{
if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
return -EINVAL;
- return nla_validate(nlmsg_attrdata(nlh, hdrlen),
- nlmsg_attrlen(nlh, hdrlen), maxtype, policy,
- extack);
+ return __nla_validate(nlmsg_attrdata(nlh, hdrlen),
+ nlmsg_attrlen(nlh, hdrlen), maxtype,
+ policy, NL_VALIDATE_LIBERAL, extack);
}
+
/**
* nlmsg_report - need to report back to application?
* @nlh: netlink message header
@@ -909,8 +1136,32 @@ static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
const struct nla_policy *policy,
struct netlink_ext_ack *extack)
{
- return nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy,
- extack);
+ if (!(nla->nla_type & NLA_F_NESTED)) {
+ NL_SET_ERR_MSG_ATTR(extack, nla, "NLA_F_NESTED is missing");
+ return -EINVAL;
+ }
+
+ return __nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy,
+ NL_VALIDATE_STRICT, extack);
+}
+
+/**
+ * nla_parse_nested_deprecated - parse nested attributes
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @nla: attribute containing the nested attributes
+ * @policy: validation policy
+ * @extack: extended ACK report struct
+ *
+ * See nla_parse_deprecated()
+ */
+static inline int nla_parse_nested_deprecated(struct nlattr *tb[], int maxtype,
+ const struct nlattr *nla,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy,
+ NL_VALIDATE_LIBERAL, extack);
}
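A sketch of consuming a nested container with the strict helper; the container attribute must carry NLA_F_NESTED or the parse now fails with -EINVAL (MYFAM_OPT_* and myfam_opt_policy are hypothetical names):

static int myfam_parse_opts(const struct nlattr *opts_attr,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MYFAM_OPT_MAX + 1];
	int err;

	err = nla_parse_nested(tb, MYFAM_OPT_MAX, opts_attr,
			       myfam_opt_policy, extack);
	if (err < 0)
		return err;

	if (tb[MYFAM_OPT_TIMEOUT])
		pr_debug("timeout %u\n", nla_get_u32(tb[MYFAM_OPT_TIMEOUT]));
	return 0;
}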
/**
@@ -1415,13 +1666,18 @@ static inline void *nla_memdup(const struct nlattr *src, gfp_t gfp)
}
/**
- * nla_nest_start - Start a new level of nested attributes
+ * nla_nest_start_noflag - Start a new level of nested attributes
* @skb: socket buffer to add attributes to
* @attrtype: attribute type of container
*
- * Returns the container attribute
+ * This function exists for backward compatibility, for use in APIs which
+ * never marked their nest attributes with the NLA_F_NESTED flag. New APIs
+ * should use nla_nest_start(), which sets the flag.
+ *
+ * Returns the container attribute or NULL on error
*/
-static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype)
+static inline struct nlattr *nla_nest_start_noflag(struct sk_buff *skb,
+ int attrtype)
{
struct nlattr *start = (struct nlattr *)skb_tail_pointer(skb);
@@ -1432,6 +1688,21 @@ static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype)
}
/**
+ * nla_nest_start - Start a new level of nested attributes, with NLA_F_NESTED
+ * @skb: socket buffer to add attributes to
+ * @attrtype: attribute type of container
+ *
+ * Unlike nla_nest_start_noflag(), this marks the nest attribute with the
+ * NLA_F_NESTED flag. This is the preferred function to use in new code.
+ *
+ * Returns the container attribute or NULL on error
+ */
+static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype)
+{
+ return nla_nest_start_noflag(skb, attrtype | NLA_F_NESTED);
+}
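On the fill/dump side, new code is expected to use nla_nest_start(), which sets NLA_F_NESTED on the container so the strict nested parsers above accept it; a minimal sketch with hypothetical attribute names:

static int myfam_fill_opts(struct sk_buff *skb)
{
	struct nlattr *opts;

	opts = nla_nest_start(skb, MYFAM_ATTR_OPTS);	/* NLA_F_NESTED is set */
	if (!opts)
		return -EMSGSIZE;

	if (nla_put_u32(skb, MYFAM_OPT_TIMEOUT, 1000)) {
		nla_nest_cancel(skb, opts);
		return -EMSGSIZE;
	}

	nla_nest_end(skb, opts);
	return 0;
}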
+
+/**
* nla_nest_end - Finalize nesting of attributes
* @skb: socket buffer the attributes are stored in
* @start: container attribute
@@ -1465,6 +1736,7 @@ static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
* @start: container attribute
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
+ * @validate: validation strictness
* @extack: extended ACK report struct
*
* Validates all attributes in the nested attribute stream against the
@@ -1473,12 +1745,22 @@ static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
*
* Returns 0 on success or a negative error code.
*/
-static inline int nla_validate_nested(const struct nlattr *start, int maxtype,
- const struct nla_policy *policy,
- struct netlink_ext_ack *extack)
+static inline int __nla_validate_nested(const struct nlattr *start, int maxtype,
+ const struct nla_policy *policy,
+ unsigned int validate,
+ struct netlink_ext_ack *extack)
+{
+ return __nla_validate(nla_data(start), nla_len(start), maxtype, policy,
+ validate, extack);
+}
+
+static inline int
+nla_validate_nested_deprecated(const struct nlattr *start, int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
{
- return nla_validate(nla_data(start), nla_len(start), maxtype, policy,
- extack);
+ return __nla_validate_nested(start, maxtype, policy,
+ NL_VALIDATE_LIBERAL, extack);
}
/**
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index f19b53130bf7..806454e767bf 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -24,9 +24,9 @@ struct nf_generic_net {
struct nf_tcp_net {
unsigned int timeouts[TCP_CONNTRACK_TIMEOUT_MAX];
- unsigned int tcp_loose;
- unsigned int tcp_be_liberal;
- unsigned int tcp_max_retrans;
+ int tcp_loose;
+ int tcp_be_liberal;
+ int tcp_max_retrans;
};
enum udp_conntrack {
diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
index 16a842456189..d9b665151f3d 100644
--- a/include/net/netns/hash.h
+++ b/include/net/netns/hash.h
@@ -2,16 +2,10 @@
#ifndef __NET_NS_HASH_H__
#define __NET_NS_HASH_H__
-#include <asm/cache.h>
-
-struct net;
+#include <net/net_namespace.h>
static inline u32 net_hash_mix(const struct net *net)
{
-#ifdef CONFIG_NET_NS
- return (u32)(((unsigned long)net) >> ilog2(sizeof(*net)));
-#else
- return 0;
-#endif
+ return net->hash_mix;
}
#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 104a6669e344..7698460a3dd1 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -9,6 +9,7 @@
#include <linux/uidgid.h>
#include <net/inet_frag.h>
#include <linux/rcupdate.h>
+#include <linux/siphash.h>
struct tcpm_hash_bucket;
struct ctl_table_header;
@@ -217,5 +218,6 @@ struct netns_ipv4 {
unsigned int ipmr_seq; /* protected by rtnl_mutex */
atomic_t rt_genid;
+ siphash_key_t ip_id_key;
};
#endif
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index b028a1dc150d..5e61b5a8635d 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -8,6 +8,7 @@
#ifndef __NETNS_IPV6_H__
#define __NETNS_IPV6_H__
#include <net/dst_ops.h>
+#include <uapi/linux/icmpv6.h>
struct ctl_table_header;
@@ -33,6 +34,10 @@ struct netns_sysctl_ipv6 {
int auto_flowlabels;
int icmpv6_time;
int icmpv6_echo_ignore_all;
+ int icmpv6_echo_ignore_multicast;
+ int icmpv6_echo_ignore_anycast;
+ DECLARE_BITMAP(icmpv6_ratemask, ICMPV6_MSG_MAX + 1);
+ unsigned long *icmpv6_ratemask_ptr;
int anycast_src_echo_reply;
int ip_nonlocal_bind;
int fwmark_reflect;
diff --git a/include/net/netrom.h b/include/net/netrom.h
index 5a0714ff500f..80f15b1c1a48 100644
--- a/include/net/netrom.h
+++ b/include/net/netrom.h
@@ -266,7 +266,7 @@ void nr_stop_idletimer(struct sock *);
int nr_t1timer_running(struct sock *);
/* sysctl_net_netrom.c */
-void nr_register_sysctl(void);
+int nr_register_sysctl(void);
void nr_unregister_sysctl(void);
#endif
diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h
index 87499b6b35d6..df5c69db68af 100644
--- a/include/net/nfc/nci_core.h
+++ b/include/net/nfc/nci_core.h
@@ -166,7 +166,7 @@ struct nci_conn_info {
* According to specification 102 622 chapter 4.4 Pipes,
* the pipe identifier is 7 bits long.
*/
-#define NCI_HCI_MAX_PIPES 127
+#define NCI_HCI_MAX_PIPES 128
struct nci_hci_gate {
u8 gate;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index d5e7a1af346f..514e3c80ecc1 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -100,6 +100,11 @@ int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode);
#else
+static inline bool tcf_block_shared(struct tcf_block *block)
+{
+ return false;
+}
+
static inline
int tcf_block_get(struct tcf_block **p_block,
struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
@@ -372,30 +377,6 @@ static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
}
/**
- * tcf_exts_has_one_action - check if exactly one action is present
- * @exts: tc filter extensions handle
- *
- * Returns true if exactly one action is present.
- */
-static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
-{
-#ifdef CONFIG_NET_CLS_ACT
- return exts->nr_actions == 1;
-#else
- return false;
-#endif
-}
-
-static inline struct tc_action *tcf_exts_first_action(struct tcf_exts *exts)
-{
-#ifdef CONFIG_NET_CLS_ACT
- return exts->actions[0];
-#else
- return NULL;
-#endif
-}
-
-/**
* tcf_exts_exec - execute tc filter extensions
* @skb: socket buffer
* @exts: tc filter extensions handle
@@ -784,12 +765,14 @@ tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd)
enum tc_matchall_command {
TC_CLSMATCHALL_REPLACE,
TC_CLSMATCHALL_DESTROY,
+ TC_CLSMATCHALL_STATS,
};
struct tc_cls_matchall_offload {
struct tc_cls_common_offload common;
enum tc_matchall_command command;
- struct tcf_exts *exts;
+ struct flow_rule *rule;
+ struct flow_stats stats;
unsigned long cookie;
};
diff --git a/include/net/psample.h b/include/net/psample.h
index 9b80f814ab04..37a4df2325b2 100644
--- a/include/net/psample.h
+++ b/include/net/psample.h
@@ -3,7 +3,6 @@
#define __NET_PSAMPLE_H
#include <uapi/linux/psample.h>
-#include <linux/module.h>
#include <linux/list.h>
struct psample_group {
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 21a5243fecd1..9dfd7960d90a 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -106,10 +106,8 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
return req;
}
-static inline void reqsk_free(struct request_sock *req)
+static inline void __reqsk_free(struct request_sock *req)
{
- WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
-
req->rsk_ops->destructor(req);
if (req->rsk_listener)
sock_put(req->rsk_listener);
@@ -117,6 +115,12 @@ static inline void reqsk_free(struct request_sock *req)
kmem_cache_free(req->rsk_ops->slab, req);
}
+static inline void reqsk_free(struct request_sock *req)
+{
+ WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
+ __reqsk_free(req);
+}
+
static inline void reqsk_put(struct request_sock *req)
{
if (refcount_dec_and_test(&req->rsk_refcnt))
diff --git a/include/net/route.h b/include/net/route.h
index 9883dc82f723..96f6c9ae33c2 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -29,6 +29,8 @@
#include <net/flow.h>
#include <net/inet_sock.h>
#include <net/ip_fib.h>
+#include <net/arp.h>
+#include <net/ndisc.h>
#include <linux/in_route.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
@@ -55,12 +57,15 @@ struct rtable {
unsigned int rt_flags;
__u16 rt_type;
__u8 rt_is_input;
- __u8 rt_uses_gateway;
+ u8 rt_gw_family;
int rt_iif;
/* Info on neighbour */
- __be32 rt_gateway;
+ union {
+ __be32 rt_gw4;
+ struct in6_addr rt_gw6;
+ };
/* Miscellaneous cached information */
u32 rt_mtu_locked:1,
@@ -82,8 +87,8 @@ static inline bool rt_is_output_route(const struct rtable *rt)
static inline __be32 rt_nexthop(const struct rtable *rt, __be32 daddr)
{
- if (rt->rt_gateway)
- return rt->rt_gateway;
+ if (rt->rt_gw_family == AF_INET)
+ return rt->rt_gw4;
return daddr;
}
@@ -347,4 +352,34 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
return hoplimit;
}
+static inline struct neighbour *ip_neigh_gw4(struct net_device *dev,
+ __be32 daddr)
+{
+ struct neighbour *neigh;
+
+ neigh = __ipv4_neigh_lookup_noref(dev, daddr);
+ if (unlikely(!neigh))
+ neigh = __neigh_create(&arp_tbl, &daddr, dev, false);
+
+ return neigh;
+}
+
+static inline struct neighbour *ip_neigh_for_gw(struct rtable *rt,
+ struct sk_buff *skb,
+ bool *is_v6gw)
+{
+ struct net_device *dev = rt->dst.dev;
+ struct neighbour *neigh;
+
+ if (likely(rt->rt_gw_family == AF_INET)) {
+ neigh = ip_neigh_gw4(dev, rt->rt_gw4);
+ } else if (rt->rt_gw_family == AF_INET6) {
+ neigh = ip_neigh_gw6(dev, &rt->rt_gw6);
+ *is_v6gw = true;
+ } else {
+ neigh = ip_neigh_gw4(dev, ip_hdr(skb)->daddr);
+ }
+ return neigh;
+}
+
#endif /* _ROUTE_H */
diff --git a/include/net/nexthop.h b/include/net/rtnh.h
index 902ff382a6dc..aa2cfc508f7c 100644
--- a/include/net/nexthop.h
+++ b/include/net/rtnh.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __NET_NEXTHOP_H
-#define __NET_NEXTHOP_H
+#ifndef __NET_RTNH_H
+#define __NET_RTNH_H
#include <linux/rtnetlink.h>
#include <net/netlink.h>
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 31284c078d06..21f434f3ac9e 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -52,10 +52,7 @@ struct qdisc_size_table {
struct qdisc_skb_head {
struct sk_buff *head;
struct sk_buff *tail;
- union {
- u32 qlen;
- atomic_t atomic_qlen;
- };
+ __u32 qlen;
spinlock_t lock;
};
@@ -113,6 +110,9 @@ struct Qdisc {
spinlock_t busylock ____cacheline_aligned_in_smp;
spinlock_t seqlock;
+
+ /* for NOLOCK qdisc, true if there are no enqueued skbs */
+ bool empty;
struct rcu_head rcu;
};
@@ -143,11 +143,24 @@ static inline bool qdisc_is_running(struct Qdisc *qdisc)
return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}
+static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
+{
+ return q->flags & TCQ_F_CPUSTATS;
+}
+
+static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
+{
+ if (qdisc_is_percpu_stats(qdisc))
+ return qdisc->empty;
+ return !qdisc->q.qlen;
+}
+
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_NOLOCK) {
if (!spin_trylock(&qdisc->seqlock))
return false;
+ qdisc->empty = false;
} else if (qdisc_is_running(qdisc)) {
return false;
}
@@ -351,13 +364,10 @@ struct tcf_proto {
};
struct qdisc_skb_cb {
- union {
- struct {
- unsigned int pkt_len;
- u16 slave_dev_queue_mapping;
- u16 tc_classid;
- };
- struct bpf_flow_keys *flow_keys;
+ struct {
+ unsigned int pkt_len;
+ u16 slave_dev_queue_mapping;
+ u16 tc_classid;
};
#define QDISC_CB_PRIV_LEN 20
unsigned char data[QDISC_CB_PRIV_LEN];
@@ -378,6 +388,7 @@ struct tcf_chain {
bool flushing;
const struct tcf_proto_ops *tmplt_ops;
void *tmplt_priv;
+ struct rcu_head rcu;
};
struct tcf_block {
@@ -469,19 +480,27 @@ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
+static inline int qdisc_qlen_cpu(const struct Qdisc *q)
+{
+ return this_cpu_ptr(q->cpu_qstats)->qlen;
+}
+
static inline int qdisc_qlen(const struct Qdisc *q)
{
return q->q.qlen;
}
-static inline u32 qdisc_qlen_sum(const struct Qdisc *q)
+static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
- u32 qlen = q->qstats.qlen;
+ __u32 qlen = q->qstats.qlen;
+ int i;
- if (q->flags & TCQ_F_NOLOCK)
- qlen += atomic_read(&q->q.atomic_qlen);
- else
+ if (qdisc_is_percpu_stats(q)) {
+ for_each_possible_cpu(i)
+ qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
+ } else {
qlen += q->q.qlen;
+ }
return qlen;
}
@@ -735,7 +754,7 @@ static inline bool qdisc_all_tx_empty(const struct net_device *dev)
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
const struct Qdisc *q = rcu_dereference(txq->qdisc);
- if (q->q.qlen) {
+ if (!qdisc_is_empty(q)) {
rcu_read_unlock();
return false;
}
@@ -805,11 +824,6 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return sch->enqueue(skb, sch, to_free);
}
-static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
-{
- return q->flags & TCQ_F_CPUSTATS;
-}
-
static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
__u64 bytes, __u32 packets)
{
@@ -877,14 +891,14 @@ static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}
-static inline void qdisc_qstats_atomic_qlen_inc(struct Qdisc *sch)
+static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
- atomic_inc(&sch->q.atomic_qlen);
+ this_cpu_inc(sch->cpu_qstats->qlen);
}
-static inline void qdisc_qstats_atomic_qlen_dec(struct Qdisc *sch)
+static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
- atomic_dec(&sch->q.atomic_qlen);
+ this_cpu_dec(sch->cpu_qstats->qlen);
}
static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
@@ -922,6 +936,41 @@ static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
sch->qstats.overlimits++;
}
+static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
+{
+ __u32 qlen = qdisc_qlen_sum(sch);
+
+ return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
+}
+
+static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
+ __u32 *backlog)
+{
+ struct gnet_stats_queue qstats = { 0 };
+ __u32 len = qdisc_qlen_sum(sch);
+
+ __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
+ *qlen = qstats.qlen;
+ *backlog = qstats.backlog;
+}
+
+static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
+{
+ __u32 qlen, backlog;
+
+ qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+ qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
+static inline void qdisc_purge_queue(struct Qdisc *sch)
+{
+ __u32 qlen, backlog;
+
+ qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+ qdisc_reset(sch);
+ qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
qh->head = NULL;
@@ -1059,6 +1108,32 @@ static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
return skb;
}
+static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
+ struct sk_buff *skb)
+{
+ if (qdisc_is_percpu_stats(sch)) {
+ qdisc_qstats_cpu_backlog_dec(sch, skb);
+ qdisc_bstats_cpu_update(sch, skb);
+ qdisc_qstats_cpu_qlen_dec(sch);
+ } else {
+ qdisc_qstats_backlog_dec(sch, skb);
+ qdisc_bstats_update(sch, skb);
+ sch->q.qlen--;
+ }
+}
+
+static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
+ unsigned int pkt_len)
+{
+ if (qdisc_is_percpu_stats(sch)) {
+ qdisc_qstats_cpu_qlen_inc(sch);
+ this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
+ } else {
+ sch->qstats.backlog += pkt_len;
+ sch->q.qlen++;
+ }
+}
+
/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
@@ -1066,8 +1141,13 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
if (skb) {
skb = __skb_dequeue(&sch->gso_skb);
- qdisc_qstats_backlog_dec(sch, skb);
- sch->q.qlen--;
+ if (qdisc_is_percpu_stats(sch)) {
+ qdisc_qstats_cpu_backlog_dec(sch, skb);
+ qdisc_qstats_cpu_qlen_dec(sch);
+ } else {
+ qdisc_qstats_backlog_dec(sch, skb);
+ sch->q.qlen--;
+ }
} else {
skb = sch->dequeue(sch);
}
@@ -1105,13 +1185,8 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
sch_tree_lock(sch);
old = *pold;
*pold = new;
- if (old != NULL) {
- unsigned int qlen = old->q.qlen;
- unsigned int backlog = old->qstats.backlog;
-
- qdisc_reset(old);
- qdisc_tree_reduce_backlog(old, qlen, backlog);
- }
+ if (old != NULL)
+ qdisc_tree_flush_backlog(old);
sch_tree_unlock(sch);
return old;
diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
index 32ee65a30aff..1c6e6c0766ca 100644
--- a/include/net/sctp/checksum.h
+++ b/include/net/sctp/checksum.h
@@ -61,7 +61,7 @@ static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
unsigned int offset)
{
- struct sctphdr *sh = sctp_hdr(skb);
+ struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
const struct skb_checksum_ops ops = {
.update = sctp_csum_update,
.combine = sctp_csum_combine,
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 6640f84fe536..6d5beac29bc1 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -105,7 +105,6 @@ enum sctp_verb {
SCTP_CMD_T1_RETRAN, /* Mark for retransmission after T1 timeout */
SCTP_CMD_UPDATE_INITTAG, /* Update peer inittag */
 SCTP_CMD_SEND_MSG, /* Send the whole user message */
- SCTP_CMD_SEND_NEXT_ASCONF, /* Send the next ASCONF after ACK */
SCTP_CMD_PURGE_ASCONF_QUEUE, /* Purge all asconf queues.*/
SCTP_CMD_SET_ASOC, /* Restore association context */
SCTP_CMD_LAST
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 1d13ec3f2707..eefdfa5abf6e 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -421,7 +421,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
/*
* This mimics the behavior of skb_set_owner_r
*/
- sk->sk_forward_alloc -= event->rmem_len;
+ sk_mem_charge(sk, event->rmem_len);
}
/* Tests if the list has one and only one entry. */
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h
index bb0ecba3db2b..f4ac7117ff29 100644
--- a/include/net/sctp/ulpqueue.h
+++ b/include/net/sctp/ulpqueue.h
@@ -59,7 +59,7 @@ void sctp_ulpq_free(struct sctp_ulpq *);
int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
/* Add a new event for propagation to the ULP. */
-int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev);
+int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sk_buff_head *skb_list);
/* Renege previously received chunks. */
void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
diff --git a/include/net/sock.h b/include/net/sock.h
index 328cb7cb7b0b..4d208c0f9c14 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -236,6 +236,8 @@ struct sock_common {
/* public: */
};
+struct bpf_sk_storage;
+
/**
* struct sock - network layer representation of sockets
* @__sk_common: shared layout with inet_timewait_sock
@@ -368,6 +370,7 @@ struct sock {
atomic_t sk_drops;
int sk_rcvlowat;
struct sk_buff_head sk_error_queue;
+ struct sk_buff *sk_rx_skb_cache;
struct sk_buff_head sk_receive_queue;
/*
* The backlog queue is special, it is always used with
@@ -414,6 +417,7 @@ struct sock {
struct sk_buff *sk_send_head;
struct rb_root tcp_rtx_queue;
};
+ struct sk_buff *sk_tx_skb_cache;
struct sk_buff_head sk_write_queue;
__s32 sk_peek_off;
int sk_write_pending;
@@ -508,6 +512,9 @@ struct sock {
#endif
void (*sk_destruct)(struct sock *sk);
struct sock_reuseport __rcu *sk_reuseport_cb;
+#ifdef CONFIG_BPF_SYSCALL
+ struct bpf_sk_storage __rcu *sk_bpf_storage;
+#endif
struct rcu_head sk_rcu;
};
@@ -710,6 +717,12 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
hlist_add_head_rcu(&sk->sk_node, list);
}
+static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
+{
+ sock_hold(sk);
+ hlist_add_tail_rcu(&sk->sk_node, list);
+}
+
static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
@@ -960,7 +973,7 @@ static inline void sock_rps_record_flow_hash(__u32 hash)
static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
- if (static_key_false(&rfs_needed)) {
+ if (static_branch_unlikely(&rfs_needed)) {
/* Reading sk->sk_rxhash might incur an expensive cache line
* miss.
*
@@ -1460,6 +1473,11 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
sk->sk_wmem_queued -= skb->truesize;
sk_mem_uncharge(sk, skb->truesize);
+ if (!sk->sk_tx_skb_cache) {
+ skb_zcopy_clear(skb, true);
+ sk->sk_tx_skb_cache = skb;
+ return;
+ }
__kfree_skb(skb);
}
@@ -1601,6 +1619,8 @@ int sock_setsockopt(struct socket *sock, int level, int op,
int sock_getsockopt(struct socket *sock, int level, int op,
char __user *optval, int __user *optlen);
+int sock_gettstamp(struct socket *sock, void __user *userstamp,
+ bool timeval, bool time32);
struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
int noblock, int *errcode);
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
@@ -2078,12 +2098,6 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
* @p: poll_table
*
* See the comments in the wq_has_sleeper function.
- *
- * Do not derive sock from filp->private_data here. An SMC socket establishes
- * an internal TCP socket that is used in the fallback case. All socket
- * operations on the SMC socket are then forwarded to the TCP socket. In case of
- * poll, the filp->private_data pointer references the SMC socket because the
- * TCP socket has no file assigned.
*/
static inline void sock_poll_wait(struct file *filp, struct socket *sock,
poll_table *p)
@@ -2427,6 +2441,15 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
__skb_unlink(skb, &sk->sk_receive_queue);
+ if (
+#ifdef CONFIG_RPS
+ !static_branch_unlikely(&rps_needed) &&
+#endif
+ !sk->sk_rx_skb_cache) {
+ sk->sk_rx_skb_cache = skb;
+ skb_orphan(skb);
+ return;
+ }
__kfree_skb(skb);
}
@@ -2487,8 +2510,6 @@ static inline bool sk_listener(const struct sock *sk)
}
void sock_enable_timestamp(struct sock *sk, int flag);
-int sock_get_timestamp(struct sock *, struct timeval __user *);
-int sock_get_timestampns(struct sock *, struct timespec __user *);
int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
int type);
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
index ee8d005f56fc..eb8f01c819e6 100644
--- a/include/net/tc_act/tc_gact.h
+++ b/include/net/tc_act/tc_gact.h
@@ -56,7 +56,7 @@ static inline bool is_tcf_gact_goto_chain(const struct tc_action *a)
static inline u32 tcf_gact_goto_chain_index(const struct tc_action *a)
{
- return a->goto_chain->index;
+ return READ_ONCE(a->tcfa_action) & TC_ACT_EXT_VAL_MASK;
}
#endif /* __NET_TC_GACT_H */
diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h
index 86d13b01b39d..c7f24a2da1ca 100644
--- a/include/net/tc_act/tc_ife.h
+++ b/include/net/tc_act/tc_ife.h
@@ -5,7 +5,8 @@
#include <net/act_api.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
-#include <linux/module.h>
+
+struct module;
struct tcf_ife_params {
u8 eth_dst[ETH_ALEN];
diff --git a/include/net/tc_act/tc_police.h b/include/net/tc_act/tc_police.h
new file mode 100644
index 000000000000..8b9ef3664262
--- /dev/null
+++ b/include/net/tc_act/tc_police.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_TC_POLICE_H
+#define __NET_TC_POLICE_H
+
+#include <net/act_api.h>
+
+struct tcf_police_params {
+ int tcfp_result;
+ u32 tcfp_ewma_rate;
+ s64 tcfp_burst;
+ u32 tcfp_mtu;
+ s64 tcfp_mtu_ptoks;
+ struct psched_ratecfg rate;
+ bool rate_present;
+ struct psched_ratecfg peak;
+ bool peak_present;
+ struct rcu_head rcu;
+};
+
+struct tcf_police {
+ struct tc_action common;
+ struct tcf_police_params __rcu *params;
+
+ spinlock_t tcfp_lock ____cacheline_aligned_in_smp;
+ s64 tcfp_toks;
+ s64 tcfp_ptoks;
+ s64 tcfp_t_c;
+};
+
+#define to_police(pc) ((struct tcf_police *)pc)
+
+/* old policer structure from before tc actions */
+struct tc_police_compat {
+ u32 index;
+ int action;
+ u32 limit;
+ u32 burst;
+ u32 mtu;
+ struct tc_ratespec rate;
+ struct tc_ratespec peakrate;
+};
+
+static inline bool is_tcf_police(const struct tc_action *act)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (act->ops && act->ops->id == TCA_ID_POLICE)
+ return true;
+#endif
+ return false;
+}
+
+static inline u64 tcf_police_rate_bytes_ps(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+
+ params = rcu_dereference_bh(police->params);
+ return params->rate.rate_bytes_ps;
+}
+
+static inline s64 tcf_police_tcfp_burst(const struct tc_action *act)
+{
+ struct tcf_police *police = to_police(act);
+ struct tcf_police_params *params;
+
+ params = rcu_dereference_bh(police->params);
+ return params->tcfp_burst;
+}
+
+#endif /* __NET_TC_POLICE_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 68ee02523b87..985aa5db570c 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1314,7 +1314,7 @@ static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
__be32 daddr, __wsum base)
{
- return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
+ return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}
static inline bool tcp_checksum_complete(struct sk_buff *skb)
@@ -2198,7 +2198,7 @@ extern struct static_key_false tcp_have_smc;
void clean_acked_data_enable(struct inet_connection_sock *icsk,
void (*cad)(struct sock *sk, u32 ack_seq));
void clean_acked_data_disable(struct inet_connection_sock *icsk);
-
+void clean_acked_data_flush(void);
#endif
#endif /* _TCP_H */
diff --git a/include/net/tls.h b/include/net/tls.h
index a5a938583295..39ea62f0c1f6 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -60,6 +60,17 @@
#define TLS_AAD_SPACE_SIZE 13
#define TLS_DEVICE_NAME_MAX 32
+#define MAX_IV_SIZE 16
+
+/* For AES-CCM, the full 16 bytes of IV are made up of four fields of the
+ * given sizes.
+ *
+ * IV[16] = b0[1] || implicit nonce[4] || explicit nonce[8] || length[3]
+ *
+ * The width of the 'length' field is encoded in 'b0' as (length width - 1).
+ * Hence b0 contains (3 - 1) = 2.
+ */
+#define TLS_AES_CCM_IV_B0_BYTE 2
+
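Purely as an illustration of the layout above (salt and rec_seq are assumed to hold the implicit and explicit nonce; this is not code from the patch), the counter block could be assembled as:

	unsigned char iv[MAX_IV_SIZE];	/* 16 bytes for AES-CCM */

	iv[0] = TLS_AES_CCM_IV_B0_BYTE;	/* b0: (length width - 1) = 2 */
	memcpy(&iv[1], salt, 4);	/* implicit nonce */
	memcpy(&iv[5], rec_seq, 8);	/* explicit nonce */
	/* iv[13..15] hold the 3-byte length, filled in by the AEAD code */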
/*
* This structure defines the routines for Inline TLS driver.
* The following routines are optional and filled with a
@@ -123,8 +134,7 @@ struct tls_rec {
struct scatterlist sg_content_type;
char aad_space[TLS_AAD_SPACE_SIZE];
- u8 iv_data[TLS_CIPHER_AES_GCM_128_IV_SIZE +
- TLS_CIPHER_AES_GCM_128_SALT_SIZE];
+ u8 iv_data[MAX_IV_SIZE];
struct aead_request aead_req;
u8 aead_req_ctx[];
};
@@ -219,6 +229,7 @@ struct tls_prot_info {
u16 tag_size;
u16 overhead_size;
u16 iv_size;
+ u16 salt_size;
u16 rec_seq_size;
u16 aad_size;
u16 tail_size;
@@ -266,6 +277,23 @@ struct tls_context {
void (*unhash)(struct sock *sk);
};
+enum tls_offload_ctx_dir {
+ TLS_OFFLOAD_CTX_DIR_RX,
+ TLS_OFFLOAD_CTX_DIR_TX,
+};
+
+struct tlsdev_ops {
+ int (*tls_dev_add)(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn);
+ void (*tls_dev_del)(struct net_device *netdev,
+ struct tls_context *ctx,
+ enum tls_offload_ctx_dir direction);
+ void (*tls_dev_resync_rx)(struct net_device *netdev,
+ struct sock *sk, u32 seq, u64 rcd_sn);
+};
+
struct tls_offload_context_rx {
/* sw must be the first member of tls_offload_context_rx */
struct tls_sw_context_rx sw;
@@ -306,7 +334,7 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_device_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
-void tls_device_sk_destruct(struct sock *sk);
+void tls_device_free_resources_tx(struct sock *sk);
void tls_device_init(void);
void tls_device_cleanup(void);
int tls_tx_records(struct sock *sk, int flags);
@@ -324,12 +352,12 @@ static inline u32 tls_record_start_seq(struct tls_record_info *rec)
return rec->end_seq - rec->len;
}
-void tls_sk_destruct(struct sock *sk, struct tls_context *ctx);
int tls_push_sg(struct sock *sk, struct tls_context *ctx,
struct scatterlist *sg, u16 first_offset,
int flags);
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
int flags);
+bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
static inline struct tls_msg *tls_msg(struct sk_buff *skb)
{
@@ -379,7 +407,7 @@ tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
#ifdef CONFIG_SOCK_VALIDATE_XMIT
- return sk_fullsock(sk) &
+ return sk_fullsock(sk) &&
(smp_load_acquire(&sk->sk_validate_xmit_skb) ==
&tls_validate_xmit_skb);
#else
@@ -534,7 +562,7 @@ static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
- atomic64_set(&rx_ctx->resync_req, ((((uint64_t)seq) << 32) | 1));
+ atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1);
}
diff --git a/include/net/udp.h b/include/net/udp.h
index fd6d948755c8..d8ce937bc395 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -269,13 +269,13 @@ void skb_consume_udp(struct sock *sk, struct sk_buff *skb, int len);
int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb);
void udp_skb_destructor(struct sock *sk, struct sk_buff *skb);
struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
- int noblock, int *peeked, int *off, int *err);
+ int noblock, int *off, int *err);
static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags,
int noblock, int *err)
{
- int peeked, off = 0;
+ int off = 0;
- return __skb_recv_udp(sk, flags, noblock, &peeked, &off, err);
+ return __skb_recv_udp(sk, flags, noblock, &off, err);
}
int udp_v4_early_demux(struct sk_buff *skb);
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index b8137953fea3..4b1f95e08307 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -7,7 +7,7 @@
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
-#include <net/addrconf.h>
+#include <net/ipv6_stubs.h>
#endif
struct udp_port_cfg {
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 00254a58824b..83b5999a2587 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -8,6 +8,8 @@
#include <net/rtnetlink.h>
#include <net/switchdev.h>
+#define IANA_VXLAN_UDP_PORT 4789
+
/* VXLAN protocol (RFC 7348) header:
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |R|R|R|R|I|R|R|R| Reserved |
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 61cf7dbb6782..d074b6d60f8a 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -36,7 +36,6 @@ struct xdp_umem {
u32 headroom;
u32 chunk_size_nohr;
struct user_struct *user;
- struct pid *pid;
unsigned long address;
refcount_t users;
struct work_struct work;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 85386becbaea..a2907873ed56 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -132,6 +132,17 @@ struct xfrm_state_offload {
u8 flags;
};
+struct xfrm_mode {
+ u8 encap;
+ u8 family;
+ u8 flags;
+};
+
+/* Flags for xfrm_mode. */
+enum {
+ XFRM_MODE_FLAG_TUNNEL = 1,
+};
+
/* Full description of state of transformer. */
struct xfrm_state {
possible_net_t xs_net;
@@ -219,7 +230,7 @@ struct xfrm_state {
struct xfrm_stats stats;
struct xfrm_lifetime_cur curlft;
- struct tasklet_hrtimer mtimer;
+ struct hrtimer mtimer;
struct xfrm_state_offload xso;
@@ -234,9 +245,9 @@ struct xfrm_state {
/* Reference to data common to all the instances of this
* transformer. */
const struct xfrm_type *type;
- struct xfrm_mode *inner_mode;
- struct xfrm_mode *inner_mode_iaf;
- struct xfrm_mode *outer_mode;
+ struct xfrm_mode inner_mode;
+ struct xfrm_mode inner_mode_iaf;
+ struct xfrm_mode outer_mode;
const struct xfrm_type_offload *type_offload;
@@ -295,7 +306,8 @@ struct xfrm_replay {
};
struct xfrm_if_cb {
- struct xfrm_if *(*decode_session)(struct sk_buff *skb);
+ struct xfrm_if *(*decode_session)(struct sk_buff *skb,
+ unsigned short family);
};
void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
@@ -315,13 +327,6 @@ struct xfrm_policy_afinfo {
xfrm_address_t *saddr,
xfrm_address_t *daddr,
u32 mark);
- void (*decode_session)(struct sk_buff *skb,
- struct flowi *fl,
- int reverse);
- int (*get_tos)(const struct flowi *fl);
- int (*init_path)(struct xfrm_dst *path,
- struct dst_entry *dst,
- int nfheader_len);
int (*fill_dst)(struct xfrm_dst *xdst,
struct net_device *dev,
const struct flowi *fl);
@@ -347,7 +352,6 @@ struct xfrm_state_afinfo {
struct module *owner;
const struct xfrm_type *type_map[IPPROTO_MAX];
const struct xfrm_type_offload *type_offload_map[IPPROTO_MAX];
- struct xfrm_mode *mode_map[XFRM_MODE_MAX];
int (*init_flags)(struct xfrm_state *x);
void (*init_tempsel)(struct xfrm_selector *sel,
@@ -422,78 +426,6 @@ struct xfrm_type_offload {
int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family);
int xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);
-struct xfrm_mode {
- /*
- * Remove encapsulation header.
- *
- * The IP header will be moved over the top of the encapsulation
- * header.
- *
- * On entry, the transport header shall point to where the IP header
- * should be and the network header shall be set to where the IP
- * header currently is. skb->data shall point to the start of the
- * payload.
- */
- int (*input2)(struct xfrm_state *x, struct sk_buff *skb);
-
- /*
- * This is the actual input entry point.
- *
- * For transport mode and equivalent this would be identical to
- * input2 (which does not need to be set). While tunnel mode
- * and equivalent would set this to the tunnel encapsulation function
- * xfrm4_prepare_input that would in turn call input2.
- */
- int (*input)(struct xfrm_state *x, struct sk_buff *skb);
-
- /*
- * Add encapsulation header.
- *
- * On exit, the transport header will be set to the start of the
- * encapsulation header to be filled in by x->type->output and
- * the mac header will be set to the nextheader (protocol for
- * IPv4) field of the extension header directly preceding the
- * encapsulation header, or in its absence, that of the top IP
- * header. The value of the network header will always point
- * to the top IP header while skb->data will point to the payload.
- */
- int (*output2)(struct xfrm_state *x,struct sk_buff *skb);
-
- /*
- * This is the actual output entry point.
- *
- * For transport mode and equivalent this would be identical to
- * output2 (which does not need to be set). While tunnel mode
- * and equivalent would set this to a tunnel encapsulation function
- * (xfrm4_prepare_output or xfrm6_prepare_output) that would in turn
- * call output2.
- */
- int (*output)(struct xfrm_state *x, struct sk_buff *skb);
-
- /*
- * Adjust pointers into the packet and do GSO segmentation.
- */
- struct sk_buff *(*gso_segment)(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features);
-
- /*
- * Adjust pointers into the packet when IPsec is done at layer2.
- */
- void (*xmit)(struct xfrm_state *x, struct sk_buff *skb);
-
- struct xfrm_state_afinfo *afinfo;
- struct module *owner;
- unsigned int encap;
- int flags;
-};
-
-/* Flags for xfrm_mode. */
-enum {
- XFRM_MODE_FLAG_TUNNEL = 1,
-};
-
-int xfrm_register_mode(struct xfrm_mode *mode, int family);
-int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
-
static inline int xfrm_af2proto(unsigned int family)
{
switch(family) {
@@ -506,13 +438,13 @@ static inline int xfrm_af2proto(unsigned int family)
}
}
-static inline struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
+static inline const struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
{
if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
(ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
- return x->inner_mode;
+ return &x->inner_mode;
else
- return x->inner_mode_iaf;
+ return &x->inner_mode_iaf;
}
struct xfrm_tmpl {
@@ -1404,6 +1336,23 @@ static inline int xfrm_state_kern(const struct xfrm_state *x)
return atomic_read(&x->tunnel_users);
}
+static inline bool xfrm_id_proto_valid(u8 proto)
+{
+ switch (proto) {
+ case IPPROTO_AH:
+ case IPPROTO_ESP:
+ case IPPROTO_COMP:
+#if IS_ENABLED(CONFIG_IPV6)
+ case IPPROTO_ROUTING:
+ case IPPROTO_DSTOPTS:
+#endif
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */
static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
{
return (!userproto || proto == userproto ||
@@ -1605,7 +1554,6 @@ int xfrm_init_replay(struct xfrm_state *x);
int xfrm_state_mtu(struct xfrm_state *x, int mtu);
int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
int xfrm_init_state(struct xfrm_state *x);
-int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
int xfrm_trans_queue(struct sk_buff *skb,
@@ -1613,7 +1561,11 @@ int xfrm_trans_queue(struct sk_buff *skb,
struct sk_buff *));
int xfrm_output_resume(struct sk_buff *skb, int err);
int xfrm_output(struct sock *sk, struct sk_buff *skb);
-int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+
+#if IS_ENABLED(CONFIG_NET_PKTGEN)
+int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
+#endif
+
void xfrm_local_error(struct sk_buff *skb, int mtu);
int xfrm4_extract_header(struct sk_buff *skb);
int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
@@ -1632,10 +1584,8 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
}
int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
-int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb);
-int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err);
int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
@@ -1651,7 +1601,6 @@ int xfrm6_rcv(struct sk_buff *skb);
int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
xfrm_address_t *saddr, u8 proto);
void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
-int xfrm6_rcv_cb(struct sk_buff *skb, u8 protocol, int err);
int xfrm6_protocol_register(struct xfrm6_protocol *handler, unsigned char protocol);
int xfrm6_protocol_deregister(struct xfrm6_protocol *handler, unsigned char protocol);
int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
@@ -1659,7 +1608,6 @@ int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family)
__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
-int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb);
int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
@@ -2051,7 +1999,7 @@ static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
tunnel = true;
break;
}
- if (tunnel && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL))
+ if (tunnel && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL))
return -EINVAL;
return 0;
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
index 62e990b620aa..870b5e6c06db 100644
--- a/include/rdma/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -54,6 +54,10 @@ const struct ib_gid_attr *rdma_find_gid_by_filter(
void *),
void *context);
+int rdma_read_gid_l2_fields(const struct ib_gid_attr *attr,
+ u16 *vlan_id, u8 *smac);
+struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr);
+
/**
* ib_get_cached_pkey - Returns a cached PKey table entry
* @device: The device to query.
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 79ba8219e7dc..eea946fcc819 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -198,7 +198,7 @@ struct ib_sa_hdr {
__be16 attr_offset;
__be16 reserved;
ib_sa_comp_mask comp_mask;
-} __attribute__ ((packed));
+} __packed;
struct ib_mad {
struct ib_mad_hdr mad_hdr;
@@ -227,7 +227,7 @@ struct ib_sa_mad {
struct ib_rmpp_hdr rmpp_hdr;
struct ib_sa_hdr sa_hdr;
u8 data[IB_MGMT_SA_DATA];
-} __attribute__ ((packed));
+} __packed;
struct ib_vendor_mad {
struct ib_mad_hdr mad_hdr;
diff --git a/include/rdma/ib_smi.h b/include/rdma/ib_smi.h
index b439e988408e..7be0028f155c 100644
--- a/include/rdma/ib_smi.h
+++ b/include/rdma/ib_smi.h
@@ -61,7 +61,7 @@ struct ib_smp {
u8 data[IB_SMP_DATA_SIZE];
u8 initial_path[IB_SMP_MAX_PATH_HOPS];
u8 return_path[IB_SMP_MAX_PATH_HOPS];
-} __attribute__ ((packed));
+} __packed;
#define IB_SMP_DIRECTION cpu_to_be16(0x8000)
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 73af05db04c7..040d853077c6 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -48,12 +48,11 @@ struct ib_umem {
unsigned long address;
int page_shift;
u32 writable : 1;
- u32 hugetlb : 1;
u32 is_odp : 1;
struct work_struct work;
struct sg_table sg_head;
int nmap;
- int npages;
+ unsigned int sg_nents;
};
/* Returns the offset of the umem start relative to the first page. */
@@ -87,6 +86,9 @@ void ib_umem_release(struct ib_umem *umem);
int ib_umem_page_count(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
size_t length);
+unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
+ unsigned long pgsz_bitmap,
+ unsigned long virt);
#else /* CONFIG_INFINIBAND_USER_MEM */
@@ -104,6 +106,12 @@ static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offs
size_t length) {
return -EINVAL;
}
+static inline int ib_umem_find_best_pgsz(struct ib_umem *umem,
+ unsigned long pgsz_bitmap,
+ unsigned long virt) {
+ return -EINVAL;
+}
+
#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index dadc96dea39c..eeec4e53c448 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -69,6 +69,7 @@ struct ib_umem_odp {
int notifiers_seq;
int notifiers_count;
+ int npages;
/* Tree tracking */
struct umem_odp_node interval_tree;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 9b9e17bcc201..0742095355f2 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -59,6 +59,8 @@
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/cgroup_rdma.h>
+#include <linux/irqflags.h>
+#include <linux/preempt.h>
#include <uapi/rdma/ib_user_verbs.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_user_ioctl.h>
@@ -72,6 +74,36 @@ extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
extern struct workqueue_struct *ib_comp_unbound_wq;
+__printf(3, 4) __cold
+void ibdev_printk(const char *level, const struct ib_device *ibdev,
+ const char *format, ...);
+__printf(2, 3) __cold
+void ibdev_emerg(const struct ib_device *ibdev, const char *format, ...);
+__printf(2, 3) __cold
+void ibdev_alert(const struct ib_device *ibdev, const char *format, ...);
+__printf(2, 3) __cold
+void ibdev_crit(const struct ib_device *ibdev, const char *format, ...);
+__printf(2, 3) __cold
+void ibdev_err(const struct ib_device *ibdev, const char *format, ...);
+__printf(2, 3) __cold
+void ibdev_warn(const struct ib_device *ibdev, const char *format, ...);
+__printf(2, 3) __cold
+void ibdev_notice(const struct ib_device *ibdev, const char *format, ...);
+__printf(2, 3) __cold
+void ibdev_info(const struct ib_device *ibdev, const char *format, ...);
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define ibdev_dbg(__dev, format, args...) \
+ dynamic_ibdev_dbg(__dev, format, ##args)
+#elif defined(DEBUG)
+#define ibdev_dbg(__dev, format, args...) \
+ ibdev_printk(KERN_DEBUG, __dev, format, ##args)
+#else
+__printf(2, 3) __cold
+static inline
+void ibdev_dbg(const struct ib_device *ibdev, const char *format, ...) {}
+#endif
+
union ib_gid {
u8 raw[16];
struct {
@@ -92,7 +124,7 @@ enum ib_gid_type {
#define ROCE_V2_UDP_DPORT 4791
struct ib_gid_attr {
- struct net_device *ndev;
+ struct net_device __rcu *ndev;
struct ib_device *device;
union ib_gid gid;
enum ib_gid_type gid_type;
@@ -108,6 +140,7 @@ enum rdma_node_type {
RDMA_NODE_RNIC,
RDMA_NODE_USNIC,
RDMA_NODE_USNIC_UDP,
+ RDMA_NODE_UNSPECIFIED,
};
enum {
@@ -119,7 +152,8 @@ enum rdma_transport_type {
RDMA_TRANSPORT_IB,
RDMA_TRANSPORT_IWARP,
RDMA_TRANSPORT_USNIC,
- RDMA_TRANSPORT_USNIC_UDP
+ RDMA_TRANSPORT_USNIC_UDP,
+ RDMA_TRANSPORT_UNSPECIFIED,
};
enum rdma_protocol_type {
@@ -2189,8 +2223,6 @@ struct ib_cache {
struct ib_event_handler event_handler;
};
-struct iw_cm_verbs;
-
struct ib_port_immutable {
int pkey_tbl_len;
int gid_tbl_len;
@@ -2272,6 +2304,8 @@ struct ib_counters_read_attr {
};
struct uverbs_attr_bundle;
+struct iw_cm_id;
+struct iw_cm_conn_param;
#define INIT_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \
.size_##ib_struct = \
@@ -2281,8 +2315,11 @@ struct uverbs_attr_bundle;
!__same_type(((struct drv_struct *)NULL)->member, \
struct ib_struct)))
+#define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \
+ ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))
+
#define rdma_zalloc_drv_obj(ib_dev, ib_type) \
- ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, GFP_KERNEL))
+ rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
@@ -2394,23 +2431,21 @@ struct ib_device_ops {
void (*dealloc_ucontext)(struct ib_ucontext *context);
int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
- int (*alloc_pd)(struct ib_pd *pd, struct ib_ucontext *context,
- struct ib_udata *udata);
- void (*dealloc_pd)(struct ib_pd *pd);
- struct ib_ah *(*create_ah)(struct ib_pd *pd,
- struct rdma_ah_attr *ah_attr, u32 flags,
- struct ib_udata *udata);
+ int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
+ void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
+ int (*create_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
+ u32 flags, struct ib_udata *udata);
int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
- int (*destroy_ah)(struct ib_ah *ah, u32 flags);
- struct ib_srq *(*create_srq)(struct ib_pd *pd,
- struct ib_srq_init_attr *srq_init_attr,
- struct ib_udata *udata);
+ void (*destroy_ah)(struct ib_ah *ah, u32 flags);
+ int (*create_srq)(struct ib_srq *srq,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata);
int (*modify_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
enum ib_srq_attr_mask srq_attr_mask,
struct ib_udata *udata);
int (*query_srq)(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
- int (*destroy_srq)(struct ib_srq *srq);
+ void (*destroy_srq)(struct ib_srq *srq, struct ib_udata *udata);
struct ib_qp *(*create_qp)(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata);
@@ -2418,13 +2453,12 @@ struct ib_device_ops {
int qp_attr_mask, struct ib_udata *udata);
int (*query_qp)(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
- int (*destroy_qp)(struct ib_qp *qp);
+ int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
struct ib_cq *(*create_cq)(struct ib_device *device,
const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
struct ib_udata *udata);
int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
- int (*destroy_cq)(struct ib_cq *cq);
+ int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
@@ -2433,9 +2467,9 @@ struct ib_device_ops {
int (*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
struct ib_pd *pd, struct ib_udata *udata);
- int (*dereg_mr)(struct ib_mr *mr);
+ int (*dereg_mr)(struct ib_mr *mr, struct ib_udata *udata);
struct ib_mr *(*alloc_mr)(struct ib_pd *pd, enum ib_mr_type mr_type,
- u32 max_num_sg);
+ u32 max_num_sg, struct ib_udata *udata);
int (*advise_mr)(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice, u32 flags,
struct ib_sge *sg_list, u32 num_sge,
@@ -2456,9 +2490,8 @@ struct ib_device_ops {
int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
- struct ib_ucontext *ucontext,
struct ib_udata *udata);
- int (*dealloc_xrcd)(struct ib_xrcd *xrcd);
+ int (*dealloc_xrcd)(struct ib_xrcd *xrcd, struct ib_udata *udata);
struct ib_flow *(*create_flow)(struct ib_qp *qp,
struct ib_flow_attr *flow_attr,
int domain, struct ib_udata *udata);
@@ -2483,7 +2516,7 @@ struct ib_device_ops {
struct ib_wq *(*create_wq)(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr,
struct ib_udata *udata);
- int (*destroy_wq)(struct ib_wq *wq);
+ int (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
u32 wq_attr_mask, struct ib_udata *udata);
struct ib_rwq_ind_table *(*create_rwq_ind_table)(
@@ -2495,7 +2528,7 @@ struct ib_device_ops {
struct ib_ucontext *context,
struct ib_dm_alloc_attr *attr,
struct uverbs_attr_bundle *attrs);
- int (*dealloc_dm)(struct ib_dm *dm);
+ int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
struct ib_dm_mr_attr *attr,
struct uverbs_attr_bundle *attrs);
@@ -2550,12 +2583,37 @@ struct ib_device_ops {
*/
void (*dealloc_driver)(struct ib_device *dev);
+ /* iWarp CM callbacks */
+ void (*iw_add_ref)(struct ib_qp *qp);
+ void (*iw_rem_ref)(struct ib_qp *qp);
+ struct ib_qp *(*iw_get_qp)(struct ib_device *device, int qpn);
+ int (*iw_connect)(struct iw_cm_id *cm_id,
+ struct iw_cm_conn_param *conn_param);
+ int (*iw_accept)(struct iw_cm_id *cm_id,
+ struct iw_cm_conn_param *conn_param);
+ int (*iw_reject)(struct iw_cm_id *cm_id, const void *pdata,
+ u8 pdata_len);
+ int (*iw_create_listen)(struct iw_cm_id *cm_id, int backlog);
+ int (*iw_destroy_listen)(struct iw_cm_id *cm_id);
+
+ DECLARE_RDMA_OBJ_SIZE(ib_ah);
DECLARE_RDMA_OBJ_SIZE(ib_pd);
+ DECLARE_RDMA_OBJ_SIZE(ib_srq);
DECLARE_RDMA_OBJ_SIZE(ib_ucontext);
};
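/*
 * Illustrative sketch (not part of this patch): with this change an iWarp
 * driver wires its CM entry points directly into ib_device_ops instead of a
 * separate iw_cm_verbs table. The my_* symbols below are hypothetical stubs.
 */
static void my_qp_add_ref(struct ib_qp *qp)
{
	/* take a driver-private reference on the QP */
}

static void my_qp_rem_ref(struct ib_qp *qp)
{
	/* drop the driver-private reference */
}

static const struct ib_device_ops my_iwarp_ops = {
	.iw_add_ref = my_qp_add_ref,
	.iw_rem_ref = my_qp_rem_ref,
	/* .iw_connect, .iw_accept, .iw_reject, ... wired up the same way */
};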
-struct rdma_restrack_root;
+struct ib_core_device {
+	/* device must be the first element in the structure as long as the
+	 * union of ib_core_device and device exists in ib_device.
+	 */

+ struct device dev;
+ possible_net_t rdma_net;
+ struct kobject *ports_kobj;
+ struct list_head port_list;
+ struct ib_device *owner; /* reach back to owner ib_device */
+};
+struct rdma_restrack_root;
struct ib_device {
/* Do not access @dma_device directly from ULP nor from HW drivers. */
struct device *dma_device;
@@ -2578,19 +2636,18 @@ struct ib_device {
int num_comp_vectors;
- struct iw_cm_verbs *iwcm;
-
struct module *owner;
- struct device dev;
+ union {
+ struct device dev;
+ struct ib_core_device coredev;
+ };
+
/* First group for device attributes,
* Second group for driver provided attributes (optional).
* It is NULL terminated array.
*/
const struct attribute_group *groups[3];
- struct kobject *ports_kobj;
- struct list_head port_list;
-
int uverbs_abi_ver;
u64 uverbs_cmd_mask;
u64 uverbs_ex_cmd_mask;
@@ -2626,6 +2683,15 @@ struct ib_device {
struct work_struct unregistration_work;
const struct rdma_link_ops *link_ops;
+
+ /* Protects compat_devs xarray modifications */
+ struct mutex compat_devs_mutex;
+ /* Maintains compat devices for each net namespace */
+ struct xarray compat_devs;
+
+ /* Used by iWarp CM */
+ char iw_ifname[IFNAMSIZ];
+ u32 iw_driver_flags;
};
struct ib_client {
@@ -2662,6 +2728,21 @@ struct ib_client {
u8 no_kverbs_req:1;
};
+/*
+ * IB block DMA iterator
+ *
+ * Iterates the DMA-mapped SGL in contiguous memory blocks aligned
+ * to a HW supported page size.
+ */
+struct ib_block_iter {
+ /* internal states */
+ struct scatterlist *__sg; /* sg holding the current aligned block */
+ dma_addr_t __dma_addr; /* unaligned DMA address of this block */
+ unsigned int __sg_nents; /* number of SG entries */
+ unsigned int __sg_advance; /* number of bytes to advance in sg in next step */
+ unsigned int __pg_bit; /* alignment of current block */
+};
+
struct ib_device *_ib_alloc_device(size_t size);
#define ib_alloc_device(drv_struct, member) \
container_of(_ib_alloc_device(sizeof(struct drv_struct) + \
@@ -2682,6 +2763,38 @@ void ib_unregister_device_queued(struct ib_device *ib_dev);
int ib_register_client (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);
+void __rdma_block_iter_start(struct ib_block_iter *biter,
+ struct scatterlist *sglist,
+ unsigned int nents,
+ unsigned long pgsz);
+bool __rdma_block_iter_next(struct ib_block_iter *biter);
+
+/**
+ * rdma_block_iter_dma_address - get the aligned dma address of the current
+ * block held by the block iterator.
+ * @biter: block iterator holding the memory block
+ */
+static inline dma_addr_t
+rdma_block_iter_dma_address(struct ib_block_iter *biter)
+{
+ return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
+}
+
+/**
+ * rdma_for_each_block - iterate over contiguous memory blocks of the sg list
+ * @sglist: sglist to iterate over
+ * @biter: block iterator holding the memory block
+ * @nents: maximum number of sg entries to iterate over
+ * @pgsz: best HW supported page size to use
+ *
+ * Callers may use rdma_block_iter_dma_address() to get each
+ * block's aligned DMA address.
+ */
+#define rdma_for_each_block(sglist, biter, nents, pgsz) \
+ for (__rdma_block_iter_start(biter, sglist, nents, \
+ pgsz); \
+ __rdma_block_iter_next(biter);)
+
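/*
 * Illustrative sketch (not part of this patch): walking a DMA-mapped SG list
 * in HW-page-sized blocks, e.g. to program an HCA page table. The parameters
 * "pages" and "best_pgsz" are hypothetical driver-side values.
 */
static void example_fill_pas(struct scatterlist *sgl, unsigned int nents,
			     unsigned long best_pgsz, __be64 *pages)
{
	struct ib_block_iter biter;
	int i = 0;

	rdma_for_each_block(sgl, &biter, nents, best_pgsz)
		pages[i++] = cpu_to_be64(rdma_block_iter_dma_address(&biter));
}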
/**
* ib_get_client_data - Get IB client context
* @device:Device to get context for
@@ -2705,9 +2818,6 @@ void ib_set_device_ops(struct ib_device *device,
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
unsigned long pfn, unsigned long size, pgprot_t prot);
-int rdma_user_mmap_page(struct ib_ucontext *ucontext,
- struct vm_area_struct *vma, struct page *page,
- unsigned long size);
#else
static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext,
struct vm_area_struct *vma,
@@ -2716,12 +2826,6 @@ static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext,
{
return -EINVAL;
}
-static inline int rdma_user_mmap_page(struct ib_ucontext *ucontext,
- struct vm_area_struct *vma, struct page *page,
- unsigned long size)
-{
- return -EINVAL;
-}
#endif
static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
@@ -2978,8 +3082,8 @@ static inline bool rdma_cap_ib_mad(const struct ib_device *device, u8 port_num)
*/
static inline bool rdma_cap_opa_mad(struct ib_device *device, u8 port_num)
{
- return (device->port_data[port_num].immutable.core_cap_flags &
- RDMA_CORE_CAP_OPA_MAD) == RDMA_CORE_CAP_OPA_MAD;
+ return device->port_data[port_num].immutable.core_cap_flags &
+ RDMA_CORE_CAP_OPA_MAD;
}
/**
@@ -3195,6 +3299,30 @@ static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
return rdma_protocol_iwarp(dev, port_num);
}
+/**
+ * rdma_find_pg_bit - Find page bit given address and HW supported page sizes
+ *
+ * @addr: address
+ * @pgsz_bitmap: bitmap of HW supported page sizes
+ */
+static inline unsigned int rdma_find_pg_bit(unsigned long addr,
+ unsigned long pgsz_bitmap)
+{
+ unsigned long align;
+ unsigned long pgsz;
+
+ align = addr & -addr;
+
+ /* Find page bit such that addr is aligned to the highest supported
+ * HW page size
+ */
+ pgsz = pgsz_bitmap & ~(-align << 1);
+ if (!pgsz)
+ return __ffs(pgsz_bitmap);
+
+ return __fls(pgsz);
+}
+
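/*
 * Illustrative sketch (not part of this patch): picking the largest HW page
 * size an IOVA can use. The bitmap value in the comment is a made-up example.
 */
static unsigned long example_best_pg_size(unsigned long iova,
					  unsigned long pgsz_bitmap)
{
	/* e.g. pgsz_bitmap = SZ_4K | SZ_2M for a device supporting 4K and 2M */
	unsigned int pg_bit = rdma_find_pg_bit(iova, pgsz_bitmap);

	return 1UL << pg_bit;	/* page size in bytes */
}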
int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
int state);
int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
@@ -3236,9 +3364,27 @@ enum ib_pd_flags {
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
const char *caller);
+
#define ib_alloc_pd(device, flags) \
__ib_alloc_pd((device), (flags), KBUILD_MODNAME)
-void ib_dealloc_pd(struct ib_pd *pd);
+
+/**
+ * ib_dealloc_pd_user - Deallocate kernel/user PD
+ * @pd: The protection domain
+ * @udata: Valid user data or NULL for kernel objects
+ */
+void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata);
+
+/**
+ * ib_dealloc_pd - Deallocate kernel PD
+ * @pd: The protection domain
+ *
+ * NOTE: for user PD use ib_dealloc_pd_user with valid udata!
+ */
+static inline void ib_dealloc_pd(struct ib_pd *pd)
+{
+ ib_dealloc_pd_user(pd, NULL);
+}
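/*
 * Illustrative sketch (not part of this patch): the kernel/user split pattern
 * introduced here. In-kernel ULPs keep calling ib_dealloc_pd(); uverbs passes
 * its udata through ib_dealloc_pd_user(). The function below is hypothetical.
 */
static int example_pd_cycle(struct ib_device *ibdev)
{
	struct ib_pd *pd;

	pd = ib_alloc_pd(ibdev, 0);
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	/* ... use the PD ... */

	ib_dealloc_pd(pd);	/* kernel PD: the _user variant gets NULL udata */
	return 0;
}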
enum rdma_create_ah_flags {
/* In a sleepable context */
@@ -3351,11 +3497,24 @@ enum rdma_destroy_ah_flags {
};
/**
- * rdma_destroy_ah - Destroys an address handle.
+ * rdma_destroy_ah_user - Destroys an address handle.
* @ah: The address handle to destroy.
* @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
+ * @udata: Valid user data or NULL for kernel objects
*/
-int rdma_destroy_ah(struct ib_ah *ah, u32 flags);
+int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata);
+
+/**
+ * rdma_destroy_ah - Destroys a kernel address handle.
+ * @ah: The address handle to destroy.
+ * @flags: Destroy address handle flags (see enum rdma_destroy_ah_flags).
+ *
+ * NOTE: for user ah use rdma_destroy_ah_user with valid udata!
+ */
+static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
+{
+ return rdma_destroy_ah_user(ah, flags, NULL);
+}
/**
* ib_create_srq - Creates a SRQ associated with the specified protection
@@ -3399,10 +3558,22 @@ int ib_query_srq(struct ib_srq *srq,
struct ib_srq_attr *srq_attr);
/**
- * ib_destroy_srq - Destroys the specified SRQ.
+ * ib_destroy_srq_user - Destroys the specified SRQ.
* @srq: The SRQ to destroy.
+ * @udata: Valid user data or NULL for kernel objects
*/
-int ib_destroy_srq(struct ib_srq *srq);
+int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata);
+
+/**
+ * ib_destroy_srq - Destroys the specified kernel SRQ.
+ * @srq: The SRQ to destroy.
+ *
+ * NOTE: for user srq use ib_destroy_srq_user with valid udata!
+ */
+static inline int ib_destroy_srq(struct ib_srq *srq)
+{
+ return ib_destroy_srq_user(srq, NULL);
+}
/**
* ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
@@ -3422,15 +3593,34 @@ static inline int ib_post_srq_recv(struct ib_srq *srq,
}
/**
- * ib_create_qp - Creates a QP associated with the specified protection
+ * ib_create_qp_user - Creates a QP associated with the specified protection
* domain.
* @pd: The protection domain associated with the QP.
* @qp_init_attr: A list of initial attributes required to create the
* QP. If QP creation succeeds, then the attributes are updated to
* the actual capabilities of the created QP.
+ * @udata: Valid user data or NULL for kernel objects
*/
-struct ib_qp *ib_create_qp(struct ib_pd *pd,
- struct ib_qp_init_attr *qp_init_attr);
+struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
+ struct ib_qp_init_attr *qp_init_attr,
+ struct ib_udata *udata);
+
+/**
+ * ib_create_qp - Creates a kernel QP associated with the specified protection
+ * domain.
+ * @pd: The protection domain associated with the QP.
+ * @qp_init_attr: A list of initial attributes required to create the
+ * QP. If QP creation succeeds, then the attributes are updated to
+ * the actual capabilities of the created QP.
+ *
+ * NOTE: for user qp use ib_create_qp_user with valid udata!
+ */
+static inline struct ib_qp *ib_create_qp(struct ib_pd *pd,
+ struct ib_qp_init_attr *qp_init_attr)
+{
+ return ib_create_qp_user(pd, qp_init_attr, NULL);
+}
/**
* ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
@@ -3480,8 +3670,20 @@ int ib_query_qp(struct ib_qp *qp,
/**
 * ib_destroy_qp_user - Destroys the specified QP.
 * @qp: The QP to destroy.
+ * @udata: Valid user data or NULL for kernel objects
*/
-int ib_destroy_qp(struct ib_qp *qp);
+int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata);
+
+/**
+ * ib_destroy_qp - Destroys the specified kernel QP.
+ * @qp: The QP to destroy.
+ *
+ * NOTE: for user qp use ib_destroy_qp_user with valid udata!
+ */
+static inline int ib_destroy_qp(struct ib_qp *qp)
+{
+ return ib_destroy_qp_user(qp, NULL);
+}
/**
* ib_open_qp - Obtain a reference to an existing sharable QP.
@@ -3541,13 +3743,66 @@ static inline int ib_post_recv(struct ib_qp *qp,
return qp->device->ops.post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
}
-struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
- int nr_cqe, int comp_vector,
- enum ib_poll_context poll_ctx, const char *caller);
-#define ib_alloc_cq(device, priv, nr_cqe, comp_vect, poll_ctx) \
- __ib_alloc_cq((device), (priv), (nr_cqe), (comp_vect), (poll_ctx), KBUILD_MODNAME)
+struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
+ int nr_cqe, int comp_vector,
+ enum ib_poll_context poll_ctx,
+ const char *caller, struct ib_udata *udata);
+
+/**
+ * ib_alloc_cq_user - Allocate kernel/user CQ
+ * @dev: The IB device
+ * @private: Private data attached to the CQE
+ * @nr_cqe: Number of CQEs in the CQ
+ * @comp_vector: Completion vector used for the IRQs
+ * @poll_ctx: Context used for polling the CQ
+ * @udata: Valid user data or NULL for kernel objects
+ */
+static inline struct ib_cq *ib_alloc_cq_user(struct ib_device *dev,
+ void *private, int nr_cqe,
+ int comp_vector,
+ enum ib_poll_context poll_ctx,
+ struct ib_udata *udata)
+{
+ return __ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
+ KBUILD_MODNAME, udata);
+}
+
+/**
+ * ib_alloc_cq - Allocate kernel CQ
+ * @dev: The IB device
+ * @private: Private data attached to the CQE
+ * @nr_cqe: Number of CQEs in the CQ
+ * @comp_vector: Completion vector used for the IRQs
+ * @poll_ctx: Context used for polling the CQ
+ *
+ * NOTE: for user cq use ib_alloc_cq_user with valid udata!
+ */
+static inline struct ib_cq *ib_alloc_cq(struct ib_device *dev, void *private,
+ int nr_cqe, int comp_vector,
+ enum ib_poll_context poll_ctx)
+{
+ return ib_alloc_cq_user(dev, private, nr_cqe, comp_vector, poll_ctx,
+ NULL);
+}
+
+/**
+ * ib_free_cq_user - Free kernel/user CQ
+ * @cq: The CQ to free
+ * @udata: Valid user data or NULL for kernel objects
+ */
+void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
+
+/**
+ * ib_free_cq - Free kernel CQ
+ * @cq: The CQ to free
+ *
+ * NOTE: for user cq use ib_free_cq_user with valid udata!
+ */
+static inline void ib_free_cq(struct ib_cq *cq)
+{
+ ib_free_cq_user(cq, NULL);
+}
-void ib_free_cq(struct ib_cq *cq);
int ib_process_cq_direct(struct ib_cq *cq, int budget);
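/*
 * Illustrative sketch (not part of this patch): allocating and freeing a
 * kernel-owned CQ with the wrappers above. The CQE count and completion
 * vector are arbitrary example values.
 */
static struct ib_cq *example_create_cq(struct ib_device *ibdev, void *priv)
{
	/* 256 CQEs on completion vector 0, polled from softirq context */
	return ib_alloc_cq(ibdev, priv, 256, 0, IB_POLL_SOFTIRQ);
}

static void example_destroy_cq(struct ib_cq *cq)
{
	ib_free_cq(cq);		/* kernel CQ: ib_free_cq_user() gets NULL udata */
}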
/**
@@ -3591,10 +3846,22 @@ int ib_resize_cq(struct ib_cq *cq, int cqe);
int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period);
/**
- * ib_destroy_cq - Destroys the specified CQ.
+ * ib_destroy_cq_user - Destroys the specified CQ.
* @cq: The CQ to destroy.
+ * @udata: Valid user data or NULL for kernel objects
*/
-int ib_destroy_cq(struct ib_cq *cq);
+int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata);
+
+/**
+ * ib_destroy_cq - Destroys the specified kernel CQ.
+ * @cq: The CQ to destroy.
+ *
+ * NOTE: for user cq use ib_destroy_cq_user with valid udata!
+ */
+static inline int ib_destroy_cq(struct ib_cq *cq)
+{
+ return ib_destroy_cq_user(cq, NULL);
+}
/**
* ib_poll_cq - poll a CQ for completion(s)
@@ -3848,17 +4115,37 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
}
/**
- * ib_dereg_mr - Deregisters a memory region and removes it from the
+ * ib_dereg_mr_user - Deregisters a memory region and removes it from the
+ * HCA translation table.
+ * @mr: The memory region to deregister.
+ * @udata: Valid user data or NULL for kernel object
+ *
+ * This function can fail if the memory region has memory windows bound to it.
+ */
+int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata);
+
+/**
+ * ib_dereg_mr - Deregisters a kernel memory region and removes it from the
* HCA translation table.
* @mr: The memory region to deregister.
*
 * This function can fail if the memory region has memory windows bound to it.
+ *
+ * NOTE: for user mr use ib_dereg_mr_user with valid udata!
*/
-int ib_dereg_mr(struct ib_mr *mr);
+static inline int ib_dereg_mr(struct ib_mr *mr)
+{
+ return ib_dereg_mr_user(mr, NULL);
+}
+
+struct ib_mr *ib_alloc_mr_user(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg, struct ib_udata *udata);
-struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
- enum ib_mr_type mr_type,
- u32 max_num_sg);
+static inline struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
+ enum ib_mr_type mr_type, u32 max_num_sg)
+{
+ return ib_alloc_mr_user(pd, mr_type, max_num_sg, NULL);
+}
/**
* ib_update_fast_reg_key - updates the key portion of the fast_reg MR
@@ -3956,8 +4243,9 @@ struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
/**
* ib_dealloc_xrcd - Deallocates an XRC domain.
* @xrcd: The XRC domain to deallocate.
+ * @udata: Valid user data or NULL for kernel object
*/
-int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
+int ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
static inline int ib_check_mr_access(int flags)
{
@@ -4033,7 +4321,7 @@ struct net_device *ib_device_netdev(struct ib_device *dev, u8 port);
struct ib_wq *ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr);
-int ib_destroy_wq(struct ib_wq *wq);
+int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
u32 wq_attr_mask);
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
@@ -4349,7 +4637,10 @@ rdma_set_device_sysfs_group(struct ib_device *dev,
*/
static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
{
- return container_of(device, struct ib_device, dev);
+ struct ib_core_device *coredev =
+ container_of(device, struct ib_core_device, dev);
+
+ return coredev->owner;
}
/**
@@ -4362,4 +4653,7 @@ static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
*/
#define rdma_device_to_drv_device(dev, drv_dev_struct, ibdev_member) \
container_of(rdma_device_to_ibdev(dev), drv_dev_struct, ibdev_member)
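/*
 * Illustrative sketch (not part of this patch): a driver sysfs attribute
 * resolving back to its own device structure via rdma_device_to_drv_device().
 * "struct my_dev", its "ibdev" member and "hw_rev" field are hypothetical.
 */
struct my_dev {
	struct ib_device ibdev;
	u32 hw_rev;
};

static ssize_t hw_rev_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct my_dev *dev =
		rdma_device_to_drv_device(device, struct my_dev, ibdev);

	return sprintf(buf, "%u\n", dev->hw_rev);
}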
+
+bool rdma_dev_access_netns(const struct ib_device *device,
+ const struct net *net);
#endif /* IB_VERBS_H */
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
index 0e1f02815643..5aa8a9c76aa0 100644
--- a/include/rdma/iw_cm.h
+++ b/include/rdma/iw_cm.h
@@ -118,31 +118,6 @@ enum iw_flags {
IW_F_NO_PORT_MAP = (1 << 0),
};
-struct iw_cm_verbs {
- void (*add_ref)(struct ib_qp *qp);
-
- void (*rem_ref)(struct ib_qp *qp);
-
- struct ib_qp * (*get_qp)(struct ib_device *device,
- int qpn);
-
- int (*connect)(struct iw_cm_id *cm_id,
- struct iw_cm_conn_param *conn_param);
-
- int (*accept)(struct iw_cm_id *cm_id,
- struct iw_cm_conn_param *conn_param);
-
- int (*reject)(struct iw_cm_id *cm_id,
- const void *pdata, u8 pdata_len);
-
- int (*create_listen)(struct iw_cm_id *cm_id,
- int backlog);
-
- int (*destroy_listen)(struct iw_cm_id *cm_id);
- char ifname[IFNAMSIZ];
- enum iw_flags driver_flags;
-};
-
/**
* iw_create_cm_id - Create an IW CM identifier.
*
diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h
index b4f0ac02f283..7147a9263011 100644
--- a/include/rdma/opa_port_info.h
+++ b/include/rdma/opa_port_info.h
@@ -413,6 +413,6 @@ struct opa_port_info {
u8 local_port_num;
u8 reserved12;
u8 reserved13; /* was guid_cap */
-} __attribute__ ((packed));
+} __packed;
#endif /* OPA_PORT_INFO_H */
diff --git a/include/rdma/opa_smi.h b/include/rdma/opa_smi.h
index f7896117936e..c7b2ef12792d 100644
--- a/include/rdma/opa_smi.h
+++ b/include/rdma/opa_smi.h
@@ -98,7 +98,7 @@ struct opa_smp {
struct opa_node_description {
u8 data[64];
-} __attribute__ ((packed));
+} __packed;
struct opa_node_info {
u8 base_version;
@@ -114,7 +114,7 @@ struct opa_node_info {
__be32 revision;
u8 local_port_num;
u8 vendor_id[3]; /* network byte order */
-} __attribute__ ((packed));
+} __packed;
#define OPA_PARTITION_TABLE_BLK_SIZE 32
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 4c257aff7d32..b9cd06db1a71 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -59,7 +59,6 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>
#include <rdma/rdmavt_mr.h>
-#include <rdma/rdmavt_qp.h>
#define RVT_MAX_PKEY_VALUES 16
@@ -72,6 +71,8 @@ struct trap_list {
struct list_head list;
};
+struct rvt_qp;
+struct rvt_qpn_table;
struct rvt_ibport {
struct rvt_qp __rcu *qp[2];
struct ib_mad_agent *send_agent; /* agent for SMI (traps) */
@@ -206,6 +207,20 @@ struct rvt_ah {
u8 log_pmtu;
};
+/*
+ * This structure is used by rvt_mmap() to validate an offset
+ * when an mmap() request is made. The vm_area_struct then uses
+ * this as its vm_private_data.
+ */
+struct rvt_mmap_info {
+ struct list_head pending_mmaps;
+ struct ib_ucontext *context;
+ void *obj;
+ __u64 offset;
+ struct kref ref;
+ u32 size;
+};
+
/* memory working set size */
struct rvt_wss {
unsigned long *entries;
@@ -501,16 +516,6 @@ static inline struct rvt_dev_info *ib_to_rvt(struct ib_device *ibdev)
return container_of(ibdev, struct rvt_dev_info, ibdev);
}
-static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
-{
- return container_of(ibsrq, struct rvt_srq, ibsrq);
-}
-
-static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
-{
- return container_of(ibqp, struct rvt_qp, ibqp);
-}
-
static inline unsigned rvt_get_npkeys(struct rvt_dev_info *rdi)
{
/*
@@ -548,57 +553,6 @@ static inline u16 rvt_get_pkey(struct rvt_dev_info *rdi,
return rdi->ports[port_index]->pkey_table[index];
}
-/**
- * rvt_lookup_qpn - return the QP with the given QPN
- * @ibp: the ibport
- * @qpn: the QP number to look up
- *
- * The caller must hold the rcu_read_lock(), and keep the lock until
- * the returned qp is no longer in use.
- */
-/* TODO: Remove this and put in rdmavt/qp.h when no longer needed by drivers */
-static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
- struct rvt_ibport *rvp,
- u32 qpn) __must_hold(RCU)
-{
- struct rvt_qp *qp = NULL;
-
- if (unlikely(qpn <= 1)) {
- qp = rcu_dereference(rvp->qp[qpn]);
- } else {
- u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);
-
- for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
- qp = rcu_dereference(qp->next))
- if (qp->ibqp.qp_num == qpn)
- break;
- }
- return qp;
-}
-
-/**
- * rvt_mod_retry_timer - mod a retry timer
- * @qp - the QP
- * @shift - timeout shift to wait for multiple packets
- * Modify a potentially already running retry timer
- */
-static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
-{
- struct ib_qp *ibqp = &qp->ibqp;
- struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
-
- lockdep_assert_held(&qp->s_lock);
- qp->s_flags |= RVT_S_TIMER;
- /* 4.096 usec. * (1 << qp->timeout) */
- mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
- (qp->timeout_jiffies << shift));
-}
-
-static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
-{
- return rvt_mod_retry_timer_ext(qp, 0);
-}
-
struct rvt_dev_info *rvt_alloc_device(size_t size, int nports);
void rvt_dealloc_device(struct rvt_dev_info *rdi);
int rvt_register_device(struct rvt_dev_info *rvd, u32 driver_id);
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index f0fbd4063fef..68e38c20afc0 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -83,7 +83,6 @@
* RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
* next send completion entry not via send DMA
* RVT_S_WAIT_PIO - waiting for a send buffer to be available
- * RVT_S_WAIT_PIO_DRAIN - waiting for a qp to drain pio packets
* RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
* RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
* RVT_S_WAIT_KMEM - waiting for kernel memory to be available
@@ -212,20 +211,6 @@ struct rvt_rq {
};
/*
- * This structure is used by rvt_mmap() to validate an offset
- * when an mmap() request is made. The vm_area_struct then uses
- * this as its vm_private_data.
- */
-struct rvt_mmap_info {
- struct list_head pending_mmaps;
- struct ib_ucontext *context;
- void *obj;
- __u64 offset;
- struct kref ref;
- unsigned size;
-};
-
-/*
* This structure holds the information that the send tasklet needs
* to send a RDMA read response or atomic operation.
*/
@@ -399,6 +384,16 @@ struct rvt_srq {
u32 limit;
};
+static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
+{
+ return container_of(ibsrq, struct rvt_srq, ibsrq);
+}
+
+static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
+{
+ return container_of(ibqp, struct rvt_qp, ibqp);
+}
+
#define RVT_QPN_MAX BIT(24)
#define RVT_QPNMAP_ENTRIES (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
@@ -678,6 +673,70 @@ static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
}
+/**
+ * rvt_lookup_qpn - return the QP with the given QPN
+ * @rdi: the rvt device info
+ * @rvp: the ibport
+ * @qpn: the QP number to look up
+ *
+ * The caller must hold the rcu_read_lock(), and keep the lock until
+ * the returned qp is no longer in use.
+ */
+static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
+ struct rvt_ibport *rvp,
+ u32 qpn) __must_hold(RCU)
+{
+ struct rvt_qp *qp = NULL;
+
+ if (unlikely(qpn <= 1)) {
+ qp = rcu_dereference(rvp->qp[qpn]);
+ } else {
+ u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);
+
+ for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
+ qp = rcu_dereference(qp->next))
+ if (qp->ibqp.qp_num == qpn)
+ break;
+ }
+ return qp;
+}
+
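/*
 * Illustrative sketch (not part of this patch): callers must hold the RCU
 * read lock across the lookup and any use of the returned QP. The wrapper
 * below is hypothetical.
 */
static bool example_qp_exists(struct rvt_dev_info *rdi,
			      struct rvt_ibport *rvp, u32 qpn)
{
	bool found;

	rcu_read_lock();
	found = rvt_lookup_qpn(rdi, rvp, qpn) != NULL;
	rcu_read_unlock();

	return found;
}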
+/**
+ * rvt_mod_retry_timer - mod a retry timer
+ * @qp - the QP
+ * @shift - timeout shift to wait for multiple packets
+ * Modify a potentially already running retry timer
+ */
+static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
+{
+ struct ib_qp *ibqp = &qp->ibqp;
+ struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
+
+ lockdep_assert_held(&qp->s_lock);
+ qp->s_flags |= RVT_S_TIMER;
+ /* 4.096 usec. * (1 << qp->timeout) */
+ mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
+ (qp->timeout_jiffies << shift));
+}
+
+static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
+{
+ return rvt_mod_retry_timer_ext(qp, 0);
+}
+
+/**
+ * rvt_put_qp_swqe - drop refs held by swqe
+ * @qp: the send qp
+ * @wqe: the send wqe
+ *
+ * This drops any references held by the swqe
+ */
+static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
+{
+ rvt_put_swqe(wqe);
+ if (qp->allowed_ops == IB_OPCODE_UD)
+ atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
+}
+
extern const int ib_rvt_state_ops[];
struct rvt_dev_info;
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
index 794c47565971..05eabfd5d0d3 100644
--- a/include/rdma/uverbs_std_types.h
+++ b/include/rdma/uverbs_std_types.h
@@ -48,17 +48,15 @@
#define uobj_get_type(_attrs, _object) \
uapi_get_object((_attrs)->ufile->device->uapi, _object)
-struct ib_uobject *_uobj_get_read(enum uverbs_default_objects type,
- u32 object_id,
- struct uverbs_attr_bundle *attrs);
-
#define uobj_get_read(_type, _id, _attrs) \
- _uobj_get_read(_type, _uobj_check_id(_id), _attrs)
+ rdma_lookup_get_uobject(uobj_get_type(_attrs, _type), (_attrs)->ufile, \
+ _uobj_check_id(_id), UVERBS_LOOKUP_READ, \
+ _attrs)
#define ufd_get_read(_type, _fdnum, _attrs) \
rdma_lookup_get_uobject(uobj_get_type(_attrs, _type), (_attrs)->ufile, \
(_fdnum)*typecheck(s32, _fdnum), \
- UVERBS_LOOKUP_READ)
+ UVERBS_LOOKUP_READ, _attrs)
static inline void *_uobj_get_obj_read(struct ib_uobject *uobj)
{
@@ -70,22 +68,19 @@ static inline void *_uobj_get_obj_read(struct ib_uobject *uobj)
((struct ib_##_object *)_uobj_get_obj_read( \
uobj_get_read(_type, _id, _attrs)))
-struct ib_uobject *_uobj_get_write(enum uverbs_default_objects type,
- u32 object_id,
- struct uverbs_attr_bundle *attrs);
-
#define uobj_get_write(_type, _id, _attrs) \
- _uobj_get_write(_type, _uobj_check_id(_id), _attrs)
+ rdma_lookup_get_uobject(uobj_get_type(_attrs, _type), (_attrs)->ufile, \
+ _uobj_check_id(_id), UVERBS_LOOKUP_WRITE, \
+ _attrs)
int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
- const struct uverbs_attr_bundle *attrs);
+ struct uverbs_attr_bundle *attrs);
#define uobj_perform_destroy(_type, _id, _attrs) \
__uobj_perform_destroy(uobj_get_type(_attrs, _type), \
_uobj_check_id(_id), _attrs)
struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
- u32 id,
- const struct uverbs_attr_bundle *attrs);
+ u32 id, struct uverbs_attr_bundle *attrs);
#define uobj_get_destroy(_type, _id, _attrs) \
__uobj_get_destroy(uobj_get_type(_attrs, _type), _uobj_check_id(_id), \
@@ -109,30 +104,31 @@ static inline void uobj_put_write(struct ib_uobject *uobj)
rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
}
-static inline int __must_check uobj_alloc_commit(struct ib_uobject *uobj)
+static inline int __must_check
+uobj_alloc_commit(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs)
{
- int ret = rdma_alloc_commit_uobject(uobj);
+ int ret = rdma_alloc_commit_uobject(uobj, attrs);
if (ret)
return ret;
return 0;
}
-static inline void uobj_alloc_abort(struct ib_uobject *uobj)
+static inline void uobj_alloc_abort(struct ib_uobject *uobj,
+ struct uverbs_attr_bundle *attrs)
{
- rdma_alloc_abort_uobject(uobj);
+ rdma_alloc_abort_uobject(uobj, attrs);
}
static inline struct ib_uobject *
__uobj_alloc(const struct uverbs_api_object *obj,
struct uverbs_attr_bundle *attrs, struct ib_device **ib_dev)
{
- struct ib_uobject *uobj = rdma_alloc_begin_uobject(obj, attrs->ufile);
+ struct ib_uobject *uobj =
+ rdma_alloc_begin_uobject(obj, attrs->ufile, attrs);
- if (!IS_ERR(uobj)) {
- *ib_dev = uobj->context->device;
- attrs->context = uobj->context;
- }
+ if (!IS_ERR(uobj))
+ *ib_dev = attrs->context->device;
return uobj;
}
diff --git a/include/rdma/uverbs_types.h b/include/rdma/uverbs_types.h
index 175d761695e1..d57a5ba00c74 100644
--- a/include/rdma/uverbs_types.h
+++ b/include/rdma/uverbs_types.h
@@ -95,7 +95,8 @@ struct uverbs_obj_type_class {
void (*lookup_put)(struct ib_uobject *uobj, enum rdma_lookup_mode mode);
/* This does not consume the kref on uobj */
int __must_check (*destroy_hw)(struct ib_uobject *uobj,
- enum rdma_remove_reason why);
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs);
void (*remove_handle)(struct ib_uobject *uobj);
u8 needs_kfree_rcu;
};
@@ -126,18 +127,23 @@ struct uverbs_obj_idr_type {
* completely unchanged.
*/
int __must_check (*destroy_object)(struct ib_uobject *uobj,
- enum rdma_remove_reason why);
+ enum rdma_remove_reason why,
+ struct uverbs_attr_bundle *attrs);
};
struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
struct ib_uverbs_file *ufile, s64 id,
- enum rdma_lookup_mode mode);
+ enum rdma_lookup_mode mode,
+ struct uverbs_attr_bundle *attrs);
void rdma_lookup_put_uobject(struct ib_uobject *uobj,
enum rdma_lookup_mode mode);
struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
- struct ib_uverbs_file *ufile);
-void rdma_alloc_abort_uobject(struct ib_uobject *uobj);
-int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj);
+ struct ib_uverbs_file *ufile,
+ struct uverbs_attr_bundle *attrs);
+void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
+ struct uverbs_attr_bundle *attrs);
+int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj,
+ struct uverbs_attr_bundle *attrs);
struct uverbs_obj_fd_type {
/*
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 56b2dba7d911..b08febec7895 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -224,6 +224,13 @@ struct sas_work {
struct work_struct work;
};
+/* Lots of code duplicates this in the SCSI tree, which can be factored out */
+static inline bool sas_dev_type_is_expander(enum sas_device_type type)
+{
+ return type == SAS_EDGE_EXPANDER_DEVICE ||
+ type == SAS_FANOUT_EXPANDER_DEVICE;
+}
+
static inline void INIT_SAS_WORK(struct sas_work *sw, void (*fn)(struct work_struct *))
{
INIT_WORK(&sw->work, fn);
@@ -245,9 +252,9 @@ static inline struct sas_discovery_event *to_sas_discovery_event(struct work_str
struct sas_discovery {
struct sas_discovery_event disc_work[DISC_NUM_EVENTS];
unsigned long pending;
- u8 fanout_sas_addr[8];
- u8 eeds_a[8];
- u8 eeds_b[8];
+ u8 fanout_sas_addr[SAS_ADDR_SIZE];
+ u8 eeds_a[SAS_ADDR_SIZE];
+ u8 eeds_b[SAS_ADDR_SIZE];
int max_level;
};
diff --git a/include/scsi/osd_attributes.h b/include/scsi/osd_attributes.h
deleted file mode 100644
index 8a6acd054e4e..000000000000
--- a/include/scsi/osd_attributes.h
+++ /dev/null
@@ -1,398 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __OSD_ATTRIBUTES_H__
-#define __OSD_ATTRIBUTES_H__
-
-#include <scsi/osd_protocol.h>
-
-/*
- * Contains types and constants that define attribute pages and attribute
- * numbers and their data types.
- */
-
-#define ATTR_SET(pg, id, l, ptr) \
- { .attr_page = pg, .attr_id = id, .len = l, .val_ptr = ptr }
-
-#define ATTR_DEF(pg, id, l) ATTR_SET(pg, id, l, NULL)
-
-/* osd-r10 4.7.3 Attributes pages */
-enum {
- OSD_APAGE_OBJECT_FIRST = 0x0,
- OSD_APAGE_OBJECT_DIRECTORY = 0,
- OSD_APAGE_OBJECT_INFORMATION = 1,
- OSD_APAGE_OBJECT_QUOTAS = 2,
- OSD_APAGE_OBJECT_TIMESTAMP = 3,
- OSD_APAGE_OBJECT_COLLECTIONS = 4,
- OSD_APAGE_OBJECT_SECURITY = 5,
- OSD_APAGE_OBJECT_LAST = 0x2fffffff,
-
- OSD_APAGE_PARTITION_FIRST = 0x30000000,
- OSD_APAGE_PARTITION_DIRECTORY = OSD_APAGE_PARTITION_FIRST + 0,
- OSD_APAGE_PARTITION_INFORMATION = OSD_APAGE_PARTITION_FIRST + 1,
- OSD_APAGE_PARTITION_QUOTAS = OSD_APAGE_PARTITION_FIRST + 2,
- OSD_APAGE_PARTITION_TIMESTAMP = OSD_APAGE_PARTITION_FIRST + 3,
- OSD_APAGE_PARTITION_ATTR_ACCESS = OSD_APAGE_PARTITION_FIRST + 4,
- OSD_APAGE_PARTITION_SECURITY = OSD_APAGE_PARTITION_FIRST + 5,
- OSD_APAGE_PARTITION_LAST = 0x5FFFFFFF,
-
- OSD_APAGE_COLLECTION_FIRST = 0x60000000,
- OSD_APAGE_COLLECTION_DIRECTORY = OSD_APAGE_COLLECTION_FIRST + 0,
- OSD_APAGE_COLLECTION_INFORMATION = OSD_APAGE_COLLECTION_FIRST + 1,
- OSD_APAGE_COLLECTION_TIMESTAMP = OSD_APAGE_COLLECTION_FIRST + 3,
- OSD_APAGE_COLLECTION_SECURITY = OSD_APAGE_COLLECTION_FIRST + 5,
- OSD_APAGE_COLLECTION_LAST = 0x8FFFFFFF,
-
- OSD_APAGE_ROOT_FIRST = 0x90000000,
- OSD_APAGE_ROOT_DIRECTORY = OSD_APAGE_ROOT_FIRST + 0,
- OSD_APAGE_ROOT_INFORMATION = OSD_APAGE_ROOT_FIRST + 1,
- OSD_APAGE_ROOT_QUOTAS = OSD_APAGE_ROOT_FIRST + 2,
- OSD_APAGE_ROOT_TIMESTAMP = OSD_APAGE_ROOT_FIRST + 3,
- OSD_APAGE_ROOT_SECURITY = OSD_APAGE_ROOT_FIRST + 5,
- OSD_APAGE_ROOT_LAST = 0xBFFFFFFF,
-
- OSD_APAGE_RESERVED_TYPE_FIRST = 0xC0000000,
- OSD_APAGE_RESERVED_TYPE_LAST = 0xEFFFFFFF,
-
- OSD_APAGE_COMMON_FIRST = 0xF0000000,
- OSD_APAGE_COMMON_LAST = 0xFFFFFFFD,
-
- OSD_APAGE_CURRENT_COMMAND = 0xFFFFFFFE,
-
- OSD_APAGE_REQUEST_ALL = 0xFFFFFFFF,
-};
-
-/* subcategories of attr pages within each range above */
-enum {
- OSD_APAGE_STD_FIRST = 0x0,
- OSD_APAGE_STD_DIRECTORY = 0,
- OSD_APAGE_STD_INFORMATION = 1,
- OSD_APAGE_STD_QUOTAS = 2,
- OSD_APAGE_STD_TIMESTAMP = 3,
- OSD_APAGE_STD_COLLECTIONS = 4,
- OSD_APAGE_STD_POLICY_SECURITY = 5,
- OSD_APAGE_STD_LAST = 0x0000007F,
-
- OSD_APAGE_RESERVED_FIRST = 0x00000080,
- OSD_APAGE_RESERVED_LAST = 0x00007FFF,
-
- OSD_APAGE_OTHER_STD_FIRST = 0x00008000,
- OSD_APAGE_OTHER_STD_LAST = 0x0000EFFF,
-
- OSD_APAGE_PUBLIC_FIRST = 0x0000F000,
- OSD_APAGE_PUBLIC_LAST = 0x0000FFFF,
-
- OSD_APAGE_APP_DEFINED_FIRST = 0x00010000,
- OSD_APAGE_APP_DEFINED_LAST = 0x1FFFFFFF,
-
- OSD_APAGE_VENDOR_SPECIFIC_FIRST = 0x20000000,
- OSD_APAGE_VENDOR_SPECIFIC_LAST = 0x2FFFFFFF,
-};
-
-enum {
- OSD_ATTR_PAGE_IDENTIFICATION = 0, /* in all pages 40 bytes */
-};
-
-struct page_identification {
- u8 vendor_identification[8];
- u8 page_identification[32];
-} __packed;
-
-struct osd_attr_page_header {
- __be32 page_number;
- __be32 page_length;
-} __packed;
-
-/* 7.1.2.8 Root Information attributes page (OSD_APAGE_ROOT_INFORMATION) */
-enum {
- OSD_ATTR_RI_OSD_SYSTEM_ID = 0x3, /* 20 */
- OSD_ATTR_RI_VENDOR_IDENTIFICATION = 0x4, /* 8 */
- OSD_ATTR_RI_PRODUCT_IDENTIFICATION = 0x5, /* 16 */
- OSD_ATTR_RI_PRODUCT_MODEL = 0x6, /* 32 */
- OSD_ATTR_RI_PRODUCT_REVISION_LEVEL = 0x7, /* 4 */
- OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER = 0x8, /* variable */
- OSD_ATTR_RI_OSD_NAME = 0x9, /* variable */
- OSD_ATTR_RI_MAX_CDB_CONTINUATION_LEN = 0xA, /* 4 */
- OSD_ATTR_RI_TOTAL_CAPACITY = 0x80, /* 8 */
- OSD_ATTR_RI_USED_CAPACITY = 0x81, /* 8 */
- OSD_ATTR_RI_NUMBER_OF_PARTITIONS = 0xC0, /* 8 */
- OSD_ATTR_RI_CLOCK = 0x100, /* 6 */
- OARI_DEFAULT_ISOLATION_METHOD = 0X110, /* 1 */
- OARI_SUPPORTED_ISOLATION_METHODS = 0X111, /* 32 */
-
- OARI_DATA_ATOMICITY_GUARANTEE = 0X120, /* 8 */
- OARI_DATA_ATOMICITY_ALIGNMENT = 0X121, /* 8 */
- OARI_ATTRIBUTES_ATOMICITY_GUARANTEE = 0X122, /* 8 */
- OARI_DATA_ATTRIBUTES_ATOMICITY_MULTIPLIER = 0X123, /* 1 */
-
- OARI_MAXIMUM_SNAPSHOTS_COUNT = 0X1C1, /* 0 or 4 */
- OARI_MAXIMUM_CLONES_COUNT = 0X1C2, /* 0 or 4 */
- OARI_MAXIMUM_BRANCH_DEPTH = 0X1CC, /* 0 or 4 */
- OARI_SUPPORTED_OBJECT_DUPLICATION_METHOD_FIRST = 0X200, /* 0 or 4 */
- OARI_SUPPORTED_OBJECT_DUPLICATION_METHOD_LAST = 0X2ff, /* 0 or 4 */
- OARI_SUPPORTED_TIME_OF_DUPLICATION_METHOD_FIRST = 0X300, /* 0 or 4 */
- OARI_SUPPORTED_TIME_OF_DUPLICATION_METHOD_LAST = 0X30F, /* 0 or 4 */
- OARI_SUPPORT_FOR_DUPLICATED_OBJECT_FREEZING = 0X310, /* 0 or 4 */
- OARI_SUPPORT_FOR_SNAPSHOT_REFRESHING = 0X311, /* 0 or 1 */
- OARI_SUPPORTED_CDB_CONTINUATION_DESC_TYPE_FIRST = 0X7000001,/* 0 or 4 */
- OARI_SUPPORTED_CDB_CONTINUATION_DESC_TYPE_LAST = 0X700FFFF,/* 0 or 4 */
-};
-/* Root_Information_attributes_page does not have a get_page structure */
-
-/* 7.1.2.9 Partition Information attributes page
- * (OSD_APAGE_PARTITION_INFORMATION)
- */
-enum {
- OSD_ATTR_PI_PARTITION_ID = 0x1, /* 8 */
- OSD_ATTR_PI_USERNAME = 0x9, /* variable */
- OSD_ATTR_PI_USED_CAPACITY = 0x81, /* 8 */
- OSD_ATTR_PI_USED_CAPACITY_INCREMENT = 0x84, /* 0 or 8 */
- OSD_ATTR_PI_NUMBER_OF_OBJECTS = 0xC1, /* 8 */
-
- OSD_ATTR_PI_ACTUAL_DATA_SPACE = 0xD1, /* 0 or 8 */
- OSD_ATTR_PI_RESERVED_DATA_SPACE = 0xD2, /* 0 or 8 */
- OSD_ATTR_PI_DEFAULT_SNAPSHOT_DUPLICATION_METHOD = 0x200,/* 0 or 4 */
- OSD_ATTR_PI_DEFAULT_CLONE_DUPLICATION_METHOD = 0x201,/* 0 or 4 */
- OSD_ATTR_PI_DEFAULT_SP_TIME_OF_DUPLICATION = 0x300,/* 0 or 4 */
- OSD_ATTR_PI_DEFAULT_CLONE_TIME_OF_DUPLICATION = 0x301,/* 0 or 4 */
-};
-/* Partition Information attributes page does not have a get_page structure */
-
-/* 7.1.2.10 Collection Information attributes page
- * (OSD_APAGE_COLLECTION_INFORMATION)
- */
-enum {
- OSD_ATTR_CI_PARTITION_ID = 0x1, /* 8 */
- OSD_ATTR_CI_COLLECTION_OBJECT_ID = 0x2, /* 8 */
- OSD_ATTR_CI_USERNAME = 0x9, /* variable */
- OSD_ATTR_CI_COLLECTION_TYPE = 0xA, /* 1 */
- OSD_ATTR_CI_USED_CAPACITY = 0x81, /* 8 */
-};
-/* Collection Information attributes page does not have a get_page structure */
-
-/* 7.1.2.11 User Object Information attributes page
- * (OSD_APAGE_OBJECT_INFORMATION)
- */
-enum {
- OSD_ATTR_OI_PARTITION_ID = 0x1, /* 8 */
- OSD_ATTR_OI_OBJECT_ID = 0x2, /* 8 */
- OSD_ATTR_OI_USERNAME = 0x9, /* variable */
- OSD_ATTR_OI_USED_CAPACITY = 0x81, /* 8 */
- OSD_ATTR_OI_LOGICAL_LENGTH = 0x82, /* 8 */
- SD_ATTR_OI_ACTUAL_DATA_SPACE = 0XD1, /* 0 OR 8 */
- SD_ATTR_OI_RESERVED_DATA_SPACE = 0XD2, /* 0 OR 8 */
-};
-/* Object Information attributes page does not have a get_page structure */
-
-/* 7.1.2.12 Root Quotas attributes page (OSD_APAGE_ROOT_QUOTAS) */
-enum {
- OSD_ATTR_RQ_DEFAULT_MAXIMUM_USER_OBJECT_LENGTH = 0x1, /* 8 */
- OSD_ATTR_RQ_PARTITION_CAPACITY_QUOTA = 0x10001, /* 8 */
- OSD_ATTR_RQ_PARTITION_OBJECT_COUNT = 0x10002, /* 8 */
- OSD_ATTR_RQ_PARTITION_COLLECTIONS_PER_USER_OBJECT = 0x10081, /* 4 */
- OSD_ATTR_RQ_PARTITION_COUNT = 0x20002, /* 8 */
-};
-
-struct Root_Quotas_attributes_page {
- struct osd_attr_page_header hdr; /* id=R+2, size=0x24 */
- __be64 default_maximum_user_object_length;
- __be64 partition_capacity_quota;
- __be64 partition_object_count;
- __be64 partition_collections_per_user_object;
- __be64 partition_count;
-} __packed;
-
-/* 7.1.2.13 Partition Quotas attributes page (OSD_APAGE_PARTITION_QUOTAS)*/
-enum {
- OSD_ATTR_PQ_DEFAULT_MAXIMUM_USER_OBJECT_LENGTH = 0x1, /* 8 */
- OSD_ATTR_PQ_CAPACITY_QUOTA = 0x10001, /* 8 */
- OSD_ATTR_PQ_OBJECT_COUNT = 0x10002, /* 8 */
- OSD_ATTR_PQ_COLLECTIONS_PER_USER_OBJECT = 0x10081, /* 4 */
-};
-
-struct Partition_Quotas_attributes_page {
- struct osd_attr_page_header hdr; /* id=P+2, size=0x1C */
- __be64 default_maximum_user_object_length;
- __be64 capacity_quota;
- __be64 object_count;
- __be64 collections_per_user_object;
-} __packed;
-
-/* 7.1.2.14 User Object Quotas attributes page (OSD_APAGE_OBJECT_QUOTAS) */
-enum {
- OSD_ATTR_OQ_MAXIMUM_LENGTH = 0x1, /* 8 */
-};
-
-struct Object_Quotas_attributes_page {
- struct osd_attr_page_header hdr; /* id=U+2, size=0x8 */
- __be64 maximum_length;
-} __packed;
-
-/* 7.1.2.15 Root Timestamps attributes page (OSD_APAGE_ROOT_TIMESTAMP) */
-enum {
- OSD_ATTR_RT_ATTRIBUTES_ACCESSED_TIME = 0x2, /* 6 */
- OSD_ATTR_RT_ATTRIBUTES_MODIFIED_TIME = 0x3, /* 6 */
- OSD_ATTR_RT_TIMESTAMP_BYPASS = 0xFFFFFFFE, /* 1 */
-};
-
-struct root_timestamps_attributes_page {
- struct osd_attr_page_header hdr; /* id=R+3, size=0xD */
- struct osd_timestamp attributes_accessed_time;
- struct osd_timestamp attributes_modified_time;
- u8 timestamp_bypass;
-} __packed;
-
-/* 7.1.2.16 Partition Timestamps attributes page
- * (OSD_APAGE_PARTITION_TIMESTAMP)
- */
-enum {
- OSD_ATTR_PT_CREATED_TIME = 0x1, /* 6 */
- OSD_ATTR_PT_ATTRIBUTES_ACCESSED_TIME = 0x2, /* 6 */
- OSD_ATTR_PT_ATTRIBUTES_MODIFIED_TIME = 0x3, /* 6 */
- OSD_ATTR_PT_DATA_ACCESSED_TIME = 0x4, /* 6 */
- OSD_ATTR_PT_DATA_MODIFIED_TIME = 0x5, /* 6 */
- OSD_ATTR_PT_TIMESTAMP_BYPASS = 0xFFFFFFFE, /* 1 */
-};
-
-struct partition_timestamps_attributes_page {
- struct osd_attr_page_header hdr; /* id=P+3, size=0x1F */
- struct osd_timestamp created_time;
- struct osd_timestamp attributes_accessed_time;
- struct osd_timestamp attributes_modified_time;
- struct osd_timestamp data_accessed_time;
- struct osd_timestamp data_modified_time;
- u8 timestamp_bypass;
-} __packed;
-
-/* 7.1.2.17/18 Collection/Object Timestamps attributes page
- * (OSD_APAGE_COLLECTION_TIMESTAMP/OSD_APAGE_OBJECT_TIMESTAMP)
- */
-enum {
- OSD_ATTR_OT_CREATED_TIME = 0x1, /* 6 */
- OSD_ATTR_OT_ATTRIBUTES_ACCESSED_TIME = 0x2, /* 6 */
- OSD_ATTR_OT_ATTRIBUTES_MODIFIED_TIME = 0x3, /* 6 */
- OSD_ATTR_OT_DATA_ACCESSED_TIME = 0x4, /* 6 */
- OSD_ATTR_OT_DATA_MODIFIED_TIME = 0x5, /* 6 */
-};
-
-/* same for collection */
-struct object_timestamps_attributes_page {
- struct osd_attr_page_header hdr; /* id=C+3/3, size=0x1E */
- struct osd_timestamp created_time;
- struct osd_timestamp attributes_accessed_time;
- struct osd_timestamp attributes_modified_time;
- struct osd_timestamp data_accessed_time;
- struct osd_timestamp data_modified_time;
-} __packed;
-
-/* OSD2r05: 7.1.3.19 Attributes Access attributes page
- * (OSD_APAGE_PARTITION_ATTR_ACCESS)
- *
- * each attribute is of the form below. Total array length is deduced
- * from the attribute's length
- * (See allowed_attributes_access of the struct osd_cap_object_descriptor)
- */
-struct attributes_access_attr {
- struct osd_attributes_list_attrid attr_list[0];
-} __packed;
-
-/* OSD2r05: 7.1.2.21 Collections attributes page */
-/* TBD */
-
-/* 7.1.2.20 Root Policy/Security attributes page (OSD_APAGE_ROOT_SECURITY) */
-enum {
- OSD_ATTR_RS_DEFAULT_SECURITY_METHOD = 0x1, /* 1 */
- OSD_ATTR_RS_OLDEST_VALID_NONCE_LIMIT = 0x2, /* 6 */
- OSD_ATTR_RS_NEWEST_VALID_NONCE_LIMIT = 0x3, /* 6 */
- OSD_ATTR_RS_PARTITION_DEFAULT_SECURITY_METHOD = 0x6, /* 1 */
- OSD_ATTR_RS_SUPPORTED_SECURITY_METHODS = 0x7, /* 2 */
- OSD_ATTR_RS_ADJUSTABLE_CLOCK = 0x9, /* 6 */
- OSD_ATTR_RS_MASTER_KEY_IDENTIFIER = 0x7FFD, /* 0 or 7 */
- OSD_ATTR_RS_ROOT_KEY_IDENTIFIER = 0x7FFE, /* 0 or 7 */
- OSD_ATTR_RS_SUPPORTED_INTEGRITY_ALGORITHM_0 = 0x80000000,/* 1,(x16)*/
- OSD_ATTR_RS_SUPPORTED_DH_GROUP_0 = 0x80000010,/* 1,(x16)*/
-};
-
-struct root_security_attributes_page {
- struct osd_attr_page_header hdr; /* id=R+5, size=0x3F */
- u8 default_security_method;
- u8 partition_default_security_method;
- __be16 supported_security_methods;
- u8 mki_valid_rki_valid;
- struct osd_timestamp oldest_valid_nonce_limit;
- struct osd_timestamp newest_valid_nonce_limit;
- struct osd_timestamp adjustable_clock;
- u8 master_key_identifier[32-25];
- u8 root_key_identifier[39-32];
- u8 supported_integrity_algorithm[16];
- u8 supported_dh_group[16];
-} __packed;
-
-/* 7.1.2.21 Partition Policy/Security attributes page
- * (OSD_APAGE_PARTITION_SECURITY)
- */
-enum {
- OSD_ATTR_PS_DEFAULT_SECURITY_METHOD = 0x1, /* 1 */
- OSD_ATTR_PS_OLDEST_VALID_NONCE = 0x2, /* 6 */
- OSD_ATTR_PS_NEWEST_VALID_NONCE = 0x3, /* 6 */
- OSD_ATTR_PS_REQUEST_NONCE_LIST_DEPTH = 0x4, /* 2 */
- OSD_ATTR_PS_FROZEN_WORKING_KEY_BIT_MASK = 0x5, /* 2 */
- OSD_ATTR_PS_PARTITION_KEY_IDENTIFIER = 0x7FFF, /* 0 or 7 */
- OSD_ATTR_PS_WORKING_KEY_IDENTIFIER_FIRST = 0x8000, /* 0 or 7 */
- OSD_ATTR_PS_WORKING_KEY_IDENTIFIER_LAST = 0x800F, /* 0 or 7 */
- OSD_ATTR_PS_POLICY_ACCESS_TAG = 0x40000001, /* 4 */
- OSD_ATTR_PS_USER_OBJECT_POLICY_ACCESS_TAG = 0x40000002, /* 4 */
-};
-
-struct partition_security_attributes_page {
- struct osd_attr_page_header hdr; /* id=p+5, size=0x8f */
- u8 reserved[3];
- u8 default_security_method;
- struct osd_timestamp oldest_valid_nonce;
- struct osd_timestamp newest_valid_nonce;
- __be16 request_nonce_list_depth;
- __be16 frozen_working_key_bit_mask;
- __be32 policy_access_tag;
- __be32 user_object_policy_access_tag;
- u8 pki_valid;
- __be16 wki_00_0f_vld;
- struct osd_key_identifier partition_key_identifier;
- struct osd_key_identifier working_key_identifiers[16];
-} __packed;
-
-/* 7.1.2.22/23 Collection/Object Policy-Security attributes page
- * (OSD_APAGE_COLLECTION_SECURITY/OSD_APAGE_OBJECT_SECURITY)
- */
-enum {
- OSD_ATTR_OS_POLICY_ACCESS_TAG = 0x40000001, /* 4 */
-};
-
-struct object_security_attributes_page {
- struct osd_attr_page_header hdr; /* id=C+5/5, size=4 */
- __be32 policy_access_tag;
-} __packed;
-
-/* OSD2r05: 7.1.3.31 Current Command attributes page
- * (OSD_APAGE_CURRENT_COMMAND)
- */
-enum {
- OSD_ATTR_CC_RESPONSE_INTEGRITY_CHECK_VALUE = 0x1, /* 32 */
- OSD_ATTR_CC_OBJECT_TYPE = 0x2, /* 1 */
- OSD_ATTR_CC_PARTITION_ID = 0x3, /* 8 */
- OSD_ATTR_CC_OBJECT_ID = 0x4, /* 8 */
- OSD_ATTR_CC_STARTING_BYTE_ADDRESS_OF_APPEND = 0x5, /* 8 */
- OSD_ATTR_CC_CHANGE_IN_USED_CAPACITY = 0x6, /* 8 */
-};
-
-/*TBD: osdv1_current_command_attributes_page */
-
-struct osdv2_current_command_attributes_page {
- struct osd_attr_page_header hdr; /* id=0xFFFFFFFE, size=0x44 */
- u8 response_integrity_check_value[OSD_CRYPTO_KEYID_SIZE];
- u8 object_type;
- u8 reserved[3];
- __be64 partition_id;
- __be64 object_id;
- __be64 starting_byte_address_of_append;
- __be64 change_in_used_capacity;
-};
-
-#endif /*ndef __OSD_ATTRIBUTES_H__*/
diff --git a/include/scsi/osd_protocol.h b/include/scsi/osd_protocol.h
deleted file mode 100644
index e0ca835e7bf7..000000000000
--- a/include/scsi/osd_protocol.h
+++ /dev/null
@@ -1,676 +0,0 @@
-/*
- * osd_protocol.h - OSD T10 standard C definitions.
- *
- * Copyright (C) 2008 Panasas Inc. All rights reserved.
- *
- * Authors:
- * Boaz Harrosh <ooo@electrozaur.com>
- * Benny Halevy <bhalevy@panasas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- *
- * This file contains types and constants that are defined by the protocol
- * Note: All names and symbols are taken from the OSD standard's text.
- */
-#ifndef __OSD_PROTOCOL_H__
-#define __OSD_PROTOCOL_H__
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <asm/unaligned.h>
-#include <scsi/scsi.h>
-
-enum {
- OSDv1_ADDITIONAL_CDB_LENGTH = 192,
- OSDv1_TOTAL_CDB_LEN = OSDv1_ADDITIONAL_CDB_LENGTH + 8,
- OSDv1_CAP_LEN = 80,
-
- /* Latest supported version */
- OSDv2_ADDITIONAL_CDB_LENGTH = 228,
- OSD_ADDITIONAL_CDB_LENGTH =
- OSDv2_ADDITIONAL_CDB_LENGTH,
- OSD_TOTAL_CDB_LEN = OSD_ADDITIONAL_CDB_LENGTH + 8,
- OSD_CAP_LEN = 104,
-
- OSD_SYSTEMID_LEN = 20,
- OSDv1_CRYPTO_KEYID_SIZE = 20,
- OSDv2_CRYPTO_KEYID_SIZE = 32,
- OSD_CRYPTO_KEYID_SIZE = OSDv2_CRYPTO_KEYID_SIZE,
- OSD_CRYPTO_SEED_SIZE = 4,
- OSD_CRYPTO_NONCE_SIZE = 12,
- OSD_MAX_SENSE_LEN = 252, /* from SPC-3 */
-
- OSD_PARTITION_FIRST_ID = 0x10000,
- OSD_OBJECT_FIRST_ID = 0x10000,
-};
-
-/* (osd-r10 5.2.4)
- * osd2r03: 5.2.3 Caching control bits
- */
-enum osd_options_byte {
- OSD_CDB_FUA = 0x08, /* Force Unit Access */
- OSD_CDB_DPO = 0x10, /* Disable Page Out */
-};
-
-/*
- * osd2r03: 5.2.5 Isolation.
- * First 3 bits, V2-only.
- * Also for attr 110h "default isolation method" at Root Information page
- */
-enum osd_options_byte_isolation {
- OSD_ISOLATION_DEFAULT = 0,
- OSD_ISOLATION_NONE = 1,
- OSD_ISOLATION_STRICT = 2,
- OSD_ISOLATION_RANGE = 4,
- OSD_ISOLATION_FUNCTIONAL = 5,
- OSD_ISOLATION_VENDOR = 7,
-};
-
-/* (osd-r10: 6.7)
- * osd2r03: 6.8 FLUSH, FLUSH COLLECTION, FLUSH OSD, FLUSH PARTITION
- */
-enum osd_options_flush_scope_values {
- OSD_CDB_FLUSH_ALL = 0,
- OSD_CDB_FLUSH_ATTR_ONLY = 1,
-
- OSD_CDB_FLUSH_ALL_RECURSIVE = 2,
- /* V2-only */
- OSD_CDB_FLUSH_ALL_RANGE = 2,
-};
-
-/* osd2r03: 5.2.10 Timestamps control */
-enum {
- OSD_CDB_NORMAL_TIMESTAMPS = 0,
- OSD_CDB_BYPASS_TIMESTAMPS = 0x7f,
-};
-
-/* (osd-r10: 5.2.2.1)
- * osd2r03: 5.2.4.1 Get and set attributes CDB format selection
- * 2 bits at second nibble of command_specific_options byte
- */
-enum osd_attributes_mode {
- /* V2-only */
- OSD_CDB_SET_ONE_ATTR = 0x10,
-
- OSD_CDB_GET_ATTR_PAGE_SET_ONE = 0x20,
- OSD_CDB_GET_SET_ATTR_LISTS = 0x30,
-
- OSD_CDB_GET_SET_ATTR_MASK = 0x30,
-};
-
-/* (osd-r10: 4.12.5)
- * osd2r03: 4.14.5 Data-In and Data-Out buffer offsets
- * byte offset = mantissa * (2^(exponent+8))
- * struct {
- * unsigned mantissa: 28;
- * int exponent: 04;
- * }
- */
-typedef __be32 osd_cdb_offset;
-
-enum {
- OSD_OFFSET_UNUSED = 0xFFFFFFFF,
- OSD_OFFSET_MAX_BITS = 28,
-
- OSDv1_OFFSET_MIN_SHIFT = 8,
- OSD_OFFSET_MIN_SHIFT = 3,
- OSD_OFFSET_MAX_SHIFT = 16,
-};
-
-/* Return the smallest allowed encoded offset that contains @offset.
- *
- * The actual encoded offset returned is @offset + *padding.
- * (up to max_shift, non-inclusive)
- */
-osd_cdb_offset __osd_encode_offset(u64 offset, unsigned *padding,
- int min_shift, int max_shift);
-
-/* Minimum alignment is 256 bytes
- * Note: Seems from std v1 that exponent can be from 0+8 to 0xE+8 (inclusive)
- * which is 8 to 23 but IBM code restricts it to 16, so be it.
- */
-static inline osd_cdb_offset osd_encode_offset_v1(u64 offset, unsigned *padding)
-{
- return __osd_encode_offset(offset, padding,
- OSDv1_OFFSET_MIN_SHIFT, OSD_OFFSET_MAX_SHIFT);
-}
-
-/* Minimum 8 bytes alignment
- * Same as v1 but since exponent can be signed than a less than
- * 256 alignment can be reached with small offsets (<2GB)
- */
-static inline osd_cdb_offset osd_encode_offset_v2(u64 offset, unsigned *padding)
-{
- return __osd_encode_offset(offset, padding,
- OSD_OFFSET_MIN_SHIFT, OSD_OFFSET_MAX_SHIFT);
-}
-
-/* osd2r03: 5.2.1 Overview */
-struct osd_cdb_head {
- struct scsi_varlen_cdb_hdr varlen_cdb;
-/*10*/ u8 options;
- u8 command_specific_options;
- u8 timestamp_control;
-/*13*/ u8 reserved1[3];
-/*16*/ __be64 partition;
-/*24*/ __be64 object;
-/*32*/ union { /* V1 vs V2 alignment differences */
- struct __osdv1_cdb_addr_len {
-/*32*/ __be32 list_identifier;/* Rarely used */
-/*36*/ __be64 length;
-/*44*/ __be64 start_address;
- } __packed v1;
-
- struct __osdv2_cdb_addr_len {
- /* called allocation_length in some commands */
-/*32*/ __be64 length;
-/*40*/ __be64 start_address;
- union {
-/*48*/ __be32 list_identifier;/* Rarely used */
- /* OSD2r05 5.2.5 CDB continuation length */
-/*48*/ __be32 cdb_continuation_length;
- };
- } __packed v2;
- };
-/*52*/ union { /* selected attributes mode Page/List/Single */
- struct osd_attributes_page_mode {
-/*52*/ __be32 get_attr_page;
-/*56*/ __be32 get_attr_alloc_length;
-/*60*/ osd_cdb_offset get_attr_offset;
-
-/*64*/ __be32 set_attr_page;
-/*68*/ __be32 set_attr_id;
-/*72*/ __be32 set_attr_length;
-/*76*/ osd_cdb_offset set_attr_offset;
-/*80*/ } __packed attrs_page;
-
- struct osd_attributes_list_mode {
-/*52*/ __be32 get_attr_desc_bytes;
-/*56*/ osd_cdb_offset get_attr_desc_offset;
-
-/*60*/ __be32 get_attr_alloc_length;
-/*64*/ osd_cdb_offset get_attr_offset;
-
-/*68*/ __be32 set_attr_bytes;
-/*72*/ osd_cdb_offset set_attr_offset;
- __be32 not_used;
-/*80*/ } __packed attrs_list;
-
- /* osd2r03:5.2.4.2 Set one attribute value using CDB fields */
- struct osd_attributes_cdb_mode {
-/*52*/ __be32 set_attr_page;
-/*56*/ __be32 set_attr_id;
-/*60*/ __be16 set_attr_len;
-/*62*/ u8 set_attr_val[18];
-/*80*/ } __packed attrs_cdb;
-/*52*/ u8 get_set_attributes_parameters[28];
- };
-} __packed;
-/*80*/
-
-/*160 v1*/
-struct osdv1_security_parameters {
-/*160*/u8 integrity_check_value[OSDv1_CRYPTO_KEYID_SIZE];
-/*180*/u8 request_nonce[OSD_CRYPTO_NONCE_SIZE];
-/*192*/osd_cdb_offset data_in_integrity_check_offset;
-/*196*/osd_cdb_offset data_out_integrity_check_offset;
-} __packed;
-/*200 v1*/
-
-/*184 v2*/
-struct osdv2_security_parameters {
-/*184*/u8 integrity_check_value[OSDv2_CRYPTO_KEYID_SIZE];
-/*216*/u8 request_nonce[OSD_CRYPTO_NONCE_SIZE];
-/*228*/osd_cdb_offset data_in_integrity_check_offset;
-/*232*/osd_cdb_offset data_out_integrity_check_offset;
-} __packed;
-/*236 v2*/
-
-struct osd_security_parameters {
- union {
- struct osdv1_security_parameters v1;
- struct osdv2_security_parameters v2;
- };
-};
-
-struct osdv1_cdb {
- struct osd_cdb_head h;
- u8 caps[OSDv1_CAP_LEN];
- struct osdv1_security_parameters sec_params;
-} __packed;
-
-struct osdv2_cdb {
- struct osd_cdb_head h;
- u8 caps[OSD_CAP_LEN];
- struct osdv2_security_parameters sec_params;
-} __packed;
-
-struct osd_cdb {
- union {
- struct osdv1_cdb v1;
- struct osdv2_cdb v2;
- u8 buff[OSD_TOTAL_CDB_LEN];
- };
-} __packed;
-
-static inline struct osd_cdb_head *osd_cdb_head(struct osd_cdb *ocdb)
-{
- return (struct osd_cdb_head *)ocdb->buff;
-}
-
-/* define both version actions
- * Ex name = FORMAT_OSD we have OSD_ACT_FORMAT_OSD && OSDv1_ACT_FORMAT_OSD
- */
-#define OSD_ACT___(Name, Num) \
- OSD_ACT_##Name = cpu_to_be16(0x8880 + Num), \
- OSDv1_ACT_##Name = cpu_to_be16(0x8800 + Num),
-
-/* V2 only actions */
-#define OSD_ACT_V2(Name, Num) \
- OSD_ACT_##Name = cpu_to_be16(0x8880 + Num),
-
-#define OSD_ACT_V1_V2(Name, Num1, Num2) \
- OSD_ACT_##Name = cpu_to_be16(Num2), \
- OSDv1_ACT_##Name = cpu_to_be16(Num1),
-
-enum osd_service_actions {
- OSD_ACT_V2(OBJECT_STRUCTURE_CHECK, 0x00)
- OSD_ACT___(FORMAT_OSD, 0x01)
- OSD_ACT___(CREATE, 0x02)
- OSD_ACT___(LIST, 0x03)
- OSD_ACT_V2(PUNCH, 0x04)
- OSD_ACT___(READ, 0x05)
- OSD_ACT___(WRITE, 0x06)
- OSD_ACT___(APPEND, 0x07)
- OSD_ACT___(FLUSH, 0x08)
- OSD_ACT_V2(CLEAR, 0x09)
- OSD_ACT___(REMOVE, 0x0A)
- OSD_ACT___(CREATE_PARTITION, 0x0B)
- OSD_ACT___(REMOVE_PARTITION, 0x0C)
- OSD_ACT___(GET_ATTRIBUTES, 0x0E)
- OSD_ACT___(SET_ATTRIBUTES, 0x0F)
- OSD_ACT___(CREATE_AND_WRITE, 0x12)
- OSD_ACT___(CREATE_COLLECTION, 0x15)
- OSD_ACT___(REMOVE_COLLECTION, 0x16)
- OSD_ACT___(LIST_COLLECTION, 0x17)
- OSD_ACT___(SET_KEY, 0x18)
- OSD_ACT___(SET_MASTER_KEY, 0x19)
- OSD_ACT___(FLUSH_COLLECTION, 0x1A)
- OSD_ACT___(FLUSH_PARTITION, 0x1B)
- OSD_ACT___(FLUSH_OSD, 0x1C)
-
- OSD_ACT_V2(QUERY, 0x20)
- OSD_ACT_V2(REMOVE_MEMBER_OBJECTS, 0x21)
- OSD_ACT_V2(GET_MEMBER_ATTRIBUTES, 0x22)
- OSD_ACT_V2(SET_MEMBER_ATTRIBUTES, 0x23)
-
- OSD_ACT_V2(CREATE_CLONE, 0x28)
- OSD_ACT_V2(CREATE_SNAPSHOT, 0x29)
- OSD_ACT_V2(DETACH_CLONE, 0x2A)
- OSD_ACT_V2(REFRESH_SNAPSHOT_CLONE, 0x2B)
- OSD_ACT_V2(RESTORE_PARTITION_FROM_SNAPSHOT, 0x2C)
-
- OSD_ACT_V2(READ_MAP, 0x31)
- OSD_ACT_V2(READ_MAPS_COMPARE, 0x32)
-
- OSD_ACT_V1_V2(PERFORM_SCSI_COMMAND, 0x8F7E, 0x8F7C)
- OSD_ACT_V1_V2(SCSI_TASK_MANAGEMENT, 0x8F7F, 0x8F7D)
- /* 0x8F80 to 0x8FFF are Vendor specific */
-};
-
-/* osd2r03: 7.1.3.2 List entry format for retrieving attributes */
-struct osd_attributes_list_attrid {
- __be32 attr_page;
- __be32 attr_id;
-} __packed;
-
-/*
- * NOTE: v1 is not aligned.
- */
-struct osdv1_attributes_list_element {
- __be32 attr_page;
- __be32 attr_id;
- __be16 attr_bytes; /* valid bytes at attr_val without padding */
- u8 attr_val[0];
-} __packed;
-
-/*
- * osd2r03: 7.1.3.3 List entry format for retrieved attributes and
- * for setting attributes
- * NOTE: v2 is 8-bytes aligned
- */
-struct osdv2_attributes_list_element {
- __be32 attr_page;
- __be32 attr_id;
- u8 reserved[6];
- __be16 attr_bytes; /* valid bytes at attr_val without padding */
- u8 attr_val[0];
-} __packed;
-
-enum {
- OSDv1_ATTRIBUTES_ELEM_ALIGN = 1,
- OSD_ATTRIBUTES_ELEM_ALIGN = 8,
-};
-
-enum {
- OSD_ATTR_LIST_ALL_PAGES = 0xFFFFFFFF,
- OSD_ATTR_LIST_ALL_IN_PAGE = 0xFFFFFFFF,
-};
-
-static inline unsigned osdv1_attr_list_elem_size(unsigned len)
-{
- return ALIGN(len + sizeof(struct osdv1_attributes_list_element),
- OSDv1_ATTRIBUTES_ELEM_ALIGN);
-}
-
-static inline unsigned osdv2_attr_list_elem_size(unsigned len)
-{
- return ALIGN(len + sizeof(struct osdv2_attributes_list_element),
- OSD_ATTRIBUTES_ELEM_ALIGN);
-}
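For illustration only (not part of the original header): the two helpers above differ only in header size and alignment, e.g. for a 9-byte attribute value:

/* Illustrative sketch: element sizes for a 9-byte attribute value. */
static inline unsigned example_attr_elem_sizes(void)
{
	unsigned v1 = osdv1_attr_list_elem_size(9); /* 10 + 9 = 19, 1-aligned -> 19 */
	unsigned v2 = osdv2_attr_list_elem_size(9); /* 16 + 9 = 25, 8-aligned -> 32 */

	return v1 + v2;
}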
-
-/*
- * osd2r03: 7.1.3 OSD attributes lists (Table 184) — List type values
- */
-enum osd_attr_list_types {
- OSD_ATTR_LIST_GET = 0x1, /* descriptors only */
- OSD_ATTR_LIST_SET_RETRIEVE = 0x9, /*descriptors/values variable-length*/
- OSD_V2_ATTR_LIST_MULTIPLE = 0xE, /* ver2, Multiple Objects lists*/
-	OSD_V1_ATTR_LIST_CREATE_MULTIPLE = 0xF,/*ver1, used by create_multiple*/
-};
-
-/* osd2r03: 7.1.3.4 Multi-object retrieved attributes format */
-struct osd_attributes_list_multi_header {
- __be64 object_id;
- u8 object_type; /* object_type enum below */
- u8 reserved[5];
- __be16 list_bytes;
- /* followed by struct osd_attributes_list_element's */
-};
-
-struct osdv1_attributes_list_header {
- u8 type; /* low 4-bit only */
- u8 pad;
- __be16 list_bytes; /* Initiator shall set to Zero. Only set by target */
- /*
- * type=9 followed by struct osd_attributes_list_element's
- * type=E followed by struct osd_attributes_list_multi_header's
- */
-} __packed;
-
-static inline unsigned osdv1_list_size(struct osdv1_attributes_list_header *h)
-{
- return be16_to_cpu(h->list_bytes);
-}
-
-struct osdv2_attributes_list_header {
- u8 type; /* lower 4-bits only */
- u8 pad[3];
-/*4*/ __be32 list_bytes; /* Initiator shall set to zero. Only set by target */
- /*
- * type=9 followed by struct osd_attributes_list_element's
- * type=E followed by struct osd_attributes_list_multi_header's
- */
-} __packed;
-
-static inline unsigned osdv2_list_size(struct osdv2_attributes_list_header *h)
-{
- return be32_to_cpu(h->list_bytes);
-}
-
-/* (osd-r10 6.13)
- * osd2r03: 6.15 LIST (Table 79) LIST command parameter data.
- * for root_lstchg below
- */
-enum {
- OSD_OBJ_ID_LIST_PAR = 0x1, /* V1-only. Not used in V2 */
- OSD_OBJ_ID_LIST_LSTCHG = 0x2,
-};
-
-/*
- * osd2r03: 6.15.2 LIST command parameter data
- * (Also for LIST COLLECTION)
- */
-struct osd_obj_id_list {
- __be64 list_bytes; /* bytes in list excluding list_bytes (-8) */
- __be64 continuation_id;
- __be32 list_identifier;
- u8 pad[3];
- u8 root_lstchg;
- __be64 object_ids[0];
-} __packed;
-
-static inline bool osd_is_obj_list_done(struct osd_obj_id_list *list,
- bool *is_changed)
-{
- *is_changed = (0 != (list->root_lstchg & OSD_OBJ_ID_LIST_LSTCHG));
- return 0 != list->continuation_id;
-}
-
-/*
- * osd2r03: 4.12.4.5 The ALLDATA security method
- */
-struct osd_data_out_integrity_info {
- __be64 data_bytes;
- __be64 set_attributes_bytes;
- __be64 get_attributes_bytes;
- __u8 integrity_check_value[OSD_CRYPTO_KEYID_SIZE];
-} __packed;
-
-/* The same osd_data_out_integrity_info is used for OSD2/OSD1. The only
- * difference is the size of the structure, since in OSD1 the last array is
- * smaller. Use the helper below for version-independent handling of this
- * structure.
- */
-static inline int osd_data_out_integrity_info_sizeof(bool is_ver1)
-{
- return sizeof(struct osd_data_out_integrity_info) -
- (is_ver1 * (OSDv2_CRYPTO_KEYID_SIZE - OSDv1_CRYPTO_KEYID_SIZE));
-}
-
-struct osd_data_in_integrity_info {
- __be64 data_bytes;
- __be64 retrieved_attributes_bytes;
- __u8 integrity_check_value[OSD_CRYPTO_KEYID_SIZE];
-} __packed;
-
-/* The same osd_data_in_integrity_info is used for OSD2/OSD1. The only
- * difference is the size of the structure, since in OSD1 the last array is
- * smaller. Use the helper below for version-independent handling of this
- * structure.
- */
-static inline int osd_data_in_integrity_info_sizeof(bool is_ver1)
-{
- return sizeof(struct osd_data_in_integrity_info) -
- (is_ver1 * (OSDv2_CRYPTO_KEYID_SIZE - OSDv1_CRYPTO_KEYID_SIZE));
-}
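A minimal usage sketch (not from the original header), assuming the caller already knows which protocol version was negotiated:

/* Hypothetical caller: room needed for both integrity info structures. */
static inline int example_integrity_info_bytes(bool is_ver1)
{
	/* v1 structures are 12 bytes smaller (20- vs 32-byte key IDs) */
	return osd_data_out_integrity_info_sizeof(is_ver1) +
	       osd_data_in_integrity_info_sizeof(is_ver1);
}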
-
-struct osd_timestamp {
- u8 time[6]; /* number of milliseconds since 1/1/1970 UT (big endian) */
-} __packed;
-/* FIXME: define helper functions to convert to/from osd time format */
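The FIXME above was never resolved in-tree; a possible sketch of such helpers, assuming only the 48-bit big-endian millisecond encoding described in the comment, could look like this (illustrative only, names are hypothetical):

/* Illustrative only: pack/unpack the 48-bit big-endian millisecond count. */
static inline void example_osd_timestamp_set(struct osd_timestamp *ts, u64 msecs)
{
	int i;

	for (i = 5; i >= 0; i--) {
		ts->time[i] = msecs & 0xff;
		msecs >>= 8;
	}
}

static inline u64 example_osd_timestamp_get(const struct osd_timestamp *ts)
{
	u64 msecs = 0;
	int i;

	for (i = 0; i < 6; i++)
		msecs = (msecs << 8) | ts->time[i];

	return msecs;
}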
-
-/*
- * Capability & Security definitions
- * osd2r03: 4.11.2.2 Capability format
- * osd2r03: 5.2.8 Security parameters
- */
-
-struct osd_key_identifier {
- u8 id[7]; /* if you know why 7 please email ooo@electrozaur.com */
-} __packed;
-
-/* for osd_capability.format */
-enum {
- OSD_SEC_CAP_FORMAT_NO_CAPS = 0,
- OSD_SEC_CAP_FORMAT_VER1 = 1,
- OSD_SEC_CAP_FORMAT_VER2 = 2,
-};
-
-/* security_method */
-enum {
- OSD_SEC_NOSEC = 0,
- OSD_SEC_CAPKEY = 1,
- OSD_SEC_CMDRSP = 2,
- OSD_SEC_ALLDATA = 3,
-};
-
-enum object_type {
- OSD_SEC_OBJ_ROOT = 0x1,
- OSD_SEC_OBJ_PARTITION = 0x2,
- OSD_SEC_OBJ_COLLECTION = 0x40,
- OSD_SEC_OBJ_USER = 0x80,
-};
-
-enum osd_capability_bit_masks {
- OSD_SEC_CAP_APPEND = BIT(0),
- OSD_SEC_CAP_OBJ_MGMT = BIT(1),
- OSD_SEC_CAP_REMOVE = BIT(2),
- OSD_SEC_CAP_CREATE = BIT(3),
- OSD_SEC_CAP_SET_ATTR = BIT(4),
- OSD_SEC_CAP_GET_ATTR = BIT(5),
- OSD_SEC_CAP_WRITE = BIT(6),
- OSD_SEC_CAP_READ = BIT(7),
-
- OSD_SEC_CAP_NONE1 = BIT(8),
- OSD_SEC_CAP_NONE2 = BIT(9),
- OSD_SEC_GBL_REM = BIT(10), /*v2 only*/
- OSD_SEC_CAP_QUERY = BIT(11), /*v2 only*/
- OSD_SEC_CAP_M_OBJECT = BIT(12), /*v2 only*/
- OSD_SEC_CAP_POL_SEC = BIT(13),
- OSD_SEC_CAP_GLOBAL = BIT(14),
- OSD_SEC_CAP_DEV_MGMT = BIT(15),
-};
-
-/* for object_descriptor_type (hi nibble used) */
-enum {
- OSD_SEC_OBJ_DESC_NONE = 0, /* Not allowed */
- OSD_SEC_OBJ_DESC_OBJ = 1 << 4, /* v1: also collection */
- OSD_SEC_OBJ_DESC_PAR = 2 << 4, /* also root */
- OSD_SEC_OBJ_DESC_COL = 3 << 4, /* v2 only */
-};
-
-/* (osd-r10:4.9.2.2)
- * osd2r03:4.11.2.2 Capability format
- */
-struct osd_capability_head {
- u8 format; /* low nibble */
- u8 integrity_algorithm__key_version; /* MAKE_BYTE(integ_alg, key_ver) */
- u8 security_method;
- u8 reserved1;
-/*04*/ struct osd_timestamp expiration_time;
-/*10*/ u8 audit[20];
-/*30*/ u8 discriminator[12];
-/*42*/ struct osd_timestamp object_created_time;
-/*48*/ u8 object_type;
-/*49*/ u8 permissions_bit_mask[5];
-/*54*/ u8 reserved2;
-/*55*/ u8 object_descriptor_type; /* high nibble */
-} __packed;
-
-/*56 v1*/
-struct osdv1_cap_object_descriptor {
- union {
- struct {
-/*56*/ __be32 policy_access_tag;
-/*60*/ __be64 allowed_partition_id;
-/*68*/ __be64 allowed_object_id;
-/*76*/ __be32 reserved;
- } __packed obj_desc;
-
-/*56*/ u8 object_descriptor[24];
- };
-} __packed;
-/*80 v1*/
-
-/*56 v2*/
-struct osd_cap_object_descriptor {
- union {
- struct {
-/*56*/ __be32 allowed_attributes_access;
-/*60*/ __be32 policy_access_tag;
-/*64*/ __be16 boot_epoch;
-/*66*/ u8 reserved[6];
-/*72*/ __be64 allowed_partition_id;
-/*80*/ __be64 allowed_object_id;
-/*88*/ __be64 allowed_range_length;
-/*96*/ __be64 allowed_range_start;
- } __packed obj_desc;
-
-/*56*/ u8 object_descriptor[48];
- };
-} __packed;
-/*104 v2*/
-
-struct osdv1_capability {
- struct osd_capability_head h;
- struct osdv1_cap_object_descriptor od;
-} __packed;
-
-struct osd_capability {
- struct osd_capability_head h;
- struct osd_cap_object_descriptor od;
-} __packed;
-
-/**
- * osd_sec_set_caps - set cap-bits into the capabilities header
- *
- * @cap: The osd_capability_head to set cap bits to.
- * @bit_mask: Use an ORed list of enum osd_capability_bit_masks values
- *
- * permissions_bit_mask is unaligned; use this helper to set it into caps
- * in a version-independent way.
- */
-static inline void osd_sec_set_caps(struct osd_capability_head *cap,
- u16 bit_mask)
-{
- /*
-	 * Note: The bits above are defined in LE order so that they can grow
-	 * in the future to more than 16 and still retain their constant
-	 * values.
- */
- put_unaligned_le16(bit_mask, &cap->permissions_bit_mask);
-}
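A minimal usage sketch (illustrative, not part of the original header), assuming a zero-initialized v2 capability and the enums defined above:

/* Hypothetical: grant read/write plus attribute access on a user object. */
static inline void example_fill_caps(struct osd_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
	cap->h.format = OSD_SEC_CAP_FORMAT_VER2;
	cap->h.object_type = OSD_SEC_OBJ_USER;
	osd_sec_set_caps(&cap->h, OSD_SEC_CAP_READ | OSD_SEC_CAP_WRITE |
				  OSD_SEC_CAP_GET_ATTR | OSD_SEC_CAP_SET_ATTR);
}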
-
-/* osd2r05a sec 5.3: CDB continuation segment formats */
-enum osd_continuation_segment_format {
- CDB_CONTINUATION_FORMAT_V2 = 0x01,
-};
-
-struct osd_continuation_segment_header {
- u8 format;
- u8 reserved1;
- __be16 service_action;
- __be32 reserved2;
- u8 integrity_check[OSDv2_CRYPTO_KEYID_SIZE];
-} __packed;
-
-/* osd2r05a sec 5.4.1: CDB continuation descriptors */
-enum osd_continuation_descriptor_type {
- NO_MORE_DESCRIPTORS = 0x0000,
- SCATTER_GATHER_LIST = 0x0001,
- QUERY_LIST = 0x0002,
- USER_OBJECT = 0x0003,
- COPY_USER_OBJECT_SOURCE = 0x0101,
- EXTENSION_CAPABILITIES = 0xFFEE
-};
-
-struct osd_continuation_descriptor_header {
- __be16 type;
- u8 reserved;
- u8 pad_length;
- __be32 length;
-} __packed;
-
-
-/* osd2r05a sec 5.4.2: Scatter/gather list */
-struct osd_sg_list_entry {
- __be64 offset;
- __be64 len;
-};
-
-struct osd_sg_continuation_descriptor {
- struct osd_continuation_descriptor_header hdr;
- struct osd_sg_list_entry entries[];
-};
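For illustration (not from the original header): the total size of a scatter/gather continuation descriptor is just the fixed header plus one entry per extent.

/* Illustrative helper; 'nents' is the number of scatter/gather extents. */
static inline size_t example_sg_descriptor_bytes(unsigned int nents)
{
	return sizeof(struct osd_sg_continuation_descriptor) +
	       nents * sizeof(struct osd_sg_list_entry);
}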
-
-#endif /* ndef __OSD_PROTOCOL_H__ */
diff --git a/include/scsi/osd_sec.h b/include/scsi/osd_sec.h
deleted file mode 100644
index 7abeb0f0db30..000000000000
--- a/include/scsi/osd_sec.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * osd_sec.h - OSD security manager API
- *
- * Copyright (C) 2008 Panasas Inc. All rights reserved.
- *
- * Authors:
- * Boaz Harrosh <ooo@electrozaur.com>
- * Benny Halevy <bhalevy@panasas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- *
- */
-#ifndef __OSD_SEC_H__
-#define __OSD_SEC_H__
-
-#include <scsi/osd_protocol.h>
-#include <scsi/osd_types.h>
-
-/*
- * Contains types and constants of osd capabilities and security
- * encoding/decoding.
- * The API tries to keep security abstract, so the initiator of an
- * object-based pNFS client knows as little as possible about security and
- * capabilities. It is the server's osd-initiator's place to know more.
- * It can also be used by an osd-target.
- */
-void osd_sec_encode_caps(void *caps, ...);/* NI */
-void osd_sec_init_nosec_doall_caps(void *caps,
- const struct osd_obj_id *obj, bool is_collection, const bool is_v1);
-
-bool osd_is_sec_alldata(struct osd_security_parameters *sec_params);
-
-/* Conditionally sign the CDB according to security setting in ocdb
- * with cap_key */
-void osd_sec_sign_cdb(struct osd_cdb *ocdb, const u8 *cap_key);
-
-/* Unconditionally sign the BIO data with cap_key.
- * The osd_is_sec_alldata() check must be done prior to calling this. */
-void osd_sec_sign_data(void *data_integ, struct bio *bio, const u8 *cap_key);
-
-/* Version independent copy of caps into the cdb */
-void osd_set_caps(struct osd_cdb *cdb, const void *caps);
-
-#endif /* ndef __OSD_SEC_H__ */
diff --git a/include/scsi/osd_sense.h b/include/scsi/osd_sense.h
deleted file mode 100644
index d52aa93a0b2d..000000000000
--- a/include/scsi/osd_sense.h
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * osd_sense.h - OSD Related sense handling definitions.
- *
- * Copyright (C) 2008 Panasas Inc. All rights reserved.
- *
- * Authors:
- * Boaz Harrosh <ooo@electrozaur.com>
- * Benny Halevy <bhalevy@panasas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- *
- * This file contains types and constants that are defined by the protocol
- * Note: All names and symbols are taken from the OSD standard's text.
- */
-#ifndef __OSD_SENSE_H__
-#define __OSD_SENSE_H__
-
-#include <scsi/osd_protocol.h>
-
-/* SPC3r23 4.5.6 Sense key and sense code definitions table 27 */
-enum scsi_sense_keys {
- scsi_sk_no_sense = 0x0,
- scsi_sk_recovered_error = 0x1,
- scsi_sk_not_ready = 0x2,
- scsi_sk_medium_error = 0x3,
- scsi_sk_hardware_error = 0x4,
- scsi_sk_illegal_request = 0x5,
- scsi_sk_unit_attention = 0x6,
- scsi_sk_data_protect = 0x7,
- scsi_sk_blank_check = 0x8,
- scsi_sk_vendor_specific = 0x9,
- scsi_sk_copy_aborted = 0xa,
- scsi_sk_aborted_command = 0xb,
- scsi_sk_volume_overflow = 0xd,
- scsi_sk_miscompare = 0xe,
- scsi_sk_reserved = 0xf,
-};
-
-/* SPC3r23 4.5.6 Sense key and sense code definitions table 28 */
-/* Note: only those which can be returned by an OSD target. Most of
- * these errors are taken care of by the generic scsi layer.
- */
-enum osd_additional_sense_codes {
- scsi_no_additional_sense_information = 0x0000,
- scsi_operation_in_progress = 0x0016,
- scsi_cleaning_requested = 0x0017,
- scsi_lunr_cause_not_reportable = 0x0400,
- scsi_logical_unit_is_in_process_of_becoming_ready = 0x0401,
- scsi_lunr_initializing_command_required = 0x0402,
- scsi_lunr_manual_intervention_required = 0x0403,
- scsi_lunr_operation_in_progress = 0x0407,
- scsi_lunr_selftest_in_progress = 0x0409,
- scsi_luna_asymmetric_access_state_transition = 0x040a,
- scsi_luna_target_port_in_standby_state = 0x040b,
- scsi_luna_target_port_in_unavailable_state = 0x040c,
- scsi_lunr_notify_enable_spinup_required = 0x0411,
- scsi_logical_unit_does_not_respond_to_selection = 0x0500,
- scsi_logical_unit_communication_failure = 0x0800,
- scsi_logical_unit_communication_timeout = 0x0801,
- scsi_logical_unit_communication_parity_error = 0x0802,
- scsi_error_log_overflow = 0x0a00,
- scsi_warning = 0x0b00,
- scsi_warning_specified_temperature_exceeded = 0x0b01,
- scsi_warning_enclosure_degraded = 0x0b02,
- scsi_write_error_unexpected_unsolicited_data = 0x0c0c,
- scsi_write_error_not_enough_unsolicited_data = 0x0c0d,
- scsi_invalid_information_unit = 0x0e00,
- scsi_invalid_field_in_command_information_unit = 0x0e03,
- scsi_read_error_failed_retransmission_request = 0x1113,
- scsi_parameter_list_length_error = 0x1a00,
- scsi_invalid_command_operation_code = 0x2000,
- scsi_invalid_field_in_cdb = 0x2400,
- osd_security_audit_value_frozen = 0x2404,
- osd_security_working_key_frozen = 0x2405,
- osd_nonce_not_unique = 0x2406,
- osd_nonce_timestamp_out_of_range = 0x2407,
- scsi_logical_unit_not_supported = 0x2500,
- scsi_invalid_field_in_parameter_list = 0x2600,
- scsi_parameter_not_supported = 0x2601,
- scsi_parameter_value_invalid = 0x2602,
- scsi_invalid_release_of_persistent_reservation = 0x2604,
- osd_invalid_dataout_buffer_integrity_check_value = 0x260f,
- scsi_not_ready_to_ready_change_medium_may_have_changed = 0x2800,
- scsi_power_on_reset_or_bus_device_reset_occurred = 0x2900,
- scsi_power_on_occurred = 0x2901,
- scsi_scsi_bus_reset_occurred = 0x2902,
- scsi_bus_device_reset_function_occurred = 0x2903,
- scsi_device_internal_reset = 0x2904,
- scsi_transceiver_mode_changed_to_single_ended = 0x2905,
- scsi_transceiver_mode_changed_to_lvd = 0x2906,
- scsi_i_t_nexus_loss_occurred = 0x2907,
- scsi_parameters_changed = 0x2a00,
- scsi_mode_parameters_changed = 0x2a01,
- scsi_asymmetric_access_state_changed = 0x2a06,
- scsi_priority_changed = 0x2a08,
- scsi_command_sequence_error = 0x2c00,
- scsi_previous_busy_status = 0x2c07,
- scsi_previous_task_set_full_status = 0x2c08,
- scsi_previous_reservation_conflict_status = 0x2c09,
- osd_partition_or_collection_contains_user_objects = 0x2c0a,
- scsi_commands_cleared_by_another_initiator = 0x2f00,
- scsi_cleaning_failure = 0x3007,
- scsi_enclosure_failure = 0x3400,
- scsi_enclosure_services_failure = 0x3500,
- scsi_unsupported_enclosure_function = 0x3501,
- scsi_enclosure_services_unavailable = 0x3502,
- scsi_enclosure_services_transfer_failure = 0x3503,
- scsi_enclosure_services_transfer_refused = 0x3504,
- scsi_enclosure_services_checksum_error = 0x3505,
- scsi_rounded_parameter = 0x3700,
- osd_read_past_end_of_user_object = 0x3b17,
- scsi_logical_unit_has_not_self_configured_yet = 0x3e00,
- scsi_logical_unit_failure = 0x3e01,
- scsi_timeout_on_logical_unit = 0x3e02,
- scsi_logical_unit_failed_selftest = 0x3e03,
- scsi_logical_unit_unable_to_update_selftest_log = 0x3e04,
- scsi_target_operating_conditions_have_changed = 0x3f00,
- scsi_microcode_has_been_changed = 0x3f01,
- scsi_inquiry_data_has_changed = 0x3f03,
- scsi_echo_buffer_overwritten = 0x3f0f,
- scsi_diagnostic_failure_on_component_nn_first = 0x4080,
- scsi_diagnostic_failure_on_component_nn_last = 0x40ff,
- scsi_message_error = 0x4300,
- scsi_internal_target_failure = 0x4400,
- scsi_select_or_reselect_failure = 0x4500,
- scsi_scsi_parity_error = 0x4700,
- scsi_data_phase_crc_error_detected = 0x4701,
- scsi_scsi_parity_error_detected_during_st_data_phase = 0x4702,
- scsi_asynchronous_information_protection_error_detected = 0x4704,
- scsi_protocol_service_crc_error = 0x4705,
- scsi_phy_test_function_in_progress = 0x4706,
- scsi_invalid_message_error = 0x4900,
- scsi_command_phase_error = 0x4a00,
- scsi_data_phase_error = 0x4b00,
- scsi_logical_unit_failed_self_configuration = 0x4c00,
- scsi_overlapped_commands_attempted = 0x4e00,
- osd_quota_error = 0x5507,
- scsi_failure_prediction_threshold_exceeded = 0x5d00,
- scsi_failure_prediction_threshold_exceeded_false = 0x5dff,
- scsi_voltage_fault = 0x6500,
-};
-
-enum scsi_descriptor_types {
- scsi_sense_information = 0x0,
- scsi_sense_command_specific_information = 0x1,
- scsi_sense_key_specific = 0x2,
- scsi_sense_field_replaceable_unit = 0x3,
- scsi_sense_stream_commands = 0x4,
- scsi_sense_block_commands = 0x5,
- osd_sense_object_identification = 0x6,
- osd_sense_response_integrity_check = 0x7,
- osd_sense_attribute_identification = 0x8,
- scsi_sense_ata_return = 0x9,
-
- scsi_sense_Reserved_first = 0x0A,
- scsi_sense_Reserved_last = 0x7F,
- scsi_sense_Vendor_specific_first = 0x80,
- scsi_sense_Vendor_specific_last = 0xFF,
-};
-
-struct scsi_sense_descriptor { /* for peeking into the descriptor type */
- u8 descriptor_type; /* one of enum scsi_descriptor_types */
- u8 additional_length; /* n - 1 */
- u8 data[];
-} __packed;
-
-/* OSD deploys only scsi descriptor_based sense buffers */
-struct scsi_sense_descriptor_based {
-/*0*/ u8 response_code; /* 0x72 or 0x73 */
-/*1*/ u8 sense_key; /* one of enum scsi_sense_keys (4 lower bits) */
-/*2*/ __be16 additional_sense_code; /* enum osd_additional_sense_codes */
-/*4*/ u8 Reserved[3];
-/*7*/ u8 additional_sense_length; /* n - 7 */
-/*8*/ struct scsi_sense_descriptor ssd[0]; /* variable length, 1 or more */
-} __packed;
-
-/* some descriptors deployed by OSD */
-
-/* SPC3r23 4.5.2.3 Command-specific information sense data descriptor */
-/* Note: this is the same for descriptor_type=00 but with type=00 the
- * Reserved[0] == 0x80 (ie. bit-7 set)
- */
-struct scsi_sense_command_specific_data_descriptor {
-/*0*/ u8 descriptor_type; /* (00h/01h) */
-/*1*/ u8 additional_length; /* (0Ah) */
-/*2*/ u8 Reserved[2];
-/*4*/ __be64 information;
-} __packed;
-/*12*/
-
-struct scsi_sense_key_specific_data_descriptor {
-/*0*/ u8 descriptor_type; /* (02h) */
-/*1*/ u8 additional_length; /* (06h) */
-/*2*/ u8 Reserved[2];
-/* SKSV, C/D, Reserved (2), BPV, BIT POINTER (3) */
-/*4*/ u8 sksv_cd_bpv_bp;
-/*5*/ __be16 value; /* field-pointer/progress-value/retry-count/... */
-/*7*/ u8 Reserved2;
-} __packed;
-/*8*/
-
-/* 4.16.2.1 OSD error identification sense data descriptor - table 52 */
-/* Note: these bits are defined in LE order for easy definition; this way the
- * BIT() number is the same as in the documentation. The members of
- * osd_sense_identification_data_descriptor below are therefore defined __le32.
- */
-enum osd_command_functions_bits {
- OSD_CFB_COMMAND = BIT(4),
- OSD_CFB_CMD_CAP_VERIFIED = BIT(5),
- OSD_CFB_VALIDATION = BIT(7),
- OSD_CFB_IMP_ST_ATT = BIT(12),
- OSD_CFB_SET_ATT = BIT(20),
- OSD_CFB_SA_CAP_VERIFIED = BIT(21),
- OSD_CFB_GET_ATT = BIT(28),
- OSD_CFB_GA_CAP_VERIFIED = BIT(29),
-};
-
-struct osd_sense_identification_data_descriptor {
-/*0*/ u8 descriptor_type; /* (06h) */
-/*1*/ u8 additional_length; /* (1Eh) */
-/*2*/ u8 Reserved[6];
-/*8*/ __le32 not_initiated_functions; /*osd_command_functions_bits*/
-/*12*/ __le32 completed_functions; /*osd_command_functions_bits*/
-/*16*/ __be64 partition_id;
-/*24*/ __be64 object_id;
-} __packed;
-/*32*/
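An illustrative check (not from the original header) against the little-endian function bitmaps above, e.g. to see whether the SET ATTRIBUTES portion of a command completed:

/* 'osidd' points at a decoded OSD error identification descriptor. */
static inline bool example_set_attr_completed(
		const struct osd_sense_identification_data_descriptor *osidd)
{
	return le32_to_cpu(osidd->completed_functions) & OSD_CFB_SET_ATT;
}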
-
-struct osd_sense_response_integrity_check_descriptor {
-/*0*/ u8 descriptor_type; /* (07h) */
-/*1*/ u8 additional_length; /* (20h) */
-/*2*/ u8 integrity_check_value[32]; /*FIXME: OSDv2_CRYPTO_KEYID_SIZE*/
-} __packed;
-/*34*/
-
-struct osd_sense_attributes_data_descriptor {
-/*0*/ u8 descriptor_type; /* (08h) */
-/*1*/ u8 additional_length; /* (n-2) */
-/*2*/ u8 Reserved[6];
- struct osd_sense_attr {
-/*8*/ __be32 attr_page;
-/*12*/ __be32 attr_id;
-/*16*/ } sense_attrs[0]; /* 1 or more */
-} __packed;
-/*variable*/
-
-/* Dig into scsi_sk_illegal_request/scsi_invalid_field_in_cdb errors */
-
-/*FIXME: Support also field in CAPS*/
-#define OSD_CDB_OFFSET(F) offsetof(struct osd_cdb_head, F)
-
-enum osdv2_cdb_field_offset {
- OSDv1_CFO_STARTING_BYTE = OSD_CDB_OFFSET(v1.start_address),
- OSD_CFO_STARTING_BYTE = OSD_CDB_OFFSET(v2.start_address),
- OSD_CFO_PARTITION_ID = OSD_CDB_OFFSET(partition),
- OSD_CFO_OBJECT_ID = OSD_CDB_OFFSET(object),
- OSD_CFO_PERMISSIONS = sizeof(struct osd_cdb_head) +
- offsetof(struct osd_capability_head,
- permissions_bit_mask),
-};
-
-#endif /* ndef __OSD_SENSE_H__ */
diff --git a/include/scsi/osd_types.h b/include/scsi/osd_types.h
deleted file mode 100644
index 48e8a165e136..000000000000
--- a/include/scsi/osd_types.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * osd_types.h - Types and constants which are not part of the protocol.
- *
- * Copyright (C) 2008 Panasas Inc. All rights reserved.
- *
- * Authors:
- * Boaz Harrosh <ooo@electrozaur.com>
- * Benny Halevy <bhalevy@panasas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2
- *
- * Contains types and constants that are implementation specific and are
- * used by more than one part of the osd library.
- * (Eg initiator/target/security_manager/...)
- */
-#ifndef __OSD_TYPES_H__
-#define __OSD_TYPES_H__
-
-struct osd_systemid {
- u8 data[OSD_SYSTEMID_LEN];
-};
-
-typedef u64 __bitwise osd_id;
-
-struct osd_obj_id {
- osd_id partition;
- osd_id id;
-};
-
-static const struct __weak osd_obj_id osd_root_object = {0, 0};
-
-struct osd_attr {
- u32 attr_page;
- u32 attr_id;
- u16 len; /* byte count of operand */
- void *val_ptr; /* in network order */
-};
-
-struct osd_sg_entry {
- u64 offset;
- u64 len;
-};
-
-#endif /* ndef __OSD_TYPES_H__ */
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 2b539a1b3f62..a5fcdad4a03e 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -628,6 +628,9 @@ struct Scsi_Host {
/* Host responded with short (<36 bytes) INQUIRY result */
unsigned short_inquiry:1;
+ /* The transport requires the LUN bits NOT to be stored in CDB[1] */
+ unsigned no_scsi2_lun_in_cdb:1;
+
/*
* Optional work queue to be utilized by the transport
*/
@@ -639,9 +642,6 @@ struct Scsi_Host {
*/
struct workqueue_struct *tmf_work_q;
- /* The transport requires the LUN bits NOT to be stored in CDB[1] */
- unsigned no_scsi2_lun_in_cdb:1;
-
/*
* Value host_blocked counts down from
*/
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 15da45dc2a5d..b375c3303fe2 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -165,6 +165,9 @@ enum fc_tgtid_binding_type {
#define FC_PORT_ROLE_FCP_INITIATOR 0x02
#define FC_PORT_ROLE_IP_PORT 0x04
#define FC_PORT_ROLE_FCP_DUMMY_INITIATOR 0x08
+#define FC_PORT_ROLE_NVME_INITIATOR 0x10
+#define FC_PORT_ROLE_NVME_TARGET 0x20
+#define FC_PORT_ROLE_NVME_DISCOVERY 0x40
/* The following are for compatibility */
#define FC_RPORT_ROLE_UNKNOWN FC_PORT_ROLE_UNKNOWN
@@ -473,6 +476,7 @@ enum fc_host_event_code {
FCH_EVT_PORT_ONLINE = 0x202,
FCH_EVT_PORT_FABRIC = 0x204,
FCH_EVT_LINK_UNKNOWN = 0x500,
+ FCH_EVT_LINK_FPIN = 0x501,
FCH_EVT_VENDOR_UNIQUE = 0xffff,
};
@@ -755,7 +759,7 @@ fc_remote_port_chkready(struct fc_rport *rport)
return result;
}
-static inline u64 wwn_to_u64(u8 *wwn)
+static inline u64 wwn_to_u64(const u8 *wwn)
{
return get_unaligned_be64(wwn);
}
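A small usage sketch (illustrative, not part of this patch): comparing a wire-format WWPN, e.g. one copied out of a frame payload, with the remote port's 64-bit port name. The buffer name is hypothetical.

/* 'raw_wwpn' is a hypothetical 8-byte big-endian WWPN from a frame payload. */
static inline bool example_wwpn_matches(const struct fc_rport *rport,
					const u8 raw_wwpn[8])
{
	return wwn_to_u64(raw_wwpn) == rport->port_name;
}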
@@ -798,11 +802,17 @@ u32 fc_get_event_number(void);
void fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
enum fc_host_event_code event_code, u32 event_data);
void fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
- u32 data_len, char * data_buf, u64 vendor_id);
+ u32 data_len, char *data_buf, u64 vendor_id);
+void fc_host_post_fc_event(struct Scsi_Host *shost, u32 event_number,
+ enum fc_host_event_code event_code,
+ u32 data_len, char *data_buf, u64 vendor_id);
/* Note: when specifying vendor_id to fc_host_post_vendor_event()
- * be sure to read the Vendor Type and ID formatting requirements
- * specified in scsi_netlink.h
+ * or fc_host_post_fc_event(), be sure to read the Vendor Type
+ * and ID formatting requirements specified in scsi_netlink.h
+ * Note: when calling fc_host_post_fc_event(), vendor_id may be
+ * specified as 0.
*/
+void fc_host_fpin_rcv(struct Scsi_Host *shost, u32 fpin_len, char *fpin_buf);
struct fc_vport *fc_vport_create(struct Scsi_Host *shost, int channel,
struct fc_vport_identifiers *);
int fc_vport_terminate(struct fc_vport *vport);
diff --git a/include/soc/at91/atmel-sfr.h b/include/soc/at91/atmel-sfr.h
index 482337af06b8..532fd784e86c 100644
--- a/include/soc/at91/atmel-sfr.h
+++ b/include/soc/at91/atmel-sfr.h
@@ -14,21 +14,41 @@
#define _LINUX_MFD_SYSCON_ATMEL_SFR_H
#define AT91_SFR_DDRCFG 0x04 /* DDR Configuration Register */
+#define AT91_SFR_CCFG_EBICSA 0x04 /* EBI Chip Select Register */
/* 0x08 ~ 0x0c: Reserved */
#define AT91_SFR_OHCIICR 0x10 /* OHCI INT Configuration Register */
#define AT91_SFR_OHCIISR 0x14 /* OHCI INT Status Register */
#define AT91_SFR_UTMICKTRIM 0x30 /* UTMI Clock Trimming Register */
+#define AT91_SFR_UTMISWAP 0x3c /* UTMI DP/DM Pin Swapping Register */
+#define AT91_SFR_LS 0x7c /* Light Sleep Register */
#define AT91_SFR_I2SCLKSEL 0x90 /* I2SC Register */
+#define AT91_SFR_WPMR 0xe4 /* Write Protection Mode Register */
/* Field definitions */
-#define AT91_OHCIICR_SUSPEND_A BIT(8)
-#define AT91_OHCIICR_SUSPEND_B BIT(9)
-#define AT91_OHCIICR_SUSPEND_C BIT(10)
+#define AT91_SFR_CCFG_EBI_CSA(cs, val) ((val) << (cs))
+#define AT91_SFR_CCFG_EBI_DBPUC BIT(8)
+#define AT91_SFR_CCFG_EBI_DBPDC BIT(9)
+#define AT91_SFR_CCFG_EBI_DRIVE BIT(17)
+#define AT91_SFR_CCFG_NFD0_ON_D16 BIT(24)
+#define AT91_SFR_CCFG_DDR_MP_EN BIT(25)
-#define AT91_OHCIICR_USB_SUSPEND (AT91_OHCIICR_SUSPEND_A | \
- AT91_OHCIICR_SUSPEND_B | \
- AT91_OHCIICR_SUSPEND_C)
+#define AT91_SFR_OHCIICR_RES(x) BIT(x)
+#define AT91_SFR_OHCIICR_ARIE BIT(4)
+#define AT91_SFR_OHCIICR_APPSTART BIT(5)
+#define AT91_SFR_OHCIICR_USB_SUSP(x) BIT(8 + (x))
+#define AT91_SFR_OHCIICR_UDPPUDIS BIT(23)
+#define AT91_OHCIICR_USB_SUSPEND GENMASK(10, 8)
-#define AT91_UTMICKTRIM_FREQ GENMASK(1, 0)
+#define AT91_SFR_OHCIISR_RIS(x) BIT(x)
+
+#define AT91_UTMICKTRIM_FREQ GENMASK(1, 0)
+
+#define AT91_SFR_UTMISWAP_PORT(x) BIT(x)
+
+#define AT91_SFR_LS_VALUE(x) BIT(x)
+#define AT91_SFR_LS_MEM_POWER_GATING_ULP1_EN BIT(16)
+
+#define AT91_SFR_WPMR_WPEN BIT(0)
+#define AT91_SFR_WPMR_WPKEY_MASK GENMASK(31, 8)
#endif /* _LINUX_MFD_SYSCON_ATMEL_SFR_H */
diff --git a/include/soc/rockchip/rk3399_grf.h b/include/soc/rockchip/rk3399_grf.h
new file mode 100644
index 000000000000..3eebabcb2812
--- /dev/null
+++ b/include/soc/rockchip/rk3399_grf.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Rockchip General Register Files definitions
+ *
+ * Copyright (c) 2018, Collabora Ltd.
+ * Author: Enric Balletbo i Serra <enric.balletbo@collabora.com>
+ */
+
+#ifndef __SOC_RK3399_GRF_H
+#define __SOC_RK3399_GRF_H
+
+/* PMU GRF Registers */
+#define RK3399_PMUGRF_OS_REG2 0x308
+#define RK3399_PMUGRF_DDRTYPE_SHIFT 13
+#define RK3399_PMUGRF_DDRTYPE_MASK 7
+#define RK3399_PMUGRF_DDRTYPE_DDR3 3
+#define RK3399_PMUGRF_DDRTYPE_LPDDR2 5
+#define RK3399_PMUGRF_DDRTYPE_LPDDR3 6
+#define RK3399_PMUGRF_DDRTYPE_LPDDR4 7
+
+#endif
diff --git a/include/soc/rockchip/rockchip_sip.h b/include/soc/rockchip/rockchip_sip.h
index 7e28092c4d3d..ad9482c56797 100644
--- a/include/soc/rockchip/rockchip_sip.h
+++ b/include/soc/rockchip/rockchip_sip.h
@@ -23,5 +23,6 @@
#define ROCKCHIP_SIP_CONFIG_DRAM_GET_RATE 0x05
#define ROCKCHIP_SIP_CONFIG_DRAM_CLR_IRQ 0x06
#define ROCKCHIP_SIP_CONFIG_DRAM_SET_PARAM 0x07
+#define ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD 0x08
#endif
diff --git a/include/sound/core.h b/include/sound/core.h
index e923c23e05dd..c90ebbc8d9c4 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -226,7 +226,6 @@ int copy_from_user_toio(volatile void __iomem *dst, const void __user *src, size
/* init.c */
-extern struct snd_card *snd_cards[SNDRV_CARDS];
int snd_card_locked(int card);
#if IS_ENABLED(CONFIG_SND_MIXER_OSS)
#define SND_MIXER_OSS_NOTIFY_REGISTER 0
@@ -251,7 +250,20 @@ int snd_card_add_dev_attr(struct snd_card *card,
int snd_component_add(struct snd_card *card, const char *component);
int snd_card_file_add(struct snd_card *card, struct file *file);
int snd_card_file_remove(struct snd_card *card, struct file *file);
-#define snd_card_unref(card) put_device(&(card)->card_dev)
+
+struct snd_card *snd_card_ref(int card);
+
+/**
+ * snd_card_unref - Unreference the card object
+ * @card: the card object to unreference
+ *
+ * Call this function for the card object that was obtained via snd_card_ref()
+ * or snd_lookup_minor_data().
+ */
+static inline void snd_card_unref(struct snd_card *card)
+{
+ put_device(&card->card_dev);
+}
#define snd_card_set_dev(card, devptr) ((card)->dev = (devptr))
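The new snd_card_ref()/snd_card_unref() pair is intended as a get/put bracket around short-lived card accesses; a minimal sketch (illustrative only, the card number is arbitrary):

/* Look up card 0, use it briefly, then drop the reference. */
static void example_card_ref_usage(void)
{
	struct snd_card *card = snd_card_ref(0);

	if (!card)
		return;
	/* ... access the card object here ... */
	snd_card_unref(card);
}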
diff --git a/include/sound/da7219.h b/include/sound/da7219.h
index 1bfcb16f2d10..4a36954c86c5 100644
--- a/include/sound/da7219.h
+++ b/include/sound/da7219.h
@@ -33,10 +33,16 @@ enum da7219_mic_amp_in_sel {
struct da7219_aad_pdata;
+enum da7219_dai_clks {
+ DA7219_DAI_WCLK_IDX = 0,
+ DA7219_DAI_BCLK_IDX,
+ DA7219_DAI_NUM_CLKS,
+};
+
struct da7219_pdata {
bool wakeup_source;
- const char *dai_clks_name;
+ const char *dai_clk_names[DA7219_DAI_NUM_CLKS];
/* Mic */
enum da7219_micbias_voltage micbias_lvl;
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 45f944d57982..896c3f45503b 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -297,7 +297,7 @@ struct hdac_rb {
* @num_streams: streams supported
* @idx: HDA link index
* @hlink_list: link list of HDA links
- * @lock: lock for link mgmt
+ * @lock: lock for link and display power mgmt
* @cmd_dma_state: state of cmd DMAs: CORB and RIRB
*/
struct hdac_bus {
@@ -363,21 +363,20 @@ struct hdac_bus {
/* locks */
spinlock_t reg_lock;
struct mutex cmd_mutex;
+ struct mutex lock;
/* DRM component interface */
struct drm_audio_component *audio_component;
long display_power_status;
- bool display_power_active;
+ unsigned long display_power_active;
/* parameters required for enhanced capabilities */
int num_streams;
int idx;
+ /* link management */
struct list_head hlink_list;
-
- struct mutex lock;
bool cmd_dma_state;
-
};
int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 1ac0dd82a916..4c6f3b5a7cff 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -151,9 +151,5 @@ int snd_dma_alloc_pages_fallback(int type, struct device *dev, size_t size,
struct snd_dma_buffer *dmab);
void snd_dma_free_pages(struct snd_dma_buffer *dmab);
-/* basic memory allocation functions */
-void *snd_malloc_pages(size_t size, gfp_t gfp_flags);
-void snd_free_pages(void *ptr, size_t size);
-
#endif /* __SOUND_MEMALLOC_H */
diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h
index 4b9ee3009aa0..c7a5433e109a 100644
--- a/include/sound/seq_kernel.h
+++ b/include/sound/seq_kernel.h
@@ -73,7 +73,8 @@ __printf(3, 4)
int snd_seq_create_kernel_client(struct snd_card *card, int client_index,
const char *name_fmt, ...);
int snd_seq_delete_kernel_client(int client);
-int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event *ev, int atomic, int hop);
+int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event *ev,
+ struct file *file, bool blocking);
int snd_seq_kernel_client_dispatch(int client, struct snd_seq_event *ev, int atomic, int hop);
int snd_seq_kernel_client_ctl(int client, unsigned int cmd, void *arg);
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index 7afe45389972..3429888347e7 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -10,10 +10,10 @@
#include <sound/soc.h>
-#define asoc_simple_card_init_hp(card, sjack, prefix) \
- asoc_simple_card_init_jack(card, sjack, 1, prefix)
-#define asoc_simple_card_init_mic(card, sjack, prefix) \
- asoc_simple_card_init_jack(card, sjack, 0, prefix)
+#define asoc_simple_init_hp(card, sjack, prefix) \
+ asoc_simple_init_jack(card, sjack, 1, prefix)
+#define asoc_simple_init_mic(card, sjack, prefix) \
+ asoc_simple_init_jack(card, sjack, 0, prefix)
struct asoc_simple_dai {
const char *name;
@@ -26,7 +26,7 @@ struct asoc_simple_dai {
struct clk *clk;
};
-struct asoc_simple_card_data {
+struct asoc_simple_data {
u32 convert_rate;
u32 convert_channels;
};
@@ -37,96 +37,180 @@ struct asoc_simple_jack {
struct snd_soc_jack_gpio gpio;
};
-int asoc_simple_card_parse_daifmt(struct device *dev,
- struct device_node *node,
- struct device_node *codec,
- char *prefix,
- unsigned int *retfmt);
+struct asoc_simple_priv {
+ struct snd_soc_card snd_card;
+ struct simple_dai_props {
+ struct asoc_simple_dai *cpu_dai;
+ struct asoc_simple_dai *codec_dai;
+ struct snd_soc_dai_link_component codecs; /* single codec */
+ struct snd_soc_dai_link_component platforms;
+ struct asoc_simple_data adata;
+ struct snd_soc_codec_conf *codec_conf;
+ unsigned int mclk_fs;
+ } *dai_props;
+ struct asoc_simple_jack hp_jack;
+ struct asoc_simple_jack mic_jack;
+ struct snd_soc_dai_link *dai_link;
+ struct asoc_simple_dai *dais;
+ struct snd_soc_codec_conf *codec_conf;
+ struct gpio_desc *pa_gpio;
+};
+#define simple_priv_to_card(priv) (&(priv)->snd_card)
+#define simple_priv_to_props(priv, i) ((priv)->dai_props + (i))
+#define simple_priv_to_dev(priv) (simple_priv_to_card(priv)->dev)
+#define simple_priv_to_link(priv, i) (simple_priv_to_card(priv)->dai_link + (i))
+
+struct link_info {
+ int dais; /* number of dai */
+ int link; /* number of link */
+ int conf; /* number of codec_conf */
+ int cpu; /* turn for CPU / Codec */
+};
+
+int asoc_simple_parse_daifmt(struct device *dev,
+ struct device_node *node,
+ struct device_node *codec,
+ char *prefix,
+ unsigned int *retfmt);
__printf(3, 4)
-int asoc_simple_card_set_dailink_name(struct device *dev,
- struct snd_soc_dai_link *dai_link,
- const char *fmt, ...);
-int asoc_simple_card_parse_card_name(struct snd_soc_card *card,
- char *prefix);
-
-#define asoc_simple_card_parse_clk_cpu(dev, node, dai_link, simple_dai) \
- asoc_simple_card_parse_clk(dev, node, dai_link->cpu_of_node, simple_dai, \
+int asoc_simple_set_dailink_name(struct device *dev,
+ struct snd_soc_dai_link *dai_link,
+ const char *fmt, ...);
+int asoc_simple_parse_card_name(struct snd_soc_card *card,
+ char *prefix);
+
+#define asoc_simple_parse_clk_cpu(dev, node, dai_link, simple_dai) \
+ asoc_simple_parse_clk(dev, node, dai_link->cpu_of_node, simple_dai, \
dai_link->cpu_dai_name, NULL)
-#define asoc_simple_card_parse_clk_codec(dev, node, dai_link, simple_dai) \
- asoc_simple_card_parse_clk(dev, node, dai_link->codec_of_node, simple_dai,\
+#define asoc_simple_parse_clk_codec(dev, node, dai_link, simple_dai) \
+ asoc_simple_parse_clk(dev, node, dai_link->codec_of_node, simple_dai,\
dai_link->codec_dai_name, dai_link->codecs)
-int asoc_simple_card_parse_clk(struct device *dev,
- struct device_node *node,
- struct device_node *dai_of_node,
- struct asoc_simple_dai *simple_dai,
- const char *dai_name,
- struct snd_soc_dai_link_component *dlc);
-int asoc_simple_card_clk_enable(struct asoc_simple_dai *dai);
-void asoc_simple_card_clk_disable(struct asoc_simple_dai *dai);
-
-#define asoc_simple_card_parse_cpu(node, dai_link, \
- list_name, cells_name, is_single_link) \
- asoc_simple_card_parse_dai(node, NULL, \
- &dai_link->cpu_of_node, \
- &dai_link->cpu_dai_name, list_name, cells_name, is_single_link)
-#define asoc_simple_card_parse_codec(node, dai_link, list_name, cells_name) \
- asoc_simple_card_parse_dai(node, dai_link->codecs, \
+int asoc_simple_parse_clk(struct device *dev,
+ struct device_node *node,
+ struct device_node *dai_of_node,
+ struct asoc_simple_dai *simple_dai,
+ const char *dai_name,
+ struct snd_soc_dai_link_component *dlc);
+int asoc_simple_startup(struct snd_pcm_substream *substream);
+void asoc_simple_shutdown(struct snd_pcm_substream *substream);
+int asoc_simple_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params);
+int asoc_simple_dai_init(struct snd_soc_pcm_runtime *rtd);
+int asoc_simple_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params);
+
+#define asoc_simple_parse_cpu(node, dai_link, is_single_link) \
+ asoc_simple_parse_dai(node, NULL, \
+ &dai_link->cpu_of_node, \
+ &dai_link->cpu_dai_name, is_single_link)
+#define asoc_simple_parse_codec(node, dai_link) \
+ asoc_simple_parse_dai(node, dai_link->codecs, \
&dai_link->codec_of_node, \
- &dai_link->codec_dai_name, \
- list_name, cells_name, NULL)
-#define asoc_simple_card_parse_platform(node, dai_link, list_name, cells_name) \
- asoc_simple_card_parse_dai(node, dai_link->platforms, \
- &dai_link->platform_of_node, \
- NULL, list_name, cells_name, NULL)
-int asoc_simple_card_parse_dai(struct device_node *node,
- struct snd_soc_dai_link_component *dlc,
- struct device_node **endpoint_np,
- const char **dai_name,
- const char *list_name,
- const char *cells_name,
- int *is_single_links);
-
-#define asoc_simple_card_parse_graph_cpu(ep, dai_link) \
- asoc_simple_card_parse_graph_dai(ep, NULL, \
- &dai_link->cpu_of_node, \
- &dai_link->cpu_dai_name)
-#define asoc_simple_card_parse_graph_codec(ep, dai_link) \
- asoc_simple_card_parse_graph_dai(ep, dai_link->codecs, \
- &dai_link->codec_of_node, \
- &dai_link->codec_dai_name)
-int asoc_simple_card_parse_graph_dai(struct device_node *ep,
- struct snd_soc_dai_link_component *dlc,
- struct device_node **endpoint_np,
- const char **dai_name);
-
-#define asoc_simple_card_of_parse_tdm(np, dai) \
+ &dai_link->codec_dai_name, NULL)
+#define asoc_simple_parse_platform(node, dai_link) \
+ asoc_simple_parse_dai(node, dai_link->platforms, \
+ &dai_link->platform_of_node, NULL, NULL)
+
+#define asoc_simple_parse_tdm(np, dai) \
snd_soc_of_parse_tdm_slot(np, &(dai)->tx_slot_mask, \
&(dai)->rx_slot_mask, \
&(dai)->slots, \
&(dai)->slot_width);
-int asoc_simple_card_init_dai(struct snd_soc_dai *dai,
- struct asoc_simple_dai *simple_dai);
-
-void asoc_simple_card_canonicalize_platform(struct snd_soc_dai_link *dai_link);
-void asoc_simple_card_canonicalize_cpu(struct snd_soc_dai_link *dai_link,
+void asoc_simple_canonicalize_platform(struct snd_soc_dai_link *dai_link);
+void asoc_simple_canonicalize_cpu(struct snd_soc_dai_link *dai_link,
int is_single_links);
-int asoc_simple_card_clean_reference(struct snd_soc_card *card);
+int asoc_simple_clean_reference(struct snd_soc_card *card);
-void asoc_simple_card_convert_fixup(struct asoc_simple_card_data *data,
+void asoc_simple_convert_fixup(struct asoc_simple_data *data,
struct snd_pcm_hw_params *params);
-void asoc_simple_card_parse_convert(struct device *dev,
- struct device_node *np, char *prefix,
- struct asoc_simple_card_data *data);
+void asoc_simple_parse_convert(struct device *dev,
+ struct device_node *np, char *prefix,
+ struct asoc_simple_data *data);
-int asoc_simple_card_of_parse_routing(struct snd_soc_card *card,
+int asoc_simple_parse_routing(struct snd_soc_card *card,
char *prefix);
-int asoc_simple_card_of_parse_widgets(struct snd_soc_card *card,
+int asoc_simple_parse_widgets(struct snd_soc_card *card,
char *prefix);
+int asoc_simple_parse_pin_switches(struct snd_soc_card *card,
+ char *prefix);
-int asoc_simple_card_init_jack(struct snd_soc_card *card,
+int asoc_simple_init_jack(struct snd_soc_card *card,
struct asoc_simple_jack *sjack,
int is_hp, char *prefix);
+int asoc_simple_init_priv(struct asoc_simple_priv *priv,
+ struct link_info *li);
+
+#ifdef DEBUG
+inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
+ char *name,
+ struct asoc_simple_dai *dai)
+{
+ struct device *dev = simple_priv_to_dev(priv);
+
+ if (dai->name)
+ dev_dbg(dev, "%s dai name = %s\n",
+ name, dai->name);
+ if (dai->sysclk)
+ dev_dbg(dev, "%s sysclk = %d\n",
+ name, dai->sysclk);
+
+ dev_dbg(dev, "%s direction = %s\n",
+ name, dai->clk_direction ? "OUT" : "IN");
+
+ if (dai->slots)
+ dev_dbg(dev, "%s slots = %d\n", name, dai->slots);
+ if (dai->slot_width)
+ dev_dbg(dev, "%s slot width = %d\n", name, dai->slot_width);
+ if (dai->tx_slot_mask)
+ dev_dbg(dev, "%s tx slot mask = %d\n", name, dai->tx_slot_mask);
+ if (dai->rx_slot_mask)
+ dev_dbg(dev, "%s rx slot mask = %d\n", name, dai->rx_slot_mask);
+ if (dai->clk)
+ dev_dbg(dev, "%s clk %luHz\n", name, clk_get_rate(dai->clk));
+}
+
+inline void asoc_simple_debug_info(struct asoc_simple_priv *priv)
+{
+ struct snd_soc_card *card = simple_priv_to_card(priv);
+ struct device *dev = simple_priv_to_dev(priv);
+
+ int i;
+
+ if (card->name)
+ dev_dbg(dev, "Card Name: %s\n", card->name);
+
+ for (i = 0; i < card->num_links; i++) {
+ struct simple_dai_props *props = simple_priv_to_props(priv, i);
+ struct snd_soc_dai_link *link = simple_priv_to_link(priv, i);
+
+ dev_dbg(dev, "DAI%d\n", i);
+
+ asoc_simple_debug_dai(priv, "cpu", props->cpu_dai);
+ asoc_simple_debug_dai(priv, "codec", props->codec_dai);
+
+ if (link->name)
+ dev_dbg(dev, "dai name = %s\n", link->name);
+
+ dev_dbg(dev, "dai format = %04x\n", link->dai_fmt);
+
+ if (props->adata.convert_rate)
+ dev_dbg(dev, "convert_rate = %d\n",
+ props->adata.convert_rate);
+ if (props->adata.convert_channels)
+ dev_dbg(dev, "convert_channels = %d\n",
+ props->adata.convert_channels);
+ if (props->codec_conf && props->codec_conf->name_prefix)
+ dev_dbg(dev, "name prefix = %s\n",
+ props->codec_conf->name_prefix);
+ if (props->mclk_fs)
+ dev_dbg(dev, "mclk-fs = %d\n",
+ props->mclk_fs);
+ }
+}
+#else
+#define asoc_simple_debug_info(priv)
+#endif /* DEBUG */
#endif /* __SIMPLE_CARD_UTILS_H */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index eb7db605955b..482b4ea87c3c 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -802,8 +802,13 @@ struct snd_soc_component_driver {
int probe_order;
int remove_order;
- /* signal if the module handling the component cannot be removed */
- unsigned int ignore_module_refcount:1;
+ /*
+ * signal if the module handling the component should not be removed
+ * if a pcm is open. Setting this would prevent the module
+	 * refcount being incremented in probe() but allow it to be incremented
+ * when a pcm is opened and decremented when it is closed.
+ */
+ unsigned int module_get_upon_open:1;
/* bits */
unsigned int idle_bias_on:1;
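An illustrative (hypothetical) component driver opting into the new flag; only the flag assignment matters here:

/* Hypothetical codec component; module ref is taken on open, dropped on close. */
static const struct snd_soc_component_driver example_component = {
	.name			= "example-codec",
	.module_get_upon_open	= 1,
};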
@@ -1083,6 +1088,8 @@ struct snd_soc_card {
struct mutex mutex;
struct mutex dapm_mutex;
+ spinlock_t dpcm_lock;
+
bool instantiated;
bool topology_shortname_created;
diff --git a/include/sound/sof.h b/include/sound/sof.h
new file mode 100644
index 000000000000..4640566b54fe
--- /dev/null
+++ b/include/sound/sof.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ *
+ * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_H
+#define __INCLUDE_SOUND_SOF_H
+
+#include <linux/pci.h>
+#include <sound/soc.h>
+#include <sound/soc-acpi.h>
+
+struct snd_sof_dsp_ops;
+
+/*
+ * SOF Platform data.
+ */
+struct snd_sof_pdata {
+ const struct firmware *fw;
+ const char *drv_name;
+ const char *name;
+ const char *platform;
+
+ struct device *dev;
+
+ /*
+ * notification callback used if the hardware initialization
+ * can take time or is handled in a workqueue. This callback
+ * can be used by the caller to e.g. enable runtime_pm
+ * or limit functionality until all low-level inits are
+ * complete.
+ */
+ void (*sof_probe_complete)(struct device *dev);
+
+ /* descriptor */
+ const struct sof_dev_desc *desc;
+
+ /* firmware and topology filenames */
+ const char *fw_filename_prefix;
+ const char *fw_filename;
+ const char *tplg_filename_prefix;
+ const char *tplg_filename;
+
+ /* machine */
+ struct platform_device *pdev_mach;
+ const struct snd_soc_acpi_mach *machine;
+
+ void *hw_pdata;
+};
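A hedged sketch of the sof_probe_complete callback described above, assuming a platform that simply wants runtime PM enabled once the DSP has finished booting (the function name is hypothetical):

#include <linux/pm_runtime.h>

/* Hypothetical callback: allow runtime PM once low-level init is done. */
static void example_sof_probe_complete(struct device *dev)
{
	pm_runtime_enable(dev);
}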
+
+/*
+ * Descriptor used for setting up SOF platform data. This is used when
+ * ACPI/PCI data is missing or mapped differently.
+ */
+struct sof_dev_desc {
+ /* list of machines using this configuration */
+ struct snd_soc_acpi_mach *machines;
+
+ /* Platform resource indexes in BAR / ACPI resources. */
+	/* Must be set to -1 if not used - add new items to the end */
+ int resindex_lpe_base;
+ int resindex_pcicfg_base;
+ int resindex_imr_base;
+ int irqindex_host_ipc;
+ int resindex_dma_base;
+
+ /* DMA only valid when resindex_dma_base != -1*/
+ int dma_engine;
+ int dma_size;
+
+ /* IPC timeouts in ms */
+ int ipc_timeout;
+ int boot_timeout;
+
+ /* chip information for dsp */
+ const void *chip_info;
+
+ /* defaults for no codec mode */
+ const char *nocodec_fw_filename;
+ const char *nocodec_tplg_filename;
+
+ /* defaults paths for firmware and topology files */
+ const char *default_fw_path;
+ const char *default_tplg_path;
+
+ const struct snd_sof_dsp_ops *ops;
+ const struct sof_arch_ops *arch_ops;
+};
+
+int sof_nocodec_setup(struct device *dev,
+ struct snd_sof_pdata *sof_pdata,
+ struct snd_soc_acpi_mach *mach,
+ const struct sof_dev_desc *desc,
+ const struct snd_sof_dsp_ops *ops);
+#endif
diff --git a/include/sound/sof/control.h b/include/sound/sof/control.h
new file mode 100644
index 000000000000..bded69e696d4
--- /dev/null
+++ b/include/sound/sof/control.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_CONTROL_H__
+#define __INCLUDE_SOUND_SOF_CONTROL_H__
+
+#include <uapi/sound/sof/header.h>
+#include <sound/sof/header.h>
+
+/*
+ * Component Mixers and Controls
+ */
+
+/* channel positions - uses same values as ALSA */
+enum sof_ipc_chmap {
+ SOF_CHMAP_UNKNOWN = 0,
+ SOF_CHMAP_NA, /**< N/A, silent */
+ SOF_CHMAP_MONO, /**< mono stream */
+ SOF_CHMAP_FL, /**< front left */
+ SOF_CHMAP_FR, /**< front right */
+ SOF_CHMAP_RL, /**< rear left */
+ SOF_CHMAP_RR, /**< rear right */
+ SOF_CHMAP_FC, /**< front centre */
+ SOF_CHMAP_LFE, /**< LFE */
+ SOF_CHMAP_SL, /**< side left */
+ SOF_CHMAP_SR, /**< side right */
+ SOF_CHMAP_RC, /**< rear centre */
+ SOF_CHMAP_FLC, /**< front left centre */
+ SOF_CHMAP_FRC, /**< front right centre */
+ SOF_CHMAP_RLC, /**< rear left centre */
+ SOF_CHMAP_RRC, /**< rear right centre */
+ SOF_CHMAP_FLW, /**< front left wide */
+ SOF_CHMAP_FRW, /**< front right wide */
+ SOF_CHMAP_FLH, /**< front left high */
+ SOF_CHMAP_FCH, /**< front centre high */
+ SOF_CHMAP_FRH, /**< front right high */
+ SOF_CHMAP_TC, /**< top centre */
+ SOF_CHMAP_TFL, /**< top front left */
+ SOF_CHMAP_TFR, /**< top front right */
+ SOF_CHMAP_TFC, /**< top front centre */
+ SOF_CHMAP_TRL, /**< top rear left */
+ SOF_CHMAP_TRR, /**< top rear right */
+ SOF_CHMAP_TRC, /**< top rear centre */
+ SOF_CHMAP_TFLC, /**< top front left centre */
+ SOF_CHMAP_TFRC, /**< top front right centre */
+ SOF_CHMAP_TSL, /**< top side left */
+ SOF_CHMAP_TSR, /**< top side right */
+ SOF_CHMAP_LLFE, /**< left LFE */
+ SOF_CHMAP_RLFE, /**< right LFE */
+ SOF_CHMAP_BC, /**< bottom centre */
+ SOF_CHMAP_BLC, /**< bottom left centre */
+ SOF_CHMAP_BRC, /**< bottom right centre */
+ SOF_CHMAP_LAST = SOF_CHMAP_BRC,
+};
+
+/* control data type and direction */
+enum sof_ipc_ctrl_type {
+ /* per channel data - uses struct sof_ipc_ctrl_value_chan */
+ SOF_CTRL_TYPE_VALUE_CHAN_GET = 0,
+ SOF_CTRL_TYPE_VALUE_CHAN_SET,
+ /* component data - uses struct sof_ipc_ctrl_value_comp */
+ SOF_CTRL_TYPE_VALUE_COMP_GET,
+ SOF_CTRL_TYPE_VALUE_COMP_SET,
+ /* bespoke data - uses struct sof_abi_hdr */
+ SOF_CTRL_TYPE_DATA_GET,
+ SOF_CTRL_TYPE_DATA_SET,
+};
+
+/* control command type */
+enum sof_ipc_ctrl_cmd {
+ SOF_CTRL_CMD_VOLUME = 0, /**< maps to ALSA volume style controls */
+ SOF_CTRL_CMD_ENUM, /**< maps to ALSA enum style controls */
+ SOF_CTRL_CMD_SWITCH, /**< maps to ALSA switch style controls */
+ SOF_CTRL_CMD_BINARY, /**< maps to ALSA binary style controls */
+};
+
+/* generic channel mapped value data */
+struct sof_ipc_ctrl_value_chan {
+ uint32_t channel; /**< channel map - enum sof_ipc_chmap */
+ uint32_t value;
+} __packed;
+
+/* generic component mapped value data */
+struct sof_ipc_ctrl_value_comp {
+ uint32_t index; /**< component source/sink/control index in control */
+ union {
+ uint32_t uvalue;
+ int32_t svalue;
+ };
+} __packed;
+
+/* generic control data */
+struct sof_ipc_ctrl_data {
+ struct sof_ipc_reply rhdr;
+ uint32_t comp_id;
+
+ /* control access and data type */
+ uint32_t type; /**< enum sof_ipc_ctrl_type */
+ uint32_t cmd; /**< enum sof_ipc_ctrl_cmd */
+ uint32_t index; /**< control index for comps > 1 control */
+
+ /* control data - can either be appended or DMAed from host */
+ struct sof_ipc_host_buffer buffer;
+ uint32_t num_elems; /**< in array elems or bytes for data type */
+ uint32_t elems_remaining; /**< elems remaining if sent in parts */
+
+ uint32_t msg_index; /**< for large messages sent in parts */
+
+ /* reserved for future use */
+ uint32_t reserved[6];
+
+ /* control data - add new types if needed */
+ union {
+ /* channel values can be used by volume type controls */
+ struct sof_ipc_ctrl_value_chan chanv[0];
+ /* component values used by routing controls like mux, mixer */
+ struct sof_ipc_ctrl_value_comp compv[0];
+ /* data can be used by binary controls */
+ struct sof_abi_hdr data[0];
+ };
+} __packed;
+
+/** Event type */
+enum sof_ipc_ctrl_event_type {
+ SOF_CTRL_EVENT_GENERIC = 0, /**< generic event */
+ SOF_CTRL_EVENT_GENERIC_METADATA, /**< generic event with metadata */
+ SOF_CTRL_EVENT_KD, /**< keyword detection event */
+ SOF_CTRL_EVENT_VAD, /**< voice activity detection event */
+};
+
+/**
+ * Generic notification data.
+ */
+struct sof_ipc_comp_event {
+ struct sof_ipc_reply rhdr;
+ uint16_t src_comp_type; /**< COMP_TYPE_ */
+ uint32_t src_comp_id; /**< source component id */
+ uint32_t event_type; /**< event type - SOF_CTRL_EVENT_* */
+ uint32_t num_elems; /**< in array elems or bytes for data type */
+
+ /* reserved for future use */
+ uint32_t reserved[8];
+
+ /* control data - add new types if needed */
+ union {
+ /* data can be used by binary controls */
+ struct sof_abi_hdr data[0];
+ /* event specific values */
+ uint32_t event_value;
+ };
+} __packed;
+
+#endif
diff --git a/include/sound/sof/dai-intel.h b/include/sound/sof/dai-intel.h
new file mode 100644
index 000000000000..4bd83f7adddf
--- /dev/null
+++ b/include/sound/sof/dai-intel.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_DAI_INTEL_H__
+#define __INCLUDE_SOUND_SOF_DAI_INTEL_H__
+
+#include <sound/sof/header.h>
+
+ /* ssc1: TINTE */
+#define SOF_DAI_INTEL_SSP_QUIRK_TINTE (1 << 0)
+ /* ssc1: PINTE */
+#define SOF_DAI_INTEL_SSP_QUIRK_PINTE (1 << 1)
+ /* ssc2: SMTATF */
+#define SOF_DAI_INTEL_SSP_QUIRK_SMTATF (1 << 2)
+ /* ssc2: MMRATF */
+#define SOF_DAI_INTEL_SSP_QUIRK_MMRATF (1 << 3)
+ /* ssc2: PSPSTWFDFD */
+#define SOF_DAI_INTEL_SSP_QUIRK_PSPSTWFDFD (1 << 4)
+ /* ssc2: PSPSRWFDFD */
+#define SOF_DAI_INTEL_SSP_QUIRK_PSPSRWFDFD (1 << 5)
+/* ssc1: LBM */
+#define SOF_DAI_INTEL_SSP_QUIRK_LBM (1 << 6)
+
+ /* additional aux macros can be defined here */
+
+#define SOF_DAI_INTEL_SSP_FRAME_PULSE_WIDTH_MAX 38
+#define SOF_DAI_INTEL_SSP_SLOT_PADDING_MAX 31
+
+/* SSP clocks control settings
+ *
+ * Macros for clks_control field in sof_ipc_dai_ssp_params struct.
+ */
+
+/* mclk 0 disable */
+#define SOF_DAI_INTEL_SSP_MCLK_0_DISABLE BIT(0)
+/* mclk 1 disable */
+#define SOF_DAI_INTEL_SSP_MCLK_1_DISABLE BIT(1)
+/* mclk keep active */
+#define SOF_DAI_INTEL_SSP_CLKCTRL_MCLK_KA BIT(2)
+/* bclk keep active */
+#define SOF_DAI_INTEL_SSP_CLKCTRL_BCLK_KA BIT(3)
+/* fs keep active */
+#define SOF_DAI_INTEL_SSP_CLKCTRL_FS_KA BIT(4)
+/* bclk idle */
+#define SOF_DAI_INTEL_SSP_CLKCTRL_BCLK_IDLE_HIGH BIT(5)
+
+/* SSP Configuration Request - SOF_IPC_DAI_SSP_CONFIG */
+struct sof_ipc_dai_ssp_params {
+ struct sof_ipc_hdr hdr;
+ uint16_t reserved1;
+ uint16_t mclk_id;
+
+ uint32_t mclk_rate; /* mclk frequency in Hz */
+ uint32_t fsync_rate; /* fsync frequency in Hz */
+ uint32_t bclk_rate; /* bclk frequency in Hz */
+
+ /* TDM */
+ uint32_t tdm_slots;
+ uint32_t rx_slots;
+ uint32_t tx_slots;
+
+ /* data */
+ uint32_t sample_valid_bits;
+ uint16_t tdm_slot_width;
+ uint16_t reserved2; /* alignment */
+
+ /* MCLK */
+ uint32_t mclk_direction;
+
+ uint16_t frame_pulse_width;
+ uint16_t tdm_per_slot_padding_flag;
+ uint32_t clks_control;
+ uint32_t quirks;
+} __packed;
+
+/* HDA Configuration Request - SOF_IPC_DAI_HDA_CONFIG */
+struct sof_ipc_dai_hda_params {
+ struct sof_ipc_hdr hdr;
+ uint32_t link_dma_ch;
+} __packed;
+
+/* DMIC Configuration Request - SOF_IPC_DAI_DMIC_CONFIG */
+
+/* This struct is defined per 2ch PDM controller available in the platform.
+ * Normally it is sufficient to set the used microphone specific enables to 1
+ * and keep other parameters as zero. The customizations are:
+ *
+ * 1. If a device mixes different microphone types with different polarity,
+ * and/or the absolute polarity matters, the PCM signal from a microphone
+ * can be inverted with the controls.
+ *
+ * 2. If the microphones in a stereo pair do not appear in the captured stream
+ * in the desired order due to board schematic choices, they can be swapped
+ * with the clk_edge parameter.
+ *
+ * 3. If PDM bit errors are seen in capture (poor quality), the skew parameter
+ * that delays the data sampling time by half cycles of the DMIC source clock
+ * can be tried for improvement. However, there is no guarantee that this
+ * fixes data integrity problems.
+ */
+struct sof_ipc_dai_dmic_pdm_ctrl {
+ struct sof_ipc_hdr hdr;
+ uint16_t id; /**< PDM controller ID */
+
+ uint16_t enable_mic_a; /**< Use A (left) channel mic (0 or 1)*/
+ uint16_t enable_mic_b; /**< Use B (right) channel mic (0 or 1)*/
+
+ uint16_t polarity_mic_a; /**< Optionally invert mic A signal (0 or 1) */
+ uint16_t polarity_mic_b; /**< Optionally invert mic B signal (0 or 1) */
+
+ uint16_t clk_edge; /**< Optionally swap data clock edge (0 or 1) */
+ uint16_t skew; /**< Adjust PDM data sampling vs. clock (0..15) */
+
+ uint16_t reserved[3]; /**< Make sure the total size is 4 bytes aligned */
+} __packed;
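An illustrative per-controller setup (values are examples, not requirements) following the customization notes above: enable both microphones of controller 0 and swap the stereo pair via clk_edge.

/* Example only: PDM controller 0, both mics on, A/B order swapped. */
static const struct sof_ipc_dai_dmic_pdm_ctrl example_pdm0 = {
	.id		= 0,
	.enable_mic_a	= 1,
	.enable_mic_b	= 1,
	.clk_edge	= 1,	/* swap the pair in the captured stream */
};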
+
+/* This struct contains the global settings for all 2ch PDM controllers. The
+ * version number used in the configuration data is checked against the
+ * version used by the device driver (src/drivers/dmic.c); they need to match.
+ * It is incremented from the initial value 1 if a driver update would alter
+ * the operation of the microphone.
+ *
+ * Note: The microphone clock (pdmclk_min, pdmclk_max, duty_min, duty_max)
+ * parameters need to be set as defined in the microphone data sheet. E.g. a
+ * clock range of 1.0 - 3.2 MHz is usually supported by microphones. Some
+ * microphones are multi-mode capable and there may be denied mic clock
+ * frequencies between the modes. In such a case set the clock range limits
+ * of the desired mode to keep the driver from setting the clock to an
+ * illegal rate.
+ *
+ * The duty cycle can be set to 48-52% if not known. Generally these
+ * parameters can be altered within the data-sheet-specified limits to match
+ * the required audio application performance and power.
+ *
+ * The microphone clock usually needs to be about 50-80 times the audio
+ * sample rate in use. With the highest sample rates, above 48 kHz, this can
+ * be relaxed somewhat.
+ *
+ * The wake_up_time parameter describes how long the microphone needs from
+ * mic clock start until the data line produces valid output. The driver
+ * will mute the captured audio for that time. The min_clock_on_time
+ * parameter is used to prevent too-short clock bursts. The driver will keep
+ * the clock active after capture stop if this time has not yet been met.
+ * The unit for both is microseconds (us). Exceeding 100 ms will be treated
+ * as an error.
+ */
+struct sof_ipc_dai_dmic_params {
+ struct sof_ipc_hdr hdr;
+ uint32_t driver_ipc_version; /**< Version (1..N) */
+
+ uint32_t pdmclk_min; /**< Minimum microphone clock in Hz (100000..N) */
+ uint32_t pdmclk_max; /**< Maximum microphone clock in Hz (min...N) */
+
+ uint32_t fifo_fs; /**< FIFO sample rate in Hz (8000..96000) */
+ uint32_t reserved_1; /**< Reserved */
+ uint16_t fifo_bits; /**< FIFO word length (16 or 32) */
+ uint16_t reserved_2; /**< Reserved */
+
+ uint16_t duty_min; /**< Min. mic clock duty cycle in % (20..80) */
+ uint16_t duty_max; /**< Max. mic clock duty cycle in % (min..80) */
+
+ uint32_t num_pdm_active; /**< Number of active pdm controllers */
+
+ uint32_t wake_up_time; /**< Time from clock start to data (us) */
+ uint32_t min_clock_on_time; /**< Min. time that clk is kept on (us) */
+
+ /* reserved for future use */
+ uint32_t reserved[6];
+
+ /**< variable number of pdm controller config */
+ struct sof_ipc_dai_dmic_pdm_ctrl pdm[0];
+} __packed;
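
Because pdm[] is a variable-length trailer, the allocation and hdr.size have to account for num_pdm_active entries. A hedged sketch; the helper name and use of kzalloc are illustrative, not taken from the patch:

#include <linux/slab.h>

/* Sketch: size must cover the trailing variable-length pdm[] array */
static struct sof_ipc_dai_dmic_params *dmic_params_alloc(uint32_t num_pdm)
{
        struct sof_ipc_dai_dmic_params *dmic;
        size_t size = sizeof(*dmic) + num_pdm * sizeof(dmic->pdm[0]);

        dmic = kzalloc(size, GFP_KERNEL);
        if (!dmic)
                return NULL;

        dmic->hdr.size = size;
        dmic->driver_ipc_version = 1;
        dmic->num_pdm_active = num_pdm;
        return dmic;
}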
+
+#endif
diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h
new file mode 100644
index 000000000000..3b67c93ff101
--- /dev/null
+++ b/include/sound/sof/dai.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_DAI_H__
+#define __INCLUDE_SOUND_SOF_DAI_H__
+
+#include <sound/sof/header.h>
+#include <sound/sof/dai-intel.h>
+
+/*
+ * DAI Configuration.
+ *
+ * Each different DAI type will have its own structure and IPC cmd.
+ */
+
+#define SOF_DAI_FMT_I2S 1 /**< I2S mode */
+#define SOF_DAI_FMT_RIGHT_J 2 /**< Right Justified mode */
+#define SOF_DAI_FMT_LEFT_J 3 /**< Left Justified mode */
+#define SOF_DAI_FMT_DSP_A 4 /**< L data MSB after FRM LRC */
+#define SOF_DAI_FMT_DSP_B 5 /**< L data MSB during FRM LRC */
+#define SOF_DAI_FMT_PDM 6 /**< Pulse density modulation */
+
+#define SOF_DAI_FMT_CONT (1 << 4) /**< continuous clock */
+#define SOF_DAI_FMT_GATED (0 << 4) /**< clock is gated */
+
+#define SOF_DAI_FMT_NB_NF (0 << 8) /**< normal bit clock + frame */
+#define SOF_DAI_FMT_NB_IF (2 << 8) /**< normal BCLK + inv FRM */
+#define SOF_DAI_FMT_IB_NF (3 << 8) /**< invert BCLK + nor FRM */
+#define SOF_DAI_FMT_IB_IF (4 << 8) /**< invert BCLK + FRM */
+
+#define SOF_DAI_FMT_CBM_CFM (0 << 12) /**< codec clk & FRM master */
+#define SOF_DAI_FMT_CBS_CFM (2 << 12) /**< codec clk slave & FRM master */
+#define SOF_DAI_FMT_CBM_CFS (3 << 12) /**< codec clk master & frame slave */
+#define SOF_DAI_FMT_CBS_CFS (4 << 12) /**< codec clk & FRM slave */
+
+#define SOF_DAI_FMT_FORMAT_MASK 0x000f
+#define SOF_DAI_FMT_CLOCK_MASK 0x00f0
+#define SOF_DAI_FMT_INV_MASK 0x0f00
+#define SOF_DAI_FMT_MASTER_MASK 0xf000
+
+/** \brief Types of DAI */
+enum sof_ipc_dai_type {
+ SOF_DAI_INTEL_NONE = 0, /**< None */
+ SOF_DAI_INTEL_SSP, /**< Intel SSP */
+ SOF_DAI_INTEL_DMIC, /**< Intel DMIC */
+ SOF_DAI_INTEL_HDA, /**< Intel HD/A */
+};
+
+/* general purpose DAI configuration */
+struct sof_ipc_dai_config {
+ struct sof_ipc_cmd_hdr hdr;
+ uint32_t type; /**< DAI type - enum sof_ipc_dai_type */
+ uint32_t dai_index; /**< index of this type dai */
+
+ /* physical protocol and clocking */
+ uint16_t format; /**< SOF_DAI_FMT_ */
+ uint16_t reserved16; /**< alignment */
+
+ /* reserved for future use */
+ uint32_t reserved[8];
+
+ /* HW specific data */
+ union {
+ struct sof_ipc_dai_ssp_params ssp;
+ struct sof_ipc_dai_dmic_params dmic;
+ struct sof_ipc_dai_hda_params hda;
+ };
+} __packed;
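
A sketch of how the pieces might fit together for an SSP port, using the SOF_DAI_FMT_* bits defined above; every numeric value is illustrative only:

#include <linux/string.h>
#include <sound/sof/dai.h>

/* Sketch: SSP port 0 as I2S, codec is bit clock and frame master */
static void dai_config_ssp0(struct sof_ipc_dai_config *config)
{
        memset(config, 0, sizeof(*config));
        config->hdr.size = sizeof(*config);
        config->hdr.cmd = SOF_IPC_GLB_DAI_MSG | SOF_IPC_DAI_CONFIG;
        config->type = SOF_DAI_INTEL_SSP;
        config->dai_index = 0;
        config->format = SOF_DAI_FMT_I2S | SOF_DAI_FMT_GATED |
                         SOF_DAI_FMT_NB_NF | SOF_DAI_FMT_CBM_CFM;

        /* illustrative clocking: 48 kHz frame, 2 slots of 32 bits */
        config->ssp.mclk_rate = 19200000;
        config->ssp.fsync_rate = 48000;
        config->ssp.bclk_rate = 48000 * 2 * 32;
        config->ssp.tdm_slots = 2;
        config->ssp.tdm_slot_width = 32;
        config->ssp.sample_valid_bits = 24;
}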
+
+#endif
diff --git a/include/sound/sof/header.h b/include/sound/sof/header.h
new file mode 100644
index 000000000000..ccb6a004b37b
--- /dev/null
+++ b/include/sound/sof/header.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_HEADER_H__
+#define __INCLUDE_SOUND_SOF_HEADER_H__
+
+#include <uapi/sound/sof/abi.h>
+
+/** \addtogroup sof_uapi uAPI
+ * SOF uAPI specification.
+ * @{
+ */
+
+/*
+ * IPC messages have a prefixed 32 bit identifier made up as follows :-
+ *
+ * 0xGCCCNNNN where
+ * G is global cmd type (4 bits)
+ * C is command type (12 bits)
+ * N is the ID number (16 bits) - monotonic and overflows
+ *
+ * This is sent at the start of the IPC message in the mailbox. Messages should
+ * not be sent in the doorbell (special exceptions for firmware).
+ */
+
+/* Global Message - Generic */
+#define SOF_GLB_TYPE_SHIFT 28
+#define SOF_GLB_TYPE_MASK (0xf << SOF_GLB_TYPE_SHIFT)
+#define SOF_GLB_TYPE(x) ((x) << SOF_GLB_TYPE_SHIFT)
+
+/* Command Message - Generic */
+#define SOF_CMD_TYPE_SHIFT 16
+#define SOF_CMD_TYPE_MASK (0xfff << SOF_CMD_TYPE_SHIFT)
+#define SOF_CMD_TYPE(x) ((x) << SOF_CMD_TYPE_SHIFT)
+
+/* Global Message Types */
+#define SOF_IPC_GLB_REPLY SOF_GLB_TYPE(0x1U)
+#define SOF_IPC_GLB_COMPOUND SOF_GLB_TYPE(0x2U)
+#define SOF_IPC_GLB_TPLG_MSG SOF_GLB_TYPE(0x3U)
+#define SOF_IPC_GLB_PM_MSG SOF_GLB_TYPE(0x4U)
+#define SOF_IPC_GLB_COMP_MSG SOF_GLB_TYPE(0x5U)
+#define SOF_IPC_GLB_STREAM_MSG SOF_GLB_TYPE(0x6U)
+#define SOF_IPC_FW_READY SOF_GLB_TYPE(0x7U)
+#define SOF_IPC_GLB_DAI_MSG SOF_GLB_TYPE(0x8U)
+#define SOF_IPC_GLB_TRACE_MSG SOF_GLB_TYPE(0x9U)
+
+/*
+ * DSP Command Message Types
+ */
+
+/* topology */
+#define SOF_IPC_TPLG_COMP_NEW SOF_CMD_TYPE(0x001)
+#define SOF_IPC_TPLG_COMP_FREE SOF_CMD_TYPE(0x002)
+#define SOF_IPC_TPLG_COMP_CONNECT SOF_CMD_TYPE(0x003)
+#define SOF_IPC_TPLG_PIPE_NEW SOF_CMD_TYPE(0x010)
+#define SOF_IPC_TPLG_PIPE_FREE SOF_CMD_TYPE(0x011)
+#define SOF_IPC_TPLG_PIPE_CONNECT SOF_CMD_TYPE(0x012)
+#define SOF_IPC_TPLG_PIPE_COMPLETE SOF_CMD_TYPE(0x013)
+#define SOF_IPC_TPLG_BUFFER_NEW SOF_CMD_TYPE(0x020)
+#define SOF_IPC_TPLG_BUFFER_FREE SOF_CMD_TYPE(0x021)
+
+/* PM */
+#define SOF_IPC_PM_CTX_SAVE SOF_CMD_TYPE(0x001)
+#define SOF_IPC_PM_CTX_RESTORE SOF_CMD_TYPE(0x002)
+#define SOF_IPC_PM_CTX_SIZE SOF_CMD_TYPE(0x003)
+#define SOF_IPC_PM_CLK_SET SOF_CMD_TYPE(0x004)
+#define SOF_IPC_PM_CLK_GET SOF_CMD_TYPE(0x005)
+#define SOF_IPC_PM_CLK_REQ SOF_CMD_TYPE(0x006)
+#define SOF_IPC_PM_CORE_ENABLE SOF_CMD_TYPE(0x007)
+
+/* component runtime config - multiple different types */
+#define SOF_IPC_COMP_SET_VALUE SOF_CMD_TYPE(0x001)
+#define SOF_IPC_COMP_GET_VALUE SOF_CMD_TYPE(0x002)
+#define SOF_IPC_COMP_SET_DATA SOF_CMD_TYPE(0x003)
+#define SOF_IPC_COMP_GET_DATA SOF_CMD_TYPE(0x004)
+
+/* DAI messages */
+#define SOF_IPC_DAI_CONFIG SOF_CMD_TYPE(0x001)
+#define SOF_IPC_DAI_LOOPBACK SOF_CMD_TYPE(0x002)
+
+/* stream */
+#define SOF_IPC_STREAM_PCM_PARAMS SOF_CMD_TYPE(0x001)
+#define SOF_IPC_STREAM_PCM_PARAMS_REPLY SOF_CMD_TYPE(0x002)
+#define SOF_IPC_STREAM_PCM_FREE SOF_CMD_TYPE(0x003)
+#define SOF_IPC_STREAM_TRIG_START SOF_CMD_TYPE(0x004)
+#define SOF_IPC_STREAM_TRIG_STOP SOF_CMD_TYPE(0x005)
+#define SOF_IPC_STREAM_TRIG_PAUSE SOF_CMD_TYPE(0x006)
+#define SOF_IPC_STREAM_TRIG_RELEASE SOF_CMD_TYPE(0x007)
+#define SOF_IPC_STREAM_TRIG_DRAIN SOF_CMD_TYPE(0x008)
+#define SOF_IPC_STREAM_TRIG_XRUN SOF_CMD_TYPE(0x009)
+#define SOF_IPC_STREAM_POSITION SOF_CMD_TYPE(0x00a)
+#define SOF_IPC_STREAM_VORBIS_PARAMS SOF_CMD_TYPE(0x010)
+#define SOF_IPC_STREAM_VORBIS_FREE SOF_CMD_TYPE(0x011)
+
+/* trace and debug */
+#define SOF_IPC_TRACE_DMA_PARAMS SOF_CMD_TYPE(0x001)
+#define SOF_IPC_TRACE_DMA_POSITION SOF_CMD_TYPE(0x002)
+
+/* Get message component id */
+#define SOF_IPC_MESSAGE_ID(x) ((x) & 0xffff)
+
+/* maximum message size for mailbox Tx/Rx */
+#define SOF_IPC_MSG_MAX_SIZE 384
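
A complete command word is just the OR of a global type and a command type, with the low 16 bits left for the monotonic ID. A worked example:

/* SOF_GLB_TYPE(0x6U) | SOF_CMD_TYPE(0x004) == 0x60040000 */
uint32_t cmd = SOF_IPC_GLB_STREAM_MSG | SOF_IPC_STREAM_TRIG_START;
uint16_t id = SOF_IPC_MESSAGE_ID(cmd);  /* low 16 bits, 0 until assigned */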
+
+/*
+ * Structure Header - Header for all IPC structures except command structs.
+ * The size can be greater than the structure size and that means there is
+ * extended bespoke data beyond the end of the structure including variable
+ * arrays.
+ */
+
+struct sof_ipc_hdr {
+ uint32_t size; /**< size of structure */
+} __packed;
+
+/*
+ * Command Header - Header for all IPC commands. Identifies IPC message.
+ * The size can be greater than the structure size and that means there is
+ * extended bespoke data beyond the end of the structure including variable
+ * arrays.
+ */
+
+struct sof_ipc_cmd_hdr {
+ uint32_t size; /**< size of structure */
+ uint32_t cmd; /**< SOF_IPC_GLB_ + cmd */
+} __packed;
+
+/*
+ * Generic reply message. Some commands override this with their own reply
+ * types that must include this at start.
+ */
+struct sof_ipc_reply {
+ struct sof_ipc_cmd_hdr hdr;
+ int32_t error; /**< negative error numbers */
+} __packed;
+
+/*
+ * Compound commands - SOF_IPC_GLB_COMPOUND.
+ *
+ * Compound commands are sent to the DSP as a single IPC operation. The
+ * commands are split into blocks and each block has a header. This header
+ * identifies the command type and the number of commands before the next
+ * header.
+ */
+
+struct sof_ipc_compound_hdr {
+ struct sof_ipc_cmd_hdr hdr;
+ uint32_t count; /**< count of 0 means end of compound sequence */
+} __packed;
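
A minimal sketch of starting a compound block, based only on the comment above; that a trailing header with count of 0 ends the sequence is inferred from the count field's description, not stated elsewhere in the patch:

/* Sketch: begin a compound block followed by 'ncmds' commands */
static void compound_block_init(struct sof_ipc_compound_hdr *chdr,
                                uint32_t ncmds)
{
        chdr->hdr.size = sizeof(*chdr);
        chdr->hdr.cmd = SOF_IPC_GLB_COMPOUND;
        chdr->count = ncmds;    /* a later header with count == 0 ends it */
}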
+
+/** @}*/
+
+#endif
diff --git a/include/sound/sof/info.h b/include/sound/sof/info.h
new file mode 100644
index 000000000000..21dae04d8183
--- /dev/null
+++ b/include/sound/sof/info.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_INFO_H__
+#define __INCLUDE_SOUND_SOF_INFO_H__
+
+#include <sound/sof/header.h>
+#include <sound/sof/stream.h>
+
+/*
+ * Firmware boot and version
+ */
+
+#define SOF_IPC_MAX_ELEMS 16
+
+/* extended data types that can be appended onto end of sof_ipc_fw_ready */
+enum sof_ipc_ext_data {
+ SOF_IPC_EXT_DMA_BUFFER = 0,
+ SOF_IPC_EXT_WINDOW,
+};
+
+/* FW version - SOF_IPC_GLB_VERSION */
+struct sof_ipc_fw_version {
+ struct sof_ipc_hdr hdr;
+ uint16_t major;
+ uint16_t minor;
+ uint16_t micro;
+ uint16_t build;
+ uint8_t date[12];
+ uint8_t time[10];
+ uint8_t tag[6];
+ uint32_t abi_version;
+
+ /* reserved for future use */
+ uint32_t reserved[4];
+} __packed;
+
+/* FW ready Message - sent by firmware when boot has completed */
+struct sof_ipc_fw_ready {
+ struct sof_ipc_cmd_hdr hdr;
+ uint32_t dspbox_offset; /* dsp initiated IPC mailbox */
+ uint32_t hostbox_offset; /* host initiated IPC mailbox */
+ uint32_t dspbox_size;
+ uint32_t hostbox_size;
+ struct sof_ipc_fw_version version;
+
+ /* Miscellaneous debug flags showing build/debug features enabled */
+ union {
+ uint64_t reserved;
+ struct {
+ uint64_t build:1;
+ uint64_t locks:1;
+ uint64_t locks_verbose:1;
+ uint64_t gdb:1;
+ } bits;
+ } debug;
+
+ /* reserved for future use */
+ uint32_t reserved[4];
+} __packed;
+
+/*
+ * Extended Firmware data. All optional, depends on platform/arch.
+ */
+enum sof_ipc_region {
+ SOF_IPC_REGION_DOWNBOX = 0,
+ SOF_IPC_REGION_UPBOX,
+ SOF_IPC_REGION_TRACE,
+ SOF_IPC_REGION_DEBUG,
+ SOF_IPC_REGION_STREAM,
+ SOF_IPC_REGION_REGS,
+ SOF_IPC_REGION_EXCEPTION,
+};
+
+struct sof_ipc_ext_data_hdr {
+ struct sof_ipc_cmd_hdr hdr;
+ uint32_t type; /**< SOF_IPC_EXT_ */
+} __packed;
+
+struct sof_ipc_dma_buffer_elem {
+ struct sof_ipc_hdr hdr;
+ uint32_t type; /**< SOF_IPC_REGION_ */
+ uint32_t id; /**< platform specific - used to map to host memory */
+ struct sof_ipc_host_buffer buffer;
+} __packed;
+
+/* extended data DMA buffers for IPC, trace and debug */
+struct sof_ipc_dma_buffer_data {
+ struct sof_ipc_ext_data_hdr ext_hdr;
+ uint32_t num_buffers;
+
+ /* host files in buffer[n].buffer */
+ struct sof_ipc_dma_buffer_elem buffer[];
+} __packed;
+
+struct sof_ipc_window_elem {
+ struct sof_ipc_hdr hdr;
+ uint32_t type; /**< SOF_IPC_REGION_ */
+ uint32_t id; /**< platform specific - used to map to host memory */
+ uint32_t flags; /**< R, W, RW, etc - to define */
+ uint32_t size; /**< size of region in bytes */
+ /* offset in window region as windows can be partitioned */
+ uint32_t offset;
+} __packed;
+
+/* extended data memory windows for IPC, trace and debug */
+struct sof_ipc_window {
+ struct sof_ipc_ext_data_hdr ext_hdr;
+ uint32_t num_windows;
+ struct sof_ipc_window_elem window[];
+} __packed;
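
A hedged sketch of walking the extended data appended after sof_ipc_fw_ready. That the blocks start immediately after hdr.size bytes and that each extended header's hdr.size covers its whole block are assumptions, not stated in the patch:

#include <linux/types.h>

/* Sketch: walk ext data blocks assumed to start at ready->hdr.size bytes */
static void parse_fw_ready_ext(const struct sof_ipc_fw_ready *ready,
                               size_t total)
{
        const uint8_t *p = (const uint8_t *)ready + ready->hdr.size;
        const uint8_t *end = (const uint8_t *)ready + total;

        while (p + sizeof(struct sof_ipc_ext_data_hdr) <= end) {
                const struct sof_ipc_ext_data_hdr *ext = (const void *)p;

                switch (ext->type) {
                case SOF_IPC_EXT_WINDOW:
                        /* block is a struct sof_ipc_window */
                        break;
                case SOF_IPC_EXT_DMA_BUFFER:
                        /* block is a struct sof_ipc_dma_buffer_data */
                        break;
                }

                if (!ext->hdr.size)
                        break;          /* malformed, stop */
                p += ext->hdr.size;
        }
}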
+
+#endif
diff --git a/include/sound/sof/pm.h b/include/sound/sof/pm.h
new file mode 100644
index 000000000000..8ae3ad45bdf7
--- /dev/null
+++ b/include/sound/sof/pm.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_PM_H__
+#define __INCLUDE_SOUND_SOF_PM_H__
+
+#include <sound/sof/header.h>
+
+/*
+ * PM
+ */
+
+/* PM context element */
+struct sof_ipc_pm_ctx_elem {
+ struct sof_ipc_hdr hdr;
+ uint32_t type;
+ uint32_t size;
+ uint64_t addr;
+} __packed;
+
+/*
+ * PM context - SOF_IPC_PM_CTX_SAVE, SOF_IPC_PM_CTX_RESTORE,
+ * SOF_IPC_PM_CTX_SIZE
+ */
+struct sof_ipc_pm_ctx {
+ struct sof_ipc_cmd_hdr hdr;
+ struct sof_ipc_host_buffer buffer;
+ uint32_t num_elems;
+ uint32_t size;
+
+ /* reserved for future use */
+ uint32_t reserved[8];
+
+ struct sof_ipc_pm_ctx_elem elems[];
+} __packed;
+
+/* enable or disable cores - SOF_IPC_PM_CORE_ENABLE */
+struct sof_ipc_pm_core_config {
+ struct sof_ipc_cmd_hdr hdr;
+ uint32_t enable_mask;
+} __packed;
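
Sketch of an SOF_IPC_PM_CORE_ENABLE message; a one-bit-per-core layout for enable_mask is assumed:

/* Sketch: leave only core 0 enabled (bit-per-core layout assumed) */
static void pm_enable_core0(struct sof_ipc_pm_core_config *cfg)
{
        cfg->hdr.size = sizeof(*cfg);
        cfg->hdr.cmd = SOF_IPC_GLB_PM_MSG | SOF_IPC_PM_CORE_ENABLE;
        cfg->enable_mask = 0x1;
}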
+
+#endif
diff --git a/include/sound/sof/stream.h b/include/sound/sof/stream.h
new file mode 100644
index 000000000000..643f175cb479
--- /dev/null
+++ b/include/sound/sof/stream.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_STREAM_H__
+#define __INCLUDE_SOUND_SOF_STREAM_H__
+
+#include <sound/sof/header.h>
+
+/*
+ * Stream configuration.
+ */
+
+#define SOF_IPC_MAX_CHANNELS 8
+
+/* common sample rates for use in masks */
+#define SOF_RATE_8000 (1 << 0) /**< 8000Hz */
+#define SOF_RATE_11025 (1 << 1) /**< 11025Hz */
+#define SOF_RATE_12000 (1 << 2) /**< 12000Hz */
+#define SOF_RATE_16000 (1 << 3) /**< 16000Hz */
+#define SOF_RATE_22050 (1 << 4) /**< 22050Hz */
+#define SOF_RATE_24000 (1 << 5) /**< 24000Hz */
+#define SOF_RATE_32000 (1 << 6) /**< 32000Hz */
+#define SOF_RATE_44100 (1 << 7) /**< 44100Hz */
+#define SOF_RATE_48000 (1 << 8) /**< 48000Hz */
+#define SOF_RATE_64000 (1 << 9) /**< 64000Hz */
+#define SOF_RATE_88200 (1 << 10) /**< 88200Hz */
+#define SOF_RATE_96000 (1 << 11) /**< 96000Hz */
+#define SOF_RATE_176400 (1 << 12) /**< 176400Hz */
+#define SOF_RATE_192000 (1 << 13) /**< 192000Hz */
+
+/* continuous and non-standard rates for flexibility */
+#define SOF_RATE_CONTINUOUS (1 << 30) /**< range */
+#define SOF_RATE_KNOT (1 << 31) /**< non-continuous */
+
+/* generic PCM flags for runtime settings */
+#define SOF_PCM_FLAG_XRUN_STOP (1 << 0) /**< Stop on any XRUN */
+
+/* stream PCM frame format */
+enum sof_ipc_frame {
+ SOF_IPC_FRAME_S16_LE = 0,
+ SOF_IPC_FRAME_S24_4LE,
+ SOF_IPC_FRAME_S32_LE,
+ SOF_IPC_FRAME_FLOAT,
+ /* other formats here */
+};
+
+/* stream buffer format */
+enum sof_ipc_buffer_format {
+ SOF_IPC_BUFFER_INTERLEAVED,
+ SOF_IPC_BUFFER_NONINTERLEAVED,
+ /* other formats here */
+};
+
+/* stream direction */
+enum sof_ipc_stream_direction {
+ SOF_IPC_STREAM_PLAYBACK = 0,
+ SOF_IPC_STREAM_CAPTURE,
+};
+
+/* stream ring info */
+struct sof_ipc_host_buffer {
+ struct sof_ipc_hdr hdr;
+ uint32_t phy_addr;
+ uint32_t pages;
+ uint32_t size;
+ uint32_t reserved[3];
+} __packed;
+
+struct sof_ipc_stream_params {
+ struct sof_ipc_hdr hdr;
+ struct sof_ipc_host_buffer buffer;
+ uint32_t direction; /**< enum sof_ipc_stream_direction */
+ uint32_t frame_fmt; /**< enum sof_ipc_frame */
+ uint32_t buffer_fmt; /**< enum sof_ipc_buffer_format */
+ uint32_t rate;
+ uint16_t stream_tag;
+ uint16_t channels;
+ uint16_t sample_valid_bytes;
+ uint16_t sample_container_bytes;
+
+ /* for notifying host period has completed - 0 means no period IRQ */
+ uint32_t host_period_bytes;
+
+ uint32_t reserved[2];
+ uint16_t chmap[SOF_IPC_MAX_CHANNELS]; /**< channel map - SOF_CHMAP_ */
+} __packed;
+
+/* PCM params info - SOF_IPC_STREAM_PCM_PARAMS */
+struct sof_ipc_pcm_params {
+ struct sof_ipc_cmd_hdr hdr;
+ uint32_t comp_id;
+ uint32_t flags; /**< generic PCM flags - SOF_PCM_FLAG_ */
+ uint32_t reserved[2];
+ struct sof_ipc_stream_params params;
+} __packed;
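
A sketch of filling the params request for a simple 48 kHz stereo playback stream; the helper name and all values are illustrative only:

#include <linux/string.h>

/* Sketch: 48 kHz, 2 ch, S32_LE playback; values illustrative */
static void pcm_params_48k_stereo(struct sof_ipc_pcm_params *pcm,
                                  uint32_t comp_id)
{
        memset(pcm, 0, sizeof(*pcm));
        pcm->hdr.size = sizeof(*pcm);
        pcm->hdr.cmd = SOF_IPC_GLB_STREAM_MSG | SOF_IPC_STREAM_PCM_PARAMS;
        pcm->comp_id = comp_id;

        pcm->params.hdr.size = sizeof(pcm->params);
        pcm->params.direction = SOF_IPC_STREAM_PLAYBACK;
        pcm->params.frame_fmt = SOF_IPC_FRAME_S32_LE;
        pcm->params.buffer_fmt = SOF_IPC_BUFFER_INTERLEAVED;
        pcm->params.rate = 48000;
        pcm->params.channels = 2;
        pcm->params.sample_valid_bytes = 4;
        pcm->params.sample_container_bytes = 4;
}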
+
+/* PCM params info reply - SOF_IPC_STREAM_PCM_PARAMS_REPLY */
+struct sof_ipc_pcm_params_reply {
+ struct sof_ipc_reply rhdr;
+ uint32_t comp_id;
+ uint32_t posn_offset;
+} __packed;
+
+/* free stream - SOF_IPC_STREAM_PCM_FREE */
+struct sof_ipc_stream {
+ struct sof_ipc_cmd_hdr hdr;
+ uint32_t comp_id;
+} __packed;
+
+/* flags indicating which time stamps are in sync with each other */
+#define SOF_TIME_HOST_SYNC (1 << 0)
+#define SOF_TIME_DAI_SYNC (1 << 1)
+#define SOF_TIME_WALL_SYNC (1 << 2)
+#define SOF_TIME_STAMP_SYNC (1 << 3)
+
+/* flags indicating which time stamps are valid */
+#define SOF_TIME_HOST_VALID (1 << 8)
+#define SOF_TIME_DAI_VALID (1 << 9)
+#define SOF_TIME_WALL_VALID (1 << 10)
+#define SOF_TIME_STAMP_VALID (1 << 11)
+
+/* flags indicating time stamps are 64bit else use low 32bit */
+#define SOF_TIME_HOST_64 (1 << 16)
+#define SOF_TIME_DAI_64 (1 << 17)
+#define SOF_TIME_WALL_64 (1 << 18)
+#define SOF_TIME_STAMP_64 (1 << 19)
+
+struct sof_ipc_stream_posn {
+ struct sof_ipc_reply rhdr;
+ uint32_t comp_id; /**< host component ID */
+ uint32_t flags; /**< SOF_TIME_ */
+ uint32_t wallclock_hz; /**< frequency of wallclock in Hz */
+ uint32_t timestamp_ns; /**< resolution of timestamp in ns */
+ uint64_t host_posn; /**< host DMA position in bytes */
+ uint64_t dai_posn; /**< DAI DMA position in bytes */
+ uint64_t comp_posn; /**< comp position in bytes */
+ uint64_t wallclock; /**< audio wall clock */
+ uint64_t timestamp; /**< system time stamp */
+ uint32_t xrun_comp_id; /**< comp ID of XRUN component */
+ int32_t xrun_size; /**< XRUN size in bytes */
+} __packed;
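
Sketch of consuming a position reply: a position or timestamp field should only be used when its _VALID flag is set, and the _64 flag says whether the full 64 bits are meaningful:

#include <linux/types.h>

/* Sketch: use the host position only when it is flagged valid */
static bool host_posn_valid(const struct sof_ipc_stream_posn *posn,
                            uint64_t *out)
{
        if (!(posn->flags & SOF_TIME_HOST_VALID))
                return false;

        /* SOF_TIME_HOST_64 clear means only the low 32 bits are meaningful */
        *out = (posn->flags & SOF_TIME_HOST_64) ?
                posn->host_posn : (uint32_t)posn->host_posn;
        return true;
}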
+
+#endif
diff --git a/include/sound/sof/topology.h b/include/sound/sof/topology.h
new file mode 100644
index 000000000000..46b2a7e63167
--- /dev/null
+++ b/include/sound/sof/topology.h
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_TOPOLOGY_H__
+#define __INCLUDE_SOUND_SOF_TOPOLOGY_H__
+
+#include <sound/sof/header.h>
+
+/*
+ * Component
+ */
+
+/* types of component */
+enum sof_comp_type {
+ SOF_COMP_NONE = 0,
+ SOF_COMP_HOST,
+ SOF_COMP_DAI,
+ SOF_COMP_SG_HOST, /**< scatter gather variant */
+ SOF_COMP_SG_DAI, /**< scatter gather variant */
+ SOF_COMP_VOLUME,
+ SOF_COMP_MIXER,
+ SOF_COMP_MUX,
+ SOF_COMP_SRC,
+ SOF_COMP_SPLITTER,
+ SOF_COMP_TONE,
+ SOF_COMP_SWITCH,
+ SOF_COMP_BUFFER,
+ SOF_COMP_EQ_IIR,
+ SOF_COMP_EQ_FIR,
+ SOF_COMP_KEYWORD_DETECT,
+ SOF_COMP_KPB, /* A key phrase buffer component */
+ SOF_COMP_SELECTOR, /**< channel selector component */
+ /* keep FILEREAD/FILEWRITE as the last ones */
+ SOF_COMP_FILEREAD = 10000, /**< host test based file IO */
+ SOF_COMP_FILEWRITE = 10001, /**< host test based file IO */
+};
+
+/* XRUN action for component */
+#define SOF_XRUN_STOP 1 /**< stop stream */
+#define SOF_XRUN_UNDER_ZERO 2 /**< send 0s to sink */
+#define SOF_XRUN_OVER_NULL 4 /**< send data to NULL */
+
+/* create new generic component - SOF_IPC_TPLG_COMP_NEW */
+struct sof_ipc_comp {
+ struct sof_ipc_cmd_hdr hdr;
+ uint32_t id;
+ enum sof_comp_type type;
+ uint32_t pipeline_id;
+
+ /* reserved for future use */
+ uint32_t reserved[2];
+} __packed;
+
+/*
+ * Component Buffers
+ */
+
+/*
+ * SOF memory capabilities, add new ones at the end
+ */
+#define SOF_MEM_CAPS_RAM (1 << 0)
+#define SOF_MEM_CAPS_ROM (1 << 1)
+#define SOF_MEM_CAPS_EXT (1 << 2) /**< external */
+#define SOF_MEM_CAPS_LP (1 << 3) /**< low power */
+#define SOF_MEM_CAPS_HP (1 << 4) /**< high performance */
+#define SOF_MEM_CAPS_DMA (1 << 5) /**< DMA'able */
+#define SOF_MEM_CAPS_CACHE (1 << 6) /**< cacheable */
+#define SOF_MEM_CAPS_EXEC (1 << 7) /**< executable */
+
+/* create new component buffer - SOF_IPC_TPLG_BUFFER_NEW */
+struct sof_ipc_buffer {
+ struct sof_ipc_comp comp;
+ uint32_t size; /**< buffer size in bytes */
+ uint32_t caps; /**< SOF_MEM_CAPS_ */
+} __packed;
+
+/* generic component config data - must always be after struct sof_ipc_comp */
+struct sof_ipc_comp_config {
+ struct sof_ipc_cmd_hdr hdr;
+ uint32_t periods_sink; /**< 0 means variable */
+ uint32_t periods_source; /**< 0 means variable */
+ uint32_t reserved1; /**< reserved */
+ uint32_t frame_fmt; /**< SOF_IPC_FRAME_ */
+ uint32_t xrun_action;
+
+ /* reserved for future use */
+ uint32_t reserved[2];
+} __packed;
+
+/* generic host component */
+struct sof_ipc_comp_host {
+ struct sof_ipc_comp comp;
+ struct sof_ipc_comp_config config;
+ uint32_t direction; /**< SOF_IPC_STREAM_ */
+ uint32_t no_irq; /**< don't send periodic IRQ to host/DSP */
+ uint32_t dmac_config; /**< DMA engine specific */
+} __packed;
+
+/* generic DAI component */
+struct sof_ipc_comp_dai {
+ struct sof_ipc_comp comp;
+ struct sof_ipc_comp_config config;
+ uint32_t direction; /**< SOF_IPC_STREAM_ */
+ uint32_t dai_index; /**< index of this type dai */
+ uint32_t type; /**< DAI type - SOF_DAI_ */
+ uint32_t reserved; /**< reserved */
+} __packed;
+
+/* generic mixer component */
+struct sof_ipc_comp_mixer {
+ struct sof_ipc_comp comp;
+ struct sof_ipc_comp_config config;
+} __packed;
+
+/* volume ramping types */
+enum sof_volume_ramp {
+ SOF_VOLUME_LINEAR = 0,
+ SOF_VOLUME_LOG,
+ SOF_VOLUME_LINEAR_ZC,
+ SOF_VOLUME_LOG_ZC,
+};
+
+/* generic volume component */
+struct sof_ipc_comp_volume {
+ struct sof_ipc_comp comp;
+ struct sof_ipc_comp_config config;
+ uint32_t channels;
+ uint32_t min_value;
+ uint32_t max_value;
+ uint32_t ramp; /**< SOF_VOLUME_ */
+ uint32_t initial_ramp; /**< ramp space in ms */
+} __packed;
+
+/* generic SRC component */
+struct sof_ipc_comp_src {
+ struct sof_ipc_comp comp;
+ struct sof_ipc_comp_config config;
+ /* either source or sink rate must be non zero */
+ uint32_t source_rate; /**< source rate or 0 for variable */
+ uint32_t sink_rate; /**< sink rate or 0 for variable */
+ uint32_t rate_mask; /**< SOF_RATE_ supported rates */
+} __packed;
+
+/* generic MUX component */
+struct sof_ipc_comp_mux {
+ struct sof_ipc_comp comp;
+ struct sof_ipc_comp_config config;
+} __packed;
+
+/* generic tone generator component */
+struct sof_ipc_comp_tone {
+ struct sof_ipc_comp comp;
+ struct sof_ipc_comp_config config;
+ int32_t sample_rate;
+ int32_t frequency;
+ int32_t amplitude;
+ int32_t freq_mult;
+ int32_t ampl_mult;
+ int32_t length;
+ int32_t period;
+ int32_t repeats;
+ int32_t ramp_step;
+} __packed;
+
+/** \brief Types of processing components */
+enum sof_ipc_process_type {
+ SOF_PROCESS_NONE = 0, /**< None */
+ SOF_PROCESS_EQFIR, /**< Intel FIR */
+ SOF_PROCESS_EQIIR, /**< Intel IIR */
+ SOF_PROCESS_KEYWORD_DETECT, /**< Keyword Detection */
+ SOF_PROCESS_KPB, /**< KeyPhrase Buffer Manager */
+ SOF_PROCESS_CHAN_SELECTOR, /**< Channel Selector */
+};
+
+/* generic "effect", "codec" or proprietary processing component */
+struct sof_ipc_comp_process {
+ struct sof_ipc_comp comp;
+ struct sof_ipc_comp_config config;
+ uint32_t size; /**< size of bespoke data section in bytes */
+ uint32_t type; /**< sof_ipc_process_type */
+
+ /* reserved for future use */
+ uint32_t reserved[7];
+
+ unsigned char data[0];
+} __packed;
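
Since data[] is a variable-length trailer, comp.hdr.size and size must cover the bespoke bytes. A sketch; the helper name, the component choice and the use of kzalloc are illustrative, not from the patch:

#include <linux/slab.h>

/* Sketch: allocate a process component with priv_size bespoke bytes */
static struct sof_ipc_comp_process *process_comp_alloc(size_t priv_size)
{
        struct sof_ipc_comp_process *proc;

        proc = kzalloc(sizeof(*proc) + priv_size, GFP_KERNEL);
        if (!proc)
                return NULL;

        proc->comp.hdr.size = sizeof(*proc) + priv_size;
        proc->comp.hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_COMP_NEW;
        proc->comp.type = SOF_COMP_EQ_IIR;      /* illustrative choice */
        proc->type = SOF_PROCESS_EQIIR;
        proc->size = priv_size;
        return proc;
}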
+
+/* frees components, buffers and pipelines
+ * SOF_IPC_TPLG_COMP_FREE, SOF_IPC_TPLG_PIPE_FREE, SOF_IPC_TPLG_BUFFER_FREE
+ */
+struct sof_ipc_free {
+ struct sof_ipc_cmd_hdr hdr;
+ uint32_t id;
+} __packed;
+
+struct sof_ipc_comp_reply {
+ struct sof_ipc_reply rhdr;
+ uint32_t id;
+ uint32_t offset;
+} __packed;
+
+/*
+ * Pipeline
+ */
+
+/** \brief Types of pipeline scheduling time domains */
+enum sof_ipc_pipe_sched_time_domain {
+ SOF_TIME_DOMAIN_DMA = 0, /**< DMA interrupt */
+ SOF_TIME_DOMAIN_TIMER, /**< Timer interrupt */
+};
+
+/* new pipeline - SOF_IPC_TPLG_PIPE_NEW */
+struct sof_ipc_pipe_new {
+ struct sof_ipc_cmd_hdr hdr;
+ uint32_t comp_id; /**< component id for pipeline */
+ uint32_t pipeline_id; /**< pipeline id */
+ uint32_t sched_id; /**< Scheduling component id */
+ uint32_t core; /**< core we run on */
+ uint32_t period; /**< execution period in us*/
+ uint32_t priority; /**< priority level 0 (low) to 10 (max) */
+ uint32_t period_mips; /**< worst case instruction count per period */
+ uint32_t frames_per_sched;/**< output frames of pipeline, 0 is variable */
+ uint32_t xrun_limit_usecs; /**< report xruns greater than limit */
+ uint32_t time_domain; /**< scheduling time domain */
+} __packed;
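
A sketch of a timer-scheduled 1 ms pipeline request; the IDs and period are illustrative only:

#include <linux/string.h>

/* Sketch: timer-scheduled pipeline with a 1 ms period; IDs illustrative */
static void pipe_new_1ms(struct sof_ipc_pipe_new *pipe)
{
        memset(pipe, 0, sizeof(*pipe));
        pipe->hdr.size = sizeof(*pipe);
        pipe->hdr.cmd = SOF_IPC_GLB_TPLG_MSG | SOF_IPC_TPLG_PIPE_NEW;
        pipe->comp_id = 1;
        pipe->pipeline_id = 1;
        pipe->sched_id = 1;
        pipe->core = 0;
        pipe->period = 1000;            /* us */
        pipe->priority = 0;
        pipe->time_domain = SOF_TIME_DOMAIN_TIMER;
}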
+
+/* pipeline construction complete - SOF_IPC_TPLG_PIPE_COMPLETE */
+struct sof_ipc_pipe_ready {
+ struct sof_ipc_cmd_hdr hdr;
+ uint32_t comp_id;
+} __packed;
+
+struct sof_ipc_pipe_free {
+ struct sof_ipc_cmd_hdr hdr;
+ uint32_t comp_id;
+} __packed;
+
+/* connect two components in pipeline - SOF_IPC_TPLG_COMP_CONNECT */
+struct sof_ipc_pipe_comp_connect {
+ struct sof_ipc_cmd_hdr hdr;
+ uint32_t source_id;
+ uint32_t sink_id;
+} __packed;
+
+/* external events */
+enum sof_event_types {
+ SOF_EVENT_NONE = 0,
+ SOF_KEYWORD_DETECT_DAPM_EVENT,
+};
+
+#endif
diff --git a/include/sound/sof/trace.h b/include/sound/sof/trace.h
new file mode 100644
index 000000000000..7d211f319a92
--- /dev/null
+++ b/include/sound/sof/trace.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_TRACE_H__
+#define __INCLUDE_SOUND_SOF_TRACE_H__
+
+#include <sound/sof/header.h>
+#include <sound/sof/stream.h>
+
+/*
+ * DMA for Trace
+ */
+
+#define SOF_TRACE_FILENAME_SIZE 32
+
+/* DMA for Trace params info - SOF_IPC_TRACE_DMA_PARAMS */
+struct sof_ipc_dma_trace_params {
+ struct sof_ipc_cmd_hdr hdr;
+ struct sof_ipc_host_buffer buffer;
+ uint32_t stream_tag;
+} __packed;
+
+/* DMA for Trace position info - SOF_IPC_TRACE_DMA_POSITION */
+struct sof_ipc_dma_trace_posn {
+ struct sof_ipc_reply rhdr;
+ uint32_t host_offset; /* Offset of DMA host buffer */
+ uint32_t overflow; /* overflow bytes if any */
+ uint32_t messages; /* total trace messages */
+} __packed;
+
+/*
+ * Common debug
+ */
+
+/*
+ * SOF panic codes
+ */
+#define SOF_IPC_PANIC_MAGIC 0x0dead000
+#define SOF_IPC_PANIC_MAGIC_MASK 0x0ffff000
+#define SOF_IPC_PANIC_CODE_MASK 0x00000fff
+#define SOF_IPC_PANIC_MEM (SOF_IPC_PANIC_MAGIC | 0x0)
+#define SOF_IPC_PANIC_WORK (SOF_IPC_PANIC_MAGIC | 0x1)
+#define SOF_IPC_PANIC_IPC (SOF_IPC_PANIC_MAGIC | 0x2)
+#define SOF_IPC_PANIC_ARCH (SOF_IPC_PANIC_MAGIC | 0x3)
+#define SOF_IPC_PANIC_PLATFORM (SOF_IPC_PANIC_MAGIC | 0x4)
+#define SOF_IPC_PANIC_TASK (SOF_IPC_PANIC_MAGIC | 0x5)
+#define SOF_IPC_PANIC_EXCEPTION (SOF_IPC_PANIC_MAGIC | 0x6)
+#define SOF_IPC_PANIC_DEADLOCK (SOF_IPC_PANIC_MAGIC | 0x7)
+#define SOF_IPC_PANIC_STACK (SOF_IPC_PANIC_MAGIC | 0x8)
+#define SOF_IPC_PANIC_IDLE (SOF_IPC_PANIC_MAGIC | 0x9)
+#define SOF_IPC_PANIC_WFI (SOF_IPC_PANIC_MAGIC | 0xa)
+#define SOF_IPC_PANIC_ASSERT (SOF_IPC_PANIC_MAGIC | 0xb)
+
+/* panic info include filename and line number */
+struct sof_ipc_panic_info {
+ struct sof_ipc_hdr hdr;
+ uint32_t code; /* SOF_IPC_PANIC_ */
+ char filename[SOF_TRACE_FILENAME_SIZE];
+ uint32_t linenum;
+} __packed;
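
Sketch of decoding a panic word using the masks above:

/* Sketch: detect the panic magic and pull out the code bits */
static int sof_panic_code(uint32_t val, uint32_t *code)
{
        if ((val & SOF_IPC_PANIC_MAGIC_MASK) != SOF_IPC_PANIC_MAGIC)
                return -1;              /* not a panic word */

        *code = val & SOF_IPC_PANIC_CODE_MASK;  /* e.g. 0x6 == EXCEPTION */
        return 0;
}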
+
+#endif
diff --git a/include/sound/sof/xtensa.h b/include/sound/sof/xtensa.h
new file mode 100644
index 000000000000..a7189984000d
--- /dev/null
+++ b/include/sound/sof/xtensa.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_XTENSA_H__
+#define __INCLUDE_SOUND_SOF_XTENSA_H__
+
+#include <sound/sof/header.h>
+
+/*
+ * Architecture specific debug
+ */
+
+/* Xtensa Firmware Oops data */
+struct sof_ipc_dsp_oops_xtensa {
+ struct sof_ipc_hdr hdr;
+ uint32_t exccause;
+ uint32_t excvaddr;
+ uint32_t ps;
+ uint32_t epc1;
+ uint32_t epc2;
+ uint32_t epc3;
+ uint32_t epc4;
+ uint32_t epc5;
+ uint32_t epc6;
+ uint32_t epc7;
+ uint32_t eps2;
+ uint32_t eps3;
+ uint32_t eps4;
+ uint32_t eps5;
+ uint32_t eps6;
+ uint32_t eps7;
+ uint32_t depc;
+ uint32_t intenable;
+ uint32_t interrupt;
+ uint32_t sar;
+ uint32_t stack;
+} __packed;
+
+#endif
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 24c398f4a68f..a49d37140a64 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -473,6 +473,7 @@ struct iscsi_cmd {
struct timer_list dataout_timer;
/* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
struct kvec *iov_data;
+ void *overflow_buf;
/* Iovecs for miscellaneous purposes */
#define ISCSI_MISC_IOVECS 5
struct kvec iov_misc[ISCSI_MISC_IOVECS];
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 19a5bf4214fc..7c9716fe731e 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -795,8 +795,8 @@ struct se_device {
spinlock_t se_tmr_lock;
spinlock_t qf_cmd_lock;
struct semaphore caw_sem;
- /* Used for legacy SPC-2 reservationsa */
- struct se_node_acl *dev_reserved_node_acl;
+ /* Used for legacy SPC-2 reservations */
+ struct se_session *reservation_holder;
/* Used for ALUA Logical Unit Group membership */
struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem;
/* Used for SPC-3 Persistent Reservations */
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 8ed90407f062..063f133e47c2 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -142,6 +142,7 @@ void transport_register_session(struct se_portal_group *,
struct se_node_acl *, struct se_session *, void *);
ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *);
void transport_free_session(struct se_session *);
+void target_spc2_release(struct se_node_acl *nacl);
void target_put_nacl(struct se_node_acl *);
void transport_deregister_session_configfs(struct se_session *);
void transport_deregister_session(struct se_session *);
diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h
index 505dae0bed80..d6e556c0a085 100644
--- a/include/trace/bpf_probe.h
+++ b/include/trace/bpf_probe.h
@@ -69,8 +69,7 @@ __bpf_trace_##call(void *__data, proto) \
* to make sure that if the tracepoint handling changes, the
* bpf probe will fail to compile unless it too is updated.
*/
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
+#define __DEFINE_EVENT(template, call, proto, args, size) \
static inline void bpf_test_probe_##call(void) \
{ \
check_trace_callback_type_##call(__bpf_trace_##template); \
@@ -81,12 +80,36 @@ __bpf_trace_tp_map_##call = { \
.tp = &__tracepoint_##call, \
.bpf_func = (void *)__bpf_trace_##template, \
.num_args = COUNT_ARGS(args), \
+ .writable_size = size, \
};
+#define FIRST(x, ...) x
+
+#undef DEFINE_EVENT_WRITABLE
+#define DEFINE_EVENT_WRITABLE(template, call, proto, args, size) \
+static inline void bpf_test_buffer_##call(void) \
+{ \
+ /* BUILD_BUG_ON() is ignored if the code is completely eliminated, but \
+ * BUILD_BUG_ON_ZERO() uses a different mechanism that is not \
+ * dead-code-eliminated. \
+ */ \
+ FIRST(proto); \
+ (void)BUILD_BUG_ON_ZERO(size != sizeof(*FIRST(args))); \
+} \
+__DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size)
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+ __DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), 0)
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef DEFINE_EVENT_WRITABLE
+#undef __DEFINE_EVENT
+#undef FIRST
+
#endif /* CONFIG_BPF_EVENTS */
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index e3f005eae1f7..562f854ac4bf 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -33,8 +33,10 @@ enum afs_call_trace {
enum afs_fs_operation {
afs_FS_FetchData = 130, /* AFS Fetch file data */
+ afs_FS_FetchACL = 131, /* AFS Fetch file ACL */
afs_FS_FetchStatus = 132, /* AFS Fetch file status */
afs_FS_StoreData = 133, /* AFS Store file data */
+ afs_FS_StoreACL = 134, /* AFS Store file ACL */
afs_FS_StoreStatus = 135, /* AFS Store file status */
afs_FS_RemoveFile = 136, /* AFS Remove a file */
afs_FS_CreateFile = 137, /* AFS Create a file */
@@ -108,8 +110,12 @@ enum afs_edit_dir_reason {
afs_edit_dir_for_create,
afs_edit_dir_for_link,
afs_edit_dir_for_mkdir,
- afs_edit_dir_for_rename,
+ afs_edit_dir_for_rename_0,
+ afs_edit_dir_for_rename_1,
+ afs_edit_dir_for_rename_2,
afs_edit_dir_for_rmdir,
+ afs_edit_dir_for_silly_0,
+ afs_edit_dir_for_silly_1,
afs_edit_dir_for_symlink,
afs_edit_dir_for_unlink,
};
@@ -152,6 +158,43 @@ enum afs_file_error {
afs_file_error_writeback_fail,
};
+enum afs_flock_event {
+ afs_flock_acquired,
+ afs_flock_callback_break,
+ afs_flock_defer_unlock,
+ afs_flock_extend_fail,
+ afs_flock_fail_other,
+ afs_flock_fail_perm,
+ afs_flock_no_lockers,
+ afs_flock_release_fail,
+ afs_flock_silly_delete,
+ afs_flock_timestamp,
+ afs_flock_try_to_lock,
+ afs_flock_vfs_lock,
+ afs_flock_vfs_locking,
+ afs_flock_waited,
+ afs_flock_waiting,
+ afs_flock_work_extending,
+ afs_flock_work_retry,
+ afs_flock_work_unlocking,
+ afs_flock_would_block,
+};
+
+enum afs_flock_operation {
+ afs_flock_op_copy_lock,
+ afs_flock_op_flock,
+ afs_flock_op_grant,
+ afs_flock_op_lock,
+ afs_flock_op_release_lock,
+ afs_flock_op_return_ok,
+ afs_flock_op_return_eagain,
+ afs_flock_op_return_edeadlk,
+ afs_flock_op_return_error,
+ afs_flock_op_set_lock,
+ afs_flock_op_unlock,
+ afs_flock_op_wake,
+};
+
#endif /* end __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY */
/*
@@ -237,8 +280,12 @@ enum afs_file_error {
EM(afs_edit_dir_for_create, "Create") \
EM(afs_edit_dir_for_link, "Link ") \
EM(afs_edit_dir_for_mkdir, "MkDir ") \
- EM(afs_edit_dir_for_rename, "Rename") \
+ EM(afs_edit_dir_for_rename_0, "Renam0") \
+ EM(afs_edit_dir_for_rename_1, "Renam1") \
+ EM(afs_edit_dir_for_rename_2, "Renam2") \
EM(afs_edit_dir_for_rmdir, "RmDir ") \
+ EM(afs_edit_dir_for_silly_0, "S_Ren0") \
+ EM(afs_edit_dir_for_silly_1, "S_Ren1") \
EM(afs_edit_dir_for_symlink, "Symlnk") \
E_(afs_edit_dir_for_unlink, "Unlink")
@@ -277,6 +324,56 @@ enum afs_file_error {
EM(afs_file_error_mntpt, "MNTPT_READ_FAILED") \
E_(afs_file_error_writeback_fail, "WRITEBACK_FAILED")
+#define afs_flock_types \
+ EM(F_RDLCK, "RDLCK") \
+ EM(F_WRLCK, "WRLCK") \
+ E_(F_UNLCK, "UNLCK")
+
+#define afs_flock_states \
+ EM(AFS_VNODE_LOCK_NONE, "NONE") \
+ EM(AFS_VNODE_LOCK_WAITING_FOR_CB, "WAIT_FOR_CB") \
+ EM(AFS_VNODE_LOCK_SETTING, "SETTING") \
+ EM(AFS_VNODE_LOCK_GRANTED, "GRANTED") \
+ EM(AFS_VNODE_LOCK_EXTENDING, "EXTENDING") \
+ EM(AFS_VNODE_LOCK_NEED_UNLOCK, "NEED_UNLOCK") \
+ EM(AFS_VNODE_LOCK_UNLOCKING, "UNLOCKING") \
+ E_(AFS_VNODE_LOCK_DELETED, "DELETED")
+
+#define afs_flock_events \
+ EM(afs_flock_acquired, "Acquired") \
+ EM(afs_flock_callback_break, "Callback") \
+ EM(afs_flock_defer_unlock, "D-Unlock") \
+ EM(afs_flock_extend_fail, "Ext_Fail") \
+ EM(afs_flock_fail_other, "ErrOther") \
+ EM(afs_flock_fail_perm, "ErrPerm ") \
+ EM(afs_flock_no_lockers, "NoLocker") \
+ EM(afs_flock_release_fail, "Rel_Fail") \
+ EM(afs_flock_silly_delete, "SillyDel") \
+ EM(afs_flock_timestamp, "Timestmp") \
+ EM(afs_flock_try_to_lock, "TryToLck") \
+ EM(afs_flock_vfs_lock, "VFSLock ") \
+ EM(afs_flock_vfs_locking, "VFSLking") \
+ EM(afs_flock_waited, "Waited ") \
+ EM(afs_flock_waiting, "Waiting ") \
+ EM(afs_flock_work_extending, "Extendng") \
+ EM(afs_flock_work_retry, "Retry ") \
+ EM(afs_flock_work_unlocking, "Unlcking") \
+ E_(afs_flock_would_block, "EWOULDBL")
+
+#define afs_flock_operations \
+ EM(afs_flock_op_copy_lock, "COPY ") \
+ EM(afs_flock_op_flock, "->flock ") \
+ EM(afs_flock_op_grant, "GRANT ") \
+ EM(afs_flock_op_lock, "->lock ") \
+ EM(afs_flock_op_release_lock, "RELEASE ") \
+ EM(afs_flock_op_return_ok, "<-OK ") \
+ EM(afs_flock_op_return_edeadlk, "<-EDEADL") \
+ EM(afs_flock_op_return_eagain, "<-EAGAIN") \
+ EM(afs_flock_op_return_error, "<-ERROR ") \
+ EM(afs_flock_op_set_lock, "SET ") \
+ EM(afs_flock_op_unlock, "UNLOCK ") \
+ E_(afs_flock_op_wake, "WAKE ")
+
/*
* Export enum symbols via userspace.
*/
@@ -293,6 +390,8 @@ afs_edit_dir_reasons;
afs_eproto_causes;
afs_io_errors;
afs_file_errors;
+afs_flock_types;
+afs_flock_operations;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -442,6 +541,123 @@ TRACE_EVENT(afs_make_fs_call,
__print_symbolic(__entry->op, afs_fs_operations))
);
+TRACE_EVENT(afs_make_fs_calli,
+ TP_PROTO(struct afs_call *call, const struct afs_fid *fid,
+ unsigned int i),
+
+ TP_ARGS(call, fid, i),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call )
+ __field(unsigned int, i )
+ __field(enum afs_fs_operation, op )
+ __field_struct(struct afs_fid, fid )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->i = i;
+ __entry->op = call->operation_ID;
+ if (fid) {
+ __entry->fid = *fid;
+ } else {
+ __entry->fid.vid = 0;
+ __entry->fid.vnode = 0;
+ __entry->fid.unique = 0;
+ }
+ ),
+
+ TP_printk("c=%08x %06llx:%06llx:%06x %s i=%u",
+ __entry->call,
+ __entry->fid.vid,
+ __entry->fid.vnode,
+ __entry->fid.unique,
+ __print_symbolic(__entry->op, afs_fs_operations),
+ __entry->i)
+ );
+
+TRACE_EVENT(afs_make_fs_call1,
+ TP_PROTO(struct afs_call *call, const struct afs_fid *fid,
+ const char *name),
+
+ TP_ARGS(call, fid, name),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call )
+ __field(enum afs_fs_operation, op )
+ __field_struct(struct afs_fid, fid )
+ __array(char, name, 24 )
+ ),
+
+ TP_fast_assign(
+ int __len = strlen(name);
+ __len = min(__len, 23);
+ __entry->call = call->debug_id;
+ __entry->op = call->operation_ID;
+ if (fid) {
+ __entry->fid = *fid;
+ } else {
+ __entry->fid.vid = 0;
+ __entry->fid.vnode = 0;
+ __entry->fid.unique = 0;
+ }
+ memcpy(__entry->name, name, __len);
+ __entry->name[__len] = 0;
+ ),
+
+ TP_printk("c=%08x %06llx:%06llx:%06x %s \"%s\"",
+ __entry->call,
+ __entry->fid.vid,
+ __entry->fid.vnode,
+ __entry->fid.unique,
+ __print_symbolic(__entry->op, afs_fs_operations),
+ __entry->name)
+ );
+
+TRACE_EVENT(afs_make_fs_call2,
+ TP_PROTO(struct afs_call *call, const struct afs_fid *fid,
+ const char *name, const char *name2),
+
+ TP_ARGS(call, fid, name, name2),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call )
+ __field(enum afs_fs_operation, op )
+ __field_struct(struct afs_fid, fid )
+ __array(char, name, 24 )
+ __array(char, name2, 24 )
+ ),
+
+ TP_fast_assign(
+ int __len = strlen(name);
+ int __len2 = strlen(name2);
+ __len = min(__len, 23);
+ __len2 = min(__len2, 23);
+ __entry->call = call->debug_id;
+ __entry->op = call->operation_ID;
+ if (fid) {
+ __entry->fid = *fid;
+ } else {
+ __entry->fid.vid = 0;
+ __entry->fid.vnode = 0;
+ __entry->fid.unique = 0;
+ }
+ memcpy(__entry->name, name, __len);
+ __entry->name[__len] = 0;
+ memcpy(__entry->name2, name2, __len2);
+ __entry->name2[__len2] = 0;
+ ),
+
+ TP_printk("c=%08x %06llx:%06llx:%06x %s \"%s\" \"%s\"",
+ __entry->call,
+ __entry->fid.vid,
+ __entry->fid.vnode,
+ __entry->fid.unique,
+ __print_symbolic(__entry->op, afs_fs_operations),
+ __entry->name,
+ __entry->name2)
+ );
+
TRACE_EVENT(afs_make_vl_call,
TP_PROTO(struct afs_call *call),
@@ -639,6 +855,38 @@ TRACE_EVENT(afs_call_state,
__entry->ret, __entry->abort)
);
+TRACE_EVENT(afs_lookup,
+ TP_PROTO(struct afs_vnode *dvnode, const struct qstr *name,
+ struct afs_vnode *vnode),
+
+ TP_ARGS(dvnode, name, vnode),
+
+ TP_STRUCT__entry(
+ __field_struct(struct afs_fid, dfid )
+ __field_struct(struct afs_fid, fid )
+ __array(char, name, 24 )
+ ),
+
+ TP_fast_assign(
+ int __len = min_t(int, name->len, 23);
+ __entry->dfid = dvnode->fid;
+ if (vnode) {
+ __entry->fid = vnode->fid;
+ } else {
+ __entry->fid.vid = 0;
+ __entry->fid.vnode = 0;
+ __entry->fid.unique = 0;
+ }
+ memcpy(__entry->name, name->name, __len);
+ __entry->name[__len] = 0;
+ ),
+
+ TP_printk("d=%llx:%llx:%x \"%s\" f=%llx:%x",
+ __entry->dfid.vid, __entry->dfid.vnode, __entry->dfid.unique,
+ __entry->name,
+ __entry->fid.vnode, __entry->fid.unique)
+ );
+
TRACE_EVENT(afs_edit_dir,
TP_PROTO(struct afs_vnode *dvnode,
enum afs_edit_dir_reason why,
@@ -660,12 +908,12 @@ TRACE_EVENT(afs_edit_dir,
__field(unsigned short, slot )
__field(unsigned int, f_vnode )
__field(unsigned int, f_unique )
- __array(char, name, 18 )
+ __array(char, name, 24 )
),
TP_fast_assign(
int __len = strlen(name);
- __len = min(__len, 17);
+ __len = min(__len, 23);
__entry->vnode = dvnode->fid.vnode;
__entry->unique = dvnode->fid.unique;
__entry->why = why;
@@ -678,7 +926,7 @@ TRACE_EVENT(afs_edit_dir,
__entry->name[__len] = 0;
),
- TP_printk("d=%x:%x %s %s %u[%u] f=%x:%x %s",
+ TP_printk("d=%x:%x %s %s %u[%u] f=%x:%x \"%s\"",
__entry->vnode, __entry->unique,
__print_symbolic(__entry->why, afs_edit_dir_reasons),
__print_symbolic(__entry->op, afs_edit_dir_ops),
@@ -796,6 +1044,133 @@ TRACE_EVENT(afs_cm_no_server_u,
__entry->call, __entry->op_id, &__entry->uuid)
);
+TRACE_EVENT(afs_flock_ev,
+ TP_PROTO(struct afs_vnode *vnode, struct file_lock *fl,
+ enum afs_flock_event event, int error),
+
+ TP_ARGS(vnode, fl, event, error),
+
+ TP_STRUCT__entry(
+ __field_struct(struct afs_fid, fid )
+ __field(enum afs_flock_event, event )
+ __field(enum afs_lock_state, state )
+ __field(int, error )
+ __field(unsigned int, debug_id )
+ ),
+
+ TP_fast_assign(
+ __entry->fid = vnode->fid;
+ __entry->event = event;
+ __entry->state = vnode->lock_state;
+ __entry->error = error;
+ __entry->debug_id = fl ? fl->fl_u.afs.debug_id : 0;
+ ),
+
+ TP_printk("%llx:%llx:%x %04x %s s=%s e=%d",
+ __entry->fid.vid, __entry->fid.vnode, __entry->fid.unique,
+ __entry->debug_id,
+ __print_symbolic(__entry->event, afs_flock_events),
+ __print_symbolic(__entry->state, afs_flock_states),
+ __entry->error)
+ );
+
+TRACE_EVENT(afs_flock_op,
+ TP_PROTO(struct afs_vnode *vnode, struct file_lock *fl,
+ enum afs_flock_operation op),
+
+ TP_ARGS(vnode, fl, op),
+
+ TP_STRUCT__entry(
+ __field_struct(struct afs_fid, fid )
+ __field(loff_t, from )
+ __field(loff_t, len )
+ __field(enum afs_flock_operation, op )
+ __field(unsigned char, type )
+ __field(unsigned int, flags )
+ __field(unsigned int, debug_id )
+ ),
+
+ TP_fast_assign(
+ __entry->fid = vnode->fid;
+ __entry->from = fl->fl_start;
+ __entry->len = fl->fl_end - fl->fl_start + 1;
+ __entry->op = op;
+ __entry->type = fl->fl_type;
+ __entry->flags = fl->fl_flags;
+ __entry->debug_id = fl->fl_u.afs.debug_id;
+ ),
+
+ TP_printk("%llx:%llx:%x %04x %s t=%s R=%llx/%llx f=%x",
+ __entry->fid.vid, __entry->fid.vnode, __entry->fid.unique,
+ __entry->debug_id,
+ __print_symbolic(__entry->op, afs_flock_operations),
+ __print_symbolic(__entry->type, afs_flock_types),
+ __entry->from, __entry->len, __entry->flags)
+ );
+
+TRACE_EVENT(afs_reload_dir,
+ TP_PROTO(struct afs_vnode *vnode),
+
+ TP_ARGS(vnode),
+
+ TP_STRUCT__entry(
+ __field_struct(struct afs_fid, fid )
+ ),
+
+ TP_fast_assign(
+ __entry->fid = vnode->fid;
+ ),
+
+ TP_printk("%llx:%llx:%x",
+ __entry->fid.vid, __entry->fid.vnode, __entry->fid.unique)
+ );
+
+TRACE_EVENT(afs_silly_rename,
+ TP_PROTO(struct afs_vnode *vnode, bool done),
+
+ TP_ARGS(vnode, done),
+
+ TP_STRUCT__entry(
+ __field_struct(struct afs_fid, fid )
+ __field(bool, done )
+ ),
+
+ TP_fast_assign(
+ __entry->fid = vnode->fid;
+ __entry->done = done;
+ ),
+
+ TP_printk("%llx:%llx:%x done=%u",
+ __entry->fid.vid, __entry->fid.vnode, __entry->fid.unique,
+ __entry->done)
+ );
+
+TRACE_EVENT(afs_get_tree,
+ TP_PROTO(struct afs_cell *cell, struct afs_volume *volume),
+
+ TP_ARGS(cell, volume),
+
+ TP_STRUCT__entry(
+ __field(u64, vid )
+ __array(char, cell, 24 )
+ __array(char, volume, 24 )
+ ),
+
+ TP_fast_assign(
+ int __len;
+ __entry->vid = volume->vid;
+ __len = min_t(int, cell->name_len, 23);
+ memcpy(__entry->cell, cell->name, __len);
+ __entry->cell[__len] = 0;
+ __len = min_t(int, volume->name_len, 23);
+ memcpy(__entry->volume, volume->name, __len);
+ __entry->volume[__len] = 0;
+ ),
+
+ TP_printk("--- MOUNT %s:%s %llx",
+ __entry->cell, __entry->volume, __entry->vid)
+ );
+
#endif /* _TRACE_AFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/bpf_test_run.h b/include/trace/events/bpf_test_run.h
new file mode 100644
index 000000000000..265447e3f71a
--- /dev/null
+++ b/include/trace/events/bpf_test_run.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM bpf_test_run
+
+#if !defined(_TRACE_BPF_TEST_RUN_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_BPF_TEST_RUN_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(bpf_test_finish,
+
+ TP_PROTO(int *err),
+
+ TP_ARGS(err),
+
+ TP_STRUCT__entry(
+ __field(int, err)
+ ),
+
+ TP_fast_assign(
+ __entry->err = *err;
+ ),
+
+ TP_printk("bpf_test_finish with err=%d", __entry->err)
+);
+
+#ifdef DEFINE_EVENT_WRITABLE
+#undef BPF_TEST_RUN_DEFINE_EVENT
+#define BPF_TEST_RUN_DEFINE_EVENT(template, call, proto, args, size) \
+ DEFINE_EVENT_WRITABLE(template, call, PARAMS(proto), \
+ PARAMS(args), size)
+#else
+#undef BPF_TEST_RUN_DEFINE_EVENT
+#define BPF_TEST_RUN_DEFINE_EVENT(template, call, proto, args, size) \
+ DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args))
+#endif
+
+BPF_TEST_RUN_DEFINE_EVENT(bpf_test_finish, bpf_test_finish,
+
+ TP_PROTO(int *err),
+
+ TP_ARGS(err),
+
+ sizeof(int)
+);
+
+#endif
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index ab1cc33adbac..f9eff010fc7e 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -27,6 +27,7 @@ struct btrfs_work;
struct __btrfs_workqueue;
struct btrfs_qgroup_extent_record;
struct btrfs_qgroup;
+struct extent_io_tree;
struct prelim_ref;
TRACE_DEFINE_ENUM(FLUSH_DELAYED_ITEMS_NR);
@@ -77,6 +78,17 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS);
{ BTRFS_QGROUP_RSV_META_PERTRANS, "META_PERTRANS" }, \
{ BTRFS_QGROUP_RSV_META_PREALLOC, "META_PREALLOC" })
+#define show_extent_io_tree_owner(owner) \
+ __print_symbolic(owner, \
+ { IO_TREE_FS_INFO_FREED_EXTENTS0, "FREED_EXTENTS0" }, \
+ { IO_TREE_FS_INFO_FREED_EXTENTS1, "FREED_EXTENTS1" }, \
+ { IO_TREE_INODE_IO, "INODE_IO" }, \
+ { IO_TREE_INODE_IO_FAILURE, "INODE_IO_FAILURE" }, \
+ { IO_TREE_RELOC_BLOCKS, "RELOC_BLOCKS" }, \
+ { IO_TREE_TRANS_DIRTY_PAGES, "TRANS_DIRTY_PAGES" }, \
+ { IO_TREE_ROOT_DIRTY_LOG_PAGES, "ROOT_DIRTY_LOG_PAGES" }, \
+ { IO_TREE_SELFTEST, "SELFTEST" })
+
#define BTRFS_GROUP_FLAGS \
{ BTRFS_BLOCK_GROUP_DATA, "DATA"}, \
{ BTRFS_BLOCK_GROUP_SYSTEM, "SYSTEM"}, \
@@ -88,11 +100,34 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS);
{ BTRFS_BLOCK_GROUP_RAID5, "RAID5"}, \
{ BTRFS_BLOCK_GROUP_RAID6, "RAID6"}
+#define EXTENT_FLAGS \
+ { EXTENT_DIRTY, "DIRTY"}, \
+ { EXTENT_UPTODATE, "UPTODATE"}, \
+ { EXTENT_LOCKED, "LOCKED"}, \
+ { EXTENT_NEW, "NEW"}, \
+ { EXTENT_DELALLOC, "DELALLOC"}, \
+ { EXTENT_DEFRAG, "DEFRAG"}, \
+ { EXTENT_BOUNDARY, "BOUNDARY"}, \
+ { EXTENT_NODATASUM, "NODATASUM"}, \
+ { EXTENT_CLEAR_META_RESV, "CLEAR_META_RESV"}, \
+ { EXTENT_NEED_WAIT, "NEED_WAIT"}, \
+ { EXTENT_DAMAGED, "DAMAGED"}, \
+ { EXTENT_NORESERVE, "NORESERVE"}, \
+ { EXTENT_QGROUP_RESERVED, "QGROUP_RESERVED"}, \
+ { EXTENT_CLEAR_DATA_RESV, "CLEAR_DATA_RESV"}, \
+ { EXTENT_DELALLOC_NEW, "DELALLOC_NEW"}
+
#define BTRFS_FSID_SIZE 16
#define TP_STRUCT__entry_fsid __array(u8, fsid, BTRFS_FSID_SIZE)
#define TP_fast_assign_fsid(fs_info) \
- memcpy(__entry->fsid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE)
+({ \
+ if (fs_info) \
+ memcpy(__entry->fsid, fs_info->fs_devices->fsid, \
+ BTRFS_FSID_SIZE); \
+ else \
+ memset(__entry->fsid, 0, BTRFS_FSID_SIZE); \
+})
#define TP_STRUCT__entry_btrfs(args...) \
TP_STRUCT__entry( \
@@ -1345,7 +1380,7 @@ DECLARE_EVENT_CLASS(btrfs__work,
__entry->normal_work = &work->normal_work;
),
- TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%pf ordered_func=%p "
+ TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%ps ordered_func=%p "
"ordered_free=%p",
__entry->work, __entry->normal_work, __entry->wq,
__entry->func, __entry->ordered_func, __entry->ordered_free)
@@ -1850,6 +1885,212 @@ DEFINE_EVENT(btrfs__block_group, btrfs_skip_unused_block_group,
TP_ARGS(bg_cache)
);
+TRACE_EVENT(btrfs_set_extent_bit,
+ TP_PROTO(const struct extent_io_tree *tree,
+ u64 start, u64 len, unsigned set_bits),
+
+ TP_ARGS(tree, start, len, set_bits),
+
+ TP_STRUCT__entry_btrfs(
+ __field( unsigned, owner )
+ __field( u64, ino )
+ __field( u64, rootid )
+ __field( u64, start )
+ __field( u64, len )
+ __field( unsigned, set_bits)
+ ),
+
+ TP_fast_assign_btrfs(tree->fs_info,
+ __entry->owner = tree->owner;
+ if (tree->private_data) {
+ struct inode *inode = tree->private_data;
+
+ __entry->ino = btrfs_ino(BTRFS_I(inode));
+ __entry->rootid =
+ BTRFS_I(inode)->root->root_key.objectid;
+ } else {
+ __entry->ino = 0;
+ __entry->rootid = 0;
+ }
+ __entry->start = start;
+ __entry->len = len;
+ __entry->set_bits = set_bits;
+ ),
+
+ TP_printk_btrfs(
+ "io_tree=%s ino=%llu root=%llu start=%llu len=%llu set_bits=%s",
+ show_extent_io_tree_owner(__entry->owner), __entry->ino,
+ __entry->rootid, __entry->start, __entry->len,
+ __print_flags(__entry->set_bits, "|", EXTENT_FLAGS))
+);
+
+TRACE_EVENT(btrfs_clear_extent_bit,
+ TP_PROTO(const struct extent_io_tree *tree,
+ u64 start, u64 len, unsigned clear_bits),
+
+ TP_ARGS(tree, start, len, clear_bits),
+
+ TP_STRUCT__entry_btrfs(
+ __field( unsigned, owner )
+ __field( u64, ino )
+ __field( u64, rootid )
+ __field( u64, start )
+ __field( u64, len )
+ __field( unsigned, clear_bits)
+ ),
+
+ TP_fast_assign_btrfs(tree->fs_info,
+ __entry->owner = tree->owner;
+ if (tree->private_data) {
+ struct inode *inode = tree->private_data;
+
+ __entry->ino = btrfs_ino(BTRFS_I(inode));
+ __entry->rootid =
+ BTRFS_I(inode)->root->root_key.objectid;
+ } else {
+ __entry->ino = 0;
+ __entry->rootid = 0;
+ }
+ __entry->start = start;
+ __entry->len = len;
+ __entry->clear_bits = clear_bits;
+ ),
+
+ TP_printk_btrfs(
+ "io_tree=%s ino=%llu root=%llu start=%llu len=%llu clear_bits=%s",
+ show_extent_io_tree_owner(__entry->owner), __entry->ino,
+ __entry->rootid, __entry->start, __entry->len,
+ __print_flags(__entry->clear_bits, "|", EXTENT_FLAGS))
+);
+
+TRACE_EVENT(btrfs_convert_extent_bit,
+ TP_PROTO(const struct extent_io_tree *tree,
+ u64 start, u64 len, unsigned set_bits, unsigned clear_bits),
+
+ TP_ARGS(tree, start, len, set_bits, clear_bits),
+
+ TP_STRUCT__entry_btrfs(
+ __field( unsigned, owner )
+ __field( u64, ino )
+ __field( u64, rootid )
+ __field( u64, start )
+ __field( u64, len )
+ __field( unsigned, set_bits)
+ __field( unsigned, clear_bits)
+ ),
+
+ TP_fast_assign_btrfs(tree->fs_info,
+ __entry->owner = tree->owner;
+ if (tree->private_data) {
+ struct inode *inode = tree->private_data;
+
+ __entry->ino = btrfs_ino(BTRFS_I(inode));
+ __entry->rootid =
+ BTRFS_I(inode)->root->root_key.objectid;
+ } else {
+ __entry->ino = 0;
+ __entry->rootid = 0;
+ }
+ __entry->start = start;
+ __entry->len = len;
+ __entry->set_bits = set_bits;
+ __entry->clear_bits = clear_bits;
+ ),
+
+ TP_printk_btrfs(
+"io_tree=%s ino=%llu root=%llu start=%llu len=%llu set_bits=%s clear_bits=%s",
+ show_extent_io_tree_owner(__entry->owner), __entry->ino,
+ __entry->rootid, __entry->start, __entry->len,
+ __print_flags(__entry->set_bits , "|", EXTENT_FLAGS),
+ __print_flags(__entry->clear_bits, "|", EXTENT_FLAGS))
+);
+
+DECLARE_EVENT_CLASS(btrfs_sleep_tree_lock,
+ TP_PROTO(const struct extent_buffer *eb, u64 start_ns),
+
+ TP_ARGS(eb, start_ns),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, block )
+ __field( u64, generation )
+ __field( u64, start_ns )
+ __field( u64, end_ns )
+ __field( u64, diff_ns )
+ __field( u64, owner )
+ __field( int, is_log_tree )
+ ),
+
+ TP_fast_assign_btrfs(eb->fs_info,
+ __entry->block = eb->start;
+ __entry->generation = btrfs_header_generation(eb);
+ __entry->start_ns = start_ns;
+ __entry->end_ns = ktime_get_ns();
+ __entry->diff_ns = __entry->end_ns - start_ns;
+ __entry->owner = btrfs_header_owner(eb);
+ __entry->is_log_tree = (eb->log_index >= 0);
+ ),
+
+ TP_printk_btrfs(
+"block=%llu generation=%llu start_ns=%llu end_ns=%llu diff_ns=%llu owner=%llu is_log_tree=%d",
+ __entry->block, __entry->generation,
+ __entry->start_ns, __entry->end_ns, __entry->diff_ns,
+ __entry->owner, __entry->is_log_tree)
+);
+
+DEFINE_EVENT(btrfs_sleep_tree_lock, btrfs_tree_read_lock,
+ TP_PROTO(const struct extent_buffer *eb, u64 start_ns),
+
+ TP_ARGS(eb, start_ns)
+);
+
+DEFINE_EVENT(btrfs_sleep_tree_lock, btrfs_tree_lock,
+ TP_PROTO(const struct extent_buffer *eb, u64 start_ns),
+
+ TP_ARGS(eb, start_ns)
+);
+
+DECLARE_EVENT_CLASS(btrfs_locking_events,
+ TP_PROTO(const struct extent_buffer *eb),
+
+ TP_ARGS(eb),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, block )
+ __field( u64, generation )
+ __field( u64, owner )
+ __field( int, is_log_tree )
+ ),
+
+ TP_fast_assign_btrfs(eb->fs_info,
+ __entry->block = eb->start;
+ __entry->generation = btrfs_header_generation(eb);
+ __entry->owner = btrfs_header_owner(eb);
+ __entry->is_log_tree = (eb->log_index >= 0);
+ ),
+
+ TP_printk_btrfs("block=%llu generation=%llu owner=%llu is_log_tree=%d",
+ __entry->block, __entry->generation,
+ __entry->owner, __entry->is_log_tree)
+);
+
+#define DEFINE_BTRFS_LOCK_EVENT(name) \
+DEFINE_EVENT(btrfs_locking_events, name, \
+ TP_PROTO(const struct extent_buffer *eb), \
+ \
+ TP_ARGS(eb) \
+)
+
+DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_unlock);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_unlock);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_unlock_blocking);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_set_lock_blocking_read);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_set_lock_blocking_write);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_clear_lock_blocking_read);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_clear_lock_blocking_write);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_try_tree_read_lock);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_try_tree_write_lock);
+DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_lock_atomic);
+
#endif /* _TRACE_BTRFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h
index a401ff5e7847..a566cc521476 100644
--- a/include/trace/events/cgroup.h
+++ b/include/trace/events/cgroup.h
@@ -103,6 +103,20 @@ DEFINE_EVENT(cgroup, cgroup_rename,
TP_ARGS(cgrp, path)
);
+DEFINE_EVENT(cgroup, cgroup_freeze,
+
+ TP_PROTO(struct cgroup *cgrp, const char *path),
+
+ TP_ARGS(cgrp, path)
+);
+
+DEFINE_EVENT(cgroup, cgroup_unfreeze,
+
+ TP_PROTO(struct cgroup *cgrp, const char *path),
+
+ TP_ARGS(cgrp, path)
+);
+
DECLARE_EVENT_CLASS(cgroup_migrate,
TP_PROTO(struct cgroup *dst_cgrp, const char *path,
@@ -149,6 +163,47 @@ DEFINE_EVENT(cgroup_migrate, cgroup_transfer_tasks,
TP_ARGS(dst_cgrp, path, task, threadgroup)
);
+DECLARE_EVENT_CLASS(cgroup_event,
+
+ TP_PROTO(struct cgroup *cgrp, const char *path, int val),
+
+ TP_ARGS(cgrp, path, val),
+
+ TP_STRUCT__entry(
+ __field( int, root )
+ __field( int, id )
+ __field( int, level )
+ __string( path, path )
+ __field( int, val )
+ ),
+
+ TP_fast_assign(
+ __entry->root = cgrp->root->hierarchy_id;
+ __entry->id = cgrp->id;
+ __entry->level = cgrp->level;
+ __assign_str(path, path);
+ __entry->val = val;
+ ),
+
+ TP_printk("root=%d id=%d level=%d path=%s val=%d",
+ __entry->root, __entry->id, __entry->level, __get_str(path),
+ __entry->val)
+);
+
+DEFINE_EVENT(cgroup_event, cgroup_notify_populated,
+
+ TP_PROTO(struct cgroup *cgrp, const char *path, int val),
+
+ TP_ARGS(cgrp, path, val)
+);
+
+DEFINE_EVENT(cgroup_event, cgroup_notify_frozen,
+
+ TP_PROTO(struct cgroup *cgrp, const char *path, int val),
+
+ TP_ARGS(cgrp, path, val)
+);
+
#endif /* _TRACE_CGROUP_H */
/* This part must be outside protection */
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index 6074eff3d766..e5bf6ee4e814 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -64,6 +64,7 @@ DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages,
TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
);
+#ifdef CONFIG_COMPACTION
TRACE_EVENT(mm_compaction_migratepages,
TP_PROTO(unsigned long nr_all,
@@ -132,7 +133,6 @@ TRACE_EVENT(mm_compaction_begin,
__entry->sync ? "sync" : "async")
);
-#ifdef CONFIG_COMPACTION
TRACE_EVENT(mm_compaction_end,
TP_PROTO(unsigned long zone_start, unsigned long migrate_pfn,
unsigned long free_pfn, unsigned long zone_end, bool sync,
@@ -166,7 +166,6 @@ TRACE_EVENT(mm_compaction_end,
__entry->sync ? "sync" : "async",
__print_symbolic(__entry->status, COMPACTION_STATUS))
);
-#endif
TRACE_EVENT(mm_compaction_try_to_compact_pages,
@@ -189,13 +188,12 @@ TRACE_EVENT(mm_compaction_try_to_compact_pages,
__entry->prio = prio;
),
- TP_printk("order=%d gfp_mask=0x%x priority=%d",
+ TP_printk("order=%d gfp_mask=%s priority=%d",
__entry->order,
- __entry->gfp_mask,
+ show_gfp_flags(__entry->gfp_mask),
__entry->prio)
);
-#ifdef CONFIG_COMPACTION
DECLARE_EVENT_CLASS(mm_compaction_suitable_template,
TP_PROTO(struct zone *zone,
@@ -296,7 +294,6 @@ DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_reset,
TP_ARGS(zone, order)
);
-#endif
TRACE_EVENT(mm_compaction_kcompactd_sleep,
@@ -352,6 +349,7 @@ DEFINE_EVENT(kcompactd_wake_template, mm_compaction_kcompactd_wake,
TP_ARGS(nid, order, classzone_idx)
);
+#endif
#endif /* _TRACE_COMPACTION_H */
diff --git a/include/trace/events/cpuhp.h b/include/trace/events/cpuhp.h
index fe1d6e8cd99d..ad16f77310c6 100644
--- a/include/trace/events/cpuhp.h
+++ b/include/trace/events/cpuhp.h
@@ -30,7 +30,7 @@ TRACE_EVENT(cpuhp_enter,
__entry->fun = fun;
),
- TP_printk("cpu: %04u target: %3d step: %3d (%pf)",
+ TP_printk("cpu: %04u target: %3d step: %3d (%ps)",
__entry->cpu, __entry->target, __entry->idx, __entry->fun)
);
@@ -58,7 +58,7 @@ TRACE_EVENT(cpuhp_multi_enter,
__entry->fun = fun;
),
- TP_printk("cpu: %04u target: %3d step: %3d (%pf)",
+ TP_printk("cpu: %04u target: %3d step: %3d (%ps)",
__entry->cpu, __entry->target, __entry->idx, __entry->fun)
);
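This hunk (and several later ones) switches printk format specifiers from %pf to %ps: %ps/%pS print the symbol a code address resolves to (without/with offset), and once they learned to dereference function descriptors on the ABIs that use them, the dedicated %pf/%pF variants became redundant. A minimal sketch of the surviving specifiers, assuming an arbitrary kernel context:

#include <linux/kernel.h>
#include <linux/printk.h>

static void symbol_fmt_sketch(void (*fn)(void))
{
	pr_info("callback %ps\n", fn);	/* e.g. "callback my_handler" */
	pr_info("callback %pS\n", fn);	/* e.g. "callback my_handler+0x0/0x10" */
}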
diff --git a/include/trace/events/devfreq.h b/include/trace/events/devfreq.h
new file mode 100644
index 000000000000..cf5b8772175d
--- /dev/null
+++ b/include/trace/events/devfreq.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM devfreq
+
+#if !defined(_TRACE_DEVFREQ_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DEVFREQ_H
+
+#include <linux/devfreq.h>
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(devfreq_monitor,
+ TP_PROTO(struct devfreq *devfreq),
+
+ TP_ARGS(devfreq),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, freq)
+ __field(unsigned long, busy_time)
+ __field(unsigned long, total_time)
+ __field(unsigned int, polling_ms)
+ __string(dev_name, dev_name(&devfreq->dev))
+ ),
+
+ TP_fast_assign(
+ __entry->freq = devfreq->previous_freq;
+ __entry->busy_time = devfreq->last_status.busy_time;
+ __entry->total_time = devfreq->last_status.total_time;
+ __entry->polling_ms = devfreq->profile->polling_ms;
+ __assign_str(dev_name, dev_name(&devfreq->dev));
+ ),
+
+ TP_printk("dev_name=%s freq=%lu polling_ms=%u load=%lu",
+ __get_str(dev_name), __entry->freq, __entry->polling_ms,
+ __entry->total_time == 0 ? 0 :
+ (100 * __entry->busy_time) / __entry->total_time)
+);
+#endif /* _TRACE_DEVFREQ_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
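A minimal sketch of the producer side for the new devfreq_monitor event; the assumption here is that the devfreq core fires it from its periodic monitor work after last_status has been refreshed.

/* In exactly one .c file of the devfreq core: */
#define CREATE_TRACE_POINTS
#include <trace/events/devfreq.h>

static void devfreq_monitor_sketch(struct devfreq *devfreq)
{
	/* ... devfreq->last_status and previous_freq updated by the governor ... */

	/* Records frequency, busy/total time (printed as load) and polling_ms. */
	trace_devfreq_monitor(devfreq);
}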
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index a3916b4dd57e..53b96f12300c 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -533,6 +533,37 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
__entry->err)
);
+TRACE_EVENT(f2fs_file_write_iter,
+
+ TP_PROTO(struct inode *inode, unsigned long offset,
+ unsigned long length, int ret),
+
+ TP_ARGS(inode, offset, length, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(unsigned long, offset)
+ __field(unsigned long, length)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->offset = offset;
+ __entry->length = length;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, "
+ "offset = %lu, length = %lu, written(err) = %d",
+ show_dev_ino(__entry),
+ __entry->offset,
+ __entry->length,
+ __entry->ret)
+);
+
TRACE_EVENT(f2fs_map_blocks,
TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, int ret),
@@ -1253,6 +1284,32 @@ DEFINE_EVENT(f2fs__page, f2fs_commit_inmem_page,
TP_ARGS(page, type)
);
+TRACE_EVENT(f2fs_filemap_fault,
+
+ TP_PROTO(struct inode *inode, pgoff_t index, unsigned long ret),
+
+ TP_ARGS(inode, index, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(pgoff_t, index)
+ __field(unsigned long, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->index = index;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, index = %lu, ret = %lx",
+ show_dev_ino(__entry),
+ (unsigned long)__entry->index,
+ __entry->ret)
+);
+
TRACE_EVENT(f2fs_writepages,
TP_PROTO(struct inode *inode, struct writeback_control *wbc, int type),
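A minimal sketch of how a write path could feed the new f2fs_file_write_iter event; the specific arguments (ki_pos, remaining iov length, returned byte count) are assumptions matched to the field names above, and generic_file_write_iter() stands in for the real f2fs write path.

#include <linux/fs.h>
#include <linux/uio.h>
#include <trace/events/f2fs.h>

static ssize_t f2fs_write_iter_sketch(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	/* Stand-in for the real write implementation. */
	ret = generic_file_write_iter(iocb, from);

	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
				   iov_iter_count(from), ret);
	return ret;
}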
diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h
index 6271bab63bfb..6f2a4dc35e37 100644
--- a/include/trace/events/fib.h
+++ b/include/trace/events/fib.h
@@ -13,9 +13,9 @@
TRACE_EVENT(fib_table_lookup,
TP_PROTO(u32 tb_id, const struct flowi4 *flp,
- const struct fib_nh *nh, int err),
+ const struct fib_nh_common *nhc, int err),
- TP_ARGS(tb_id, flp, nh, err),
+ TP_ARGS(tb_id, flp, nhc, err),
TP_STRUCT__entry(
__field( u32, tb_id )
@@ -28,14 +28,17 @@ TRACE_EVENT(fib_table_lookup,
__field( __u8, flags )
__array( __u8, src, 4 )
__array( __u8, dst, 4 )
- __array( __u8, gw, 4 )
- __array( __u8, saddr, 4 )
+ __array( __u8, gw4, 4 )
+ __array( __u8, gw6, 16 )
__field( u16, sport )
__field( u16, dport )
__dynamic_array(char, name, IFNAMSIZ )
),
TP_fast_assign(
+ struct in6_addr in6_zero = {};
+ struct net_device *dev;
+ struct in6_addr *in6;
__be32 *p32;
__entry->tb_id = tb_id;
@@ -62,30 +65,37 @@ TRACE_EVENT(fib_table_lookup,
__entry->dport = 0;
}
- if (nh) {
- p32 = (__be32 *) __entry->saddr;
- *p32 = nh->nh_saddr;
+ dev = nhc ? nhc->nhc_dev : NULL;
+ __assign_str(name, dev ? dev->name : "-");
- p32 = (__be32 *) __entry->gw;
- *p32 = nh->nh_gw;
+ if (nhc) {
+ if (nhc->nhc_gw_family == AF_INET) {
+ p32 = (__be32 *) __entry->gw4;
+ *p32 = nhc->nhc_gw.ipv4;
- __assign_str(name, nh->nh_dev ? nh->nh_dev->name : "-");
- } else {
- p32 = (__be32 *) __entry->saddr;
- *p32 = 0;
+ in6 = (struct in6_addr *)__entry->gw6;
+ *in6 = in6_zero;
+ } else if (nhc->nhc_gw_family == AF_INET6) {
+ p32 = (__be32 *) __entry->gw4;
+ *p32 = 0;
- p32 = (__be32 *) __entry->gw;
+ in6 = (struct in6_addr *)__entry->gw6;
+ *in6 = nhc->nhc_gw.ipv6;
+ }
+ } else {
+ p32 = (__be32 *) __entry->gw4;
*p32 = 0;
- __assign_str(name, "-");
+ in6 = (struct in6_addr *)__entry->gw6;
+ *in6 = in6_zero;
}
),
- TP_printk("table %u oif %d iif %d proto %u %pI4/%u -> %pI4/%u tos %d scope %d flags %x ==> dev %s gw %pI4 src %pI4 err %d",
+ TP_printk("table %u oif %d iif %d proto %u %pI4/%u -> %pI4/%u tos %d scope %d flags %x ==> dev %s gw %pI4/%pI6c err %d",
__entry->tb_id, __entry->oif, __entry->iif, __entry->proto,
__entry->src, __entry->sport, __entry->dst, __entry->dport,
__entry->tos, __entry->scope, __entry->flags,
- __get_str(name), __entry->gw, __entry->saddr, __entry->err)
+ __get_str(name), __entry->gw4, __entry->gw6, __entry->err)
);
#endif /* _TRACE_FIB_H */
diff --git a/include/trace/events/fib6.h b/include/trace/events/fib6.h
index b088b54d699c..c6abdcc77c12 100644
--- a/include/trace/events/fib6.h
+++ b/include/trace/events/fib6.h
@@ -12,10 +12,10 @@
TRACE_EVENT(fib6_table_lookup,
- TP_PROTO(const struct net *net, const struct fib6_info *f6i,
+ TP_PROTO(const struct net *net, const struct fib6_result *res,
struct fib6_table *table, const struct flowi6 *flp),
- TP_ARGS(net, f6i, table, flp),
+ TP_ARGS(net, res, table, flp),
TP_STRUCT__entry(
__field( u32, tb_id )
@@ -39,7 +39,7 @@ TRACE_EVENT(fib6_table_lookup,
struct in6_addr *in6;
__entry->tb_id = table->tb6_id;
- __entry->err = ip6_rt_type_to_error(f6i->fib6_type);
+ __entry->err = ip6_rt_type_to_error(res->fib6_type);
__entry->oif = flp->flowi6_oif;
__entry->iif = flp->flowi6_iif;
__entry->tos = ip6_tclass(flp->flowlabel);
@@ -62,20 +62,20 @@ TRACE_EVENT(fib6_table_lookup,
__entry->dport = 0;
}
- if (f6i->fib6_nh.nh_dev) {
- __assign_str(name, f6i->fib6_nh.nh_dev);
+ if (res->nh && res->nh->fib_nh_dev) {
+ __assign_str(name, res->nh->fib_nh_dev);
} else {
__assign_str(name, "-");
}
- if (f6i == net->ipv6.fib6_null_entry) {
+ if (res->f6i == net->ipv6.fib6_null_entry) {
struct in6_addr in6_zero = {};
in6 = (struct in6_addr *)__entry->gw;
*in6 = in6_zero;
- } else if (f6i) {
+ } else if (res->nh) {
in6 = (struct in6_addr *)__entry->gw;
- *in6 = f6i->fib6_nh.nh_gw;
+ *in6 = res->nh->fib_nh_gw6;
}
),
diff --git a/include/trace/events/gpio.h b/include/trace/events/gpio.h
index 5c189a22c489..3aa9fd86d748 100644
--- a/include/trace/events/gpio.h
+++ b/include/trace/events/gpio.h
@@ -2,10 +2,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM gpio
-#ifndef CONFIG_TRACING_EVENTS_GPIO
-#define NOTRACE
-#endif
-
#if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_GPIO_H
diff --git a/include/trace/events/ib_mad.h b/include/trace/events/ib_mad.h
new file mode 100644
index 000000000000..59363a083ecb
--- /dev/null
+++ b/include/trace/events/ib_mad.h
@@ -0,0 +1,390 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+
+/*
+ * Copyright (c) 2018 Intel Corporation. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ib_mad
+
+#if !defined(_TRACE_IB_MAD_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_IB_MAD_H
+
+#include <linux/tracepoint.h>
+#include <rdma/ib_mad.h>
+
+#ifdef CONFIG_TRACEPOINTS
+struct trace_event_raw_ib_mad_send_template;
+static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
+ struct ib_mad_qp_info *qp_info,
+ struct trace_event_raw_ib_mad_send_template *entry);
+#endif
+
+DECLARE_EVENT_CLASS(ib_mad_send_template,
+ TP_PROTO(struct ib_mad_send_wr_private *wr,
+ struct ib_mad_qp_info *qp_info),
+ TP_ARGS(wr, qp_info),
+
+ TP_STRUCT__entry(
+ __field(u8, base_version)
+ __field(u8, mgmt_class)
+ __field(u8, class_version)
+ __field(u8, port_num)
+ __field(u32, qp_num)
+ __field(u8, method)
+ __field(u8, sl)
+ __field(u16, attr_id)
+ __field(u32, attr_mod)
+ __field(u64, wrtid)
+ __field(u64, tid)
+ __field(u16, status)
+ __field(u16, class_specific)
+ __field(u32, length)
+ __field(u32, dlid)
+ __field(u32, rqpn)
+ __field(u32, rqkey)
+ __field(u32, dev_index)
+ __field(void *, agent_priv)
+ __field(unsigned long, timeout)
+ __field(int, retries_left)
+ __field(int, max_retries)
+ __field(int, retry)
+ __field(u16, pkey)
+ ),
+
+ TP_fast_assign(
+ __entry->dev_index = wr->mad_agent_priv->agent.device->index;
+ __entry->port_num = wr->mad_agent_priv->agent.port_num;
+ __entry->qp_num = wr->mad_agent_priv->qp_info->qp->qp_num;
+ __entry->agent_priv = wr->mad_agent_priv;
+ __entry->wrtid = wr->tid;
+ __entry->max_retries = wr->max_retries;
+ __entry->retries_left = wr->retries_left;
+ __entry->retry = wr->retry;
+ __entry->timeout = wr->timeout;
+ __entry->length = wr->send_buf.hdr_len +
+ wr->send_buf.data_len;
+ __entry->base_version =
+ ((struct ib_mad_hdr *)wr->send_buf.mad)->base_version;
+ __entry->mgmt_class =
+ ((struct ib_mad_hdr *)wr->send_buf.mad)->mgmt_class;
+ __entry->class_version =
+ ((struct ib_mad_hdr *)wr->send_buf.mad)->class_version;
+ __entry->method =
+ ((struct ib_mad_hdr *)wr->send_buf.mad)->method;
+ __entry->status =
+ ((struct ib_mad_hdr *)wr->send_buf.mad)->status;
+ __entry->class_specific =
+ ((struct ib_mad_hdr *)wr->send_buf.mad)->class_specific;
+ __entry->tid = ((struct ib_mad_hdr *)wr->send_buf.mad)->tid;
+ __entry->attr_id =
+ ((struct ib_mad_hdr *)wr->send_buf.mad)->attr_id;
+ __entry->attr_mod =
+ ((struct ib_mad_hdr *)wr->send_buf.mad)->attr_mod;
+ create_mad_addr_info(wr, qp_info, __entry);
+ ),
+
+ TP_printk("%d:%d QP%d agent %p: " \
+ "wrtid 0x%llx; %d/%d retries(%d); timeout %lu length %d : " \
+ "hdr : base_ver 0x%x class 0x%x class_ver 0x%x " \
+ "method 0x%x status 0x%x class_specific 0x%x tid 0x%llx " \
+ "attr_id 0x%x attr_mod 0x%x => dlid 0x%08x sl %d "\
+ "pkey 0x%x rpqn 0x%x rqpkey 0x%x",
+ __entry->dev_index, __entry->port_num, __entry->qp_num,
+ __entry->agent_priv, be64_to_cpu(__entry->wrtid),
+ __entry->retries_left, __entry->max_retries,
+ __entry->retry, __entry->timeout, __entry->length,
+ __entry->base_version, __entry->mgmt_class,
+ __entry->class_version,
+ __entry->method, be16_to_cpu(__entry->status),
+ be16_to_cpu(__entry->class_specific),
+ be64_to_cpu(__entry->tid), be16_to_cpu(__entry->attr_id),
+ be32_to_cpu(__entry->attr_mod),
+ be32_to_cpu(__entry->dlid), __entry->sl, __entry->pkey,
+ __entry->rqpn, __entry->rqkey
+ )
+);
+
+DEFINE_EVENT(ib_mad_send_template, ib_mad_error_handler,
+ TP_PROTO(struct ib_mad_send_wr_private *wr,
+ struct ib_mad_qp_info *qp_info),
+ TP_ARGS(wr, qp_info));
+DEFINE_EVENT(ib_mad_send_template, ib_mad_ib_send_mad,
+ TP_PROTO(struct ib_mad_send_wr_private *wr,
+ struct ib_mad_qp_info *qp_info),
+ TP_ARGS(wr, qp_info));
+DEFINE_EVENT(ib_mad_send_template, ib_mad_send_done_resend,
+ TP_PROTO(struct ib_mad_send_wr_private *wr,
+ struct ib_mad_qp_info *qp_info),
+ TP_ARGS(wr, qp_info));
+
+TRACE_EVENT(ib_mad_send_done_handler,
+ TP_PROTO(struct ib_mad_send_wr_private *wr, struct ib_wc *wc),
+ TP_ARGS(wr, wc),
+
+ TP_STRUCT__entry(
+ __field(u8, port_num)
+ __field(u8, base_version)
+ __field(u8, mgmt_class)
+ __field(u8, class_version)
+ __field(u32, qp_num)
+ __field(u64, wrtid)
+ __field(u16, status)
+ __field(u16, wc_status)
+ __field(u32, length)
+ __field(void *, agent_priv)
+ __field(unsigned long, timeout)
+ __field(u32, dev_index)
+ __field(int, retries_left)
+ __field(int, max_retries)
+ __field(int, retry)
+ __field(u8, method)
+ ),
+
+ TP_fast_assign(
+ __entry->dev_index = wr->mad_agent_priv->agent.device->index;
+ __entry->port_num = wr->mad_agent_priv->agent.port_num;
+ __entry->qp_num = wr->mad_agent_priv->qp_info->qp->qp_num;
+ __entry->agent_priv = wr->mad_agent_priv;
+ __entry->wrtid = wr->tid;
+ __entry->max_retries = wr->max_retries;
+ __entry->retries_left = wr->retries_left;
+ __entry->retry = wr->retry;
+ __entry->timeout = wr->timeout;
+ __entry->base_version =
+ ((struct ib_mad_hdr *)wr->send_buf.mad)->base_version;
+ __entry->mgmt_class =
+ ((struct ib_mad_hdr *)wr->send_buf.mad)->mgmt_class;
+ __entry->class_version =
+ ((struct ib_mad_hdr *)wr->send_buf.mad)->class_version;
+ __entry->method =
+ ((struct ib_mad_hdr *)wr->send_buf.mad)->method;
+ __entry->status =
+ ((struct ib_mad_hdr *)wr->send_buf.mad)->status;
+ __entry->wc_status = wc->status;
+ __entry->length = wc->byte_len;
+ ),
+
+ TP_printk("%d:%d QP%d : SEND WC Status %d : agent %p: " \
+ "wrtid 0x%llx %d/%d retries(%d) timeout %lu length %d: " \
+ "hdr : base_ver 0x%x class 0x%x class_ver 0x%x " \
+ "method 0x%x status 0x%x",
+ __entry->dev_index, __entry->port_num, __entry->qp_num,
+ __entry->wc_status,
+ __entry->agent_priv, be64_to_cpu(__entry->wrtid),
+ __entry->retries_left, __entry->max_retries,
+ __entry->retry, __entry->timeout,
+ __entry->length,
+ __entry->base_version, __entry->mgmt_class,
+ __entry->class_version, __entry->method,
+ be16_to_cpu(__entry->status)
+ )
+);
+
+TRACE_EVENT(ib_mad_recv_done_handler,
+ TP_PROTO(struct ib_mad_qp_info *qp_info, struct ib_wc *wc,
+ struct ib_mad_hdr *mad_hdr),
+ TP_ARGS(qp_info, wc, mad_hdr),
+
+ TP_STRUCT__entry(
+ __field(u8, base_version)
+ __field(u8, mgmt_class)
+ __field(u8, class_version)
+ __field(u8, port_num)
+ __field(u32, qp_num)
+ __field(u16, status)
+ __field(u16, class_specific)
+ __field(u32, length)
+ __field(u64, tid)
+ __field(u8, method)
+ __field(u8, sl)
+ __field(u16, attr_id)
+ __field(u32, attr_mod)
+ __field(u16, src_qp)
+ __field(u16, wc_status)
+ __field(u32, slid)
+ __field(u32, dev_index)
+ __field(u16, pkey)
+ ),
+
+ TP_fast_assign(
+ __entry->dev_index = qp_info->port_priv->device->index;
+ __entry->port_num = qp_info->port_priv->port_num;
+ __entry->qp_num = qp_info->qp->qp_num;
+ __entry->length = wc->byte_len;
+ __entry->base_version = mad_hdr->base_version;
+ __entry->mgmt_class = mad_hdr->mgmt_class;
+ __entry->class_version = mad_hdr->class_version;
+ __entry->method = mad_hdr->method;
+ __entry->status = mad_hdr->status;
+ __entry->class_specific = mad_hdr->class_specific;
+ __entry->tid = mad_hdr->tid;
+ __entry->attr_id = mad_hdr->attr_id;
+ __entry->attr_mod = mad_hdr->attr_mod;
+ __entry->slid = wc->slid;
+ __entry->src_qp = wc->src_qp;
+ __entry->sl = wc->sl;
+ ib_query_pkey(qp_info->port_priv->device,
+ qp_info->port_priv->port_num,
+ wc->pkey_index, &__entry->pkey);
+ __entry->wc_status = wc->status;
+ ),
+
+ TP_printk("%d:%d QP%d : RECV WC Status %d : length %d : hdr : " \
+ "base_ver 0x%02x class 0x%02x class_ver 0x%02x " \
+ "method 0x%02x status 0x%04x class_specific 0x%04x " \
+ "tid 0x%016llx attr_id 0x%04x attr_mod 0x%08x " \
+ "slid 0x%08x src QP%d, sl %d pkey 0x%04x",
+ __entry->dev_index, __entry->port_num, __entry->qp_num,
+ __entry->wc_status,
+ __entry->length,
+ __entry->base_version, __entry->mgmt_class,
+ __entry->class_version, __entry->method,
+ be16_to_cpu(__entry->status),
+ be16_to_cpu(__entry->class_specific),
+ be64_to_cpu(__entry->tid), be16_to_cpu(__entry->attr_id),
+ be32_to_cpu(__entry->attr_mod),
+ __entry->slid, __entry->src_qp, __entry->sl, __entry->pkey
+ )
+);
+
+DECLARE_EVENT_CLASS(ib_mad_agent_template,
+ TP_PROTO(struct ib_mad_agent_private *agent),
+ TP_ARGS(agent),
+
+ TP_STRUCT__entry(
+ __field(u32, dev_index)
+ __field(u32, hi_tid)
+ __field(u8, port_num)
+ __field(u8, mgmt_class)
+ __field(u8, mgmt_class_version)
+ ),
+
+ TP_fast_assign(
+ __entry->dev_index = agent->agent.device->index;
+ __entry->port_num = agent->agent.port_num;
+ __entry->hi_tid = agent->agent.hi_tid;
+
+ if (agent->reg_req) {
+ __entry->mgmt_class = agent->reg_req->mgmt_class;
+ __entry->mgmt_class_version =
+ agent->reg_req->mgmt_class_version;
+ } else {
+ __entry->mgmt_class = 0;
+ __entry->mgmt_class_version = 0;
+ }
+ ),
+
+ TP_printk("%d:%d mad agent : hi_tid 0x%08x class 0x%02x class_ver 0x%02x",
+ __entry->dev_index, __entry->port_num,
+ __entry->hi_tid, __entry->mgmt_class,
+ __entry->mgmt_class_version
+ )
+);
+DEFINE_EVENT(ib_mad_agent_template, ib_mad_recv_done_agent,
+ TP_PROTO(struct ib_mad_agent_private *agent),
+ TP_ARGS(agent));
+DEFINE_EVENT(ib_mad_agent_template, ib_mad_send_done_agent,
+ TP_PROTO(struct ib_mad_agent_private *agent),
+ TP_ARGS(agent));
+DEFINE_EVENT(ib_mad_agent_template, ib_mad_create_agent,
+ TP_PROTO(struct ib_mad_agent_private *agent),
+ TP_ARGS(agent));
+DEFINE_EVENT(ib_mad_agent_template, ib_mad_unregister_agent,
+ TP_PROTO(struct ib_mad_agent_private *agent),
+ TP_ARGS(agent));
+
+
+
+DECLARE_EVENT_CLASS(ib_mad_opa_smi_template,
+ TP_PROTO(struct opa_smp *smp),
+ TP_ARGS(smp),
+
+ TP_STRUCT__entry(
+ __field(u64, mkey)
+ __field(u32, dr_slid)
+ __field(u32, dr_dlid)
+ __field(u8, hop_ptr)
+ __field(u8, hop_cnt)
+ __array(u8, initial_path, OPA_SMP_MAX_PATH_HOPS)
+ __array(u8, return_path, OPA_SMP_MAX_PATH_HOPS)
+ ),
+
+ TP_fast_assign(
+ __entry->hop_ptr = smp->hop_ptr;
+ __entry->hop_cnt = smp->hop_cnt;
+ __entry->mkey = smp->mkey;
+ __entry->dr_slid = smp->route.dr.dr_slid;
+ __entry->dr_dlid = smp->route.dr.dr_dlid;
+ memcpy(__entry->initial_path, smp->route.dr.initial_path,
+ OPA_SMP_MAX_PATH_HOPS);
+ memcpy(__entry->return_path, smp->route.dr.return_path,
+ OPA_SMP_MAX_PATH_HOPS);
+ ),
+
+ TP_printk("OPA SMP: hop_ptr %d hop_cnt %d " \
+ "mkey 0x%016llx dr_slid 0x%08x dr_dlid 0x%08x " \
+ "initial_path %*ph return_path %*ph ",
+ __entry->hop_ptr, __entry->hop_cnt,
+ be64_to_cpu(__entry->mkey), be32_to_cpu(__entry->dr_slid),
+ be32_to_cpu(__entry->dr_dlid),
+ OPA_SMP_MAX_PATH_HOPS, __entry->initial_path,
+ OPA_SMP_MAX_PATH_HOPS, __entry->return_path
+ )
+);
+
+DEFINE_EVENT(ib_mad_opa_smi_template, ib_mad_handle_opa_smi,
+ TP_PROTO(struct opa_smp *smp),
+ TP_ARGS(smp));
+DEFINE_EVENT(ib_mad_opa_smi_template, ib_mad_handle_out_opa_smi,
+ TP_PROTO(struct opa_smp *smp),
+ TP_ARGS(smp));
+
+
+DECLARE_EVENT_CLASS(ib_mad_opa_ib_template,
+ TP_PROTO(struct ib_smp *smp),
+ TP_ARGS(smp),
+
+ TP_STRUCT__entry(
+ __field(u64, mkey)
+ __field(u32, dr_slid)
+ __field(u32, dr_dlid)
+ __field(u8, hop_ptr)
+ __field(u8, hop_cnt)
+ __array(u8, initial_path, IB_SMP_MAX_PATH_HOPS)
+ __array(u8, return_path, IB_SMP_MAX_PATH_HOPS)
+ ),
+
+ TP_fast_assign(
+ __entry->hop_ptr = smp->hop_ptr;
+ __entry->hop_cnt = smp->hop_cnt;
+ __entry->mkey = smp->mkey;
+ __entry->dr_slid = smp->dr_slid;
+ __entry->dr_dlid = smp->dr_dlid;
+ memcpy(__entry->initial_path, smp->initial_path,
+ IB_SMP_MAX_PATH_HOPS);
+ memcpy(__entry->return_path, smp->return_path,
+ IB_SMP_MAX_PATH_HOPS);
+ ),
+
+ TP_printk("OPA SMP: hop_ptr %d hop_cnt %d " \
+ "mkey 0x%016llx dr_slid 0x%04x dr_dlid 0x%04x " \
+ "initial_path %*ph return_path %*ph ",
+ __entry->hop_ptr, __entry->hop_cnt,
+ be64_to_cpu(__entry->mkey), be16_to_cpu(__entry->dr_slid),
+ be16_to_cpu(__entry->dr_dlid),
+ IB_SMP_MAX_PATH_HOPS, __entry->initial_path,
+ IB_SMP_MAX_PATH_HOPS, __entry->return_path
+ )
+);
+
+DEFINE_EVENT(ib_mad_opa_ib_template, ib_mad_handle_ib_smi,
+ TP_PROTO(struct ib_smp *smp),
+ TP_ARGS(smp));
+DEFINE_EVENT(ib_mad_opa_ib_template, ib_mad_handle_out_ib_smi,
+ TP_PROTO(struct ib_smp *smp),
+ TP_ARGS(smp));
+
+#endif /* _TRACE_IB_MAD_H */
+
+#include <trace/define_trace.h>
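The header forward-declares create_mad_addr_info(), so the .c file that instantiates these tracepoints has to supply it. A largely structural sketch of that consumer side follows; the helper body and the call site are placeholders, not the patch's actual implementation.

#define CREATE_TRACE_POINTS
#include <trace/events/ib_mad.h>

/* Fills the address-related fields (dlid, sl, pkey, rqpn, rqkey) of the entry. */
static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
				 struct ib_mad_qp_info *qp_info,
				 struct trace_event_raw_ib_mad_send_template *entry)
{
	/* ... placeholder: resolve the WR's address handle and copy the fields ... */
}

/* In the send path, once a work request is about to be posted: */
static void ib_mad_send_trace_sketch(struct ib_mad_send_wr_private *wr,
				     struct ib_mad_qp_info *qp_info)
{
	trace_ib_mad_ib_send_mad(wr, qp_info);
}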
diff --git a/include/trace/events/ib_umad.h b/include/trace/events/ib_umad.h
new file mode 100644
index 000000000000..c393a19a0f60
--- /dev/null
+++ b/include/trace/events/ib_umad.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+
+/*
+ * Copyright (c) 2018 Intel Corporation. All rights reserved.
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ib_umad
+
+#if !defined(_TRACE_IB_UMAD_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_IB_UMAD_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(ib_umad_template,
+ TP_PROTO(struct ib_umad_file *file, struct ib_user_mad_hdr *umad_hdr,
+ struct ib_mad_hdr *mad_hdr),
+ TP_ARGS(file, umad_hdr, mad_hdr),
+
+ TP_STRUCT__entry(
+ __field(u8, port_num)
+ __field(u8, sl)
+ __field(u8, path_bits)
+ __field(u8, grh_present)
+ __field(u32, id)
+ __field(u32, status)
+ __field(u32, timeout_ms)
+ __field(u32, retires)
+ __field(u32, length)
+ __field(u32, qpn)
+ __field(u32, qkey)
+ __field(u8, gid_index)
+ __field(u8, hop_limit)
+ __field(u16, lid)
+ __field(u16, attr_id)
+ __field(u16, pkey_index)
+ __field(u8, base_version)
+ __field(u8, mgmt_class)
+ __field(u8, class_version)
+ __field(u8, method)
+ __field(u32, flow_label)
+ __field(u16, mad_status)
+ __field(u16, class_specific)
+ __field(u32, attr_mod)
+ __field(u64, tid)
+ __array(u8, gid, 16)
+ __field(u32, dev_index)
+ __field(u8, traffic_class)
+ ),
+
+ TP_fast_assign(
+ __entry->dev_index = file->port->ib_dev->index;
+ __entry->port_num = file->port->port_num;
+
+ __entry->id = umad_hdr->id;
+ __entry->status = umad_hdr->status;
+ __entry->timeout_ms = umad_hdr->timeout_ms;
+ __entry->retires = umad_hdr->retries;
+ __entry->length = umad_hdr->length;
+ __entry->qpn = umad_hdr->qpn;
+ __entry->qkey = umad_hdr->qkey;
+ __entry->lid = umad_hdr->lid;
+ __entry->sl = umad_hdr->sl;
+ __entry->path_bits = umad_hdr->path_bits;
+ __entry->grh_present = umad_hdr->grh_present;
+ __entry->gid_index = umad_hdr->gid_index;
+ __entry->hop_limit = umad_hdr->hop_limit;
+ __entry->traffic_class = umad_hdr->traffic_class;
+ memcpy(__entry->gid, umad_hdr->gid, sizeof(umad_hdr->gid));
+ __entry->flow_label = umad_hdr->flow_label;
+ __entry->pkey_index = umad_hdr->pkey_index;
+
+ __entry->base_version = mad_hdr->base_version;
+ __entry->mgmt_class = mad_hdr->mgmt_class;
+ __entry->class_version = mad_hdr->class_version;
+ __entry->method = mad_hdr->method;
+ __entry->mad_status = mad_hdr->status;
+ __entry->class_specific = mad_hdr->class_specific;
+ __entry->tid = mad_hdr->tid;
+ __entry->attr_id = mad_hdr->attr_id;
+ __entry->attr_mod = mad_hdr->attr_mod;
+ ),
+
+ TP_printk("%d:%d umad_hdr: id 0x%08x status 0x%08x ms %u ret %u " \
+ "len %u QP%u qkey 0x%08x lid 0x%04x sl %u path_bits 0x%x " \
+ "grh 0x%x gidi %u hop_lim %u traf_cl %u gid %pI6c " \
+ "flow 0x%08x pkeyi %u MAD: base_ver 0x%x class 0x%x " \
+ "class_ver 0x%x method 0x%x status 0x%04x " \
+ "class_specific 0x%04x tid 0x%016llx attr_id 0x%04x " \
+ "attr_mod 0x%08x ",
+ __entry->dev_index, __entry->port_num,
+ __entry->id, __entry->status, __entry->timeout_ms,
+ __entry->retires, __entry->length, be32_to_cpu(__entry->qpn),
+ be32_to_cpu(__entry->qkey), be16_to_cpu(__entry->lid),
+ __entry->sl, __entry->path_bits, __entry->grh_present,
+ __entry->gid_index, __entry->hop_limit,
+ __entry->traffic_class, &__entry->gid,
+ be32_to_cpu(__entry->flow_label), __entry->pkey_index,
+ __entry->base_version, __entry->mgmt_class,
+ __entry->class_version, __entry->method,
+ be16_to_cpu(__entry->mad_status),
+ be16_to_cpu(__entry->class_specific),
+ be64_to_cpu(__entry->tid), be16_to_cpu(__entry->attr_id),
+ be32_to_cpu(__entry->attr_mod)
+ )
+);
+
+DEFINE_EVENT(ib_umad_template, ib_umad_write,
+ TP_PROTO(struct ib_umad_file *file, struct ib_user_mad_hdr *umad_hdr,
+ struct ib_mad_hdr *mad_hdr),
+ TP_ARGS(file, umad_hdr, mad_hdr));
+
+DEFINE_EVENT(ib_umad_template, ib_umad_read_recv,
+ TP_PROTO(struct ib_umad_file *file, struct ib_user_mad_hdr *umad_hdr,
+ struct ib_mad_hdr *mad_hdr),
+ TP_ARGS(file, umad_hdr, mad_hdr));
+
+DEFINE_EVENT(ib_umad_template, ib_umad_read_send,
+ TP_PROTO(struct ib_umad_file *file, struct ib_user_mad_hdr *umad_hdr,
+ struct ib_mad_hdr *mad_hdr),
+ TP_ARGS(file, umad_hdr, mad_hdr));
+
+#endif /* _TRACE_IB_UMAD_H */
+
+#include <trace/define_trace.h>
diff --git a/include/trace/events/mlxsw.h b/include/trace/events/mlxsw.h
index 6a4cfaef33a2..19a25ed323a5 100644
--- a/include/trace/events/mlxsw.h
+++ b/include/trace/events/mlxsw.h
@@ -93,7 +93,7 @@ TRACE_EVENT(mlxsw_sp_acl_tcam_vregion_migrate_end,
__entry->mlxsw_sp, __entry->vregion)
);
-TRACE_EVENT(mlxsw_sp_acl_tcam_vregion_rehash_dis,
+TRACE_EVENT(mlxsw_sp_acl_tcam_vregion_rehash_rollback_failed,
TP_PROTO(const struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_acl_tcam_vregion *vregion),
diff --git a/include/trace/events/nbd.h b/include/trace/events/nbd.h
new file mode 100644
index 000000000000..9849956f34d8
--- /dev/null
+++ b/include/trace/events/nbd.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM nbd
+
+#if !defined(_TRACE_NBD_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_NBD_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(nbd_transport_event,
+
+ TP_PROTO(struct request *req, u64 handle),
+
+ TP_ARGS(req, handle),
+
+ TP_STRUCT__entry(
+ __field(struct request *, req)
+ __field(u64, handle)
+ ),
+
+ TP_fast_assign(
+ __entry->req = req;
+ __entry->handle = handle;
+ ),
+
+ TP_printk(
+ "nbd transport event: request %p, handle 0x%016llx",
+ __entry->req,
+ __entry->handle
+ )
+);
+
+DEFINE_EVENT(nbd_transport_event, nbd_header_sent,
+
+ TP_PROTO(struct request *req, u64 handle),
+
+ TP_ARGS(req, handle)
+);
+
+DEFINE_EVENT(nbd_transport_event, nbd_payload_sent,
+
+ TP_PROTO(struct request *req, u64 handle),
+
+ TP_ARGS(req, handle)
+);
+
+DEFINE_EVENT(nbd_transport_event, nbd_header_received,
+
+ TP_PROTO(struct request *req, u64 handle),
+
+ TP_ARGS(req, handle)
+);
+
+DEFINE_EVENT(nbd_transport_event, nbd_payload_received,
+
+ TP_PROTO(struct request *req, u64 handle),
+
+ TP_ARGS(req, handle)
+);
+
+DECLARE_EVENT_CLASS(nbd_send_request,
+
+ TP_PROTO(struct nbd_request *nbd_request, int index,
+ struct request *rq),
+
+ TP_ARGS(nbd_request, index, rq),
+
+ TP_STRUCT__entry(
+ __field(struct nbd_request *, nbd_request)
+ __field(u64, dev_index)
+ __field(struct request *, request)
+ ),
+
+ TP_fast_assign(
+ __entry->nbd_request = 0;
+ __entry->dev_index = index;
+ __entry->request = rq;
+ ),
+
+ TP_printk("nbd%lld: request %p", __entry->dev_index, __entry->request)
+);
+
+#ifdef DEFINE_EVENT_WRITABLE
+#undef NBD_DEFINE_EVENT
+#define NBD_DEFINE_EVENT(template, call, proto, args, size) \
+ DEFINE_EVENT_WRITABLE(template, call, PARAMS(proto), \
+ PARAMS(args), size)
+#else
+#undef NBD_DEFINE_EVENT
+#define NBD_DEFINE_EVENT(template, call, proto, args, size) \
+ DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args))
+#endif
+
+NBD_DEFINE_EVENT(nbd_send_request, nbd_send_request,
+
+ TP_PROTO(struct nbd_request *nbd_request, int index,
+ struct request *rq),
+
+ TP_ARGS(nbd_request, index, rq),
+
+ sizeof(struct nbd_request)
+);
+
+#endif
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
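A minimal sketch of the producer side in the nbd driver: the transport events are keyed by the request plus the 64-bit cookie that goes into the wire header (how that handle is built is an assumption here). NBD_DEFINE_EVENT() above simply falls back to a plain DEFINE_EVENT() unless the writable-tracepoint variant is available.

#define CREATE_TRACE_POINTS
#include <trace/events/nbd.h>

static void nbd_send_trace_sketch(struct request *req, u64 handle)
{
	trace_nbd_header_sent(req, handle);	/* request header written to socket */

	/* ... send the data payload, if any ... */

	trace_nbd_payload_sent(req, handle);
}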
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index 1efd7d9b25fe..2399073c3afc 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -95,6 +95,29 @@ TRACE_EVENT(net_dev_xmit,
__get_str(name), __entry->skbaddr, __entry->len, __entry->rc)
);
+TRACE_EVENT(net_dev_xmit_timeout,
+
+ TP_PROTO(struct net_device *dev,
+ int queue_index),
+
+ TP_ARGS(dev, queue_index),
+
+ TP_STRUCT__entry(
+ __string( name, dev->name )
+ __string( driver, netdev_drivername(dev))
+ __field( int, queue_index )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, dev->name);
+ __assign_str(driver, netdev_drivername(dev));
+ __entry->queue_index = queue_index;
+ ),
+
+ TP_printk("dev=%s driver=%s queue=%d",
+ __get_str(name), __get_str(driver), __entry->queue_index)
+);
+
DECLARE_EVENT_CLASS(net_dev_template,
TP_PROTO(struct sk_buff *skb),
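A minimal sketch of the intended producer of net_dev_xmit_timeout, presumably the netdev watchdog reporting which TX queue stalled; the scan loop and stall test are assumptions.

#include <linux/netdevice.h>
#include <trace/events/net.h>

static void tx_timeout_report_sketch(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (netif_xmit_stopped(txq))	/* this queue looks stuck */
			trace_net_dev_xmit_timeout(dev, i);
	}
}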
diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h
index 9a0d4ceeb166..95fba0471e5b 100644
--- a/include/trace/events/preemptirq.h
+++ b/include/trace/events/preemptirq.h
@@ -27,7 +27,7 @@ DECLARE_EVENT_CLASS(preemptirq_template,
__entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext);
),
- TP_printk("caller=%pF parent=%pF",
+ TP_printk("caller=%pS parent=%pS",
(void *)((unsigned long)(_stext) + __entry->caller_offs),
(void *)((unsigned long)(_stext) + __entry->parent_offs))
);
diff --git a/include/trace/events/random.h b/include/trace/events/random.h
index 0560dfc33f1c..32c10a515e2d 100644
--- a/include/trace/events/random.h
+++ b/include/trace/events/random.h
@@ -62,15 +62,14 @@ DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
TRACE_EVENT(credit_entropy_bits,
TP_PROTO(const char *pool_name, int bits, int entropy_count,
- int entropy_total, unsigned long IP),
+ unsigned long IP),
- TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP),
+ TP_ARGS(pool_name, bits, entropy_count, IP),
TP_STRUCT__entry(
__field( const char *, pool_name )
__field( int, bits )
__field( int, entropy_count )
- __field( int, entropy_total )
__field(unsigned long, IP )
),
@@ -78,14 +77,12 @@ TRACE_EVENT(credit_entropy_bits,
__entry->pool_name = pool_name;
__entry->bits = bits;
__entry->entropy_count = entropy_count;
- __entry->entropy_total = entropy_total;
__entry->IP = IP;
),
- TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
- "caller %pS", __entry->pool_name, __entry->bits,
- __entry->entropy_count, __entry->entropy_total,
- (void *)__entry->IP)
+ TP_printk("%s pool: bits %d entropy_count %d caller %pS",
+ __entry->pool_name, __entry->bits,
+ __entry->entropy_count, (void *)__entry->IP)
);
TRACE_EVENT(push_to_pool,
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index f0c4d10e614b..80339fd14c1c 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -491,7 +491,7 @@ TRACE_EVENT(rcu_callback,
__entry->qlen = qlen;
),
- TP_printk("%s rhp=%p func=%pf %ld/%ld",
+ TP_printk("%s rhp=%p func=%ps %ld/%ld",
__entry->rcuname, __entry->rhp, __entry->func,
__entry->qlen_lazy, __entry->qlen)
);
@@ -587,7 +587,7 @@ TRACE_EVENT(rcu_invoke_callback,
__entry->func = rhp->func;
),
- TP_printk("%s rhp=%p func=%pf",
+ TP_printk("%s rhp=%p func=%ps",
__entry->rcuname, __entry->rhp, __entry->func)
);
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index 962975b4313f..df9851cb82b2 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -511,6 +511,33 @@ TRACE_EVENT(xprtrdma_marshal,
)
);
+TRACE_EVENT(xprtrdma_marshal_failed,
+ TP_PROTO(const struct rpc_rqst *rqst,
+ int ret
+ ),
+
+ TP_ARGS(rqst, ret),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(u32, xid)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = rqst->rq_task->tk_pid;
+ __entry->client_id = rqst->rq_task->tk_client->cl_clid;
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->ret = ret;
+ ),
+
+ TP_printk("task:%u@%u xid=0x%08x: ret=%d",
+ __entry->task_id, __entry->client_id, __entry->xid,
+ __entry->ret
+ )
+);
+
TRACE_EVENT(xprtrdma_post_send,
TP_PROTO(
const struct rpcrdma_req *req,
diff --git a/include/trace/events/spi.h b/include/trace/events/spi.h
index aef6869f563d..0dd9171d2ad8 100644
--- a/include/trace/events/spi.h
+++ b/include/trace/events/spi.h
@@ -131,9 +131,11 @@ DECLARE_EVENT_CLASS(spi_transfer,
__field( struct spi_transfer *, xfer )
__field( int, len )
__dynamic_array(u8, rx_buf,
- spi_valid_rxbuf(msg, xfer) ? xfer->len : 0)
+ spi_valid_rxbuf(msg, xfer) ?
+ (xfer->len < 64 ? xfer->len : 64) : 0)
__dynamic_array(u8, tx_buf,
- spi_valid_txbuf(msg, xfer) ? xfer->len : 0)
+ spi_valid_txbuf(msg, xfer) ?
+ (xfer->len < 64 ? xfer->len : 64) : 0)
),
TP_fast_assign(
@@ -144,11 +146,11 @@ DECLARE_EVENT_CLASS(spi_transfer,
if (spi_valid_txbuf(msg, xfer))
memcpy(__get_dynamic_array(tx_buf),
- xfer->tx_buf, xfer->len);
+ xfer->tx_buf, __get_dynamic_array_len(tx_buf));
if (spi_valid_rxbuf(msg, xfer))
memcpy(__get_dynamic_array(rx_buf),
- xfer->rx_buf, xfer->len);
+ xfer->rx_buf, __get_dynamic_array_len(rx_buf));
),
TP_printk("spi%d.%d %p len=%d tx=[%*phD] rx=[%*phD]",
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 7e899e635d33..ffa3c51dbb1a 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -82,7 +82,6 @@ TRACE_DEFINE_ENUM(RPC_TASK_SWAPPER);
TRACE_DEFINE_ENUM(RPC_CALL_MAJORSEEN);
TRACE_DEFINE_ENUM(RPC_TASK_ROOTCREDS);
TRACE_DEFINE_ENUM(RPC_TASK_DYNAMIC);
-TRACE_DEFINE_ENUM(RPC_TASK_KILLED);
TRACE_DEFINE_ENUM(RPC_TASK_SOFT);
TRACE_DEFINE_ENUM(RPC_TASK_SOFTCONN);
TRACE_DEFINE_ENUM(RPC_TASK_SENT);
@@ -97,7 +96,6 @@ TRACE_DEFINE_ENUM(RPC_TASK_NO_RETRANS_TIMEOUT);
{ RPC_CALL_MAJORSEEN, "MAJORSEEN" }, \
{ RPC_TASK_ROOTCREDS, "ROOTCREDS" }, \
{ RPC_TASK_DYNAMIC, "DYNAMIC" }, \
- { RPC_TASK_KILLED, "KILLED" }, \
{ RPC_TASK_SOFT, "SOFT" }, \
{ RPC_TASK_SOFTCONN, "SOFTCONN" }, \
{ RPC_TASK_SENT, "SENT" }, \
@@ -111,6 +109,7 @@ TRACE_DEFINE_ENUM(RPC_TASK_ACTIVE);
TRACE_DEFINE_ENUM(RPC_TASK_NEED_XMIT);
TRACE_DEFINE_ENUM(RPC_TASK_NEED_RECV);
TRACE_DEFINE_ENUM(RPC_TASK_MSG_PIN_WAIT);
+TRACE_DEFINE_ENUM(RPC_TASK_SIGNALLED);
#define rpc_show_runstate(flags) \
__print_flags(flags, "|", \
@@ -119,7 +118,8 @@ TRACE_DEFINE_ENUM(RPC_TASK_MSG_PIN_WAIT);
{ (1UL << RPC_TASK_ACTIVE), "ACTIVE" }, \
{ (1UL << RPC_TASK_NEED_XMIT), "NEED_XMIT" }, \
{ (1UL << RPC_TASK_NEED_RECV), "NEED_RECV" }, \
- { (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" })
+ { (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" }, \
+ { (1UL << RPC_TASK_SIGNALLED), "SIGNALLED" })
DECLARE_EVENT_CLASS(rpc_task_running,
@@ -146,7 +146,7 @@ DECLARE_EVENT_CLASS(rpc_task_running,
__entry->flags = task->tk_flags;
),
- TP_printk("task:%u@%d flags=%s runstate=%s status=%d action=%pf",
+ TP_printk("task:%u@%d flags=%s runstate=%s status=%d action=%ps",
__entry->task_id, __entry->client_id,
rpc_show_task_flags(__entry->flags),
rpc_show_runstate(__entry->runstate),
@@ -186,7 +186,7 @@ DECLARE_EVENT_CLASS(rpc_task_queued,
__entry->client_id = task->tk_client ?
task->tk_client->cl_clid : -1;
__entry->task_id = task->tk_pid;
- __entry->timeout = task->tk_timeout;
+ __entry->timeout = rpc_task_timeout(task);
__entry->runstate = task->tk_runstate;
__entry->status = task->tk_status;
__entry->flags = task->tk_flags;
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
index 44a3259ed4a5..b6e0cbc2c71f 100644
--- a/include/trace/events/syscalls.h
+++ b/include/trace/events/syscalls.h
@@ -28,7 +28,7 @@ TRACE_EVENT_FN(sys_enter,
TP_fast_assign(
__entry->id = id;
- syscall_get_arguments(current, regs, 0, 6, __entry->args);
+ syscall_get_arguments(current, regs, __entry->args);
),
TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)",
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index a57e4ee989d6..b7a904825e7d 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -73,7 +73,7 @@ TRACE_EVENT(timer_start,
__entry->flags = flags;
),
- TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s",
+ TP_printk("timer=%p function=%ps expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s",
__entry->timer, __entry->function, __entry->expires,
(long)__entry->expires - __entry->now,
__entry->flags & TIMER_CPUMASK,
@@ -89,23 +89,27 @@ TRACE_EVENT(timer_start,
*/
TRACE_EVENT(timer_expire_entry,
- TP_PROTO(struct timer_list *timer),
+ TP_PROTO(struct timer_list *timer, unsigned long baseclk),
- TP_ARGS(timer),
+ TP_ARGS(timer, baseclk),
TP_STRUCT__entry(
__field( void *, timer )
__field( unsigned long, now )
__field( void *, function)
+ __field( unsigned long, baseclk )
),
TP_fast_assign(
__entry->timer = timer;
__entry->now = jiffies;
__entry->function = timer->function;
+ __entry->baseclk = baseclk;
),
- TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function,__entry->now)
+ TP_printk("timer=%p function=%ps now=%lu baseclk=%lu",
+ __entry->timer, __entry->function, __entry->now,
+ __entry->baseclk)
);
/**
@@ -210,7 +214,7 @@ TRACE_EVENT(hrtimer_start,
__entry->mode = mode;
),
- TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu "
+ TP_printk("hrtimer=%p function=%ps expires=%llu softexpires=%llu "
"mode=%s", __entry->hrtimer, __entry->function,
(unsigned long long) __entry->expires,
(unsigned long long) __entry->softexpires,
@@ -243,7 +247,8 @@ TRACE_EVENT(hrtimer_expire_entry,
__entry->function = hrtimer->function;
),
- TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function,
+ TP_printk("hrtimer=%p function=%ps now=%llu",
+ __entry->hrtimer, __entry->function,
(unsigned long long) __entry->now)
);
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index a1cb91342231..a5ab2973e8dc 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -27,17 +27,11 @@
{RECLAIM_WB_ASYNC, "RECLAIM_WB_ASYNC"} \
) : "RECLAIM_WB_NONE"
-#define trace_reclaim_flags(page) ( \
- (page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
+#define trace_reclaim_flags(file) ( \
+ (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
(RECLAIM_WB_ASYNC) \
)
-#define trace_shrink_flags(file) \
- ( \
- (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
- (RECLAIM_WB_ASYNC) \
- )
-
TRACE_EVENT(mm_vmscan_kswapd_sleep,
TP_PROTO(int nid),
@@ -73,7 +67,9 @@ TRACE_EVENT(mm_vmscan_kswapd_wake,
__entry->order = order;
),
- TP_printk("nid=%d zid=%d order=%d", __entry->nid, __entry->zid, __entry->order)
+ TP_printk("nid=%d order=%d",
+ __entry->nid,
+ __entry->order)
);
TRACE_EVENT(mm_vmscan_wakeup_kswapd,
@@ -96,60 +92,53 @@ TRACE_EVENT(mm_vmscan_wakeup_kswapd,
__entry->gfp_flags = gfp_flags;
),
- TP_printk("nid=%d zid=%d order=%d gfp_flags=%s",
+ TP_printk("nid=%d order=%d gfp_flags=%s",
__entry->nid,
- __entry->zid,
__entry->order,
show_gfp_flags(__entry->gfp_flags))
);
DECLARE_EVENT_CLASS(mm_vmscan_direct_reclaim_begin_template,
- TP_PROTO(int order, int may_writepage, gfp_t gfp_flags, int classzone_idx),
+ TP_PROTO(int order, gfp_t gfp_flags),
- TP_ARGS(order, may_writepage, gfp_flags, classzone_idx),
+ TP_ARGS(order, gfp_flags),
TP_STRUCT__entry(
__field( int, order )
- __field( int, may_writepage )
__field( gfp_t, gfp_flags )
- __field( int, classzone_idx )
),
TP_fast_assign(
__entry->order = order;
- __entry->may_writepage = may_writepage;
__entry->gfp_flags = gfp_flags;
- __entry->classzone_idx = classzone_idx;
),
- TP_printk("order=%d may_writepage=%d gfp_flags=%s classzone_idx=%d",
+ TP_printk("order=%d gfp_flags=%s",
__entry->order,
- __entry->may_writepage,
- show_gfp_flags(__entry->gfp_flags),
- __entry->classzone_idx)
+ show_gfp_flags(__entry->gfp_flags))
);
DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_direct_reclaim_begin,
- TP_PROTO(int order, int may_writepage, gfp_t gfp_flags, int classzone_idx),
+ TP_PROTO(int order, gfp_t gfp_flags),
- TP_ARGS(order, may_writepage, gfp_flags, classzone_idx)
+ TP_ARGS(order, gfp_flags)
);
#ifdef CONFIG_MEMCG
DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_reclaim_begin,
- TP_PROTO(int order, int may_writepage, gfp_t gfp_flags, int classzone_idx),
+ TP_PROTO(int order, gfp_t gfp_flags),
- TP_ARGS(order, may_writepage, gfp_flags, classzone_idx)
+ TP_ARGS(order, gfp_flags)
);
DEFINE_EVENT(mm_vmscan_direct_reclaim_begin_template, mm_vmscan_memcg_softlimit_reclaim_begin,
- TP_PROTO(int order, int may_writepage, gfp_t gfp_flags, int classzone_idx),
+ TP_PROTO(int order, gfp_t gfp_flags),
- TP_ARGS(order, may_writepage, gfp_flags, classzone_idx)
+ TP_ARGS(order, gfp_flags)
);
#endif /* CONFIG_MEMCG */
@@ -226,7 +215,7 @@ TRACE_EVENT(mm_shrink_slab_start,
__entry->priority = priority;
),
- TP_printk("%pF %p: nid: %d objects to shrink %ld gfp_flags %s cache items %ld delta %lld total_scan %ld priority %d",
+ TP_printk("%pS %p: nid: %d objects to shrink %ld gfp_flags %s cache items %ld delta %lld total_scan %ld priority %d",
__entry->shrink,
__entry->shr,
__entry->nid,
@@ -265,7 +254,7 @@ TRACE_EVENT(mm_shrink_slab_end,
__entry->total_scan = total_scan;
),
- TP_printk("%pF %p: nid: %d unused scan count %ld new scan count %ld total_scan %ld last shrinker return val %d",
+ TP_printk("%pS %p: nid: %d unused scan count %ld new scan count %ld total_scan %ld last shrinker return val %d",
__entry->shrink,
__entry->shr,
__entry->nid,
@@ -333,7 +322,8 @@ TRACE_EVENT(mm_vmscan_writepage,
TP_fast_assign(
__entry->pfn = page_to_pfn(page);
- __entry->reclaim_flags = trace_reclaim_flags(page);
+ __entry->reclaim_flags = trace_reclaim_flags(
+ page_is_file_cache(page));
),
TP_printk("page=%p pfn=%lu flags=%s",
@@ -358,7 +348,8 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
__field(unsigned long, nr_writeback)
__field(unsigned long, nr_congested)
__field(unsigned long, nr_immediate)
- __field(unsigned long, nr_activate)
+ __field(unsigned int, nr_activate0)
+ __field(unsigned int, nr_activate1)
__field(unsigned long, nr_ref_keep)
__field(unsigned long, nr_unmap_fail)
__field(int, priority)
@@ -373,20 +364,22 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
__entry->nr_writeback = stat->nr_writeback;
__entry->nr_congested = stat->nr_congested;
__entry->nr_immediate = stat->nr_immediate;
- __entry->nr_activate = stat->nr_activate;
+ __entry->nr_activate0 = stat->nr_activate[0];
+ __entry->nr_activate1 = stat->nr_activate[1];
__entry->nr_ref_keep = stat->nr_ref_keep;
__entry->nr_unmap_fail = stat->nr_unmap_fail;
__entry->priority = priority;
- __entry->reclaim_flags = trace_shrink_flags(file);
+ __entry->reclaim_flags = trace_reclaim_flags(file);
),
- TP_printk("nid=%d nr_scanned=%ld nr_reclaimed=%ld nr_dirty=%ld nr_writeback=%ld nr_congested=%ld nr_immediate=%ld nr_activate=%ld nr_ref_keep=%ld nr_unmap_fail=%ld priority=%d flags=%s",
+ TP_printk("nid=%d nr_scanned=%ld nr_reclaimed=%ld nr_dirty=%ld nr_writeback=%ld nr_congested=%ld nr_immediate=%ld nr_activate_anon=%d nr_activate_file=%d nr_ref_keep=%ld nr_unmap_fail=%ld priority=%d flags=%s",
__entry->nid,
__entry->nr_scanned, __entry->nr_reclaimed,
__entry->nr_dirty, __entry->nr_writeback,
__entry->nr_congested, __entry->nr_immediate,
- __entry->nr_activate, __entry->nr_ref_keep,
- __entry->nr_unmap_fail, __entry->priority,
+ __entry->nr_activate0, __entry->nr_activate1,
+ __entry->nr_ref_keep, __entry->nr_unmap_fail,
+ __entry->priority,
show_reclaim_flags(__entry->reclaim_flags))
);
@@ -415,7 +408,7 @@ TRACE_EVENT(mm_vmscan_lru_shrink_active,
__entry->nr_deactivated = nr_deactivated;
__entry->nr_referenced = nr_referenced;
__entry->priority = priority;
- __entry->reclaim_flags = trace_shrink_flags(file);
+ __entry->reclaim_flags = trace_reclaim_flags(file);
),
TP_printk("nid=%d nr_taken=%ld nr_active=%ld nr_deactivated=%ld nr_referenced=%ld priority=%d flags=%s",
@@ -454,7 +447,8 @@ TRACE_EVENT(mm_vmscan_inactive_list_is_low,
__entry->total_active = total_active;
__entry->active = active;
__entry->ratio = ratio;
- __entry->reclaim_flags = trace_shrink_flags(file) & RECLAIM_WB_LRU;
+ __entry->reclaim_flags = trace_reclaim_flags(file) &
+ RECLAIM_WB_LRU;
),
TP_printk("nid=%d reclaim_idx=%d total_inactive=%ld inactive=%ld total_active=%ld active=%ld ratio=%ld flags=%s",
@@ -465,6 +459,38 @@ TRACE_EVENT(mm_vmscan_inactive_list_is_low,
__entry->ratio,
show_reclaim_flags(__entry->reclaim_flags))
);
+
+TRACE_EVENT(mm_vmscan_node_reclaim_begin,
+
+ TP_PROTO(int nid, int order, gfp_t gfp_flags),
+
+ TP_ARGS(nid, order, gfp_flags),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ __field(int, order)
+ __field(gfp_t, gfp_flags)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = nid;
+ __entry->order = order;
+ __entry->gfp_flags = gfp_flags;
+ ),
+
+ TP_printk("nid=%d order=%d gfp_flags=%s",
+ __entry->nid,
+ __entry->order,
+ show_gfp_flags(__entry->gfp_flags))
+);
+
+DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_node_reclaim_end,
+
+ TP_PROTO(unsigned long nr_reclaimed),
+
+ TP_ARGS(nr_reclaimed)
+);
+
#endif /* _TRACE_VMSCAN_H */
/* This part must be outside protection */
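A minimal sketch of the producer pair for the new node-reclaim events, presumably bracketing a per-node reclaim attempt; the placement and the reclaim body are assumptions.

#include <linux/mmzone.h>
#include <trace/events/vmscan.h>

static unsigned long node_reclaim_sketch(struct pglist_data *pgdat,
					 gfp_t gfp_mask, unsigned int order)
{
	unsigned long nr_reclaimed = 0;

	trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order, gfp_mask);

	/* ... placeholder: shrink this node's LRU lists and slab caches ... */

	trace_mm_vmscan_node_reclaim_end(nr_reclaimed);
	return nr_reclaimed;
}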
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
index 9a761bc6a251..e172549283be 100644
--- a/include/trace/events/workqueue.h
+++ b/include/trace/events/workqueue.h
@@ -60,7 +60,7 @@ TRACE_EVENT(workqueue_queue_work,
__entry->cpu = pwq->pool->cpu;
),
- TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u",
+ TP_printk("work struct=%p function=%ps workqueue=%p req_cpu=%u cpu=%u",
__entry->work, __entry->function, __entry->workqueue,
__entry->req_cpu, __entry->cpu)
);
@@ -102,7 +102,7 @@ TRACE_EVENT(workqueue_execute_start,
__entry->function = work->func;
),
- TP_printk("work struct %p: function %pf", __entry->work, __entry->function)
+ TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);
/**
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 32db72c7c055..aa7f3aeac740 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -53,7 +53,7 @@ WB_WORK_REASON
struct wb_writeback_work;
-TRACE_EVENT(writeback_dirty_page,
+DECLARE_EVENT_CLASS(writeback_page_template,
TP_PROTO(struct page *page, struct address_space *mapping),
@@ -79,6 +79,20 @@ TRACE_EVENT(writeback_dirty_page,
)
);
+DEFINE_EVENT(writeback_page_template, writeback_dirty_page,
+
+ TP_PROTO(struct page *page, struct address_space *mapping),
+
+ TP_ARGS(page, mapping)
+);
+
+DEFINE_EVENT(writeback_page_template, wait_on_page_writeback,
+
+ TP_PROTO(struct page *page, struct address_space *mapping),
+
+ TP_ARGS(page, mapping)
+);
+
DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
TP_PROTO(struct inode *inode, int flags),
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index fdcf88bcf0ea..9a0e8af21310 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -73,7 +73,7 @@ TRACE_EVENT(xen_mc_callback,
__entry->fn = fn;
__entry->data = data;
),
- TP_printk("callback %pf, data %p",
+ TP_printk("callback %ps, data %p",
__entry->fn, __entry->data)
);
diff --git a/include/uapi/asm-generic/sockios.h b/include/uapi/asm-generic/sockios.h
index 64f658c7cec2..44fa3ed70483 100644
--- a/include/uapi/asm-generic/sockios.h
+++ b/include/uapi/asm-generic/sockios.h
@@ -8,7 +8,7 @@
#define FIOGETOWN 0x8903
#define SIOCGPGRP 0x8904
#define SIOCATMARK 0x8905
-#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
-#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
+#define SIOCGSTAMP_OLD 0x8906 /* Get stamp (timeval) */
+#define SIOCGSTAMPNS_OLD 0x8907 /* Get stamp (timespec) */
#endif /* __ASM_GENERIC_SOCKIOS_H */
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 4a53f6cfa034..4788730dbe78 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -210,6 +210,9 @@ union drm_amdgpu_bo_list {
#define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST (1<<1)
/* indicate some job from this context once cause gpu hang */
#define AMDGPU_CTX_QUERY2_FLAGS_GUILTY (1<<2)
+/* indicate some errors are detected by RAS */
+#define AMDGPU_CTX_QUERY2_FLAGS_RAS_CE (1<<3)
+#define AMDGPU_CTX_QUERY2_FLAGS_RAS_UE (1<<4)
/* Context priority level */
#define AMDGPU_CTX_PRIORITY_UNSET -2048
@@ -525,6 +528,8 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05
#define AMDGPU_CHUNK_ID_BO_HANDLES 0x06
#define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES 0x07
+#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT 0x08
+#define AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL 0x09
struct drm_amdgpu_cs_chunk {
__u32 chunk_id;
@@ -605,6 +610,12 @@ struct drm_amdgpu_cs_chunk_sem {
__u32 handle;
};
+struct drm_amdgpu_cs_chunk_syncobj {
+ __u32 handle;
+ __u32 flags;
+ __u64 point;
+};
+
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ 0
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD 1
#define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD 2
@@ -680,6 +691,7 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM 0x11
/* Subquery id: Query DMCU firmware version */
#define AMDGPU_INFO_FW_DMCU 0x12
+ #define AMDGPU_INFO_FW_TA 0x13
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
/* the used VRAM size */
@@ -733,6 +745,37 @@ struct drm_amdgpu_cs_chunk_data {
/* Number of VRAM page faults on CPU access. */
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS 0x1E
#define AMDGPU_INFO_VRAM_LOST_COUNTER 0x1F
+/* query ras mask of enabled features*/
+#define AMDGPU_INFO_RAS_ENABLED_FEATURES 0x20
+
+/* RAS MASK: UMC (VRAM) */
+#define AMDGPU_INFO_RAS_ENABLED_UMC (1 << 0)
+/* RAS MASK: SDMA */
+#define AMDGPU_INFO_RAS_ENABLED_SDMA (1 << 1)
+/* RAS MASK: GFX */
+#define AMDGPU_INFO_RAS_ENABLED_GFX (1 << 2)
+/* RAS MASK: MMHUB */
+#define AMDGPU_INFO_RAS_ENABLED_MMHUB (1 << 3)
+/* RAS MASK: ATHUB */
+#define AMDGPU_INFO_RAS_ENABLED_ATHUB (1 << 4)
+/* RAS MASK: PCIE */
+#define AMDGPU_INFO_RAS_ENABLED_PCIE (1 << 5)
+/* RAS MASK: HDP */
+#define AMDGPU_INFO_RAS_ENABLED_HDP (1 << 6)
+/* RAS MASK: XGMI */
+#define AMDGPU_INFO_RAS_ENABLED_XGMI (1 << 7)
+/* RAS MASK: DF */
+#define AMDGPU_INFO_RAS_ENABLED_DF (1 << 8)
+/* RAS MASK: SMN */
+#define AMDGPU_INFO_RAS_ENABLED_SMN (1 << 9)
+/* RAS MASK: SEM */
+#define AMDGPU_INFO_RAS_ENABLED_SEM (1 << 10)
+/* RAS MASK: MP0 */
+#define AMDGPU_INFO_RAS_ENABLED_MP0 (1 << 11)
+/* RAS MASK: MP1 */
+#define AMDGPU_INFO_RAS_ENABLED_MP1 (1 << 12)
+/* RAS MASK: FUSE */
+#define AMDGPU_INFO_RAS_ENABLED_FUSE (1 << 13)
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 300f336633f2..661d73f9a919 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -649,6 +649,7 @@ struct drm_gem_open {
#define DRM_CAP_PAGE_FLIP_TARGET 0x11
#define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12
#define DRM_CAP_SYNCOBJ 0x13
+#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
@@ -735,8 +736,18 @@ struct drm_syncobj_handle {
__u32 pad;
};
+struct drm_syncobj_transfer {
+ __u32 src_handle;
+ __u32 dst_handle;
+ __u64 src_point;
+ __u64 dst_point;
+ __u32 flags;
+ __u32 pad;
+};
+
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
struct drm_syncobj_wait {
__u64 handles;
/* absolute timeout */
@@ -747,12 +758,33 @@ struct drm_syncobj_wait {
__u32 pad;
};
+struct drm_syncobj_timeline_wait {
+ __u64 handles;
+ /* wait on specific timeline point for every handles*/
+ __u64 points;
+ /* absolute timeout */
+ __s64 timeout_nsec;
+ __u32 count_handles;
+ __u32 flags;
+ __u32 first_signaled; /* only valid when not waiting all */
+ __u32 pad;
+};
+
+
struct drm_syncobj_array {
__u64 handles;
__u32 count_handles;
__u32 pad;
};
+struct drm_syncobj_timeline_array {
+ __u64 handles;
+ __u64 points;
+ __u32 count_handles;
+ __u32 pad;
+};
+
+
/* Query current scanout sequence number */
struct drm_crtc_get_sequence {
__u32 crtc_id; /* requested crtc_id */
@@ -909,6 +941,11 @@ extern "C" {
#define DRM_IOCTL_MODE_GET_LEASE DRM_IOWR(0xC8, struct drm_mode_get_lease)
#define DRM_IOCTL_MODE_REVOKE_LEASE DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
+#define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
+#define DRM_IOCTL_SYNCOBJ_QUERY DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
+#define DRM_IOCTL_SYNCOBJ_TRANSFER DRM_IOWR(0xCC, struct drm_syncobj_transfer)
+#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
+
/**
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
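A minimal userspace sketch of DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT: block until one syncobj reaches a given timeline point or an absolute deadline passes. Using CLOCK_MONOTONIC for the absolute timeout mirrors the existing drm_syncobj_wait behaviour and is an assumption here, as is the include path of the uapi header.

#include <stdint.h>
#include <string.h>
#include <time.h>
#include <sys/ioctl.h>
#include <drm/drm.h>		/* uapi header; libdrm installs it as <libdrm/drm.h> */

/* Returns 0 when the point was reached, -1 with errno set (e.g. timeout) otherwise. */
static int wait_timeline_point(int drm_fd, uint32_t handle, uint64_t point,
			       int64_t timeout_ns)
{
	struct drm_syncobj_timeline_wait wait;
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);

	memset(&wait, 0, sizeof(wait));
	wait.handles = (uint64_t)(uintptr_t)&handle;
	wait.points = (uint64_t)(uintptr_t)&point;
	wait.count_handles = 1;
	wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT;
	wait.timeout_nsec = (int64_t)now.tv_sec * 1000000000ll +
			    now.tv_nsec + timeout_ns;	/* absolute deadline */

	return ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &wait);
}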
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index bab20298f422..3feeaa3f987a 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -144,6 +144,17 @@ extern "C" {
#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+/*
+ * Floating point 64bpp RGB
+ * IEEE 754-2008 binary16 half-precision float
+ * [15:0] sign:exponent:mantissa 1:5:10
+ */
+#define DRM_FORMAT_XRGB16161616F fourcc_code('X', 'R', '4', 'H') /* [63:0] x:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_XBGR16161616F fourcc_code('X', 'B', '4', 'H') /* [63:0] x:B:G:R 16:16:16:16 little endian */
+
+#define DRM_FORMAT_ARGB16161616F fourcc_code('A', 'R', '4', 'H') /* [63:0] A:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_ABGR16161616F fourcc_code('A', 'B', '4', 'H') /* [63:0] A:B:G:R 16:16:16:16 little endian */
+
/* packed YCbCr */
#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
@@ -151,7 +162,29 @@ extern "C" {
#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
-#define DRM_FORMAT_XYUV8888 fourcc_code('X', 'Y', 'U', 'V') /* [31:0] X:Y:Cb:Cr 8:8:8:8 little endian */
+#define DRM_FORMAT_XYUV8888 fourcc_code('X', 'Y', 'U', 'V') /* [31:0] X:Y:Cb:Cr 8:8:8:8 little endian */
+#define DRM_FORMAT_VUY888 fourcc_code('V', 'U', '2', '4') /* [23:0] Cr:Cb:Y 8:8:8 little endian */
+#define DRM_FORMAT_VUY101010 fourcc_code('V', 'U', '3', '0') /* Y followed by U then V, 10:10:10. Non-linear modifier only */
+
+/*
+ * In the packed Y2xx formats each component carries xx valid bits in the
+ * MSBs, with the remaining 16-xx bits of padding in the LSBs.
+ */
+#define DRM_FORMAT_Y210 fourcc_code('Y', '2', '1', '0') /* [63:0] Cr0:0:Y1:0:Cb0:0:Y0:0 10:6:10:6:10:6:10:6 little endian per 2 Y pixels */
+#define DRM_FORMAT_Y212 fourcc_code('Y', '2', '1', '2') /* [63:0] Cr0:0:Y1:0:Cb0:0:Y0:0 12:4:12:4:12:4:12:4 little endian per 2 Y pixels */
+#define DRM_FORMAT_Y216 fourcc_code('Y', '2', '1', '6') /* [63:0] Cr0:Y1:Cb0:Y0 16:16:16:16 little endian per 2 Y pixels */
+
+/*
+ * In the packed Y4xx formats each component carries xx valid bits in the
+ * MSBs, with the remaining 16-xx bits of padding in the LSBs, except Y410.
+ */
+#define DRM_FORMAT_Y410 fourcc_code('Y', '4', '1', '0') /* [31:0] A:Cr:Y:Cb 2:10:10:10 little endian */
+#define DRM_FORMAT_Y412 fourcc_code('Y', '4', '1', '2') /* [63:0] A:0:Cr:0:Y:0:Cb:0 12:4:12:4:12:4:12:4 little endian */
+#define DRM_FORMAT_Y416 fourcc_code('Y', '4', '1', '6') /* [63:0] A:Cr:Y:Cb 16:16:16:16 little endian */
+
+#define DRM_FORMAT_XVYU2101010 fourcc_code('X', 'V', '3', '0') /* [31:0] X:Cr:Y:Cb 2:10:10:10 little endian */
+#define DRM_FORMAT_XVYU12_16161616 fourcc_code('X', 'V', '3', '6') /* [63:0] X:0:Cr:0:Y:0:Cb:0 12:4:12:4:12:4:12:4 little endian */
+#define DRM_FORMAT_XVYU16161616 fourcc_code('X', 'V', '4', '8') /* [63:0] X:Cr:Y:Cb 16:16:16:16 little endian */
/*
* packed YCbCr420 2x2 tiled formats
@@ -168,6 +201,15 @@ extern "C" {
#define DRM_FORMAT_X0L2 fourcc_code('X', '0', 'L', '2')
/*
+ * 1-plane YUV 4:2:0
+ * In these formats, the component ordering is specified (Y, followed by U
+ * then V), but the exact Linear layout is undefined.
+ * These formats can only be used with a non-Linear modifier.
+ */
+#define DRM_FORMAT_YUV420_8BIT fourcc_code('Y', 'U', '0', '8')
+#define DRM_FORMAT_YUV420_10BIT fourcc_code('Y', 'U', '1', '0')
+
+/*
* 2 plane RGB + A
* index 0 = RGB plane, same format as the corresponding non _A8 format has
* index 1 = A plane, [7:0] A
@@ -200,6 +242,13 @@ extern "C" {
* index 0 = Y plane, [15:0] Y:x [10:6] little endian
* index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian
*/
+#define DRM_FORMAT_P210 fourcc_code('P', '2', '1', '0') /* 2x1 subsampled Cr:Cb plane, 10 bit per channel */
+
+/*
+ * 2 plane YCbCr MSB aligned
+ * index 0 = Y plane, [15:0] Y:x [10:6] little endian
+ * index 1 = Cr:Cb plane, [31:0] Cr:x:Cb:x [10:6:10:6] little endian
+ */
#define DRM_FORMAT_P010 fourcc_code('P', '0', '1', '0') /* 2x2 subsampled Cr:Cb plane 10 bits per channel */
/*
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index a439c2e67896..83cd1636b9be 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -33,7 +33,6 @@
extern "C" {
#endif
-#define DRM_DISPLAY_INFO_LEN 32
#define DRM_CONNECTOR_NAME_LEN 32
#define DRM_DISPLAY_MODE_LEN 32
#define DRM_PROP_NAME_LEN 32
@@ -622,7 +621,8 @@ struct drm_color_ctm {
struct drm_color_lut {
/*
- * Data is U0.16 fixed point format.
+ * Values are mapped linearly to 0.0 - 1.0 range, with 0x0 == 0.0 and
+ * 0xffff == 1.0.
*/
__u16 red;
__u16 green;
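The reworded comment pins down the convention: each channel is an unsigned 16-bit value mapped linearly onto [0.0, 1.0]. A tiny sketch of that mapping (hypothetical helper names, not part of the patch):

#include <stdint.h>

/* Map a float in [0.0, 1.0] onto the drm_color_lut 16-bit range. */
static uint16_t lut_from_float(float v)
{
	if (v <= 0.0f)
		return 0;
	if (v >= 1.0f)
		return 0xffff;
	return (uint16_t)(v * 65535.0f + 0.5f);
}

/* Inverse mapping: 0x0 -> 0.0, 0xffff -> 1.0. */
static float lut_to_float(uint16_t v)
{
	return (float)v / 65535.0f;
}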
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 397810fa2d33..3a73f5316766 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -63,6 +63,28 @@ extern "C" {
#define I915_RESET_UEVENT "RESET"
/*
+ * i915_user_extension: Base class for defining a chain of extensions
+ *
+ * Many interfaces need to grow over time. In most cases we can simply
+ * extend the struct and have userspace pass in more data. Another option,
+ * as demonstrated by Vulkan's approach to providing extensions for forward
+ * and backward compatibility, is to use a list of optional structs to
+ * provide those extra details.
+ *
+ * The key advantage to using an extension chain is that it allows us to
+ * redefine the interface more easily than an ever growing struct of
+ * increasing complexity, and for large parts of that interface to be
+ * entirely optional. The downside is more pointer chasing; chasing across
+ * the __user boundary with pointers encapsulated inside u64.
+ */
+struct i915_user_extension {
+ __u64 next_extension;
+ __u32 name;
+ __u32 flags; /* All undefined bits must be zero. */
+ __u32 rsvd[4]; /* Reserved for future use; must be zero. */
+};
+
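The chain itself is a singly linked list threaded through the base structs and terminated by a zero next_extension. A minimal sketch with two purely illustrative wrapper structs (ext_a/ext_b and their name values are hypothetical; the only real extension added by this patch is the context-create SETPARAM one further below):

#include <stdint.h>
#include <drm/i915_drm.h>	/* assumed location of the patched header */

/* Purely illustrative extension payloads, not defined by the uAPI. */
struct ext_a { struct i915_user_extension base; __u64 payload; };
struct ext_b { struct i915_user_extension base; __u32 value; __u32 pad; };

static void chain_two_extensions(struct ext_a *a, struct ext_b *b)
{
	a->base.name = 0;				/* extension id (illustrative) */
	a->base.next_extension = (uintptr_t)&b->base;	/* a -> b */
	b->base.name = 1;				/* extension id (illustrative) */
	b->base.next_extension = 0;			/* 0 terminates the chain */
}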
+/*
* MOCS indexes used for GPU surfaces, defining the cacheability of the
* surface data and the coherency for this data wrt. CPU vs. GPU accesses.
*/
@@ -99,9 +121,23 @@ enum drm_i915_gem_engine_class {
I915_ENGINE_CLASS_VIDEO = 2,
I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
+ /* should be kept compact */
+
I915_ENGINE_CLASS_INVALID = -1
};
+/*
+ * There may be more than one engine fulfilling any role within the system.
+ * Each engine of a class is given a unique instance number and therefore
+ * any engine can be specified by its class:instance tuple. APIs that allow
+ * access to any engine in the system will use struct i915_engine_class_instance
+ * for this identification.
+ */
+struct i915_engine_class_instance {
+ __u16 engine_class; /* see enum drm_i915_gem_engine_class */
+ __u16 engine_instance;
+};
+
/**
* DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
*
@@ -319,6 +355,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_PERF_ADD_CONFIG 0x37
#define DRM_I915_PERF_REMOVE_CONFIG 0x38
#define DRM_I915_QUERY 0x39
+/* Must be kept compact -- no holes */
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -367,6 +404,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
+#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
@@ -476,6 +514,7 @@ typedef struct drm_i915_irq_wait {
#define I915_SCHEDULER_CAP_ENABLED (1ul << 0)
#define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
+#define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3)
#define I915_PARAM_HUC_STATUS 42
@@ -559,6 +598,8 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_MMAP_GTT_COHERENT 52
+/* Must be kept compact -- no holes and well documented */
+
typedef struct drm_i915_getparam {
__s32 param;
/*
@@ -574,6 +615,7 @@ typedef struct drm_i915_getparam {
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
#define I915_SETPARAM_NUM_USED_FENCES 4
+/* Must be kept compact -- no holes */
typedef struct drm_i915_setparam {
int param;
@@ -972,7 +1014,7 @@ struct drm_i915_gem_execbuffer2 {
* struct drm_i915_gem_exec_fence *fences.
*/
__u64 cliprects_ptr;
-#define I915_EXEC_RING_MASK (7<<0)
+#define I915_EXEC_RING_MASK (0x3f)
#define I915_EXEC_DEFAULT (0<<0)
#define I915_EXEC_RENDER (1<<0)
#define I915_EXEC_BSD (2<<0)
@@ -1120,32 +1162,34 @@ struct drm_i915_gem_busy {
* as busy may become idle before the ioctl is completed.
*
* Furthermore, if the object is busy, which engine is busy is only
- * provided as a guide. There are race conditions which prevent the
- * report of which engines are busy from being always accurate.
- * However, the converse is not true. If the object is idle, the
- * result of the ioctl, that all engines are idle, is accurate.
+ * provided as a guide and only indirectly by reporting its class
+ * (there may be more than one engine in each class). There are race
+ * conditions which prevent the report of which engines are busy from
+ * being always accurate. However, the converse is not true. If the
+ * object is idle, the result of the ioctl, that all engines are idle,
+ * is accurate.
*
* The returned dword is split into two fields to indicate both
- * the engines on which the object is being read, and the
- * engine on which it is currently being written (if any).
+ * the engine classes on which the object is being read, and the
+ * engine class on which it is currently being written (if any).
*
* The low word (bits 0:15) indicate if the object is being written
* to by any engine (there can only be one, as the GEM implicit
* synchronisation rules force writes to be serialised). Only the
- * engine for the last write is reported.
+ * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
+ * 1 not 0 etc) for the last write is reported.
*
- * The high word (bits 16:31) are a bitmask of which engines are
- * currently reading from the object. Multiple engines may be
+ * The high word (bits 16:31) is a bitmask of which engine classes
+ * are currently reading from the object. Multiple engines may be
* reading from the object simultaneously.
*
- * The value of each engine is the same as specified in the
- * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc.
- * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to
- * the I915_EXEC_RENDER engine for execution, and so it is never
+ * The value of each engine class is the same as specified in the
+ * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e.
+ * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
* reported as active itself. Some hardware may have parallel
* execution engines, e.g. multiple media engines, which are
- * mapped to the same identifier in the EXECBUFFER2 ioctl and
- * so are not separately reported for busyness.
+ * mapped to the same class identifier and so are not separately
+ * reported for busyness.
*
* Caveat emptor:
* Only the boolean result of this query is reliable; that is whether
@@ -1412,65 +1456,17 @@ struct drm_i915_gem_wait {
};
struct drm_i915_gem_context_create {
- /* output: id of new context*/
- __u32 ctx_id;
- __u32 pad;
-};
-
-struct drm_i915_gem_context_destroy {
- __u32 ctx_id;
+	__u32 ctx_id; /* output: id of new context */
__u32 pad;
};
-struct drm_i915_reg_read {
- /*
- * Register offset.
- * For 64bit wide registers where the upper 32bits don't immediately
- * follow the lower 32bits, the offset of the lower 32bits must
- * be specified
- */
- __u64 offset;
-#define I915_REG_READ_8B_WA (1ul << 0)
-
- __u64 val; /* Return value */
-};
-/* Known registers:
- *
- * Render engine timestamp - 0x2358 + 64bit - gen7+
- * - Note this register returns an invalid value if using the default
- * single instruction 8byte read, in order to workaround that pass
- * flag I915_REG_READ_8B_WA in offset field.
- *
- */
-
-struct drm_i915_reset_stats {
- __u32 ctx_id;
- __u32 flags;
-
- /* All resets since boot/module reload, for all contexts */
- __u32 reset_count;
-
- /* Number of batches lost when active in GPU, for this context */
- __u32 batch_active;
-
- /* Number of batches lost pending for execution, for this context */
- __u32 batch_pending;
-
- __u32 pad;
-};
-
-struct drm_i915_gem_userptr {
- __u64 user_ptr;
- __u64 user_size;
+struct drm_i915_gem_context_create_ext {
+	__u32 ctx_id; /* output: id of new context */
__u32 flags;
-#define I915_USERPTR_READ_ONLY 0x1
-#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
- /**
- * Returned handle for the object.
- *
- * Object handles are nonzero.
- */
- __u32 handle;
+#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0)
+#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
+ (-(I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS << 1))
+ __u64 extensions;
};
struct drm_i915_gem_context_param {
@@ -1491,6 +1487,28 @@ struct drm_i915_gem_context_param {
* drm_i915_gem_context_param_sseu.
*/
#define I915_CONTEXT_PARAM_SSEU 0x7
+
+/*
+ * Not all clients may want to attempt automatic recovery of a context after
+ * a hang (for example, some clients may only submit very small incremental
+ * batches relying on known logical state of previous batches which will never
+ * recover correctly and each attempt will hang), and so would prefer that
+ * the context is forever banned instead.
+ *
+ * If set to false (0), after a reset, subsequent (and in flight) rendering
+ * from this context is discarded, and the client will need to create a new
+ * context to use instead.
+ *
+ * If set to true (1), the kernel will automatically attempt to recover the
+ * context by skipping the hanging batch and executing the next batch starting
+ * from the default context state (discarding the incomplete logical context
+ * state lost due to the reset).
+ *
+ * On creation, all new contexts are marked as recoverable.
+ */
+#define I915_CONTEXT_PARAM_RECOVERABLE 0x8
+/* Must be kept compact -- no holes and well documented */
+
__u64 value;
};
@@ -1519,8 +1537,7 @@ struct drm_i915_gem_context_param_sseu {
/*
* Engine class & instance to be configured or queried.
*/
- __u16 engine_class;
- __u16 engine_instance;
+ struct i915_engine_class_instance engine;
/*
* Unused for now. Must be cleared to zero.
@@ -1553,6 +1570,96 @@ struct drm_i915_gem_context_param_sseu {
__u32 rsvd;
};
+struct drm_i915_gem_context_create_ext_setparam {
+#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
+ struct i915_user_extension base;
+ struct drm_i915_gem_context_param param;
+};
+
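Putting the pieces together, a hedged sketch of creating a context that opts out of automatic recovery via a single SETPARAM extension (create_nonrecoverable_context is a hypothetical helper; the include path is assumed):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* assumed location of the patched header */

static int create_nonrecoverable_context(int i915_fd, uint32_t *ctx_id)
{
	struct drm_i915_gem_context_create_ext_setparam setparam;
	struct drm_i915_gem_context_create_ext create;
	int ret;

	memset(&setparam, 0, sizeof(setparam));
	setparam.base.name = I915_CONTEXT_CREATE_EXT_SETPARAM;
	setparam.base.next_extension = 0;		/* end of chain */
	setparam.param.param = I915_CONTEXT_PARAM_RECOVERABLE;
	setparam.param.value = 0;			/* 0: ban instead of auto-recover */

	memset(&create, 0, sizeof(create));
	create.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS;
	create.extensions = (uintptr_t)&setparam;

	ret = ioctl(i915_fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
	if (ret == 0)
		*ctx_id = create.ctx_id;
	return ret;
}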
+struct drm_i915_gem_context_destroy {
+ __u32 ctx_id;
+ __u32 pad;
+};
+
+/*
+ * DRM_I915_GEM_VM_CREATE -
+ *
+ * Create a new virtual memory address space (ppGTT) for use within a context
+ * on the same file. Extensions can be provided to configure exactly how the
+ * address space is set up upon creation.
+ *
+ * The id of the new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
+ * returned in the outparam @vm_id.
+ *
+ * No flags are defined; all bits are reserved and must be zero.
+ *
+ * An extension chain may be provided, starting with @extensions, and terminated
+ * by the @next_extension being 0. Currently, no extensions are defined.
+ *
+ * DRM_I915_GEM_VM_DESTROY -
+ *
+ * Destroys a previously created VM id, specified in @vm_id.
+ *
+ * No extensions or flags are allowed currently, and so must be zero.
+ */
+struct drm_i915_gem_vm_control {
+ __u64 extensions;
+ __u32 flags;
+ __u32 vm_id;
+};
+
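A hedged sketch of the VM lifecycle (the DRM_IOCTL_I915_GEM_VM_CREATE/DESTROY wrappers are defined elsewhere in this header by the same series; the helper names here are hypothetical):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* assumed location of the patched header */

static int vm_create(int i915_fd, uint32_t *vm_id)
{
	struct drm_i915_gem_vm_control ctl;

	memset(&ctl, 0, sizeof(ctl));	/* no flags, no extensions */
	if (ioctl(i915_fd, DRM_IOCTL_I915_GEM_VM_CREATE, &ctl))
		return -1;
	*vm_id = ctl.vm_id;
	return 0;
}

static int vm_destroy(int i915_fd, uint32_t vm_id)
{
	struct drm_i915_gem_vm_control ctl;

	memset(&ctl, 0, sizeof(ctl));
	ctl.vm_id = vm_id;
	return ioctl(i915_fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &ctl);
}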
+struct drm_i915_reg_read {
+ /*
+ * Register offset.
+ * For 64bit wide registers where the upper 32bits don't immediately
+ * follow the lower 32bits, the offset of the lower 32bits must
+ * be specified
+ */
+ __u64 offset;
+#define I915_REG_READ_8B_WA (1ul << 0)
+
+ __u64 val; /* Return value */
+};
+
+/* Known registers:
+ *
+ * Render engine timestamp - 0x2358 + 64bit - gen7+
+ * - Note this register returns an invalid value if using the default
+ * single instruction 8byte read, in order to workaround that pass
+ * flag I915_REG_READ_8B_WA in offset field.
+ *
+ */
+
+struct drm_i915_reset_stats {
+ __u32 ctx_id;
+ __u32 flags;
+
+ /* All resets since boot/module reload, for all contexts */
+ __u32 reset_count;
+
+ /* Number of batches lost when active in GPU, for this context */
+ __u32 batch_active;
+
+ /* Number of batches lost pending for execution, for this context */
+ __u32 batch_pending;
+
+ __u32 pad;
+};
+
+struct drm_i915_gem_userptr {
+ __u64 user_ptr;
+ __u64 user_size;
+ __u32 flags;
+#define I915_USERPTR_READ_ONLY 0x1
+#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+};
+
enum drm_i915_oa_format {
I915_OA_FORMAT_A13 = 1, /* HSW only */
I915_OA_FORMAT_A29, /* HSW only */
@@ -1714,6 +1821,7 @@ struct drm_i915_perf_oa_config {
struct drm_i915_query_item {
__u64 query_id;
#define DRM_I915_QUERY_TOPOLOGY_INFO 1
+/* Must be kept compact -- no holes and well documented */
/*
* When set to zero by userspace, this is filled with the size of the
diff --git a/include/uapi/drm/lima_drm.h b/include/uapi/drm/lima_drm.h
new file mode 100644
index 000000000000..95a00fb867e6
--- /dev/null
+++ b/include/uapi/drm/lima_drm.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
+/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_DRM_H__
+#define __LIMA_DRM_H__
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+enum drm_lima_param_gpu_id {
+ DRM_LIMA_PARAM_GPU_ID_UNKNOWN,
+ DRM_LIMA_PARAM_GPU_ID_MALI400,
+ DRM_LIMA_PARAM_GPU_ID_MALI450,
+};
+
+enum drm_lima_param {
+ DRM_LIMA_PARAM_GPU_ID,
+ DRM_LIMA_PARAM_NUM_PP,
+ DRM_LIMA_PARAM_GP_VERSION,
+ DRM_LIMA_PARAM_PP_VERSION,
+};
+
+/**
+ * get various information about the GPU
+ */
+struct drm_lima_get_param {
+ __u32 param; /* in, value in enum drm_lima_param */
+ __u32 pad; /* pad, must be zero */
+ __u64 value; /* out, parameter value */
+};
+
+/**
+ * create a buffer for use by the GPU
+ */
+struct drm_lima_gem_create {
+ __u32 size; /* in, buffer size */
+ __u32 flags; /* in, currently no flags, must be zero */
+ __u32 handle; /* out, GEM buffer handle */
+ __u32 pad; /* pad, must be zero */
+};
+
+/**
+ * get information about a buffer
+ */
+struct drm_lima_gem_info {
+ __u32 handle; /* in, GEM buffer handle */
+ __u32 va; /* out, virtual address mapped into GPU MMU */
+ __u64 offset; /* out, used to mmap this buffer to CPU */
+};
+
+#define LIMA_SUBMIT_BO_READ 0x01
+#define LIMA_SUBMIT_BO_WRITE 0x02
+
+/* buffer information used by one task */
+struct drm_lima_gem_submit_bo {
+ __u32 handle; /* in, GEM buffer handle */
+ __u32 flags; /* in, buffer read/write by GPU */
+};
+
+#define LIMA_GP_FRAME_REG_NUM 6
+
+/* frame used to setup GP for each task */
+struct drm_lima_gp_frame {
+ __u32 frame[LIMA_GP_FRAME_REG_NUM];
+};
+
+#define LIMA_PP_FRAME_REG_NUM 23
+#define LIMA_PP_WB_REG_NUM 12
+
+/* frame used to setup mali400 GPU PP for each task */
+struct drm_lima_m400_pp_frame {
+ __u32 frame[LIMA_PP_FRAME_REG_NUM];
+ __u32 num_pp;
+ __u32 wb[3 * LIMA_PP_WB_REG_NUM];
+ __u32 plbu_array_address[4];
+ __u32 fragment_stack_address[4];
+};
+
+/* frame used to setup mali450 GPU PP for each task */
+struct drm_lima_m450_pp_frame {
+ __u32 frame[LIMA_PP_FRAME_REG_NUM];
+ __u32 num_pp;
+ __u32 wb[3 * LIMA_PP_WB_REG_NUM];
+ __u32 use_dlbu;
+ __u32 _pad;
+ union {
+ __u32 plbu_array_address[8];
+ __u32 dlbu_regs[4];
+ };
+ __u32 fragment_stack_address[8];
+};
+
+#define LIMA_PIPE_GP 0x00
+#define LIMA_PIPE_PP 0x01
+
+#define LIMA_SUBMIT_FLAG_EXPLICIT_FENCE (1 << 0)
+
+/**
+ * submit a task to GPU
+ *
+ * Userspace can always merge multiple sync_files and drm_syncobjs
+ * into one drm_syncobj as in_sync[0], but in_sync[1] is reserved
+ * for another task's out_sync to avoid the export/import/merge
+ * round trip when using explicit sync.
+ */
+struct drm_lima_gem_submit {
+ __u32 ctx; /* in, context handle task is submitted to */
+ __u32 pipe; /* in, which pipe to use, GP/PP */
+ __u32 nr_bos; /* in, array length of bos field */
+ __u32 frame_size; /* in, size of frame field */
+ __u64 bos; /* in, array of drm_lima_gem_submit_bo */
+ __u64 frame; /* in, GP/PP frame */
+ __u32 flags; /* in, submit flags */
+	__u32 out_sync;      /* in, drm_syncobj handle used to wait for task completion after submission */
+	__u32 in_sync[2];    /* in, drm_syncobj handles to wait on before starting this task */
+};
+
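A hedged sketch of submitting one GP task that writes a single BO (lima_submit_gp is a hypothetical helper; the frame contents are Mali-specific and not shown, and the header include path is assumed):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "lima_drm.h"	/* assumed include path for the new header */

static int lima_submit_gp(int drm_fd, uint32_t ctx, uint32_t bo_handle,
			  const struct drm_lima_gp_frame *frame,
			  uint32_t out_sync)
{
	struct drm_lima_gem_submit_bo bo = {
		.handle = bo_handle,
		.flags = LIMA_SUBMIT_BO_WRITE,
	};
	struct drm_lima_gem_submit submit;

	memset(&submit, 0, sizeof(submit));
	submit.ctx = ctx;
	submit.pipe = LIMA_PIPE_GP;
	submit.nr_bos = 1;
	submit.bos = (uintptr_t)&bo;
	submit.frame = (uintptr_t)frame;
	submit.frame_size = sizeof(*frame);
	submit.out_sync = out_sync;	/* drm_syncobj signalled on completion */
	/* in_sync[] left zero: no explicit wait before the task starts */

	return ioctl(drm_fd, DRM_IOCTL_LIMA_GEM_SUBMIT, &submit);
}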
+#define LIMA_GEM_WAIT_READ 0x01
+#define LIMA_GEM_WAIT_WRITE 0x02
+
+/**
+ * wait for pending GPU tasks on a buffer to finish
+ */
+struct drm_lima_gem_wait {
+ __u32 handle; /* in, GEM buffer handle */
+ __u32 op; /* in, CPU want to read/write this buffer */
+	__s64 timeout_ns;    /* in, wait timeout in absolute time */
+};
+
+/**
+ * create a context
+ */
+struct drm_lima_ctx_create {
+ __u32 id; /* out, context handle */
+ __u32 _pad; /* pad, must be zero */
+};
+
+/**
+ * free a context
+ */
+struct drm_lima_ctx_free {
+ __u32 id; /* in, context handle */
+ __u32 _pad; /* pad, must be zero */
+};
+
+#define DRM_LIMA_GET_PARAM 0x00
+#define DRM_LIMA_GEM_CREATE 0x01
+#define DRM_LIMA_GEM_INFO 0x02
+#define DRM_LIMA_GEM_SUBMIT 0x03
+#define DRM_LIMA_GEM_WAIT 0x04
+#define DRM_LIMA_CTX_CREATE 0x05
+#define DRM_LIMA_CTX_FREE 0x06
+
+#define DRM_IOCTL_LIMA_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_LIMA_GET_PARAM, struct drm_lima_get_param)
+#define DRM_IOCTL_LIMA_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_LIMA_GEM_CREATE, struct drm_lima_gem_create)
+#define DRM_IOCTL_LIMA_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_LIMA_GEM_INFO, struct drm_lima_gem_info)
+#define DRM_IOCTL_LIMA_GEM_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_LIMA_GEM_SUBMIT, struct drm_lima_gem_submit)
+#define DRM_IOCTL_LIMA_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_LIMA_GEM_WAIT, struct drm_lima_gem_wait)
+#define DRM_IOCTL_LIMA_CTX_CREATE DRM_IOR(DRM_COMMAND_BASE + DRM_LIMA_CTX_CREATE, struct drm_lima_ctx_create)
+#define DRM_IOCTL_LIMA_CTX_FREE DRM_IOW(DRM_COMMAND_BASE + DRM_LIMA_CTX_FREE, struct drm_lima_ctx_free)
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __LIMA_DRM_H__ */
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 91a16b333c69..0b85ed6a3710 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -74,6 +74,8 @@ struct drm_msm_timespec {
#define MSM_PARAM_TIMESTAMP 0x05
#define MSM_PARAM_GMEM_BASE 0x06
#define MSM_PARAM_NR_RINGS 0x07
+#define MSM_PARAM_PP_PGTABLE 0x08 /* => 1 for per-process pagetables, else 0 */
+#define MSM_PARAM_FAULTS 0x09
struct drm_msm_param {
__u32 pipe; /* in, MSM_PIPE_x */
@@ -286,6 +288,16 @@ struct drm_msm_submitqueue {
__u32 id; /* out, identifier */
};
+#define MSM_SUBMITQUEUE_PARAM_FAULTS 0
+
+struct drm_msm_submitqueue_query {
+ __u64 data;
+ __u32 id;
+ __u32 param;
+ __u32 len;
+ __u32 pad;
+};
+
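A hedged sketch of the new query path (msm_queue_query is a hypothetical helper; the size and layout of the data written back for MSM_SUBMITQUEUE_PARAM_FAULTS are driver-defined and not specified here):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "msm_drm.h"	/* assumed include path for the patched header */

static int msm_queue_query(int drm_fd, uint32_t queue_id, uint32_t param,
			   void *buf, uint32_t len)
{
	struct drm_msm_submitqueue_query q;

	memset(&q, 0, sizeof(q));
	q.id = queue_id;
	q.param = param;		/* e.g. MSM_SUBMITQUEUE_PARAM_FAULTS */
	q.data = (uintptr_t)buf;	/* kernel writes the result here */
	q.len = len;			/* size of the caller's buffer */

	return ioctl(drm_fd, DRM_IOCTL_MSM_SUBMITQUEUE_QUERY, &q);
}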
#define DRM_MSM_GET_PARAM 0x00
/* placeholder:
#define DRM_MSM_SET_PARAM 0x01
@@ -302,6 +314,7 @@ struct drm_msm_submitqueue {
*/
#define DRM_MSM_SUBMITQUEUE_NEW 0x0A
#define DRM_MSM_SUBMITQUEUE_CLOSE 0x0B
+#define DRM_MSM_SUBMITQUEUE_QUERY 0x0C
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
@@ -313,6 +326,7 @@ struct drm_msm_submitqueue {
#define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
#define DRM_IOCTL_MSM_SUBMITQUEUE_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue)
#define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32)
+#define DRM_IOCTL_MSM_SUBMITQUEUE_QUERY DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_QUERY, struct drm_msm_submitqueue_query)
#if defined(__cplusplus)
}
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
new file mode 100644
index 000000000000..a52e0283b90d
--- /dev/null
+++ b/include/uapi/drm/panfrost_drm.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2014-2018 Broadcom
+ * Copyright © 2019 Collabora ltd.
+ */
+#ifndef _PANFROST_DRM_H_
+#define _PANFROST_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#define DRM_PANFROST_SUBMIT 0x00
+#define DRM_PANFROST_WAIT_BO 0x01
+#define DRM_PANFROST_CREATE_BO 0x02
+#define DRM_PANFROST_MMAP_BO 0x03
+#define DRM_PANFROST_GET_PARAM 0x04
+#define DRM_PANFROST_GET_BO_OFFSET 0x05
+
+#define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
+#define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
+#define DRM_IOCTL_PANFROST_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_CREATE_BO, struct drm_panfrost_create_bo)
+#define DRM_IOCTL_PANFROST_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MMAP_BO, struct drm_panfrost_mmap_bo)
+#define DRM_IOCTL_PANFROST_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_PARAM, struct drm_panfrost_get_param)
+#define DRM_IOCTL_PANFROST_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset)
+
+#define PANFROST_JD_REQ_FS (1 << 0)
+/**
+ * struct drm_panfrost_submit - ioctl argument for submitting commands to the 3D
+ * engine.
+ *
+ * This asks the kernel to have the GPU execute a render command list.
+ */
+struct drm_panfrost_submit {
+
+ /** Address to GPU mapping of job descriptor */
+ __u64 jc;
+
+ /** An optional array of sync objects to wait on before starting this job. */
+ __u64 in_syncs;
+
+ /** Number of sync objects to wait on before starting this job. */
+ __u32 in_sync_count;
+
+ /** An optional sync object to place the completion fence in. */
+ __u32 out_sync;
+
+ /** Pointer to a u32 array of the BOs that are referenced by the job. */
+ __u64 bo_handles;
+
+ /** Number of BO handles passed in (size is that times 4). */
+ __u32 bo_handle_count;
+
+ /** A combination of PANFROST_JD_REQ_* */
+ __u32 requirements;
+};
+
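A hedged sketch of filling the submit argument (panfrost_submit_job is a hypothetical helper; the job descriptor at jc must already be written into GPU-visible memory):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "panfrost_drm.h"	/* assumed include path for the new header */

static int panfrost_submit_job(int drm_fd, uint64_t jc_gpu_va,
			       const uint32_t *bo_handles, uint32_t bo_count,
			       uint32_t out_sync, int needs_fragment_shader)
{
	struct drm_panfrost_submit submit;

	memset(&submit, 0, sizeof(submit));
	submit.jc = jc_gpu_va;			/* GPU address of the job descriptor */
	submit.bo_handles = (uintptr_t)bo_handles;
	submit.bo_handle_count = bo_count;
	submit.out_sync = out_sync;		/* syncobj signalled on completion */
	submit.requirements = needs_fragment_shader ? PANFROST_JD_REQ_FS : 0;
	/* in_syncs/in_sync_count left zero: nothing to wait on first */

	return ioctl(drm_fd, DRM_IOCTL_PANFROST_SUBMIT, &submit);
}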
+/**
+ * struct drm_panfrost_wait_bo - ioctl argument for waiting for
+ * completion of the last DRM_PANFROST_SUBMIT on a BO.
+ *
+ * This is useful for cases where multiple processes might be
+ * rendering to a BO and you want to wait for all rendering to be
+ * completed.
+ */
+struct drm_panfrost_wait_bo {
+ __u32 handle;
+ __u32 pad;
+ __s64 timeout_ns; /* absolute */
+};
+
+/**
+ * struct drm_panfrost_create_bo - ioctl argument for creating Panfrost BOs.
+ *
+ * There are currently no values for the flags argument, but it may be
+ * used in a future extension.
+ */
+struct drm_panfrost_create_bo {
+ __u32 size;
+ __u32 flags;
+ /** Returned GEM handle for the BO. */
+ __u32 handle;
+ /* Pad, must be zero-filled. */
+ __u32 pad;
+ /**
+ * Returned offset for the BO in the GPU address space. This offset
+ * is private to the DRM fd and is valid for the lifetime of the GEM
+ * handle.
+ *
+ * This offset value will always be nonzero, since various HW
+ * units treat 0 specially.
+ */
+ __u64 offset;
+};
+
+/**
+ * struct drm_panfrost_mmap_bo - ioctl argument for mapping Panfrost BOs.
+ *
+ * This doesn't actually perform an mmap. Instead, it returns the
+ * offset you need to use in an mmap on the DRM device node. This
+ * means that tools like valgrind end up knowing about the mapped
+ * memory.
+ *
+ * There are currently no values for the flags argument, but it may be
+ * used in a future extension.
+ */
+struct drm_panfrost_mmap_bo {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 flags;
+ /** offset into the drm node to use for subsequent mmap call. */
+ __u64 offset;
+};
+
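A hedged sketch of the two-step mapping this comment describes (panfrost_map_bo is a hypothetical helper):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "panfrost_drm.h"	/* assumed include path for the new header */

static void *panfrost_map_bo(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_panfrost_mmap_bo arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	if (ioctl(drm_fd, DRM_IOCTL_PANFROST_MMAP_BO, &arg))
		return MAP_FAILED;

	/* The returned offset is only meaningful for mmap on this DRM fd. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, arg.offset);
}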
+enum drm_panfrost_param {
+ DRM_PANFROST_PARAM_GPU_PROD_ID,
+};
+
+struct drm_panfrost_get_param {
+ __u32 param;
+ __u32 pad;
+ __u64 value;
+};
+
+/**
+ * Returns the offset for the BO in the GPU address space for this DRM fd.
+ * This is the same value returned by drm_panfrost_create_bo, if that was called
+ * from this DRM fd.
+ */
+struct drm_panfrost_get_bo_offset {
+ __u32 handle;
+ __u32 pad;
+ __u64 offset;
+};
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _PANFROST_DRM_H_ */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 5f24b50c9e88..059dc2bedaf6 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -7,5 +7,7 @@ no-export-headers += kvm.h
endif
ifeq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/kvm_para.h),)
+ifeq ($(wildcard $(objtree)/arch/$(SRCARCH)/include/generated/uapi/asm/kvm_para.h),)
no-export-headers += kvm_para.h
endif
+endif
diff --git a/include/uapi/linux/aspeed-p2a-ctrl.h b/include/uapi/linux/aspeed-p2a-ctrl.h
new file mode 100644
index 000000000000..033355552a6e
--- /dev/null
+++ b/include/uapi/linux/aspeed-p2a-ctrl.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Copyright 2019 Google Inc
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Provides a simple driver to control the ASPEED P2A interface which allows
+ * the host to read and write to various regions of the BMC's memory.
+ */
+
+#ifndef _UAPI_LINUX_ASPEED_P2A_CTRL_H
+#define _UAPI_LINUX_ASPEED_P2A_CTRL_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define ASPEED_P2A_CTRL_READ_ONLY 0
+#define ASPEED_P2A_CTRL_READWRITE 1
+
+/*
+ * This driver provides a mechanism for enabling or disabling the read-write
+ * property of specific windows into the ASPEED BMC's memory.
+ *
+ * A user can map a region of the BMC's memory as read-only or read-write, with
+ * the caveat that once any region is mapped, all regions are unlocked for
+ * reading.
+ */
+
+/*
+ * Unlock a region of BMC physical memory for access from the host.
+ *
+ * Also used to read back the optional memory-region configuration for the
+ * driver.
+ */
+struct aspeed_p2a_ctrl_mapping {
+ __u64 addr;
+ __u32 length;
+ __u32 flags;
+};
+
+#define __ASPEED_P2A_CTRL_IOCTL_MAGIC 0xb3
+
+/*
+ * This IOCTL is meant to configure a region or regions of memory given a
+ * starting address and length to be readable by the host, or
+ * readable-writeable.
+ */
+#define ASPEED_P2A_CTRL_IOCTL_SET_WINDOW _IOW(__ASPEED_P2A_CTRL_IOCTL_MAGIC, \
+ 0x00, struct aspeed_p2a_ctrl_mapping)
+
+/*
+ * This IOCTL is meant to read back to the user the base address and length of
+ * the memory-region specified to the driver for use with mmap.
+ */
+#define ASPEED_P2A_CTRL_IOCTL_GET_MEMORY_CONFIG \
+ _IOWR(__ASPEED_P2A_CTRL_IOCTL_MAGIC, \
+ 0x01, struct aspeed_p2a_ctrl_mapping)
+
+#endif /* _UAPI_LINUX_ASPEED_P2A_CTRL_H */
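A hedged sketch of unlocking a window through the new ioctl (p2a_unlock_window is a hypothetical helper; the device node and the address/length values are platform-specific):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/aspeed-p2a-ctrl.h>	/* the header added here */

/* Ask the driver to expose [addr, addr + length) read-write to the host. */
static int p2a_unlock_window(int ctrl_fd, uint64_t addr, uint32_t length)
{
	struct aspeed_p2a_ctrl_mapping map;

	memset(&map, 0, sizeof(map));
	map.addr = addr;
	map.length = length;
	map.flags = ASPEED_P2A_CTRL_READWRITE;

	return ioctl(ctrl_fd, ASPEED_P2A_CTRL_IOCTL_SET_WINDOW, &map);
}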
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index f28acd952d03..a1280af20336 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -114,6 +114,8 @@
#define AUDIT_REPLACE 1329 /* Replace auditd if this packet unanswerd */
#define AUDIT_KERN_MODULE 1330 /* Kernel Module events */
#define AUDIT_FANOTIFY 1331 /* Fanotify access decision */
+#define AUDIT_TIME_INJOFFSET 1332 /* Timekeeping offset injected */
+#define AUDIT_TIME_ADJNTPVAL 1333 /* NTP value adjustment */
#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */
@@ -375,11 +377,19 @@ enum {
#define AUDIT_ARCH_AARCH64 (EM_AARCH64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_ALPHA (EM_ALPHA|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_ARCOMPACT (EM_ARCOMPACT|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_ARCOMPACTBE (EM_ARCOMPACT)
+#define AUDIT_ARCH_ARCV2 (EM_ARCV2|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_ARCV2BE (EM_ARCV2)
#define AUDIT_ARCH_ARM (EM_ARM|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_ARMEB (EM_ARM)
+#define AUDIT_ARCH_C6X (EM_TI_C6000|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_C6XBE (EM_TI_C6000)
#define AUDIT_ARCH_CRIS (EM_CRIS|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_CSKY (EM_CSKY|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_FRV (EM_FRV)
+#define AUDIT_ARCH_H8300 (EM_H8_300)
+#define AUDIT_ARCH_HEXAGON (EM_HEXAGON)
#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_IA64 (EM_IA_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_M32R (EM_M32R)
@@ -393,6 +403,9 @@ enum {
#define AUDIT_ARCH_MIPSEL64 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_MIPSEL64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE|\
__AUDIT_ARCH_CONVENTION_MIPS64_N32)
+#define AUDIT_ARCH_NDS32 (EM_NDS32|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_NDS32BE (EM_NDS32)
+#define AUDIT_ARCH_NIOS2 (EM_ALTERA_NIOS2|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_OPENRISC (EM_OPENRISC)
#define AUDIT_ARCH_PARISC (EM_PARISC)
#define AUDIT_ARCH_PARISC64 (EM_PARISC|__AUDIT_ARCH_64BIT)
@@ -413,6 +426,7 @@ enum {
#define AUDIT_ARCH_TILEGX (EM_TILEGX|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_TILEGX32 (EM_TILEGX|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_TILEPRO (EM_TILEPRO|__AUDIT_ARCH_LE)
+#define AUDIT_ARCH_UNICORE (EM_UNICORE|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_X86_64 (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_XTENSA (EM_XTENSA)
diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h
index c99336f4eefe..4ebc2135e950 100644
--- a/include/uapi/linux/batadv_packet.h
+++ b/include/uapi/linux/batadv_packet.h
@@ -2,18 +2,6 @@
/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _UAPI_LINUX_BATADV_PACKET_H_
diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h
index 305bf316dd03..67f4636758af 100644
--- a/include/uapi/linux/batman_adv.h
+++ b/include/uapi/linux/batman_adv.h
@@ -2,24 +2,6 @@
/* Copyright (C) 2016-2019 B.A.T.M.A.N. contributors:
*
* Matthias Schiffer
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
*/
#ifndef _UAPI_LINUX_BATMAN_ADV_H_
@@ -491,6 +473,13 @@ enum batadv_nl_attrs {
*/
BATADV_ATTR_THROUGHPUT_OVERRIDE,
+ /**
+ * @BATADV_ATTR_MULTICAST_FANOUT: defines the maximum number of packet
+ * copies that may be generated for a multicast-to-unicast conversion.
+ * Once this limit is exceeded distribution will fall back to broadcast.
+ */
+ BATADV_ATTR_MULTICAST_FANOUT,
+
/* add attributes above here, update the policy in netlink.c */
/**
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 3c38ac9a92a7..63e0cf66f01a 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -105,6 +105,7 @@ enum bpf_cmd {
BPF_BTF_GET_FD_BY_ID,
BPF_TASK_FD_QUERY,
BPF_MAP_LOOKUP_AND_DELETE_ELEM,
+ BPF_MAP_FREEZE,
};
enum bpf_map_type {
@@ -132,6 +133,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
BPF_MAP_TYPE_QUEUE,
BPF_MAP_TYPE_STACK,
+ BPF_MAP_TYPE_SK_STORAGE,
};
/* Note that tracing related programs such as
@@ -166,6 +168,8 @@ enum bpf_prog_type {
BPF_PROG_TYPE_LIRC_MODE2,
BPF_PROG_TYPE_SK_REUSEPORT,
BPF_PROG_TYPE_FLOW_DISSECTOR,
+ BPF_PROG_TYPE_CGROUP_SYSCTL,
+ BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
};
enum bpf_attach_type {
@@ -187,6 +191,7 @@ enum bpf_attach_type {
BPF_CGROUP_UDP6_SENDMSG,
BPF_LIRC_MODE2,
BPF_FLOW_DISSECTOR,
+ BPF_CGROUP_SYSCTL,
__MAX_BPF_ATTACH_TYPE
};
@@ -255,8 +260,19 @@ enum bpf_attach_type {
*/
#define BPF_F_ANY_ALIGNMENT (1U << 1)
-/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
+/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
+ * two extensions:
+ *
+ * insn[0].src_reg: BPF_PSEUDO_MAP_FD BPF_PSEUDO_MAP_VALUE
+ * insn[0].imm: map fd map fd
+ * insn[1].imm: 0 offset into value
+ * insn[0].off: 0 0
+ * insn[1].off: 0 0
+ * ldimm64 rewrite: address of map address of map[0]+offset
+ * verifier type: CONST_PTR_TO_MAP PTR_TO_MAP_VALUE
+ */
#define BPF_PSEUDO_MAP_FD 1
+#define BPF_PSEUDO_MAP_VALUE 2
/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
* offset to another bpf function
@@ -283,7 +299,7 @@ enum bpf_attach_type {
#define BPF_OBJ_NAME_LEN 16U
-/* Flags for accessing BPF object */
+/* Flags for accessing BPF object from syscall side. */
#define BPF_F_RDONLY (1U << 3)
#define BPF_F_WRONLY (1U << 4)
@@ -293,6 +309,10 @@ enum bpf_attach_type {
/* Zero-initialize hash function seed. This should only be used for testing. */
#define BPF_F_ZERO_SEED (1U << 6)
+/* Flags for accessing BPF object from program side. */
+#define BPF_F_RDONLY_PROG (1U << 7)
+#define BPF_F_WRONLY_PROG (1U << 8)
+
/* flags for BPF_PROG_QUERY */
#define BPF_F_QUERY_EFFECTIVE (1U << 0)
@@ -396,6 +416,13 @@ union bpf_attr {
__aligned_u64 data_out;
__u32 repeat;
__u32 duration;
+ __u32 ctx_size_in; /* input: len of ctx_in */
+ __u32 ctx_size_out; /* input/output: len of ctx_out
+ * returns ENOSPC if ctx_out
+ * is too small.
+ */
+ __aligned_u64 ctx_in;
+ __aligned_u64 ctx_out;
} test;
struct { /* anonymous struct used by BPF_*_GET_*_ID */
@@ -502,16 +529,6 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
- * Description
- * Push an element *value* in *map*. *flags* is one of:
- *
- * **BPF_EXIST**
- * If the queue/stack is full, the oldest element is removed to
- * make room for this.
- * Return
- * 0 on success, or a negative error in case of failure.
- *
* int bpf_probe_read(void *dst, u32 size, const void *src)
* Description
* For tracing programs, safely attempt to read *size* bytes from
@@ -612,7 +629,7 @@ union bpf_attr {
* **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\
* **->swhash** and *skb*\ **->l4hash** to 0).
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -637,7 +654,7 @@ union bpf_attr {
* flexibility and can handle sizes larger than 2 or 4 for the
* checksum to update.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -669,7 +686,7 @@ union bpf_attr {
* flexibility and can handle sizes larger than 2 or 4 for the
* checksum to update.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -724,7 +741,7 @@ union bpf_attr {
* efficient, but it is handled through an action code where the
* redirection happens only after the eBPF program has returned.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -789,7 +806,7 @@ union bpf_attr {
* **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to
* be **ETH_P_8021Q**.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -801,7 +818,7 @@ union bpf_attr {
* Description
* Pop a VLAN header from the packet associated to *skb*.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -1151,7 +1168,7 @@ union bpf_attr {
* All values for *flags* are reserved for future usage, and must
* be left at zero.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -1264,7 +1281,7 @@ union bpf_attr {
* implicitly linearizes, unclones and drops offloads from the
* *skb*.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -1300,7 +1317,7 @@ union bpf_attr {
* **bpf_skb_pull_data()** to effectively unclone the *skb* from
* the very beginning in case it is indeed cloned.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -1352,7 +1369,7 @@ union bpf_attr {
* All values for *flags* are reserved for future usage, and must
* be left at zero.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -1367,7 +1384,7 @@ union bpf_attr {
* can be used to prepare the packet for pushing or popping
* headers.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -1435,14 +1452,14 @@ union bpf_attr {
* u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
* Description
* Equivalent to bpf_get_socket_cookie() helper that accepts
- * *skb*, but gets socket from **struct bpf_sock_addr** contex.
+ * *skb*, but gets socket from **struct bpf_sock_addr** context.
* Return
* A 8-byte long non-decreasing number.
*
* u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
* Description
* Equivalent to bpf_get_socket_cookie() helper that accepts
- * *skb*, but gets socket from **struct bpf_sock_ops** contex.
+ * *skb*, but gets socket from **struct bpf_sock_ops** context.
* Return
* A 8-byte long non-decreasing number.
*
@@ -1488,15 +1505,33 @@ union bpf_attr {
* Grow or shrink the room for data in the packet associated to
* *skb* by *len_diff*, and according to the selected *mode*.
*
- * There is a single supported mode at this time:
+ * There are two supported modes at this time:
+ *
+ * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
+ * (room space is added or removed below the layer 2 header).
*
* * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
* (room space is added or removed below the layer 3 header).
*
- * All values for *flags* are reserved for future usage, and must
- * be left at zero.
+ * The following flags are supported at this time:
+ *
+ * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size.
+ * Adjusting mss in this way is not allowed for datagrams.
+ *
+ * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**,
+ * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**:
+ * Any new space is reserved to hold a tunnel header.
+ * Configure skb offsets and other fields accordingly.
+ *
+ * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**,
+ * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**:
+ * Use with ENCAP_L3 flags to further specify the tunnel type.
*
- * A call to this helper is susceptible to change the underlaying
+ * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*):
+ * Use with ENCAP_L3/L4 flags to further specify the tunnel
+ * type; *len* is the length of the inner MAC header.
+ *
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -1575,7 +1610,7 @@ union bpf_attr {
* more flexibility as the user is free to store whatever meta
* data they need.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -1704,12 +1739,19 @@ union bpf_attr {
* error if an eBPF program tries to set a callback that is not
* supported in the current kernel.
*
- * The supported callback values that *argval* can combine are:
+ * *argval* is a flag array which can combine these flags:
*
* * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out)
* * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission)
* * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change)
*
+ * Therefore, this function can be used to clear a callback flag by
+ *	setting the appropriate bit to zero, e.g. to disable the RTO
+ * callback:
+ *
+ * **bpf_sock_ops_cb_flags_set(bpf_sock,**
+ * **bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)**
+ *
* Here are some examples of where one could call such eBPF
* program:
*
@@ -1810,7 +1852,7 @@ union bpf_attr {
* copied if necessary (i.e. if data was not linear and if start
* and end pointers do not point to the same chunk).
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -1844,7 +1886,7 @@ union bpf_attr {
* only possible to shrink the packet as of this writing,
* therefore *delta* must be a negative integer.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -2019,18 +2061,18 @@ union bpf_attr {
* **BPF_LWT_ENCAP_IP**
* IP encapsulation (GRE/GUE/IPIP/etc). The outer header
* must be IPv4 or IPv6, followed by zero or more
- * additional headers, up to LWT_BPF_MAX_HEADROOM total
- * bytes in all prepended headers. Please note that
- * if skb_is_gso(skb) is true, no more than two headers
- * can be prepended, and the inner header, if present,
- * should be either GRE or UDP/GUE.
- *
- * BPF_LWT_ENCAP_SEG6*** types can be called by bpf programs of
- * type BPF_PROG_TYPE_LWT_IN; BPF_LWT_ENCAP_IP type can be called
- * by bpf programs of types BPF_PROG_TYPE_LWT_IN and
- * BPF_PROG_TYPE_LWT_XMIT.
- *
- * A call to this helper is susceptible to change the underlaying
+ * additional headers, up to **LWT_BPF_MAX_HEADROOM**
+ * total bytes in all prepended headers. Please note that
+ * if **skb_is_gso**\ (*skb*) is true, no more than two
+ * headers can be prepended, and the inner header, if
+ * present, should be either GRE or UDP/GUE.
+ *
+ * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs
+ * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can
+ * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and
+ * **BPF_PROG_TYPE_LWT_XMIT**.
+ *
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -2045,7 +2087,7 @@ union bpf_attr {
* inside the outermost IPv6 Segment Routing Header can be
* modified through this helper.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -2061,7 +2103,7 @@ union bpf_attr {
* after the segments are accepted. *delta* can be as well
* positive (growing) as negative (shrinking).
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -2084,13 +2126,13 @@ union bpf_attr {
* Type of *param*: **int**.
* **SEG6_LOCAL_ACTION_END_B6**
* End.B6 action: Endpoint bound to an SRv6 policy.
- * Type of param: **struct ipv6_sr_hdr**.
+ * Type of *param*: **struct ipv6_sr_hdr**.
* **SEG6_LOCAL_ACTION_END_B6_ENCAP**
* End.B6.Encap action: Endpoint bound to an SRv6
* encapsulation policy.
- * Type of param: **struct ipv6_sr_hdr**.
+ * Type of *param*: **struct ipv6_sr_hdr**.
*
- * A call to this helper is susceptible to change the underlaying
+ * A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
* performed again, if the helper is used in combination with
@@ -2098,52 +2140,52 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
+ * int bpf_rc_repeat(void *ctx)
* Description
* This helper is used in programs implementing IR decoding, to
- * report a successfully decoded key press with *scancode*,
- * *toggle* value in the given *protocol*. The scancode will be
- * translated to a keycode using the rc keymap, and reported as
- * an input key down event. After a period a key up event is
- * generated. This period can be extended by calling either
- * **bpf_rc_keydown**\ () again with the same values, or calling
- * **bpf_rc_repeat**\ ().
+ * report a successfully decoded repeat key message. This delays
+ * the generation of a key up event for previously generated
+ * key down event.
*
- * Some protocols include a toggle bit, in case the button was
- * released and pressed again between consecutive scancodes.
+ * Some IR protocols like NEC have a special IR message for
+ * repeating last button, for when a button is held down.
*
* The *ctx* should point to the lirc sample as passed into
* the program.
*
- * The *protocol* is the decoded protocol number (see
- * **enum rc_proto** for some predefined values).
- *
* This helper is only available is the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
* Return
* 0
*
- * int bpf_rc_repeat(void *ctx)
+ * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
* Description
* This helper is used in programs implementing IR decoding, to
- * report a successfully decoded repeat key message. This delays
- * the generation of a key up event for previously generated
- * key down event.
+ * report a successfully decoded key press with *scancode*,
+ * *toggle* value in the given *protocol*. The scancode will be
+ * translated to a keycode using the rc keymap, and reported as
+ * an input key down event. After a period a key up event is
+ * generated. This period can be extended by calling either
+ * **bpf_rc_keydown**\ () again with the same values, or calling
+ * **bpf_rc_repeat**\ ().
*
- * Some IR protocols like NEC have a special IR message for
- * repeating last button, for when a button is held down.
+ * Some protocols include a toggle bit, in case the button was
+ * released and pressed again between consecutive scancodes.
*
* The *ctx* should point to the lirc sample as passed into
* the program.
*
+ * The *protocol* is the decoded protocol number (see
+ * **enum rc_proto** for some predefined values).
+ *
* This helper is only available is the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
* Return
* 0
*
- * uint64_t bpf_skb_cgroup_id(struct sk_buff *skb)
+ * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
* Description
* Return the cgroup v2 id of the socket associated with the *skb*.
* This is roughly similar to the **bpf_get_cgroup_classid**\ ()
@@ -2159,30 +2201,12 @@ union bpf_attr {
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
- * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
- * Description
- * Return id of cgroup v2 that is ancestor of cgroup associated
- * with the *skb* at the *ancestor_level*. The root cgroup is at
- * *ancestor_level* zero and each step down the hierarchy
- * increments the level. If *ancestor_level* == level of cgroup
- * associated with *skb*, then return value will be same as that
- * of **bpf_skb_cgroup_id**\ ().
- *
- * The helper is useful to implement policies based on cgroups
- * that are upper in hierarchy than immediate cgroup associated
- * with *skb*.
- *
- * The format of returned id and helper limitations are same as in
- * **bpf_skb_cgroup_id**\ ().
- * Return
- * The id is returned or 0 in case the id could not be retrieved.
- *
* u64 bpf_get_current_cgroup_id(void)
* Return
* A 64-bit integer containing the current cgroup id based
* on the cgroup within which the current task is running.
*
- * void* get_local_storage(void *map, u64 flags)
+ * void *bpf_get_local_storage(void *map, u64 flags)
* Description
* Get the pointer to the local storage area.
* The type and the size of the local storage is defined
@@ -2209,6 +2233,24 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
+ * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
+ * Description
+ * Return id of cgroup v2 that is ancestor of cgroup associated
+ * with the *skb* at the *ancestor_level*. The root cgroup is at
+ * *ancestor_level* zero and each step down the hierarchy
+ * increments the level. If *ancestor_level* == level of cgroup
+ * associated with *skb*, then return value will be same as that
+ * of **bpf_skb_cgroup_id**\ ().
+ *
+ * The helper is useful to implement policies based on cgroups
+ * that are upper in hierarchy than immediate cgroup associated
+ * with *skb*.
+ *
+ * The format of returned id and helper limitations are same as in
+ * **bpf_skb_cgroup_id**\ ().
+ * Return
+ * The id is returned or 0 in case the id could not be retrieved.
+ *
* struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
* Description
* Look for TCP socket matching *tuple*, optionally in a child
@@ -2243,7 +2285,8 @@ union bpf_attr {
* Return
* Pointer to **struct bpf_sock**, or **NULL** in case of failure.
* For sockets with reuseport option, the **struct bpf_sock**
- * result is from **reuse->socks**\ [] using the hash of the tuple.
+ * result is from *reuse*\ **->socks**\ [] using the hash of the
+ * tuple.
*
* struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
* Description
@@ -2279,7 +2322,8 @@ union bpf_attr {
* Return
* Pointer to **struct bpf_sock**, or **NULL** in case of failure.
* For sockets with reuseport option, the **struct bpf_sock**
- * result is from **reuse->socks**\ [] using the hash of the tuple.
+ * result is from *reuse*\ **->socks**\ [] using the hash of the
+ * tuple.
*
* int bpf_sk_release(struct bpf_sock *sock)
* Description
@@ -2289,6 +2333,16 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
+ * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
+ * Description
+ * Push an element *value* into *map*. *flags* is one of:
+ *
+ * **BPF_EXIST**
+ * If the queue/stack is full, the oldest element is
+ * removed to make room for this one.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
* int bpf_map_pop_elem(struct bpf_map *map, void *value)
* Description
* Pop an element from *map*.
@@ -2343,29 +2397,281 @@ union bpf_attr {
* Return
* 0
*
+ * int bpf_spin_lock(struct bpf_spin_lock *lock)
+ * Description
+ * Acquire a spinlock represented by the pointer *lock*, which is
+ * stored as part of a map value. Taking the lock allows the
+ * program to safely update the rest of the fields in that value. The
+ * spinlock can (and must) later be released with a call to
+ * **bpf_spin_unlock**\ (\ *lock*\ ).
+ *
+ * Spinlocks in BPF programs come with a number of restrictions
+ * and constraints:
+ *
+ * * **bpf_spin_lock** objects are only allowed inside maps of
+ * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
+ * list could be extended in the future).
+ * * BTF description of the map is mandatory.
+ * * The BPF program can take ONE lock at a time, since taking two
+ * or more could cause deadlocks.
+ * * Only one **struct bpf_spin_lock** is allowed per map element.
+ * * When the lock is taken, calls (either BPF to BPF or helpers)
+ * are not allowed.
+ * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
+ * allowed inside a spinlock-ed region.
+ * * The BPF program MUST call **bpf_spin_unlock**\ () to release
+ * the lock, on all execution paths, before it returns.
+ * * The BPF program can access **struct bpf_spin_lock** only via
+ * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
+ * helpers. Loading or storing data into the **struct
+ * bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
+ * * To use the **bpf_spin_lock**\ () helper, the BTF description
+ * of the map value must be a struct and have **struct
+ * bpf_spin_lock** *anyname*\ **;** field at the top level.
+ * Nested lock inside another struct is not allowed.
+ * * The **struct bpf_spin_lock** *lock* field in a map value must
+ * be aligned on a multiple of 4 bytes in that value.
+ * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
+ * the **bpf_spin_lock** field to user space.
+ * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
+ * a BPF program, does not update the **bpf_spin_lock** field.
+ * * **bpf_spin_lock** cannot be on the stack or inside a
+ * networking packet (it can only be inside of a map value).
+ * * **bpf_spin_lock** is available to root only.
+ * * Tracing programs and socket filter programs cannot use
+ * **bpf_spin_lock**\ () due to insufficient preemption checks
+ * (but this may change in the future).
+ * * **bpf_spin_lock** is not allowed in inner maps of map-in-map.
+ * Return
+ * 0
+ *
+ * int bpf_spin_unlock(struct bpf_spin_lock *lock)
+ * Description
+ * Release the *lock* previously locked by a call to
+ * **bpf_spin_lock**\ (\ *lock*\ ).
+ * Return
+ * 0
+ *
* struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
* Description
* This helper gets a **struct bpf_sock** pointer such
- * that all the fields in bpf_sock can be accessed.
+ * that all the fields in this **bpf_sock** can be accessed.
* Return
- * A **struct bpf_sock** pointer on success, or NULL in
+ * A **struct bpf_sock** pointer on success, or **NULL** in
* case of failure.
*
* struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
* Description
* This helper gets a **struct bpf_tcp_sock** pointer from a
* **struct bpf_sock** pointer.
- *
* Return
- * A **struct bpf_tcp_sock** pointer on success, or NULL in
+ * A **struct bpf_tcp_sock** pointer on success, or **NULL** in
* case of failure.
*
 * int bpf_skb_ecn_set_ce(struct sk_buff *skb)
- * Description
- * Sets ECN of IP header to ce (congestion encountered) if
- * current value is ect (ECN capable). Works with IPv6 and IPv4.
- * Return
- * 1 if set, 0 if not set.
+ * Description
+ * Set ECN (Explicit Congestion Notification) field of IP header
+ * to **CE** (Congestion Encountered) if current value is **ECT**
+ * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6
+ * and IPv4.
+ * Return
+ * 1 if the **CE** flag is set (either by the current helper call
+ * or because it was already present), 0 if it is not set.
+ *
+ * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
+ * Description
+ * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
+ * **bpf_sk_release**\ () is unnecessary and not allowed.
+ * Return
+ * A **struct bpf_sock** pointer on success, or **NULL** in
+ * case of failure.
+ *
+ * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
+ * Description
+ * Look for TCP socket matching *tuple*, optionally in a child
+ * network namespace *netns*. The return value must be checked,
+ * and if non-**NULL**, released via **bpf_sk_release**\ ().
+ *
+ * This function is identical to **bpf_sk_lookup_tcp**\ (), except
+ * that it also returns timewait or request sockets. Use
+ * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the
+ * full structure.
+ *
+ * This helper is available only if the kernel was compiled with
+ * **CONFIG_NET** configuration option.
+ * Return
+ * Pointer to **struct bpf_sock**, or **NULL** in case of failure.
+ * For sockets with reuseport option, the **struct bpf_sock**
+ * result is from *reuse*\ **->socks**\ [] using the hash of the
+ * tuple.
+ *
+ * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
+ * Description
+ * Check whether *iph* and *th* contain a valid SYN cookie ACK for
+ * the listening socket in *sk*.
+ *
+ * *iph* points to the start of the IPv4 or IPv6 header, while
+ * *iph_len* contains **sizeof**\ (**struct iphdr**) or
+ * **sizeof**\ (**struct ip6hdr**).
+ *
+ * *th* points to the start of the TCP header, while *th_len*
+ * contains **sizeof**\ (**struct tcphdr**).
+ *
+ * Return
+ * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
+ * error otherwise.
+ *
+ * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
+ * Description
+ * Get the name of the sysctl in /proc/sys/ and copy it into the
+ * buffer *buf* of size *buf_len* provided by the program.
+ *
+ * The buffer is always NUL terminated, unless it's zero-sized.
+ *
+ * If *flags* is zero, full name (e.g. "net/ipv4/tcp_mem") is
+ * copied. Use **BPF_F_SYSCTL_BASE_NAME** flag to copy base name
+ * only (e.g. "tcp_mem").
+ * Return
+ * Number of characters copied (not including the trailing NUL).
+ *
+ * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
+ * truncated name in this case).
+ *
+ * int bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
+ * Description
+ * Get the current value of the sysctl as it is presented in
+ * /proc/sys (including the newline, etc.), and copy it as a
+ * string into the buffer *buf* of size *buf_len* provided by
+ * the program.
+ *
+ * The whole value is copied, regardless of the file position at
+ * which user space issued e.g. sys_read.
+ *
+ * The buffer is always NUL terminated, unless it's zero-sized.
+ * Return
+ * Number of characters copied (not including the trailing NUL).
+ *
+ * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
+ * the truncated value in this case).
+ *
+ * **-EINVAL** if current value was unavailable, e.g. because
+ * sysctl is uninitialized and read returns -EIO for it.
+ *
+ * int bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
+ * Description
+ * Get the new value being written by user space to the sysctl
+ * (before the actual write happens) and copy it as a string into
+ * the buffer *buf* of size *buf_len* provided by the program.
+ *
+ * User space may write the new value at a file position > 0.
+ *
+ * The buffer is always NUL terminated, unless it's zero-sized.
+ * Return
+ * Number of characters copied (not including the trailing NUL).
+ *
+ * **-E2BIG** if the buffer wasn't big enough (*buf* will contain
+ * the truncated value in this case).
+ *
+ * **-EINVAL** if sysctl is being read.
+ *
+ * int bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len)
+ * Description
+ * Override the new value being written by user space to the
+ * sysctl with the value provided by the program in buffer *buf*
+ * of size *buf_len*.
+ *
+ * *buf* should contain a string in the same form as provided by
+ * user space on a sysctl write.
+ *
+ * User space may write the new value at a file position > 0. To
+ * override the whole sysctl value, the file position should be
+ * set to zero.
+ * Return
+ * 0 on success.
+ *
+ * **-E2BIG** if the *buf_len* is too big.
+ *
+ * **-EINVAL** if sysctl is being read.
+ *
+ * int bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res)
+ * Description
+ * Convert the initial part of the string from buffer *buf* of
+ * size *buf_len* to a long integer according to the given base
+ * and save the result in *res*.
+ *
+ * The string may begin with an arbitrary amount of white space
+ * (as determined by **isspace**\ (3)) followed by a single
+ * optional '**-**' sign.
+ *
+ * The five least significant bits of *flags* encode the base;
+ * the other bits are currently unused.
+ *
+ * The base must be 8, 10, 16 or 0 to detect it automatically,
+ * similar to user space **strtol**\ (3).
+ * Return
+ * Number of characters consumed on success. Must be positive but
+ * no more than *buf_len*.
+ *
+ * **-EINVAL** if no valid digits were found or unsupported base
+ * was provided.
+ *
+ * **-ERANGE** if resulting value was out of range.
+ *
+ * int bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res)
+ * Description
+ * Convert the initial part of the string from buffer *buf* of
+ * size *buf_len* to an unsigned long integer according to the
+ * given base and save the result in *res*.
+ *
+ * The string may begin with an arbitrary amount of white space
+ * (as determined by **isspace**\ (3)).
+ *
+ * The five least significant bits of *flags* encode the base;
+ * the other bits are currently unused.
+ *
+ * The base must be 8, 10, 16 or 0 to detect it automatically,
+ * similar to user space **strtoul**\ (3).
+ * Return
+ * Number of characters consumed on success. Must be positive but
+ * no more than *buf_len*.
+ *
+ * **-EINVAL** if no valid digits were found or unsupported base
+ * was provided.
+ *
+ * **-ERANGE** if resulting value was out of range.
+ *
+ * void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags)
+ * Description
+ * Get a bpf-local-storage from a *sk*.
+ *
+ * Logically, it could be thought of as getting the value from
+ * a *map* with *sk* as the **key**. From this
+ * perspective, the usage is not much different from
+ * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*), except that this
+ * helper enforces that the key must be a full socket and that the
+ * map must be of type **BPF_MAP_TYPE_SK_STORAGE**.
+ *
+ * Underneath, the value is stored locally at *sk* instead of
+ * the *map*. The *map* is used as the bpf-local-storage
+ * "type". The bpf-local-storage "type" (i.e. the *map*) is
+ * searched against all bpf-local-storages residing at *sk*.
+ *
+ * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
+ * used such that a new bpf-local-storage will be
+ * created if one does not exist. *value* can be used
+ * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify
+ * the initial value of a bpf-local-storage. If *value* is
+ * **NULL**, the new bpf-local-storage will be zero initialized.
+ * Return
+ * A bpf-local-storage pointer is returned on success.
+ *
+ * **NULL** if not found or there was an error in adding
+ * a new bpf-local-storage.
+ *
+ * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
+ * Description
+ * Delete a bpf-local-storage from a *sk*.
+ * Return
+ * 0 on success.
+ *
+ * **-ENOENT** if the bpf-local-storage cannot be found.
*/
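
The bpf_spin_lock()/bpf_spin_unlock() rules documented above are easier to follow in a concrete program, so here is a minimal, hypothetical sketch of the BPF-side C (built with clang -target bpf and loaded with a libbpf-style loader). The SEC(), __uint() and __type() macros are reproduced inline from the tools headers, and the map name, section name and one-element array are illustrative assumptions, not part of this header; a classifier program is used because tracing and socket filter programs may not take the lock.

/* Sketch only: one bpf_spin_lock at the top level of an array-map value,
 * used to update two counters atomically. BTF for the map is mandatory. */
#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))
#define __uint(name, val) int (*name)[val]
#define __type(name, val) typeof(val) *name

static void (*bpf_spin_lock)(struct bpf_spin_lock *lock) =
    (void *) BPF_FUNC_spin_lock;
static void (*bpf_spin_unlock)(struct bpf_spin_lock *lock) =
    (void *) BPF_FUNC_spin_unlock;
static void *(*bpf_map_lookup_elem)(void *map, const void *key) =
    (void *) BPF_FUNC_map_lookup_elem;

struct stats_val {
    struct bpf_spin_lock lock;    /* only one lock per map element */
    __u64 packets;
    __u64 bytes;
};

struct {
    __uint(type, BPF_MAP_TYPE_ARRAY);
    __uint(max_entries, 1);
    __type(key, __u32);
    __type(value, struct stats_val);
} stats SEC(".maps");

SEC("classifier")
int count_packets(struct __sk_buff *skb)
{
    __u32 key = 0;
    struct stats_val *val = bpf_map_lookup_elem(&stats, &key);

    if (!val)
        return 0;

    bpf_spin_lock(&val->lock);      /* no calls allowed while held */
    val->packets++;
    val->bytes += skb->len;
    bpf_spin_unlock(&val->lock);    /* must be released on every path */
    return 0;
}
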
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2465,7 +2771,18 @@ union bpf_attr {
FN(spin_unlock), \
FN(sk_fullsock), \
FN(tcp_sock), \
- FN(skb_ecn_set_ce),
+ FN(skb_ecn_set_ce), \
+ FN(get_listener_sock), \
+ FN(skc_lookup_tcp), \
+ FN(tcp_check_syncookie), \
+ FN(sysctl_get_name), \
+ FN(sysctl_get_current_value), \
+ FN(sysctl_get_new_value), \
+ FN(sysctl_set_new_value), \
+ FN(strtol), \
+ FN(strtoul), \
+ FN(sk_storage_get), \
+ FN(sk_storage_delete),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -2524,9 +2841,30 @@ enum bpf_func_id {
/* Current network namespace */
#define BPF_F_CURRENT_NETNS (-1L)
+/* BPF_FUNC_skb_adjust_room flags. */
+#define BPF_F_ADJ_ROOM_FIXED_GSO (1ULL << 0)
+
+#define BPF_ADJ_ROOM_ENCAP_L2_MASK 0xff
+#define BPF_ADJ_ROOM_ENCAP_L2_SHIFT 56
+
+#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 (1ULL << 1)
+#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 (1ULL << 2)
+#define BPF_F_ADJ_ROOM_ENCAP_L4_GRE (1ULL << 3)
+#define BPF_F_ADJ_ROOM_ENCAP_L4_UDP (1ULL << 4)
+#define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \
+ BPF_ADJ_ROOM_ENCAP_L2_MASK) \
+ << BPF_ADJ_ROOM_ENCAP_L2_SHIFT)
+
+/* BPF_FUNC_sysctl_get_name flags. */
+#define BPF_F_SYSCTL_BASE_NAME (1ULL << 0)
+
+/* BPF_FUNC_sk_storage_get flags */
+#define BPF_SK_STORAGE_GET_F_CREATE (1ULL << 0)
+
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
BPF_ADJ_ROOM_NET,
+ BPF_ADJ_ROOM_MAC,
};
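
The BPF_ADJ_ROOM_MAC mode and the BPF_F_ADJ_ROOM_ENCAP_* flags above are combined into the flags argument of bpf_skb_adjust_room(). Below is a small, hypothetical sketch of building such a flags word for an outer IPv4 plus GRE encapsulation with a 14-byte inner L2 header; ETH_HLEN is assumed to come from <linux/if_ether.h> and len_diff would be the total header length being reserved.

/* Sketch only: grow room at the MAC layer and describe the new bytes as an
 * IPv4/GRE tunnel header followed by an Ethernet header. */
__u64 flags = BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 |
              BPF_F_ADJ_ROOM_ENCAP_L4_GRE |
              BPF_F_ADJ_ROOM_ENCAP_L2(ETH_HLEN);

/* e.g. bpf_skb_adjust_room(skb, len_diff, BPF_ADJ_ROOM_MAC, flags); */
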
/* Mode for BPF_FUNC_skb_load_bytes_relative helper. */
@@ -3152,4 +3490,14 @@ struct bpf_line_info {
struct bpf_spin_lock {
__u32 val;
};
+
+struct bpf_sysctl {
+ __u32 write; /* Sysctl is being read (= 0) or written (= 1).
+ * Allows 1,2,4-byte read, but no write.
+ */
+ __u32 file_pos; /* Sysctl file position to read from, write to.
+ * Allows 1,2,4-byte read and 4-byte write.
+ */
+};
+
#endif /* _UAPI__LINUX_BPF_H__ */
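
To make the new struct bpf_sysctl context and the sysctl/strtoul helpers documented above more concrete, the following is a minimal, hypothetical BPF_PROG_TYPE_CGROUP_SYSCTL program sketch (clang -target bpf, libbpf-style "cgroup/sysctl" section). The SEC() macro and the helper stubs are reproduced inline, and the 16-byte buffer and the 4096 limit are illustrative assumptions.

/* Sketch only: reject writes that would set a sysctl above 4096. */
#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

static int (*bpf_sysctl_get_new_value)(struct bpf_sysctl *ctx, char *buf,
                                       unsigned long buf_len) =
    (void *) BPF_FUNC_sysctl_get_new_value;
static int (*bpf_strtoul)(const char *buf, unsigned long buf_len, __u64 flags,
                          unsigned long *res) = (void *) BPF_FUNC_strtoul;

SEC("cgroup/sysctl")
int sysctl_guard(struct bpf_sysctl *ctx)
{
    char buf[16] = {};
    unsigned long new_val = 0;

    if (!ctx->write)        /* reads are always allowed */
        return 1;

    if (bpf_sysctl_get_new_value(ctx, buf, sizeof(buf)) <= 0)
        return 0;           /* reject: no usable new value */

    /* base 0: auto-detect, as with user space strtoul(3) */
    if (bpf_strtoul(buf, sizeof(buf), 0, &new_val) < 0)
        return 0;

    return new_val <= 4096; /* 1 = allow the write, 0 = reject it */
}
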
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
index 7b7475ef2f17..9310652ca4f9 100644
--- a/include/uapi/linux/btf.h
+++ b/include/uapi/linux/btf.h
@@ -39,11 +39,11 @@ struct btf_type {
* struct, union and fwd
*/
__u32 info;
- /* "size" is used by INT, ENUM, STRUCT and UNION.
+ /* "size" is used by INT, ENUM, STRUCT, UNION and DATASEC.
* "size" tells the size of the type it is describing.
*
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
- * FUNC and FUNC_PROTO.
+ * FUNC, FUNC_PROTO and VAR.
* "type" is a type_id referring to another type.
*/
union {
@@ -70,8 +70,10 @@ struct btf_type {
#define BTF_KIND_RESTRICT 11 /* Restrict */
#define BTF_KIND_FUNC 12 /* Function */
#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */
-#define BTF_KIND_MAX 13
-#define NR_BTF_KINDS 14
+#define BTF_KIND_VAR 14 /* Variable */
+#define BTF_KIND_DATASEC 15 /* Section */
+#define BTF_KIND_MAX BTF_KIND_DATASEC
+#define NR_BTF_KINDS (BTF_KIND_MAX + 1)
/* For some specific BTF_KIND, "struct btf_type" is immediately
* followed by extra data.
@@ -138,4 +140,26 @@ struct btf_param {
__u32 type;
};
+enum {
+ BTF_VAR_STATIC = 0,
+ BTF_VAR_GLOBAL_ALLOCATED,
+};
+
+/* BTF_KIND_VAR is followed by a single "struct btf_var" to describe
+ * additional information related to the variable such as its linkage.
+ */
+struct btf_var {
+ __u32 linkage;
+};
+
+/* BTF_KIND_DATASEC is followed by multiple "struct btf_var_secinfo"
+ * to describe all BTF_KIND_VAR types it contains along with its
+ * in-section offset as well as size.
+ */
+struct btf_var_secinfo {
+ __u32 type;
+ __u32 offset;
+ __u32 size;
+};
+
#endif /* _UAPI__LINUX_BTF_H__ */
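
The new VAR and DATASEC kinds follow the usual BTF layout: a struct btf_type entry followed by vlen trailing records, here struct btf_var_secinfo. Below is a small, hypothetical user-space sketch that dumps the members of one DATASEC entry; it assumes "t" already points at a valid entry inside a parsed BTF blob.

/* Sketch only: print the variables described by a BTF_KIND_DATASEC entry. */
#include <stdio.h>
#include <linux/btf.h>

static void dump_datasec(const struct btf_type *t)
{
    const struct btf_var_secinfo *vsi = (const void *)(t + 1);
    unsigned int i, vlen = BTF_INFO_VLEN(t->info);

    if (BTF_INFO_KIND(t->info) != BTF_KIND_DATASEC)
        return;

    /* for a DATASEC, "size" is the section size in bytes */
    printf("datasec: %u vars, %u bytes\n", vlen, t->size);
    for (i = 0; i < vlen; i++)
        printf("  var type_id=%u offset=%u size=%u\n",
               vsi[i].type, vsi[i].offset, vsi[i].size);
}
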
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index e974f4bb5378..421239b98db2 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -307,6 +307,8 @@
*
* Used by:
* struct btrfs_dir_item.type
+ *
+ * Values 0..7 must match common file type values in fs_types.h.
*/
#define BTRFS_FT_UNKNOWN 0
#define BTRFS_FT_REG_FILE 1
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index 0c3000faedba..f47e853546fa 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -34,14 +34,20 @@
#define EM_M32R 88 /* Renesas M32R */
#define EM_MN10300 89 /* Panasonic/MEI MN10300, AM33 */
#define EM_OPENRISC 92 /* OpenRISC 32-bit embedded processor */
+#define EM_ARCOMPACT 93 /* ARCompact processor */
#define EM_XTENSA 94 /* Tensilica Xtensa Architecture */
#define EM_BLACKFIN 106 /* ADI Blackfin Processor */
+#define EM_UNICORE 110 /* UniCore-32 */
#define EM_ALTERA_NIOS2 113 /* Altera Nios II soft-core processor */
#define EM_TI_C6000 140 /* TI C6X DSPs */
+#define EM_HEXAGON 164 /* QUALCOMM Hexagon */
+#define EM_NDS32 167 /* Andes Technology compact code size
+ embedded RISC processor family */
#define EM_AARCH64 183 /* ARM 64 bit */
#define EM_TILEPRO 188 /* Tilera TILEPro */
#define EM_MICROBLAZE 189 /* Xilinx MicroBlaze */
#define EM_TILEGX 191 /* Tilera TILE-Gx */
+#define EM_ARCV2 195 /* ARCv2 Cores */
#define EM_RISCV 243 /* RISC-V */
#define EM_BPF 247 /* Linux BPF - in-kernel virtual machine */
#define EM_CSKY 252 /* C-SKY */
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 3652b239dad1..3534ce157ae9 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -252,9 +252,17 @@ struct ethtool_tunable {
#define DOWNSHIFT_DEV_DEFAULT_COUNT 0xff
#define DOWNSHIFT_DEV_DISABLE 0
+/* Time in msecs after which link is reported as down
+ * 0 = lowest time supported by the PHY
+ * 0xff = off, link down detection according to standard
+ */
+#define ETHTOOL_PHY_FAST_LINK_DOWN_ON 0
+#define ETHTOOL_PHY_FAST_LINK_DOWN_OFF 0xff
+
enum phy_tunable_id {
ETHTOOL_PHY_ID_UNSPEC,
ETHTOOL_PHY_DOWNSHIFT,
+ ETHTOOL_PHY_FAST_LINK_DOWN,
/*
* Add your fresh new phy tunable attribute above and remember to update
* phy_tunable_strings[] in net/core/ethtool.c
@@ -1591,7 +1599,7 @@ enum ethtool_link_mode_bit_indices {
static inline int ethtool_validate_speed(__u32 speed)
{
- return speed <= INT_MAX || speed == SPEED_UNKNOWN;
+ return speed <= INT_MAX || speed == (__u32)SPEED_UNKNOWN;
}
/* Duplex, half or full. */
@@ -1704,6 +1712,9 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define ETH_MODULE_SFF_8436 0x4
#define ETH_MODULE_SFF_8436_LEN 256
+#define ETH_MODULE_SFF_8636_MAX_LEN 640
+#define ETH_MODULE_SFF_8436_MAX_LEN 640
+
/* Reset flags */
/* The reset() operation must clear the flags for the components which
* were actually reset. On successful return, the flags indicate the
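
The ETHTOOL_PHY_FAST_LINK_DOWN tunable added above is driven through the existing ETHTOOL_PHY_STUNABLE command. The snippet below is a hedged sketch of that path; the one-byte payload layout, the "eth0" name and the pre-opened AF_INET datagram socket are assumptions, and the new defines require correspondingly new installed headers.

/* Sketch only: ask the PHY for its fastest supported link-down detection. */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int set_fast_link_down(int sock)    /* sock: AF_INET, SOCK_DGRAM */
{
    struct {
        struct ethtool_tunable hdr;
        __u8 msecs;
    } req = {
        .hdr = {
            .cmd     = ETHTOOL_PHY_STUNABLE,
            .id      = ETHTOOL_PHY_FAST_LINK_DOWN,
            .type_id = ETHTOOL_TUNABLE_U8,
            .len     = 1,
        },
        .msecs = ETHTOOL_PHY_FAST_LINK_DOWN_ON, /* 0 = lowest time the PHY supports */
    };
    struct ifreq ifr;

    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
    ifr.ifr_data = (char *)&req;
    return ioctl(sock, SIOCETHTOOL, &ifr);
}
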
diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
index a2f8658f1c55..1d338357df8a 100644
--- a/include/uapi/linux/fcntl.h
+++ b/include/uapi/linux/fcntl.h
@@ -91,5 +91,7 @@
#define AT_STATX_FORCE_SYNC 0x2000 /* - Force the attributes to be sync'd with the server */
#define AT_STATX_DONT_SYNC 0x4000 /* - Don't sync attributes with the server */
+#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */
+
#endif /* _UAPI_LINUX_FCNTL_H */
diff --git a/include/uapi/linux/fou.h b/include/uapi/linux/fou.h
index f2ea833a2812..87c2c9f08803 100644
--- a/include/uapi/linux/fou.h
+++ b/include/uapi/linux/fou.h
@@ -16,6 +16,12 @@ enum {
FOU_ATTR_IPPROTO, /* u8 */
FOU_ATTR_TYPE, /* u8 */
FOU_ATTR_REMCSUM_NOPARTIAL, /* flag */
+ FOU_ATTR_LOCAL_V4, /* u32 */
+ FOU_ATTR_LOCAL_V6, /* in6_addr */
+ FOU_ATTR_PEER_V4, /* u32 */
+ FOU_ATTR_PEER_V6, /* in6_addr */
+ FOU_ATTR_PEER_PORT, /* u16 */
+ FOU_ATTR_IFINDEX, /* s32 */
__FOU_ATTR_MAX,
};
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 121e82ce296b..59c71fa8c553 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -320,6 +320,9 @@ struct fscrypt_key {
#define SYNC_FILE_RANGE_WAIT_BEFORE 1
#define SYNC_FILE_RANGE_WRITE 2
#define SYNC_FILE_RANGE_WAIT_AFTER 4
+#define SYNC_FILE_RANGE_WRITE_AND_WAIT (SYNC_FILE_RANGE_WRITE | \
+ SYNC_FILE_RANGE_WAIT_BEFORE | \
+ SYNC_FILE_RANGE_WAIT_AFTER)
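
SYNC_FILE_RANGE_WRITE_AND_WAIT simply bundles the three existing flags; below is a brief, hedged user-space sketch (the fd and the one-page range are placeholders, and the define is repeated locally in case the installed headers predate it).

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>

#ifndef SYNC_FILE_RANGE_WRITE_AND_WAIT
#define SYNC_FILE_RANGE_WRITE_AND_WAIT (SYNC_FILE_RANGE_WAIT_BEFORE | \
                                        SYNC_FILE_RANGE_WRITE | \
                                        SYNC_FILE_RANGE_WAIT_AFTER)
#endif

/* Start writeback for the first page of "fd" and wait for it to complete. */
int flush_first_page(int fd)
{
    if (sync_file_range(fd, 0, 4096, SYNC_FILE_RANGE_WRITE_AND_WAIT) < 0) {
        perror("sync_file_range");
        return -1;
    }
    return 0;
}
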
/*
* Flags for preadv2/pwritev2:
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 2ac598614a8f..19fb55e3c73e 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -44,6 +44,7 @@
* - add lock_owner field to fuse_setattr_in, fuse_read_in and fuse_write_in
* - add blksize field to fuse_attr
* - add file flags field to fuse_read_in and fuse_write_in
+ * - Add ATIME_NOW and MTIME_NOW flags to fuse_setattr_in
*
* 7.10
* - add nonseekable open flag
@@ -54,7 +55,7 @@
* - add POLL message and NOTIFY_POLL notification
*
* 7.12
- * - add umask flag to input argument of open, mknod and mkdir
+ * - add umask flag to input argument of create, mknod and mkdir
* - add notification messages for invalidation of inodes and
* directory entries
*
@@ -125,6 +126,10 @@
*
* 7.29
* - add FUSE_NO_OPENDIR_SUPPORT flag
+ *
+ * 7.30
+ * - add FUSE_EXPLICIT_INVAL_DATA
+ * - add FUSE_IOCTL_COMPAT_X32
*/
#ifndef _LINUX_FUSE_H
@@ -160,7 +165,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 29
+#define FUSE_KERNEL_MINOR_VERSION 30
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -229,11 +234,13 @@ struct fuse_file_lock {
* FOPEN_KEEP_CACHE: don't invalidate the data cache on open
* FOPEN_NONSEEKABLE: the file is not seekable
* FOPEN_CACHE_DIR: allow caching this directory
+ * FOPEN_STREAM: the file is stream-like (no file position at all)
*/
#define FOPEN_DIRECT_IO (1 << 0)
#define FOPEN_KEEP_CACHE (1 << 1)
#define FOPEN_NONSEEKABLE (1 << 2)
#define FOPEN_CACHE_DIR (1 << 3)
+#define FOPEN_STREAM (1 << 4)
/**
* INIT request/reply flags
@@ -263,6 +270,7 @@ struct fuse_file_lock {
* FUSE_MAX_PAGES: init_out.max_pages contains the max number of req pages
* FUSE_CACHE_SYMLINKS: cache READLINK responses
* FUSE_NO_OPENDIR_SUPPORT: kernel supports zero-message opendir
+ * FUSE_EXPLICIT_INVAL_DATA: only invalidate cached pages on explicit request
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -289,6 +297,7 @@ struct fuse_file_lock {
#define FUSE_MAX_PAGES (1 << 22)
#define FUSE_CACHE_SYMLINKS (1 << 23)
#define FUSE_NO_OPENDIR_SUPPORT (1 << 24)
+#define FUSE_EXPLICIT_INVAL_DATA (1 << 25)
/**
* CUSE INIT request/reply flags
@@ -335,6 +344,7 @@ struct fuse_file_lock {
* FUSE_IOCTL_RETRY: retry with new iovecs
* FUSE_IOCTL_32BIT: 32bit ioctl
* FUSE_IOCTL_DIR: is a directory
+ * FUSE_IOCTL_COMPAT_X32: x32 compat ioctl on 64bit machine (64bit time_t)
*
* FUSE_IOCTL_MAX_IOV: maximum of in_iovecs + out_iovecs
*/
@@ -343,6 +353,7 @@ struct fuse_file_lock {
#define FUSE_IOCTL_RETRY (1 << 2)
#define FUSE_IOCTL_32BIT (1 << 3)
#define FUSE_IOCTL_DIR (1 << 4)
+#define FUSE_IOCTL_COMPAT_X32 (1 << 5)
#define FUSE_IOCTL_MAX_IOV 256
@@ -353,6 +364,13 @@ struct fuse_file_lock {
*/
#define FUSE_POLL_SCHEDULE_NOTIFY (1 << 0)
+/**
+ * Fsync flags
+ *
+ * FUSE_FSYNC_FDATASYNC: Sync data only, not metadata
+ */
+#define FUSE_FSYNC_FDATASYNC (1 << 0)
+
enum fuse_opcode {
FUSE_LOOKUP = 1,
FUSE_FORGET = 2, /* no reply */
diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h
index 325395f56bfa..2622b5a3e616 100644
--- a/include/uapi/linux/icmpv6.h
+++ b/include/uapi/linux/icmpv6.h
@@ -90,6 +90,8 @@ struct icmp6hdr {
#define ICMPV6_TIME_EXCEED 3
#define ICMPV6_PARAMPROB 4
+#define ICMPV6_ERRMSG_MAX 127
+
#define ICMPV6_INFOMSG_MASK 0x80
#define ICMPV6_ECHO_REQUEST 128
@@ -110,6 +112,8 @@ struct icmp6hdr {
#define ICMPV6_MRDISC_ADV 151
+#define ICMPV6_MSG_MAX 255
+
/*
* Codes for Destination Unreachable
*/
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 3a45b4ad71a3..3158ba672b72 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -109,6 +109,7 @@
#define ETH_P_QINQ2 0x9200 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_QINQ3 0x9300 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
+#define ETH_P_DSA_8021Q 0xDADB /* Fake VLAN Header for DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_IFE 0xED3E /* ForCES inter-FE LFB type */
#define ETH_P_AF_IUCV 0xFBFB /* IBM af_iucv [ NOT AN OFFICIALLY REGISTERED ID ] */
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index 23a6753b37df..454ae31b93c7 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -60,6 +60,7 @@
#define TUNSETSTEERINGEBPF _IOR('T', 224, int)
#define TUNSETFILTEREBPF _IOR('T', 225, int)
#define TUNSETCARRIER _IOW('T', 226, int)
+#define TUNGETDEVNETNS _IO('T', 227)
/* TUNSETIFF ifr flags */
#define IFF_TUN 0x0001
diff --git a/include/uapi/linux/if_vlan.h b/include/uapi/linux/if_vlan.h
index 7a0e8bd65b6b..90a2c89afc8f 100644
--- a/include/uapi/linux/if_vlan.h
+++ b/include/uapi/linux/if_vlan.h
@@ -32,10 +32,11 @@ enum vlan_ioctl_cmds {
};
enum vlan_flags {
- VLAN_FLAG_REORDER_HDR = 0x1,
- VLAN_FLAG_GVRP = 0x2,
- VLAN_FLAG_LOOSE_BINDING = 0x4,
- VLAN_FLAG_MVRP = 0x8,
+ VLAN_FLAG_REORDER_HDR = 0x1,
+ VLAN_FLAG_GVRP = 0x2,
+ VLAN_FLAG_LOOSE_BINDING = 0x4,
+ VLAN_FLAG_MVRP = 0x8,
+ VLAN_FLAG_BRIDGE_BINDING = 0x10,
};
enum vlan_name_types {
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 7f14d4a66c28..85387c76c24f 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -439,10 +439,12 @@
#define KEY_TITLE 0x171
#define KEY_SUBTITLE 0x172
#define KEY_ANGLE 0x173
-#define KEY_ZOOM 0x174
+#define KEY_FULL_SCREEN 0x174 /* AC View Toggle */
+#define KEY_ZOOM KEY_FULL_SCREEN
#define KEY_MODE 0x175
#define KEY_KEYBOARD 0x176
-#define KEY_SCREEN 0x177
+#define KEY_ASPECT_RATIO 0x177 /* HUTRR37: Aspect */
+#define KEY_SCREEN KEY_ASPECT_RATIO
#define KEY_PC 0x178 /* Media Select Computer */
#define KEY_TV 0x179 /* Media Select TV */
#define KEY_TV2 0x17a /* Media Select Cable */
@@ -604,6 +606,7 @@
#define KEY_SCREENSAVER 0x245 /* AL Screen Saver */
#define KEY_VOICECOMMAND 0x246 /* Listening Voice Command */
#define KEY_ASSISTANT 0x247 /* AL Context-aware desktop assistant */
+#define KEY_KBD_LAYOUT_NEXT 0x248 /* AC Next Keyboard Layout Select */
#define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */
#define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index e23408692118..a0c460025036 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -26,6 +26,7 @@ struct io_uring_sqe {
__kernel_rwf_t rw_flags;
__u32 fsync_flags;
__u16 poll_events;
+ __u32 sync_range_flags;
};
__u64 user_data; /* data to be passed back at completion time */
union {
@@ -38,6 +39,7 @@ struct io_uring_sqe {
* sqe->flags
*/
#define IOSQE_FIXED_FILE (1U << 0) /* use fixed fileset */
+#define IOSQE_IO_DRAIN (1U << 1) /* issue after inflight IO */
/*
* io_uring_setup() flags
@@ -54,6 +56,7 @@ struct io_uring_sqe {
#define IORING_OP_WRITE_FIXED 5
#define IORING_OP_POLL_ADD 6
#define IORING_OP_POLL_REMOVE 7
+#define IORING_OP_SYNC_FILE_RANGE 8
/*
* sqe->fsync_flags
@@ -133,5 +136,7 @@ struct io_uring_params {
#define IORING_UNREGISTER_BUFFERS 1
#define IORING_REGISTER_FILES 2
#define IORING_UNREGISTER_FILES 3
+#define IORING_REGISTER_EVENTFD 4
+#define IORING_UNREGISTER_EVENTFD 5
#endif
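
The new IORING_REGISTER_EVENTFD/IORING_UNREGISTER_EVENTFD opcodes are issued through the io_uring_register() syscall. A hedged sketch follows; __NR_io_uring_register is assumed to be provided by the installed headers, and ring_fd is an already set-up io_uring instance.

/* Sketch only: make completions on ring_fd signal an eventfd. */
#define _GNU_SOURCE
#include <sys/eventfd.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/io_uring.h>

int attach_eventfd(int ring_fd)
{
    int efd = eventfd(0, EFD_CLOEXEC);

    if (efd < 0)
        return -1;
    /* io_uring_register(fd, IORING_REGISTER_EVENTFD, &efd, 1) */
    if (syscall(__NR_io_uring_register, ring_fd,
                IORING_REGISTER_EVENTFD, &efd, 1) < 0) {
        close(efd);
        return -1;
    }
    return efd; /* an 8-byte read here reports new completions */
}
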
diff --git a/include/uapi/linux/ip_vs.h b/include/uapi/linux/ip_vs.h
index 1c916b2f89dc..e34f436fc79d 100644
--- a/include/uapi/linux/ip_vs.h
+++ b/include/uapi/linux/ip_vs.h
@@ -124,6 +124,13 @@
#define IP_VS_PEDATA_MAXLEN 255
+/* Tunnel types */
+enum {
+ IP_VS_CONN_F_TUNNEL_TYPE_IPIP = 0, /* IPIP */
+ IP_VS_CONN_F_TUNNEL_TYPE_GUE, /* GUE */
+ IP_VS_CONN_F_TUNNEL_TYPE_MAX,
+};
+
/*
* The struct ip_vs_service_user and struct ip_vs_dest_user are
* used to set IPVS rules through setsockopt.
@@ -392,6 +399,10 @@ enum {
IPVS_DEST_ATTR_STATS64, /* nested attribute for dest stats */
+ IPVS_DEST_ATTR_TUN_TYPE, /* tunnel type */
+
+ IPVS_DEST_ATTR_TUN_PORT, /* tunnel port */
+
__IPVS_DEST_ATTR_MAX,
};
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index e622fd1fbd46..dc067ed0b72d 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -211,6 +211,11 @@ struct kfd_ioctl_dbg_wave_control_args {
#define KFD_HW_EXCEPTION_GPU_HANG 0
#define KFD_HW_EXCEPTION_ECC 1
+/* For kfd_hsa_memory_exception_data.ErrorType */
+#define KFD_MEM_ERR_NO_RAS 0
+#define KFD_MEM_ERR_SRAM_ECC 1
+#define KFD_MEM_ERR_POISON_CONSUMED 2
+#define KFD_MEM_ERR_GPU_HANG 3
struct kfd_ioctl_create_event_args {
__u64 event_page_offset; /* from KFD */
@@ -250,7 +255,12 @@ struct kfd_hsa_memory_exception_data {
struct kfd_memory_exception_failure failure;
__u64 va;
__u32 gpu_id;
- __u32 pad;
+ __u32 ErrorType; /* 0 = no RAS error,
+ * 1 = ECC_SRAM,
+ * 2 = Link_SYNFLOOD (poison),
+ * 3 = GPU hang (not attributable to a specific cause),
+ * other values reserved
+ */
};
/* hw exception data */
diff --git a/include/uapi/linux/lirc.h b/include/uapi/linux/lirc.h
index 45fcbf99d72e..f99d9dcae667 100644
--- a/include/uapi/linux/lirc.h
+++ b/include/uapi/linux/lirc.h
@@ -195,6 +195,7 @@ struct lirc_scancode {
* @RC_PROTO_RCMM12: RC-MM protocol 12 bits
* @RC_PROTO_RCMM24: RC-MM protocol 24 bits
* @RC_PROTO_RCMM32: RC-MM protocol 32 bits
+ * @RC_PROTO_XBOX_DVD: Xbox DVD Movie Playback Kit protocol
*/
enum rc_proto {
RC_PROTO_UNKNOWN = 0,
@@ -224,6 +225,7 @@ enum rc_proto {
RC_PROTO_RCMM12 = 24,
RC_PROTO_RCMM24 = 25,
RC_PROTO_RCMM32 = 26,
+ RC_PROTO_XBOX_DVD = 27,
};
#endif
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index d6a5a3bfe6c4..2a6b253cfb05 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -34,7 +34,7 @@
#define MEDIA_BUS_FMT_FIXED 0x0001
-/* RGB - next is 0x101b */
+/* RGB - next is 0x101c */
#define MEDIA_BUS_FMT_RGB444_1X12 0x1016
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002
@@ -50,6 +50,7 @@
#define MEDIA_BUS_FMT_RGB666_1X24_CPADHI 0x1015
#define MEDIA_BUS_FMT_RGB666_1X7X3_SPWG 0x1010
#define MEDIA_BUS_FMT_BGR888_1X24 0x1013
+#define MEDIA_BUS_FMT_BGR888_3X8 0x101b
#define MEDIA_BUS_FMT_GBR888_1X24 0x1014
#define MEDIA_BUS_FMT_RGB888_1X24 0x100a
#define MEDIA_BUS_FMT_RGB888_2X12_BE 0x100b
diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h
index e5d0c5c611b5..9aedb187bc48 100644
--- a/include/uapi/linux/media.h
+++ b/include/uapi/linux/media.h
@@ -262,6 +262,11 @@ struct media_links_enum {
#define MEDIA_INTF_T_V4L_SWRADIO (MEDIA_INTF_T_V4L_BASE + 4)
#define MEDIA_INTF_T_V4L_TOUCH (MEDIA_INTF_T_V4L_BASE + 5)
+#define MEDIA_INTF_T_ALSA_BASE 0x00000300
+#define MEDIA_INTF_T_ALSA_PCM_CAPTURE (MEDIA_INTF_T_ALSA_BASE)
+#define MEDIA_INTF_T_ALSA_PCM_PLAYBACK (MEDIA_INTF_T_ALSA_BASE + 1)
+#define MEDIA_INTF_T_ALSA_CONTROL (MEDIA_INTF_T_ALSA_BASE + 2)
+
#if defined(__KERNEL__)
/*
@@ -413,19 +418,19 @@ struct media_v2_topology {
#define MEDIA_ENT_F_DTV_DECODER MEDIA_ENT_F_DV_DECODER
/*
- * There is still no ALSA support in the media controller. These
+ * There is still no full ALSA support in the media controller. These
* defines should not have been added and we leave them here only
* in case some application tries to use these defines.
+ *
+ * The ALSA defines that are in use have been moved into __KERNEL__
+ * scope. As support gets added to these interface types, they should
+ * be moved into __KERNEL__ scope with the code that uses them.
*/
-#define MEDIA_INTF_T_ALSA_BASE 0x00000300
-#define MEDIA_INTF_T_ALSA_PCM_CAPTURE (MEDIA_INTF_T_ALSA_BASE)
-#define MEDIA_INTF_T_ALSA_PCM_PLAYBACK (MEDIA_INTF_T_ALSA_BASE + 1)
-#define MEDIA_INTF_T_ALSA_CONTROL (MEDIA_INTF_T_ALSA_BASE + 2)
-#define MEDIA_INTF_T_ALSA_COMPRESS (MEDIA_INTF_T_ALSA_BASE + 3)
-#define MEDIA_INTF_T_ALSA_RAWMIDI (MEDIA_INTF_T_ALSA_BASE + 4)
-#define MEDIA_INTF_T_ALSA_HWDEP (MEDIA_INTF_T_ALSA_BASE + 5)
-#define MEDIA_INTF_T_ALSA_SEQUENCER (MEDIA_INTF_T_ALSA_BASE + 6)
-#define MEDIA_INTF_T_ALSA_TIMER (MEDIA_INTF_T_ALSA_BASE + 7)
+#define MEDIA_INTF_T_ALSA_COMPRESS (MEDIA_INTF_T_ALSA_BASE + 3)
+#define MEDIA_INTF_T_ALSA_RAWMIDI (MEDIA_INTF_T_ALSA_BASE + 4)
+#define MEDIA_INTF_T_ALSA_HWDEP (MEDIA_INTF_T_ALSA_BASE + 5)
+#define MEDIA_INTF_T_ALSA_SEQUENCER (MEDIA_INTF_T_ALSA_BASE + 6)
+#define MEDIA_INTF_T_ALSA_TIMER (MEDIA_INTF_T_ALSA_BASE + 7)
/* Obsolete symbol for media_version, no longer used in the kernel */
#define MEDIA_API_VERSION ((0 << 16) | (1 << 8) | 0)
diff --git a/include/uapi/linux/mei.h b/include/uapi/linux/mei.h
index 0f681cbd38d3..c6aec86cc5de 100644
--- a/include/uapi/linux/mei.h
+++ b/include/uapi/linux/mei.h
@@ -1,70 +1,9 @@
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
-/******************************************************************************
+/*
+ * Copyright(c) 2003-2015 Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
* Intel MEI Interface Header
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Corporation.
- * linux-mei@linux.intel.com
- * http://www.intel.com
- *
- * BSD LICENSE
- *
- * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
+ */
#ifndef _LINUX_MEI_H
#define _LINUX_MEI_H
diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h
index 3f9ec42510b0..96a0240f23fe 100644
--- a/include/uapi/linux/mount.h
+++ b/include/uapi/linux/mount.h
@@ -55,4 +55,66 @@
#define MS_MGC_VAL 0xC0ED0000
#define MS_MGC_MSK 0xffff0000
+/*
+ * open_tree() flags.
+ */
+#define OPEN_TREE_CLONE 1 /* Clone the target tree and attach the clone */
+#define OPEN_TREE_CLOEXEC O_CLOEXEC /* Close the file on execve() */
+
+/*
+ * move_mount() flags.
+ */
+#define MOVE_MOUNT_F_SYMLINKS 0x00000001 /* Follow symlinks on from path */
+#define MOVE_MOUNT_F_AUTOMOUNTS 0x00000002 /* Follow automounts on from path */
+#define MOVE_MOUNT_F_EMPTY_PATH 0x00000004 /* Empty from path permitted */
+#define MOVE_MOUNT_T_SYMLINKS 0x00000010 /* Follow symlinks on to path */
+#define MOVE_MOUNT_T_AUTOMOUNTS 0x00000020 /* Follow automounts on to path */
+#define MOVE_MOUNT_T_EMPTY_PATH 0x00000040 /* Empty to path permitted */
+#define MOVE_MOUNT__MASK 0x00000077
+
+/*
+ * fsopen() flags.
+ */
+#define FSOPEN_CLOEXEC 0x00000001
+
+/*
+ * fspick() flags.
+ */
+#define FSPICK_CLOEXEC 0x00000001
+#define FSPICK_SYMLINK_NOFOLLOW 0x00000002
+#define FSPICK_NO_AUTOMOUNT 0x00000004
+#define FSPICK_EMPTY_PATH 0x00000008
+
+/*
+ * The type of fsconfig() call made.
+ */
+enum fsconfig_command {
+ FSCONFIG_SET_FLAG = 0, /* Set parameter, supplying no value */
+ FSCONFIG_SET_STRING = 1, /* Set parameter, supplying a string value */
+ FSCONFIG_SET_BINARY = 2, /* Set parameter, supplying a binary blob value */
+ FSCONFIG_SET_PATH = 3, /* Set parameter, supplying an object by path */
+ FSCONFIG_SET_PATH_EMPTY = 4, /* Set parameter, supplying an object by (empty) path */
+ FSCONFIG_SET_FD = 5, /* Set parameter, supplying an object by fd */
+ FSCONFIG_CMD_CREATE = 6, /* Invoke superblock creation */
+ FSCONFIG_CMD_RECONFIGURE = 7, /* Invoke superblock reconfiguration */
+};
+
+/*
+ * fsmount() flags.
+ */
+#define FSMOUNT_CLOEXEC 0x00000001
+
+/*
+ * Mount attributes.
+ */
+#define MOUNT_ATTR_RDONLY 0x00000001 /* Mount read-only */
+#define MOUNT_ATTR_NOSUID 0x00000002 /* Ignore suid and sgid bits */
+#define MOUNT_ATTR_NODEV 0x00000004 /* Disallow access to device special files */
+#define MOUNT_ATTR_NOEXEC 0x00000008 /* Disallow program execution */
+#define MOUNT_ATTR__ATIME 0x00000070 /* Setting on how atime should be updated */
+#define MOUNT_ATTR_RELATIME 0x00000000 /* - Update atime relative to mtime/ctime. */
+#define MOUNT_ATTR_NOATIME 0x00000010 /* - Do not update access times. */
+#define MOUNT_ATTR_STRICTATIME 0x00000020 /* - Always perform atime updates */
+#define MOUNT_ATTR_NODIRATIME 0x00000080 /* Do not update directory access times */
+
#endif /* _UAPI_LINUX_MOUNT_H */
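
The open_tree()/move_mount()/fsopen()/fsconfig()/fsmount()/fspick() flags above belong to the new mount API syscalls. The sketch below shows the usual create-and-attach sequence with raw syscall(2) calls; the __NR_* numbers are assumed to be provided by the installed headers, and the device and target paths are placeholders.

/* Sketch only: mount /dev/vdb1 read-only on /mnt via the new mount API. */
#define _GNU_SOURCE
#include <fcntl.h>      /* AT_FDCWD */
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/mount.h>

int mount_ext4_readonly(void)
{
    int fsfd, mfd;

    fsfd = syscall(__NR_fsopen, "ext4", FSOPEN_CLOEXEC);
    if (fsfd < 0)
        return -1;

    /* Configure the superblock, then create it. */
    if (syscall(__NR_fsconfig, fsfd, FSCONFIG_SET_STRING, "source",
                "/dev/vdb1", 0) < 0 ||
        syscall(__NR_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0) < 0) {
        close(fsfd);
        return -1;
    }

    /* Turn the configured context into a detached mount object... */
    mfd = syscall(__NR_fsmount, fsfd, FSMOUNT_CLOEXEC,
                  MOUNT_ATTR_RDONLY | MOUNT_ATTR_NODEV);
    close(fsfd);
    if (mfd < 0)
        return -1;

    /* ...and attach it at /mnt. */
    if (syscall(__NR_move_mount, mfd, "", AT_FDCWD, "/mnt",
                MOVE_MOUNT_F_EMPTY_PATH) < 0) {
        close(mfd);
        return -1;
    }
    close(mfd);
    return 0;
}
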
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index a66c8de006cc..505393c6e959 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -966,7 +966,7 @@ enum nft_socket_keys {
* @NFT_CT_DST_IP: conntrack layer 3 protocol destination (IPv4 address)
* @NFT_CT_SRC_IP6: conntrack layer 3 protocol source (IPv6 address)
* @NFT_CT_DST_IP6: conntrack layer 3 protocol destination (IPv6 address)
- * @NFT_CT_TIMEOUT: connection tracking timeout policy assigned to conntrack
+ * @NFT_CT_ID: conntrack id
*/
enum nft_ct_keys {
NFT_CT_STATE,
@@ -992,7 +992,7 @@ enum nft_ct_keys {
NFT_CT_DST_IP,
NFT_CT_SRC_IP6,
NFT_CT_DST_IP6,
- NFT_CT_TIMEOUT,
+ NFT_CT_ID,
__NFT_CT_MAX
};
#define NFT_CT_MAX (__NFT_CT_MAX - 1)
@@ -1136,7 +1136,7 @@ enum nft_log_level {
NFT_LOGLEVEL_AUDIT,
__NFT_LOGLEVEL_MAX
};
-#define NFT_LOGLEVEL_MAX (__NFT_LOGLEVEL_MAX + 1)
+#define NFT_LOGLEVEL_MAX (__NFT_LOGLEVEL_MAX - 1)
/**
* enum nft_queue_attributes - nf_tables queue expression netlink attributes
@@ -1522,15 +1522,21 @@ enum nft_flowtable_hook_attributes {
*
* @NFTA_OSF_DREG: destination register (NLA_U32: nft_registers)
* @NFTA_OSF_TTL: Value of the TTL osf option (NLA_U8)
+ * @NFTA_OSF_FLAGS: flags (NLA_U32)
*/
enum nft_osf_attributes {
NFTA_OSF_UNSPEC,
NFTA_OSF_DREG,
NFTA_OSF_TTL,
+ NFTA_OSF_FLAGS,
__NFTA_OSF_MAX,
};
#define NFTA_OSF_MAX (__NFTA_OSF_MAX - 1)
+enum nft_osf_flags {
+ NFT_OSF_F_VERSION = (1 << 0),
+};
+
/**
* enum nft_device_attributes - nf_tables device netlink attributes
*
diff --git a/include/uapi/linux/nfs_mount.h b/include/uapi/linux/nfs_mount.h
index e44e00616ab5..e3bcfc6aa3b0 100644
--- a/include/uapi/linux/nfs_mount.h
+++ b/include/uapi/linux/nfs_mount.h
@@ -66,13 +66,4 @@ struct nfs_mount_data {
#define NFS_MOUNT_UNSHARED 0x8000 /* 5 */
#define NFS_MOUNT_FLAGMASK 0xFFFF
-/* The following are for internal use only */
-#define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000
-#define NFS_MOUNT_LOOKUP_CACHE_NONE 0x20000
-#define NFS_MOUNT_NORESVPORT 0x40000
-#define NFS_MOUNT_LEGACY_INTERFACE 0x80000
-
-#define NFS_MOUNT_LOCAL_FLOCK 0x100000
-#define NFS_MOUNT_LOCAL_FCNTL 0x200000
-
#endif
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index dd4f86ee286e..6f09d1500960 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -11,7 +11,7 @@
* Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com>
* Copyright 2008 Colin McCabe <colin@cozybit.com>
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018-2019 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -1065,6 +1065,26 @@
* indicated by %NL80211_ATTR_WIPHY_FREQ and other attributes
* determining the width and type.
*
+ * @NL80211_CMD_UPDATE_OWE_INFO: This interface allows the host driver to
+ * offload OWE processing to user space. This is intended to
+ * support the OWE AKM by host drivers that implement SME but rely
+ * on user space for the cryptographic/DH IE processing in AP mode.
+ *
+ * @NL80211_CMD_PROBE_MESH_LINK: The requirement for mesh link metric
+ * refreshing is that from one mesh point we must be able to send some
+ * data frames to other mesh points which are not currently selected as
+ * a primary traffic path, but which are only 1 hop away. The absence of
+ * the primary path to the chosen node makes it necessary to apply some
+ * form of marking on a chosen packet stream so that the packets can be
+ * properly steered to the selected node for testing, and not routed by
+ * the regular mesh path lookup. Further, the packets must be of type data
+ * so that the rate control (often embedded in firmware) is used for
+ * rate selection.
+ *
+ * Here attribute %NL80211_ATTR_MAC is used to specify connected mesh
+ * peer MAC address and %NL80211_ATTR_FRAME is used to specify the frame
+ * content. The frame is ethernet data.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -1285,6 +1305,10 @@ enum nl80211_commands {
NL80211_CMD_NOTIFY_RADAR,
+ NL80211_CMD_UPDATE_OWE_INFO,
+
+ NL80211_CMD_PROBE_MESH_LINK,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -2308,6 +2332,15 @@ enum nl80211_commands {
* @NL80211_ATTR_AIRTIME_WEIGHT: Station's weight when scheduled by the airtime
* scheduler.
*
+ * @NL80211_ATTR_STA_TX_POWER_SETTING: Transmit power setting type (u8) for
+ * station associated with the AP. See &enum nl80211_tx_power_setting for
+ * possible values.
+ * @NL80211_ATTR_STA_TX_POWER: Transmit power level (s16) in dBm units. This
+ * allows setting the Tx power for a station. If this attribute is not
+ * included, the default per-interface tx power setting applies. The
+ * driver should pick the lowest tx power, either the per-interface or
+ * the per-station tx power.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2758,6 +2791,8 @@ enum nl80211_attrs {
NL80211_ATTR_PEER_MEASUREMENTS,
NL80211_ATTR_AIRTIME_WEIGHT,
+ NL80211_ATTR_STA_TX_POWER_SETTING,
+ NL80211_ATTR_STA_TX_POWER,
/* add attributes here, update the policy in nl80211.c */
@@ -2802,7 +2837,7 @@ enum nl80211_attrs {
#define NL80211_MAX_SUPP_RATES 32
#define NL80211_MAX_SUPP_HT_RATES 77
-#define NL80211_MAX_SUPP_REG_RULES 64
+#define NL80211_MAX_SUPP_REG_RULES 128
#define NL80211_TKIP_DATA_OFFSET_ENCR_KEY 0
#define NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY 16
#define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24
@@ -3139,6 +3174,7 @@ enum nl80211_sta_bss_param {
* @NL80211_STA_INFO_TX_DURATION: aggregate PPDU duration for all frames
* sent to the station (u64, usec)
* @NL80211_STA_INFO_AIRTIME_WEIGHT: current airtime weight for station (u16)
+ * @NL80211_STA_INFO_AIRTIME_LINK_METRIC: airtime link metric for mesh station
* @__NL80211_STA_INFO_AFTER_LAST: internal
* @NL80211_STA_INFO_MAX: highest possible station info attribute
*/
@@ -3184,6 +3220,7 @@ enum nl80211_sta_info {
NL80211_STA_INFO_CONNECTED_TO_GATE,
NL80211_STA_INFO_TX_DURATION,
NL80211_STA_INFO_AIRTIME_WEIGHT,
+ NL80211_STA_INFO_AIRTIME_LINK_METRIC,
/* keep last */
__NL80211_STA_INFO_AFTER_LAST,
@@ -3638,6 +3675,14 @@ enum nl80211_reg_rule_attr {
* value as specified by &struct nl80211_bss_select_rssi_adjust.
* @NL80211_SCHED_SCAN_MATCH_ATTR_BSSID: BSSID to be used for matching
* (this cannot be used together with SSID).
+ * @NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI: Nested attribute that carries the
+ * band specific minimum rssi thresholds for the bands defined in
+ * enum nl80211_band. The minimum rssi threshold value (s32) specific to
+ * a band shall be encapsulated in an attribute whose type equals one
+ * of the NL80211_BAND_* values defined in enum nl80211_band. For example,
+ * the minimum rssi threshold value for the 2.4 GHz band shall be
+ * encapsulated within an attribute of type NL80211_BAND_2GHZ. One or
+ * more such attributes will be nested within this attribute.
* @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter
* attribute number currently defined
* @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use
@@ -3650,6 +3695,7 @@ enum nl80211_sched_scan_match_attr {
NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI,
NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST,
NL80211_SCHED_SCAN_MATCH_ATTR_BSSID,
+ NL80211_SCHED_SCAN_MATCH_PER_BAND_RSSI,
/* keep last */
__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST,
@@ -4135,6 +4181,27 @@ enum nl80211_channel_type {
};
/**
+ * enum nl80211_key_mode - Key mode
+ *
+ * @NL80211_KEY_RX_TX: (Default)
+ * Key can be used for Rx and Tx immediately
+ *
+ * The following modes can only be selected for unicast keys and when the
+ * driver supports @NL80211_EXT_FEATURE_EXT_KEY_ID:
+ *
+ * @NL80211_KEY_NO_TX: Only allowed in combination with @NL80211_CMD_NEW_KEY:
+ * Unicast key can only be used for Rx, Tx not allowed, yet
+ * @NL80211_KEY_SET_TX: Only allowed in combination with @NL80211_CMD_SET_KEY:
+ * The unicast key identified by idx and mac is cleared for Tx and becomes
+ * the preferred Tx key for the station.
+ */
+enum nl80211_key_mode {
+ NL80211_KEY_RX_TX,
+ NL80211_KEY_NO_TX,
+ NL80211_KEY_SET_TX
+};
+
+/**
* enum nl80211_chan_width - channel width definitions
*
* These values are used with the %NL80211_ATTR_CHANNEL_WIDTH
@@ -4377,6 +4444,9 @@ enum nl80211_key_default_types {
* @NL80211_KEY_DEFAULT_TYPES: A nested attribute containing flags
* attributes, specifying what a key should be set as default as.
* See &enum nl80211_key_default_types.
+ * @NL80211_KEY_MODE: the mode from enum nl80211_key_mode.
+ * Defaults to @NL80211_KEY_RX_TX.
+ *
* @__NL80211_KEY_AFTER_LAST: internal
* @NL80211_KEY_MAX: highest key attribute
*/
@@ -4390,6 +4460,7 @@ enum nl80211_key_attributes {
NL80211_KEY_DEFAULT_MGMT,
NL80211_KEY_TYPE,
NL80211_KEY_DEFAULT_TYPES,
+ NL80211_KEY_MODE,
/* keep last */
__NL80211_KEY_AFTER_LAST,
@@ -5335,6 +5406,8 @@ enum nl80211_feature_flags {
* able to rekey an in-use key correctly. Userspace must not rekey PTK keys
* if this flag is not set. Ignoring this can leak clear text packets and/or
* freeze the connection.
+ * @NL80211_EXT_FEATURE_EXT_KEY_ID: Driver supports "Extended Key ID for
+ * Individually Addressed Frames" from IEEE802.11-2016.
*
* @NL80211_EXT_FEATURE_AIRTIME_FAIRNESS: Driver supports getting airtime
* fairness for transmitted packets and has enabled airtime fairness
@@ -5343,6 +5416,12 @@ enum nl80211_feature_flags {
* @NL80211_EXT_FEATURE_AP_PMKSA_CACHING: Driver/device supports PMKSA caching
* (set/del PMKSA operations) in AP mode.
*
+ * @NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD: Driver supports
+ * filtering of sched scan results using band specific RSSI thresholds.
+ *
+ * @NL80211_EXT_FEATURE_STA_TX_PWR: This driver supports controlling tx power
+ * to a station.
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -5384,6 +5463,9 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER,
NL80211_EXT_FEATURE_AIRTIME_FAIRNESS,
NL80211_EXT_FEATURE_AP_PMKSA_CACHING,
+ NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD,
+ NL80211_EXT_FEATURE_EXT_KEY_ID,
+ NL80211_EXT_FEATURE_STA_TX_PWR,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index dbe0cbe4f1b7..f271f1ec50ae 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -364,6 +364,7 @@ enum ovs_tunnel_key_attr {
OVS_TUNNEL_KEY_ATTR_IPV6_DST, /* struct in6_addr dst IPv6 address. */
OVS_TUNNEL_KEY_ATTR_PAD,
OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS, /* struct erspan_metadata */
+ OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE, /* No argument. IPV4_INFO_BRIDGE mode. */
__OVS_TUNNEL_KEY_ATTR_MAX
};
@@ -734,6 +735,7 @@ struct ovs_action_hash {
* be received on NFNLGRP_CONNTRACK_NEW and NFNLGRP_CONNTRACK_DESTROY groups,
* respectively. Remaining bits control the changes for which an event is
* delivered on the NFNLGRP_CONNTRACK_UPDATE group.
+ * @OVS_CT_ATTR_TIMEOUT: Variable length string defining conntrack timeout.
*/
enum ovs_ct_attr {
OVS_CT_ATTR_UNSPEC,
@@ -746,6 +748,8 @@ enum ovs_ct_attr {
OVS_CT_ATTR_NAT, /* Nested OVS_NAT_ATTR_* */
OVS_CT_ATTR_FORCE_COMMIT, /* No argument */
OVS_CT_ATTR_EVENTMASK, /* u32 mask of IPCT_* events. */
+ OVS_CT_ATTR_TIMEOUT, /* Associate timeout with this connection for
+ * fine-grain timeout tuning. */
__OVS_CT_ATTR_MAX
};
@@ -798,6 +802,44 @@ struct ovs_action_push_eth {
struct ovs_key_ethernet addresses;
};
+/*
+ * enum ovs_check_pkt_len_attr - Attributes for %OVS_ACTION_ATTR_CHECK_PKT_LEN.
+ *
+ * @OVS_CHECK_PKT_LEN_ATTR_PKT_LEN: u16 Packet length to check for.
+ * @OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER: Nested OVS_ACTION_ATTR_*
+ * actions to apply if the packet length is greater than the specified
+ * length in the attr - OVS_CHECK_PKT_LEN_ATTR_PKT_LEN.
+ * @OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL: Nested OVS_ACTION_ATTR_*
+ * actions to apply if the packet length is less than or equal to the
+ * specified length in the attr - OVS_CHECK_PKT_LEN_ATTR_PKT_LEN.
+ */
+enum ovs_check_pkt_len_attr {
+ OVS_CHECK_PKT_LEN_ATTR_UNSPEC,
+ OVS_CHECK_PKT_LEN_ATTR_PKT_LEN,
+ OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_GREATER,
+ OVS_CHECK_PKT_LEN_ATTR_ACTIONS_IF_LESS_EQUAL,
+ __OVS_CHECK_PKT_LEN_ATTR_MAX,
+
+#ifdef __KERNEL__
+ OVS_CHECK_PKT_LEN_ATTR_ARG /* struct check_pkt_len_arg */
+#endif
+};
+
+#define OVS_CHECK_PKT_LEN_ATTR_MAX (__OVS_CHECK_PKT_LEN_ATTR_MAX - 1)
+
+#ifdef __KERNEL__
+struct check_pkt_len_arg {
+ u16 pkt_len; /* Same value as OVS_CHECK_PKT_LEN_ATTR_PKT_LEN. */
+ bool exec_for_greater; /* When true, actions in IF_GREATER will
+ * not change flow keys. False otherwise.
+ */
+ bool exec_for_lesser_equal; /* When true, actions in IF_LESS_EQUAL
+ * will not change flow keys. False
+ * otherwise.
+ */
+};
+#endif
+
/**
* enum ovs_action_attr - Action types.
*
@@ -842,6 +884,9 @@ struct ovs_action_push_eth {
* packet, or modify the packet (e.g., change the DSCP field).
* @OVS_ACTION_ATTR_CLONE: make a copy of the packet and execute a list of
* actions without affecting the original packet and key.
+ * @OVS_ACTION_ATTR_CHECK_PKT_LEN: Check the packet length and execute a set
+ * of actions if greater than the specified packet length, else execute
+ * another set of actions.
*
* Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all
* fields within a header are modifiable, e.g. the IPv4 protocol and fragment
@@ -876,6 +921,7 @@ enum ovs_action_attr {
OVS_ACTION_ATTR_POP_NSH, /* No argument. */
OVS_ACTION_ATTR_METER, /* u32 meter ID. */
OVS_ACTION_ATTR_CLONE, /* Nested OVS_CLONE_ATTR_*. */
+ OVS_ACTION_ATTR_CHECK_PKT_LEN, /* Nested OVS_CHECK_PKT_LEN_ATTR_*. */
__OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted
* from userspace. */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 5c98133f2c94..27164769d184 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -1,7 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
- * pci_regs.h
- *
* PCI standard defines
* Copyright 1994, Drew Eckhardt
* Copyright 1997--1999 Martin Mares <mj@ucw.cz>
@@ -15,7 +13,7 @@
* PCI System Design Guide
*
* For HyperTransport information, please consult the following manuals
- * from http://www.hypertransport.org
+ * from http://www.hypertransport.org :
*
* The HyperTransport I/O Link Specification
*/
@@ -301,7 +299,7 @@
#define PCI_SID_ESR_FIC 0x20 /* First In Chassis Flag */
#define PCI_SID_CHASSIS_NR 3 /* Chassis Number */
-/* Message Signalled Interrupts registers */
+/* Message Signalled Interrupt registers */
#define PCI_MSI_FLAGS 2 /* Message Control */
#define PCI_MSI_FLAGS_ENABLE 0x0001 /* MSI feature enabled */
@@ -319,7 +317,7 @@
#define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */
#define PCI_MSI_PENDING_64 20 /* Pending intrs for 64-bit devices */
-/* MSI-X registers */
+/* MSI-X registers (in MSI-X capability) */
#define PCI_MSIX_FLAGS 2 /* Message Control */
#define PCI_MSIX_FLAGS_QSIZE 0x07FF /* Table size */
#define PCI_MSIX_FLAGS_MASKALL 0x4000 /* Mask all vectors for this function */
@@ -333,13 +331,13 @@
#define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */
#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */
-/* MSI-X Table entry format */
+/* MSI-X Table entry format (in memory mapped by a BAR) */
#define PCI_MSIX_ENTRY_SIZE 16
-#define PCI_MSIX_ENTRY_LOWER_ADDR 0
-#define PCI_MSIX_ENTRY_UPPER_ADDR 4
-#define PCI_MSIX_ENTRY_DATA 8
-#define PCI_MSIX_ENTRY_VECTOR_CTRL 12
-#define PCI_MSIX_ENTRY_CTRL_MASKBIT 1
+#define PCI_MSIX_ENTRY_LOWER_ADDR 0 /* Message Address */
+#define PCI_MSIX_ENTRY_UPPER_ADDR 4 /* Message Upper Address */
+#define PCI_MSIX_ENTRY_DATA 8 /* Message Data */
+#define PCI_MSIX_ENTRY_VECTOR_CTRL 12 /* Vector Control */
+#define PCI_MSIX_ENTRY_CTRL_MASKBIT 0x00000001
/* CompactPCI Hotswap Register */
@@ -372,6 +370,12 @@
#define PCI_EA_FIRST_ENT_BRIDGE 8 /* First EA Entry for Bridges */
#define PCI_EA_ES 0x00000007 /* Entry Size */
#define PCI_EA_BEI 0x000000f0 /* BAR Equivalent Indicator */
+
+/* EA fixed Secondary and Subordinate bus numbers for Bridge */
+#define PCI_EA_SEC_BUS_MASK 0xff
+#define PCI_EA_SUB_BUS_MASK 0xff00
+#define PCI_EA_SUB_BUS_SHIFT 8
+
/* 0-5 map to BARs 0-5 respectively */
#define PCI_EA_BEI_BAR0 0
#define PCI_EA_BEI_BAR5 5
@@ -465,19 +469,19 @@
/* PCI Express capability registers */
#define PCI_EXP_FLAGS 2 /* Capabilities register */
-#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */
-#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */
-#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
-#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */
-#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */
-#define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */
-#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
-#define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCIe to PCI/PCI-X Bridge */
-#define PCI_EXP_TYPE_PCIE_BRIDGE 0x8 /* PCI/PCI-X to PCIe Bridge */
-#define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */
-#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
-#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
-#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
+#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */
+#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */
+#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
+#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */
+#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */
+#define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */
+#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
+#define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCIe to PCI/PCI-X Bridge */
+#define PCI_EXP_TYPE_PCIE_BRIDGE 0x8 /* PCI/PCI-X to PCIe Bridge */
+#define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */
+#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
+#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
+#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
#define PCI_EXP_DEVCAP 4 /* Device capabilities */
#define PCI_EXP_DEVCAP_PAYLOAD 0x00000007 /* Max_Payload_Size */
#define PCI_EXP_DEVCAP_PHANTOM 0x00000018 /* Phantom functions */
@@ -616,8 +620,8 @@
#define PCI_EXP_RTCAP 30 /* Root Capabilities */
#define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */
#define PCI_EXP_RTSTA 32 /* Root Status */
-#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
-#define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */
+#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
+#define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */
/*
* The Device Capabilities 2, Device Status 2, Device Control 2,
* Link Capabilities 2, Link Status 2, Link Control 2,
@@ -637,13 +641,13 @@
#define PCI_EXP_DEVCAP2_OBFF_MASK 0x000c0000 /* OBFF support mechanism */
#define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */
#define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */
-#define PCI_EXP_DEVCAP2_EE_PREFIX 0x00200000 /* End-End TLP Prefix */
+#define PCI_EXP_DEVCAP2_EE_PREFIX 0x00200000 /* End-End TLP Prefix */
#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
#define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */
#define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS 0x0010 /* Completion Timeout Disable */
#define PCI_EXP_DEVCTL2_ARI 0x0020 /* Alternative Routing-ID */
-#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 /* Set Atomic requests */
-#define PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK 0x0080 /* Block atomic egress */
+#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 /* Set Atomic requests */
+#define PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK 0x0080 /* Block atomic egress */
#define PCI_EXP_DEVCTL2_IDO_REQ_EN 0x0100 /* Allow IDO for requests */
#define PCI_EXP_DEVCTL2_IDO_CMP_EN 0x0200 /* Allow IDO for completions */
#define PCI_EXP_DEVCTL2_LTR_EN 0x0400 /* Enable LTR mechanism */
@@ -659,11 +663,11 @@
#define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */
#define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */
#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
-#define PCI_EXP_LNKCTL2_TLS 0x000f
-#define PCI_EXP_LNKCTL2_TLS_2_5GT 0x0001 /* Supported Speed 2.5GT/s */
-#define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */
-#define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */
-#define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */
+#define PCI_EXP_LNKCTL2_TLS 0x000f
+#define PCI_EXP_LNKCTL2_TLS_2_5GT 0x0001 /* Supported Speed 2.5GT/s */
+#define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */
+#define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */
+#define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */
#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */
#define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */
@@ -752,18 +756,18 @@
#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */
#define PCI_ERR_HEADER_LOG 28 /* Header Log Register (16 bytes) */
#define PCI_ERR_ROOT_COMMAND 44 /* Root Error Command */
-#define PCI_ERR_ROOT_CMD_COR_EN 0x00000001 /* Correctable Err Reporting Enable */
-#define PCI_ERR_ROOT_CMD_NONFATAL_EN 0x00000002 /* Non-Fatal Err Reporting Enable */
-#define PCI_ERR_ROOT_CMD_FATAL_EN 0x00000004 /* Fatal Err Reporting Enable */
+#define PCI_ERR_ROOT_CMD_COR_EN 0x00000001 /* Correctable Err Reporting Enable */
+#define PCI_ERR_ROOT_CMD_NONFATAL_EN 0x00000002 /* Non-Fatal Err Reporting Enable */
+#define PCI_ERR_ROOT_CMD_FATAL_EN 0x00000004 /* Fatal Err Reporting Enable */
#define PCI_ERR_ROOT_STATUS 48
-#define PCI_ERR_ROOT_COR_RCV 0x00000001 /* ERR_COR Received */
-#define PCI_ERR_ROOT_MULTI_COR_RCV 0x00000002 /* Multiple ERR_COR */
-#define PCI_ERR_ROOT_UNCOR_RCV 0x00000004 /* ERR_FATAL/NONFATAL */
-#define PCI_ERR_ROOT_MULTI_UNCOR_RCV 0x00000008 /* Multiple FATAL/NONFATAL */
-#define PCI_ERR_ROOT_FIRST_FATAL 0x00000010 /* First UNC is Fatal */
-#define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */
-#define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */
-#define PCI_ERR_ROOT_AER_IRQ 0xf8000000 /* Advanced Error Interrupt Message Number */
+#define PCI_ERR_ROOT_COR_RCV 0x00000001 /* ERR_COR Received */
+#define PCI_ERR_ROOT_MULTI_COR_RCV 0x00000002 /* Multiple ERR_COR */
+#define PCI_ERR_ROOT_UNCOR_RCV 0x00000004 /* ERR_FATAL/NONFATAL */
+#define PCI_ERR_ROOT_MULTI_UNCOR_RCV 0x00000008 /* Multiple FATAL/NONFATAL */
+#define PCI_ERR_ROOT_FIRST_FATAL 0x00000010 /* First UNC is Fatal */
+#define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */
+#define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */
+#define PCI_ERR_ROOT_AER_IRQ 0xf8000000 /* Advanced Error Interrupt Message Number */
#define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */
/* Virtual Channel */
@@ -875,12 +879,12 @@
/* Page Request Interface */
#define PCI_PRI_CTRL 0x04 /* PRI control register */
-#define PCI_PRI_CTRL_ENABLE 0x01 /* Enable */
-#define PCI_PRI_CTRL_RESET 0x02 /* Reset */
+#define PCI_PRI_CTRL_ENABLE 0x0001 /* Enable */
+#define PCI_PRI_CTRL_RESET 0x0002 /* Reset */
#define PCI_PRI_STATUS 0x06 /* PRI status register */
-#define PCI_PRI_STATUS_RF 0x001 /* Response Failure */
-#define PCI_PRI_STATUS_UPRGI 0x002 /* Unexpected PRG index */
-#define PCI_PRI_STATUS_STOPPED 0x100 /* PRI Stopped */
+#define PCI_PRI_STATUS_RF 0x0001 /* Response Failure */
+#define PCI_PRI_STATUS_UPRGI 0x0002 /* Unexpected PRG index */
+#define PCI_PRI_STATUS_STOPPED 0x0100 /* PRI Stopped */
#define PCI_PRI_STATUS_PASID 0x8000 /* PRG Response PASID Required */
#define PCI_PRI_MAX_REQ 0x08 /* PRI max reqs supported */
#define PCI_PRI_ALLOC_REQ 0x0c /* PRI max reqs allowed */
@@ -898,16 +902,16 @@
/* Single Root I/O Virtualization */
#define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */
-#define PCI_SRIOV_CAP_VFM 0x01 /* VF Migration Capable */
+#define PCI_SRIOV_CAP_VFM 0x00000001 /* VF Migration Capable */
#define PCI_SRIOV_CAP_INTR(x) ((x) >> 21) /* Interrupt Message Number */
#define PCI_SRIOV_CTRL 0x08 /* SR-IOV Control */
-#define PCI_SRIOV_CTRL_VFE 0x01 /* VF Enable */
-#define PCI_SRIOV_CTRL_VFM 0x02 /* VF Migration Enable */
-#define PCI_SRIOV_CTRL_INTR 0x04 /* VF Migration Interrupt Enable */
-#define PCI_SRIOV_CTRL_MSE 0x08 /* VF Memory Space Enable */
-#define PCI_SRIOV_CTRL_ARI 0x10 /* ARI Capable Hierarchy */
+#define PCI_SRIOV_CTRL_VFE 0x0001 /* VF Enable */
+#define PCI_SRIOV_CTRL_VFM 0x0002 /* VF Migration Enable */
+#define PCI_SRIOV_CTRL_INTR 0x0004 /* VF Migration Interrupt Enable */
+#define PCI_SRIOV_CTRL_MSE 0x0008 /* VF Memory Space Enable */
+#define PCI_SRIOV_CTRL_ARI 0x0010 /* ARI Capable Hierarchy */
#define PCI_SRIOV_STATUS 0x0a /* SR-IOV Status */
-#define PCI_SRIOV_STATUS_VFM 0x01 /* VF Migration Status */
+#define PCI_SRIOV_STATUS_VFM 0x0001 /* VF Migration Status */
#define PCI_SRIOV_INITIAL_VF 0x0c /* Initial VFs */
#define PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */
#define PCI_SRIOV_NUM_VF 0x10 /* Number of VFs */
@@ -937,13 +941,13 @@
/* Access Control Service */
#define PCI_ACS_CAP 0x04 /* ACS Capability Register */
-#define PCI_ACS_SV 0x01 /* Source Validation */
-#define PCI_ACS_TB 0x02 /* Translation Blocking */
-#define PCI_ACS_RR 0x04 /* P2P Request Redirect */
-#define PCI_ACS_CR 0x08 /* P2P Completion Redirect */
-#define PCI_ACS_UF 0x10 /* Upstream Forwarding */
-#define PCI_ACS_EC 0x20 /* P2P Egress Control */
-#define PCI_ACS_DT 0x40 /* Direct Translated P2P */
+#define PCI_ACS_SV 0x0001 /* Source Validation */
+#define PCI_ACS_TB 0x0002 /* Translation Blocking */
+#define PCI_ACS_RR 0x0004 /* P2P Request Redirect */
+#define PCI_ACS_CR 0x0008 /* P2P Completion Redirect */
+#define PCI_ACS_UF 0x0010 /* Upstream Forwarding */
+#define PCI_ACS_EC 0x0020 /* P2P Egress Control */
+#define PCI_ACS_DT 0x0040 /* Direct Translated P2P */
#define PCI_ACS_EGRESS_BITS 0x05 /* ACS Egress Control Vector Size */
#define PCI_ACS_CTRL 0x06 /* ACS Control Register */
#define PCI_ACS_EGRESS_CTL_V 0x08 /* ACS Egress Control Vector */
@@ -993,9 +997,9 @@
#define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */
#define PCI_EXP_DPC_CTL 6 /* DPC control */
-#define PCI_EXP_DPC_CTL_EN_FATAL 0x0001 /* Enable trigger on ERR_FATAL message */
-#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x0002 /* Enable trigger on ERR_NONFATAL message */
-#define PCI_EXP_DPC_CTL_INT_EN 0x0008 /* DPC Interrupt Enable */
+#define PCI_EXP_DPC_CTL_EN_FATAL 0x0001 /* Enable trigger on ERR_FATAL message */
+#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x0002 /* Enable trigger on ERR_NONFATAL message */
+#define PCI_EXP_DPC_CTL_INT_EN 0x0008 /* DPC Interrupt Enable */
#define PCI_EXP_DPC_STATUS 8 /* DPC Status */
#define PCI_EXP_DPC_STATUS_TRIGGER 0x0001 /* Trigger Status */
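
A minimal sketch of how the new EA bridge bus-number defines and the documented MSI-X table-entry offsets are meant to be used; the ea_dw value and msix_base pointer are caller-supplied placeholders, not part of the header:

    #include <stdint.h>
    #include <linux/pci_regs.h>

    /* Decode the fixed Secondary/Subordinate bus numbers from an EA bridge entry. */
    static inline uint8_t pci_ea_sec_bus(uint32_t ea_dw)
    {
            return ea_dw & PCI_EA_SEC_BUS_MASK;
    }

    static inline uint8_t pci_ea_sub_bus(uint32_t ea_dw)
    {
            return (ea_dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT;
    }

    /* Locate the Message Data field of MSI-X table entry 'nr' inside the
     * table mapped from the BAR indicated by the MSI-X capability.
     */
    static inline volatile uint32_t *msix_entry_data(void *msix_base, unsigned int nr)
    {
            return (volatile uint32_t *)((char *)msix_base +
                                         nr * PCI_MSIX_ENTRY_SIZE +
                                         PCI_MSIX_ENTRY_DATA);
    }
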
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 7ee74c3474bf..8b2f993cbb77 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -1148,6 +1148,16 @@ enum {
#define TCA_TAPRIO_SCHED_MAX (__TCA_TAPRIO_SCHED_MAX - 1)
+/* The format for the admin sched (dump only):
+ * [TCA_TAPRIO_ATTR_ADMIN_SCHED]
+ * [TCA_TAPRIO_ATTR_SCHED_BASE_TIME]
+ * [TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]
+ * [TCA_TAPRIO_ATTR_SCHED_ENTRY]
+ * [TCA_TAPRIO_ATTR_SCHED_ENTRY_CMD]
+ * [TCA_TAPRIO_ATTR_SCHED_ENTRY_GATES]
+ * [TCA_TAPRIO_ATTR_SCHED_ENTRY_INTERVAL]
+ */
+
enum {
TCA_TAPRIO_ATTR_UNSPEC,
TCA_TAPRIO_ATTR_PRIOMAP, /* struct tc_mqprio_qopt */
@@ -1156,6 +1166,9 @@ enum {
TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY, /* single entry */
TCA_TAPRIO_ATTR_SCHED_CLOCKID, /* s32 */
TCA_TAPRIO_PAD,
+ TCA_TAPRIO_ATTR_ADMIN_SCHED, /* The admin sched, only used in dump */
+ TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME, /* s64 */
+ TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION, /* s64 */
__TCA_TAPRIO_ATTR_MAX,
};
diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h
index b3bcabe380da..2fcad1dd0b0e 100644
--- a/include/uapi/linux/psci.h
+++ b/include/uapi/linux/psci.h
@@ -49,8 +49,11 @@
#define PSCI_1_0_FN_PSCI_FEATURES PSCI_0_2_FN(10)
#define PSCI_1_0_FN_SYSTEM_SUSPEND PSCI_0_2_FN(14)
+#define PSCI_1_0_FN_SET_SUSPEND_MODE PSCI_0_2_FN(15)
+#define PSCI_1_1_FN_SYSTEM_RESET2 PSCI_0_2_FN(18)
#define PSCI_1_0_FN64_SYSTEM_SUSPEND PSCI_0_2_FN64(14)
+#define PSCI_1_1_FN64_SYSTEM_RESET2 PSCI_0_2_FN64(18)
/* PSCI v0.2 power state encoding for CPU_SUSPEND function */
#define PSCI_0_2_POWER_STATE_ID_MASK 0xffff
@@ -97,6 +100,10 @@
#define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_MASK \
(0x1 << PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT)
+#define PSCI_1_0_OS_INITIATED BIT(0)
+#define PSCI_1_0_SUSPEND_MODE_PC 0
+#define PSCI_1_0_SUSPEND_MODE_OSI 1
+
/* PSCI return values (inclusive of all PSCI versions) */
#define PSCI_RET_SUCCESS 0
#define PSCI_RET_NOT_SUPPORTED -1
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
index ac8c60bcc83b..43521d500c2b 100644
--- a/include/uapi/linux/psp-sev.h
+++ b/include/uapi/linux/psp-sev.h
@@ -6,8 +6,7 @@
*
* Author: Brijesh Singh <brijesh.singh@amd.com>
*
- * SEV spec 0.14 is available at:
- * http://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf
+ * SEV API specification is available at: https://developer.amd.com/sev/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -30,7 +29,8 @@ enum {
SEV_PDH_GEN,
SEV_PDH_CERT_EXPORT,
SEV_PEK_CERT_IMPORT,
- SEV_GET_ID,
+ SEV_GET_ID, /* This command is deprecated, use SEV_GET_ID2 */
+ SEV_GET_ID2,
SEV_MAX,
};
@@ -125,7 +125,7 @@ struct sev_user_data_pdh_cert_export {
} __packed;
/**
- * struct sev_user_data_get_id - GET_ID command parameters
+ * struct sev_user_data_get_id - GET_ID command parameters (deprecated)
*
* @socket1: Buffer to pass unique ID of first socket
* @socket2: Buffer to pass unique ID of second socket
@@ -136,6 +136,16 @@ struct sev_user_data_get_id {
} __packed;
/**
+ * struct sev_user_data_get_id2 - GET_ID command parameters
+ * @address: Buffer to store unique ID
+ * @length: length of the unique ID
+ */
+struct sev_user_data_get_id2 {
+ __u64 address; /* In */
+ __u32 length; /* In/Out */
+} __packed;
+
+/**
* struct sev_issue_cmd - SEV ioctl parameters
*
* @cmd: SEV commands to execute
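
A hedged userspace sketch of the new SEV_GET_ID2 flow; SEV_ISSUE_CMD and struct sev_issue_cmd come from parts of psp-sev.h not shown in this hunk, and the 128-byte buffer size is only a guess (real code resizes after the kernel updates .length):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/psp-sev.h>

    int main(void)
    {
            unsigned char id[128];          /* guessed size; kernel updates .length */
            struct sev_user_data_get_id2 data = {
                    .address = (__u64)(uintptr_t)id,
                    .length  = sizeof(id),
            };
            struct sev_issue_cmd cmd = {
                    .cmd  = SEV_GET_ID2,
                    .data = (__u64)(uintptr_t)&data,
            };
            int fd = open("/dev/sev", O_RDWR);

            if (fd < 0 || ioctl(fd, SEV_ISSUE_CMD, &cmd) < 0) {
                    perror("SEV_GET_ID2");
                    return 1;
            }
            printf("unique ID is %u bytes\n", data.length);
            return 0;
    }
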
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 22627f80063e..ed4ee170bee2 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -10,6 +10,7 @@
#define CLONE_FS 0x00000200 /* set if fs info shared between processes */
#define CLONE_FILES 0x00000400 /* set if open files shared between processes */
#define CLONE_SIGHAND 0x00000800 /* set if signal handlers and blocked signals shared */
+#define CLONE_PIDFD 0x00001000 /* set if a pidfd should be placed in parent */
#define CLONE_PTRACE 0x00002000 /* set if we want to let tracing continue on the child too */
#define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */
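
With CLONE_PIDFD the kernel writes the new file descriptor to the parent_tid location, so the glibc clone() wrapper's ptid argument can receive it. A minimal sketch (the fallback define only matters for libcs that do not yet expose the flag):

    #define _GNU_SOURCE
    #include <poll.h>
    #include <sched.h>
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>

    #ifndef CLONE_PIDFD
    #define CLONE_PIDFD 0x00001000
    #endif

    #define STACK_SIZE (1024 * 1024)

    static int child_fn(void *arg)
    {
            return 0;
    }

    int main(void)
    {
            char *stack = malloc(STACK_SIZE);
            int pidfd = -1;

            /* The pidfd is stored where parent_tid would normally go. */
            pid_t pid = clone(child_fn, stack + STACK_SIZE,
                              CLONE_PIDFD | SIGCHLD, NULL, &pidfd);
            if (pid < 0) {
                    perror("clone");
                    return 1;
            }

            /* The pidfd becomes readable when the child exits. */
            struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
            poll(&pfd, 1, -1);
            waitpid(pid, NULL, 0);
            return 0;
    }
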
diff --git a/include/uapi/linux/sed-opal.h b/include/uapi/linux/sed-opal.h
index 627624d35030..33e53b80cd1f 100644
--- a/include/uapi/linux/sed-opal.h
+++ b/include/uapi/linux/sed-opal.h
@@ -5,15 +5,6 @@
* Authors:
* Rafael Antognolli <rafael.antognolli@intel.com>
* Scott Bauer <scott.bauer@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef _UAPI_SED_OPAL_H
@@ -58,7 +49,7 @@ struct opal_key {
struct opal_lr_act {
struct opal_key key;
__u32 sum;
- __u8 num_lrs;
+ __u8 num_lrs;
__u8 lr[OPAL_MAX_LRS];
__u8 align[2]; /* Align to 8 byte boundary */
};
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index 6009ee2c2e99..67c4aaaa2308 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -287,4 +287,10 @@
/* RDA UART */
#define PORT_RDA 118
+/* Socionext Milbeaut UART */
+#define PORT_MLB_USIO 119
+
+/* SiFive UART */
+#define PORT_SIFIVE_V0 120
+
#endif /* _UAPILINUX_SERIAL_CORE_H */
diff --git a/include/uapi/linux/sockios.h b/include/uapi/linux/sockios.h
index d393e9ed3964..7d1bccbbef78 100644
--- a/include/uapi/linux/sockios.h
+++ b/include/uapi/linux/sockios.h
@@ -19,6 +19,7 @@
#ifndef _LINUX_SOCKIOS_H
#define _LINUX_SOCKIOS_H
+#include <asm/bitsperlong.h>
#include <asm/sockios.h>
/* Linux-specific socket ioctls */
@@ -27,6 +28,26 @@
#define SOCK_IOC_TYPE 0x89
+/*
+ * the timeval/timespec data structure layout is defined by libc,
+ * so we need to cover both possible versions on 32-bit.
+ */
+/* Get stamp (timeval) */
+#define SIOCGSTAMP_NEW _IOR(SOCK_IOC_TYPE, 0x06, long long[2])
+/* Get stamp (timespec) */
+#define SIOCGSTAMPNS_NEW _IOR(SOCK_IOC_TYPE, 0x07, long long[2])
+
+#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
+/* on 64-bit and x32, avoid the ?: operator */
+#define SIOCGSTAMP SIOCGSTAMP_OLD
+#define SIOCGSTAMPNS SIOCGSTAMPNS_OLD
+#else
+#define SIOCGSTAMP ((sizeof(struct timeval)) == 8 ? \
+ SIOCGSTAMP_OLD : SIOCGSTAMP_NEW)
+#define SIOCGSTAMPNS ((sizeof(struct timespec)) == 8 ? \
+ SIOCGSTAMPNS_OLD : SIOCGSTAMPNS_NEW)
+#endif
+
/* Routing table calls. */
#define SIOCADDRT 0x890B /* add routing table entry */
#define SIOCDELRT 0x890C /* delete routing table entry */
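
Callers are unaffected by the split into _OLD/_NEW numbers: SIOCGSTAMP still resolves to whichever ioctl matches the libc's timeval layout. A minimal sketch that reads the receive timestamp of the most recently received datagram:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/time.h>
    #include <linux/sockios.h>

    /* Call after recv() to learn when that packet arrived. */
    static void print_rx_stamp(int sock)
    {
            struct timeval tv;

            if (ioctl(sock, SIOCGSTAMP, &tv) == 0)
                    printf("last packet received at %ld.%06ld\n",
                           (long)tv.tv_sec, (long)tv.tv_usec);
            else
                    perror("SIOCGSTAMP");
    }
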
diff --git a/include/uapi/linux/spi/spidev.h b/include/uapi/linux/spi/spidev.h
index c4253f0090d8..ee0f2460bff6 100644
--- a/include/uapi/linux/spi/spidev.h
+++ b/include/uapi/linux/spi/spidev.h
@@ -66,6 +66,9 @@
* @delay_usecs: If nonzero, how long to delay after the last bit transfer
* before optionally deselecting the device before the next transfer.
* @cs_change: True to deselect device before starting the next transfer.
+ * @word_delay_usecs: If nonzero, how long to wait between words within one
+ * transfer. This property needs explicit support in the SPI controller,
+ * otherwise it is silently ignored.
*
* This structure is mapped directly to the kernel spi_transfer structure;
* the fields have the same meanings, except of course that the pointers
@@ -100,7 +103,8 @@ struct spi_ioc_transfer {
__u8 cs_change;
__u8 tx_nbits;
__u8 rx_nbits;
- __u16 pad;
+ __u8 word_delay_usecs;
+ __u8 pad;
/* If the contents of 'struct spi_ioc_transfer' ever change
* incompatibly, then the ioctl number (currently 0) must change;
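
A minimal sketch of requesting the new inter-word delay from userspace; as the added documentation says, controllers without support silently ignore the field:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/spi/spidev.h>

    /* Full-duplex transfer with a 10 us pause between words, if supported. */
    static int spi_xfer(int fd, const uint8_t *tx, uint8_t *rx, uint32_t len)
    {
            struct spi_ioc_transfer tr;

            memset(&tr, 0, sizeof(tr));
            tr.tx_buf = (unsigned long)tx;
            tr.rx_buf = (unsigned long)rx;
            tr.len = len;
            tr.word_delay_usecs = 10;       /* new field added by this patch */

            return ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
    }
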
diff --git a/include/uapi/linux/switchtec_ioctl.h b/include/uapi/linux/switchtec_ioctl.h
index 4f4daf8db954..c912b5a678e4 100644
--- a/include/uapi/linux/switchtec_ioctl.h
+++ b/include/uapi/linux/switchtec_ioctl.h
@@ -50,7 +50,7 @@ struct switchtec_ioctl_flash_part_info {
__u32 active;
};
-struct switchtec_ioctl_event_summary {
+struct switchtec_ioctl_event_summary_legacy {
__u64 global;
__u64 part_bitmap;
__u32 local_part;
@@ -59,6 +59,15 @@ struct switchtec_ioctl_event_summary {
__u32 pff[48];
};
+struct switchtec_ioctl_event_summary {
+ __u64 global;
+ __u64 part_bitmap;
+ __u32 local_part;
+ __u32 padding;
+ __u32 part[48];
+ __u32 pff[255];
+};
+
#define SWITCHTEC_IOCTL_EVENT_STACK_ERROR 0
#define SWITCHTEC_IOCTL_EVENT_PPU_ERROR 1
#define SWITCHTEC_IOCTL_EVENT_ISP_ERROR 2
@@ -127,6 +136,8 @@ struct switchtec_ioctl_pff_port {
_IOWR('W', 0x41, struct switchtec_ioctl_flash_part_info)
#define SWITCHTEC_IOCTL_EVENT_SUMMARY \
_IOR('W', 0x42, struct switchtec_ioctl_event_summary)
+#define SWITCHTEC_IOCTL_EVENT_SUMMARY_LEGACY \
+ _IOR('W', 0x42, struct switchtec_ioctl_event_summary_legacy)
#define SWITCHTEC_IOCTL_EVENT_CTL \
_IOWR('W', 0x43, struct switchtec_ioctl_event_ctl)
#define SWITCHTEC_IOCTL_PFF_TO_PORT \
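
Because the new and legacy summary ioctls share command number 0x42 and differ only in argument size, a caller can try the enlarged layout first and fall back on older kernels. A sketch of that strategy (the fallback policy, and the legacy part[48]/padding fields, are assumptions taken from the unchanged part of the struct):

    #include <errno.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/switchtec_ioctl.h>

    static int get_event_summary(int fd, struct switchtec_ioctl_event_summary *s)
    {
            struct switchtec_ioctl_event_summary_legacy legacy;

            if (!ioctl(fd, SWITCHTEC_IOCTL_EVENT_SUMMARY, s))
                    return 0;
            if (errno != ENOTTY)
                    return -1;

            /* Older kernel: only the 48-entry pff layout is understood. */
            if (ioctl(fd, SWITCHTEC_IOCTL_EVENT_SUMMARY_LEGACY, &legacy))
                    return -1;

            memset(s, 0, sizeof(*s));
            s->global = legacy.global;
            s->part_bitmap = legacy.part_bitmap;
            s->local_part = legacy.local_part;
            memcpy(s->part, legacy.part, sizeof(legacy.part));
            memcpy(s->pff, legacy.pff, sizeof(legacy.pff));
            return 0;
    }
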
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 8bb6cc5f3235..b521464ea962 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -160,15 +160,42 @@ enum {
#define TCPI_OPT_ECN_SEEN 16 /* we received at least one packet with ECT */
#define TCPI_OPT_SYN_DATA 32 /* SYN-ACK acked data in SYN sent or rcvd */
+/*
+ * Sender's congestion state indicating normal or abnormal situations
+ * in the last round of packets sent. The state is driven by the ACK
+ * information and timer events.
+ */
enum tcp_ca_state {
+ /*
+ * Nothing bad has been observed recently.
+ * No apparent reordering, packet loss, or ECN marks.
+ */
TCP_CA_Open = 0,
#define TCPF_CA_Open (1<<TCP_CA_Open)
+ /*
+ * The sender enters disordered state when it has received DUPACKs or
+ * SACKs in the last round of packets sent. This could be due to packet
+ * loss or reordering but needs further information to confirm packets
+ * have been lost.
+ */
TCP_CA_Disorder = 1,
#define TCPF_CA_Disorder (1<<TCP_CA_Disorder)
+ /*
+ * The sender enters Congestion Window Reduction (CWR) state when it
+ * has received ACKs with ECN-ECE marks, or has experienced congestion
+ * or packet discard on the sender host (e.g. qdisc).
+ */
TCP_CA_CWR = 2,
#define TCPF_CA_CWR (1<<TCP_CA_CWR)
+ /*
+ * The sender is in fast recovery and retransmitting lost packets,
+ * typically triggered by ACK events.
+ */
TCP_CA_Recovery = 3,
#define TCPF_CA_Recovery (1<<TCP_CA_Recovery)
+ /*
+ * The sender is in loss recovery triggered by retransmission timeout.
+ */
TCP_CA_Loss = 4
#define TCPF_CA_Loss (1<<TCP_CA_Loss)
};
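
These states are visible to applications through the tcpi_ca_state field of struct tcp_info, defined elsewhere in this header. A minimal sketch:

    #include <stdio.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <linux/tcp.h>          /* struct tcp_info, enum tcp_ca_state, TCP_INFO */

    static void print_ca_state(int sock)
    {
            struct tcp_info info;
            socklen_t len = sizeof(info);

            if (getsockopt(sock, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
                    printf("ca_state=%u (%s)\n", info.tcpi_ca_state,
                           info.tcpi_ca_state == TCP_CA_Open ? "open" :
                           info.tcpi_ca_state == TCP_CA_Recovery ? "fast recovery" :
                           "see enum tcp_ca_state");
    }
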
diff --git a/include/uapi/linux/tipc.h b/include/uapi/linux/tipc.h
index 6b2fd4d9655f..7df026ea6aff 100644
--- a/include/uapi/linux/tipc.h
+++ b/include/uapi/linux/tipc.h
@@ -190,6 +190,7 @@ struct sockaddr_tipc {
#define TIPC_MCAST_REPLICAST 134 /* Default: TIPC selects. No arg */
#define TIPC_GROUP_JOIN 135 /* Takes struct tipc_group_req* */
#define TIPC_GROUP_LEAVE 136 /* No argument */
+#define TIPC_SOCK_RECVQ_USED 137 /* Default: none (read only) */
/*
* Flag values
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index 0ebe02ef1a86..efb958fd167d 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -281,6 +281,8 @@ enum {
TIPC_NLA_PROP_TOL, /* u32 */
TIPC_NLA_PROP_WIN, /* u32 */
TIPC_NLA_PROP_MTU, /* u32 */
+ TIPC_NLA_PROP_BROADCAST, /* u32 */
+ TIPC_NLA_PROP_BROADCAST_RATIO, /* u32 */
__TIPC_NLA_PROP_MAX,
TIPC_NLA_PROP_MAX = __TIPC_NLA_PROP_MAX - 1
diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h
index 401d6f01de6a..5b9c26753e46 100644
--- a/include/uapi/linux/tls.h
+++ b/include/uapi/linux/tls.h
@@ -70,6 +70,13 @@
#define TLS_CIPHER_AES_GCM_256_TAG_SIZE 16
#define TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE 8
+#define TLS_CIPHER_AES_CCM_128 53
+#define TLS_CIPHER_AES_CCM_128_IV_SIZE 8
+#define TLS_CIPHER_AES_CCM_128_KEY_SIZE 16
+#define TLS_CIPHER_AES_CCM_128_SALT_SIZE 4
+#define TLS_CIPHER_AES_CCM_128_TAG_SIZE 16
+#define TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE 8
+
#define TLS_SET_RECORD_TYPE 1
#define TLS_GET_RECORD_TYPE 2
@@ -94,4 +101,12 @@ struct tls12_crypto_info_aes_gcm_256 {
unsigned char rec_seq[TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE];
};
+struct tls12_crypto_info_aes_ccm_128 {
+ struct tls_crypto_info info;
+ unsigned char iv[TLS_CIPHER_AES_CCM_128_IV_SIZE];
+ unsigned char key[TLS_CIPHER_AES_CCM_128_KEY_SIZE];
+ unsigned char salt[TLS_CIPHER_AES_CCM_128_SALT_SIZE];
+ unsigned char rec_seq[TLS_CIPHER_AES_CCM_128_REC_SEQ_SIZE];
+};
+
#endif /* _UAPI_LINUX_TLS_H */
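
Enabling the new cipher follows the existing kTLS pattern: attach the "tls" ULP, then hand the kernel a filled tls12_crypto_info_aes_ccm_128. A hedged sketch with dummy key material (SOL_TLS is not defined by this header; 282 is the value used by the TLS ULP):

    #include <string.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <linux/tcp.h>          /* TCP_ULP */
    #include <linux/tls.h>

    #ifndef SOL_TLS
    #define SOL_TLS 282
    #endif

    static int enable_tls_ccm_tx(int sock)
    {
            struct tls12_crypto_info_aes_ccm_128 ci;

            memset(&ci, 0, sizeof(ci));     /* dummy key/iv/salt/rec_seq */
            ci.info.version = TLS_1_2_VERSION;
            ci.info.cipher_type = TLS_CIPHER_AES_CCM_128;

            if (setsockopt(sock, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls")))
                    return -1;
            return setsockopt(sock, SOL_TLS, TLS_TX, &ci, sizeof(ci));
    }
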
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 06479f2fb3ae..37807f23231e 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -404,6 +404,10 @@ enum v4l2_mpeg_video_multi_slice_mode {
#define V4L2_CID_MPEG_VIDEO_MV_V_SEARCH_RANGE (V4L2_CID_MPEG_BASE+228)
#define V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME (V4L2_CID_MPEG_BASE+229)
+/* CIDs for the FWHT codec as used by the vicodec driver. */
+#define V4L2_CID_FWHT_I_FRAME_QP (V4L2_CID_MPEG_BASE + 290)
+#define V4L2_CID_FWHT_P_FRAME_QP (V4L2_CID_MPEG_BASE + 291)
+
#define V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP (V4L2_CID_MPEG_BASE+300)
#define V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP (V4L2_CID_MPEG_BASE+301)
#define V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP (V4L2_CID_MPEG_BASE+302)
@@ -535,6 +539,10 @@ enum v4l2_mpeg_video_h264_hierarchical_coding_type {
#define V4L2_CID_MPEG_VIDEO_H264_HIERARCHICAL_CODING_LAYER_QP (V4L2_CID_MPEG_BASE+382)
#define V4L2_CID_MPEG_VIDEO_H264_CONSTRAINED_INTRA_PREDICTION (V4L2_CID_MPEG_BASE+383)
#define V4L2_CID_MPEG_VIDEO_H264_CHROMA_QP_INDEX_OFFSET (V4L2_CID_MPEG_BASE+384)
+#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MIN_QP (V4L2_CID_MPEG_BASE+385)
+#define V4L2_CID_MPEG_VIDEO_H264_I_FRAME_MAX_QP (V4L2_CID_MPEG_BASE+386)
+#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MIN_QP (V4L2_CID_MPEG_BASE+387)
+#define V4L2_CID_MPEG_VIDEO_H264_P_FRAME_MAX_QP (V4L2_CID_MPEG_BASE+388)
#define V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP (V4L2_CID_MPEG_BASE+400)
#define V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP (V4L2_CID_MPEG_BASE+401)
#define V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP (V4L2_CID_MPEG_BASE+402)
diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h
index 0e68024f36c7..26f39816af14 100644
--- a/include/uapi/linux/vbox_vmmdev_types.h
+++ b/include/uapi/linux/vbox_vmmdev_types.h
@@ -102,6 +102,66 @@ enum vmmdev_request_type {
#define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL32
#endif
+/* vmmdev_request_header.requestor defines */
+
+/* Requestor user not given. */
+#define VMMDEV_REQUESTOR_USR_NOT_GIVEN 0x00000000
+/* The kernel driver (vboxguest) is the requestor. */
+#define VMMDEV_REQUESTOR_USR_DRV 0x00000001
+/* Some other kernel driver is the requestor. */
+#define VMMDEV_REQUESTOR_USR_DRV_OTHER 0x00000002
+/* The root user or an admin user is the requestor. */
+#define VMMDEV_REQUESTOR_USR_ROOT 0x00000003
+/* A regular, unprivileged user is making the request. */
+#define VMMDEV_REQUESTOR_USR_USER 0x00000006
+/* User classification mask. */
+#define VMMDEV_REQUESTOR_USR_MASK 0x00000007
+
+/* Kernel mode request. Note this is 0, check for !USERMODE instead. */
+#define VMMDEV_REQUESTOR_KERNEL 0x00000000
+/* User mode request. */
+#define VMMDEV_REQUESTOR_USERMODE 0x00000008
+/* User or kernel mode classification mask. */
+#define VMMDEV_REQUESTOR_MODE_MASK 0x00000008
+
+/* Don't know the physical console association of the requestor. */
+#define VMMDEV_REQUESTOR_CON_DONT_KNOW 0x00000000
+/*
+ * The request originates with a process that is NOT associated with the
+ * physical console.
+ */
+#define VMMDEV_REQUESTOR_CON_NO 0x00000010
+/* Requestor process is associated with the physical console. */
+#define VMMDEV_REQUESTOR_CON_YES 0x00000020
+/* Console classification mask. */
+#define VMMDEV_REQUESTOR_CON_MASK 0x00000030
+
+/* Requestor is member of special VirtualBox user group. */
+#define VMMDEV_REQUESTOR_GRP_VBOX 0x00000080
+
+/* Note: trust level is for Windows guests only; Linux always uses not-given */
+/* Requestor trust level: Unspecified */
+#define VMMDEV_REQUESTOR_TRUST_NOT_GIVEN 0x00000000
+/* Requestor trust level: Untrusted (SID S-1-16-0) */
+#define VMMDEV_REQUESTOR_TRUST_UNTRUSTED 0x00001000
+/* Requestor trust level: Untrusted (SID S-1-16-4096) */
+#define VMMDEV_REQUESTOR_TRUST_LOW 0x00002000
+/* Requestor trust level: Medium (SID S-1-16-8192) */
+#define VMMDEV_REQUESTOR_TRUST_MEDIUM 0x00003000
+/* Requestor trust level: Medium plus (SID S-1-16-8448) */
+#define VMMDEV_REQUESTOR_TRUST_MEDIUM_PLUS 0x00004000
+/* Requestor trust level: High (SID S-1-16-12288) */
+#define VMMDEV_REQUESTOR_TRUST_HIGH 0x00005000
+/* Requestor trust level: System (SID S-1-16-16384) */
+#define VMMDEV_REQUESTOR_TRUST_SYSTEM 0x00006000
+/* Requestor trust level >= Protected (SID S-1-16-20480, S-1-16-28672) */
+#define VMMDEV_REQUESTOR_TRUST_PROTECTED 0x00007000
+/* Requestor trust level mask */
+#define VMMDEV_REQUESTOR_TRUST_MASK 0x00007000
+
+/* Requestor is using the less trusted user device node (/dev/vboxuser) */
+#define VMMDEV_REQUESTOR_USER_DEVICE 0x00008000
+
/** HGCM service location types. */
enum vmmdev_hgcm_service_location_type {
VMMDEV_HGCM_LOC_INVALID = 0,
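
The requestor word packs several independent classifications, so consumers just mask out the group they care about. A small sketch using only the defines added above:

    #include <stdbool.h>
    #include <linux/types.h>
    #include <linux/vbox_vmmdev_types.h>

    /* True if the request came from an unprivileged user-mode process. */
    static bool requestor_is_unprivileged_user(__u32 requestor)
    {
            return (requestor & VMMDEV_REQUESTOR_MODE_MASK) == VMMDEV_REQUESTOR_USERMODE &&
                   (requestor & VMMDEV_REQUESTOR_USR_MASK) == VMMDEV_REQUESTOR_USR_USER;
    }

    /* True if the requestor came in through the less trusted /dev/vboxuser node. */
    static bool requestor_via_user_device(__u32 requestor)
    {
            return requestor & VMMDEV_REQUESTOR_USER_DEVICE;
    }
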
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 02bb7ad6e986..8f10748dac79 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -353,6 +353,10 @@ struct vfio_region_gfx_edid {
#define VFIO_DEVICE_GFX_LINK_STATE_DOWN 2
};
+#define VFIO_REGION_TYPE_CCW (2)
+/* ccw sub-types */
+#define VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD (1)
+
/*
* 10de vendor sub-type
*
diff --git a/include/uapi/linux/vfio_ccw.h b/include/uapi/linux/vfio_ccw.h
index 2ec5f367ff78..cbecbf0cd54f 100644
--- a/include/uapi/linux/vfio_ccw.h
+++ b/include/uapi/linux/vfio_ccw.h
@@ -12,6 +12,7 @@
#include <linux/types.h>
+/* used for START SUBCHANNEL, always present */
struct ccw_io_region {
#define ORB_AREA_SIZE 12
__u8 orb_area[ORB_AREA_SIZE];
@@ -22,4 +23,15 @@ struct ccw_io_region {
__u32 ret_code;
} __packed;
+/*
+ * used for processing commands that trigger asynchronous actions
+ * Note: this is controlled by a capability
+ */
+#define VFIO_CCW_ASYNC_CMD_HSCH (1 << 0)
+#define VFIO_CCW_ASYNC_CMD_CSCH (1 << 1)
+struct ccw_cmd_region {
+ __u32 command;
+ __u32 ret_code;
+} __packed;
+
#endif
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 1db220da3bcc..1050a75fb7ef 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -514,9 +514,21 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_RGB444 v4l2_fourcc('R', '4', '4', '4') /* 16 xxxxrrrr ggggbbbb */
#define V4L2_PIX_FMT_ARGB444 v4l2_fourcc('A', 'R', '1', '2') /* 16 aaaarrrr ggggbbbb */
#define V4L2_PIX_FMT_XRGB444 v4l2_fourcc('X', 'R', '1', '2') /* 16 xxxxrrrr ggggbbbb */
+#define V4L2_PIX_FMT_RGBA444 v4l2_fourcc('R', 'A', '1', '2') /* 16 rrrrgggg bbbbaaaa */
+#define V4L2_PIX_FMT_RGBX444 v4l2_fourcc('R', 'X', '1', '2') /* 16 rrrrgggg bbbbxxxx */
+#define V4L2_PIX_FMT_ABGR444 v4l2_fourcc('A', 'B', '1', '2') /* 16 aaaabbbb ggggrrrr */
+#define V4L2_PIX_FMT_XBGR444 v4l2_fourcc('X', 'B', '1', '2') /* 16 xxxxbbbb ggggrrrr */
+#define V4L2_PIX_FMT_BGRA444 v4l2_fourcc('B', 'A', '1', '2') /* 16 bbbbgggg rrrraaaa */
+#define V4L2_PIX_FMT_BGRX444 v4l2_fourcc('B', 'X', '1', '2') /* 16 bbbbgggg rrrrxxxx */
#define V4L2_PIX_FMT_RGB555 v4l2_fourcc('R', 'G', 'B', 'O') /* 16 RGB-5-5-5 */
#define V4L2_PIX_FMT_ARGB555 v4l2_fourcc('A', 'R', '1', '5') /* 16 ARGB-1-5-5-5 */
#define V4L2_PIX_FMT_XRGB555 v4l2_fourcc('X', 'R', '1', '5') /* 16 XRGB-1-5-5-5 */
+#define V4L2_PIX_FMT_RGBA555 v4l2_fourcc('R', 'A', '1', '5') /* 16 RGBA-5-5-5-1 */
+#define V4L2_PIX_FMT_RGBX555 v4l2_fourcc('R', 'X', '1', '5') /* 16 RGBX-5-5-5-1 */
+#define V4L2_PIX_FMT_ABGR555 v4l2_fourcc('A', 'B', '1', '5') /* 16 ABGR-1-5-5-5 */
+#define V4L2_PIX_FMT_XBGR555 v4l2_fourcc('X', 'B', '1', '5') /* 16 XBGR-1-5-5-5 */
+#define V4L2_PIX_FMT_BGRA555 v4l2_fourcc('B', 'A', '1', '5') /* 16 BGRA-5-5-5-1 */
+#define V4L2_PIX_FMT_BGRX555 v4l2_fourcc('B', 'X', '1', '5') /* 16 BGRX-5-5-5-1 */
#define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */
#define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */
#define V4L2_PIX_FMT_ARGB555X v4l2_fourcc_be('A', 'R', '1', '5') /* 16 ARGB-5-5-5 BE */
@@ -528,7 +540,11 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B', 'G', 'R', '4') /* 32 BGR-8-8-8-8 */
#define V4L2_PIX_FMT_ABGR32 v4l2_fourcc('A', 'R', '2', '4') /* 32 BGRA-8-8-8-8 */
#define V4L2_PIX_FMT_XBGR32 v4l2_fourcc('X', 'R', '2', '4') /* 32 BGRX-8-8-8-8 */
+#define V4L2_PIX_FMT_BGRA32 v4l2_fourcc('R', 'A', '2', '4') /* 32 ABGR-8-8-8-8 */
+#define V4L2_PIX_FMT_BGRX32 v4l2_fourcc('R', 'X', '2', '4') /* 32 XBGR-8-8-8-8 */
#define V4L2_PIX_FMT_RGB32 v4l2_fourcc('R', 'G', 'B', '4') /* 32 RGB-8-8-8-8 */
+#define V4L2_PIX_FMT_RGBA32 v4l2_fourcc('A', 'B', '2', '4') /* 32 RGBA-8-8-8-8 */
+#define V4L2_PIX_FMT_RGBX32 v4l2_fourcc('X', 'B', '2', '4') /* 32 RGBX-8-8-8-8 */
#define V4L2_PIX_FMT_ARGB32 v4l2_fourcc('B', 'A', '2', '4') /* 32 ARGB-8-8-8-8 */
#define V4L2_PIX_FMT_XRGB32 v4l2_fourcc('B', 'X', '2', '4') /* 32 XRGB-8-8-8-8 */
@@ -669,6 +685,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0') /* VP9 */
#define V4L2_PIX_FMT_HEVC v4l2_fourcc('H', 'E', 'V', 'C') /* HEVC aka H.265 */
#define V4L2_PIX_FMT_FWHT v4l2_fourcc('F', 'W', 'H', 'T') /* Fast Walsh Hadamard Transform (vicodec) */
+#define V4L2_PIX_FMT_FWHT_STATELESS v4l2_fourcc('S', 'F', 'W', 'H') /* Stateless FWHT (vicodec) */
/* Vendor-specific formats */
#define V4L2_PIX_FMT_CPIA1 v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index 8e88eba1fa7a..0c85914d9369 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -40,8 +40,16 @@
#include <linux/types.h>
-#define VIRTIO_GPU_F_VIRGL 0
-#define VIRTIO_GPU_F_EDID 1
+/*
+ * VIRTIO_GPU_CMD_CTX_*
+ * VIRTIO_GPU_CMD_*_3D
+ */
+#define VIRTIO_GPU_F_VIRGL 0
+
+/*
+ * VIRTIO_GPU_CMD_GET_EDID
+ */
+#define VIRTIO_GPU_F_EDID 1
enum virtio_gpu_ctrl_type {
VIRTIO_GPU_UNDEFINED = 0,
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index 7fd6f633534c..8ac292cf4d00 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -20,8 +20,8 @@
/*
* Queue Numbering
*
- * The external queues (DMA channels + CPU) MUST be before the internal queues
- * and each group (DMA channels + CPU and internal) must be contiguous inside
+ * The external queues (PCI DMA channels) MUST be before the internal queues
+ * and each group (PCI DMA channels and internal) must be contiguous inside
* itself but there can be a gap between the two groups (although not
* recommended)
*/
@@ -33,7 +33,7 @@ enum goya_queue_id {
GOYA_QUEUE_ID_DMA_3,
GOYA_QUEUE_ID_DMA_4,
GOYA_QUEUE_ID_CPU_PQ,
- GOYA_QUEUE_ID_MME,
+ GOYA_QUEUE_ID_MME, /* Internal queues start here */
GOYA_QUEUE_ID_TPC0,
GOYA_QUEUE_ID_TPC1,
GOYA_QUEUE_ID_TPC2,
@@ -45,11 +45,18 @@ enum goya_queue_id {
GOYA_QUEUE_ID_SIZE
};
+enum hl_device_status {
+ HL_DEVICE_STATUS_OPERATIONAL,
+ HL_DEVICE_STATUS_IN_RESET,
+ HL_DEVICE_STATUS_MALFUNCTION
+};
+
/* Opcode for management ioctl */
#define HL_INFO_HW_IP_INFO 0
#define HL_INFO_HW_EVENTS 1
#define HL_INFO_DRAM_USAGE 2
#define HL_INFO_HW_IDLE 3
+#define HL_INFO_DEVICE_STATUS 4
#define HL_INFO_VERSION_MAX_LEN 128
@@ -82,6 +89,11 @@ struct hl_info_hw_idle {
__u32 pad;
};
+struct hl_info_device_status {
+ __u32 status;
+ __u32 pad;
+};
+
struct hl_info_args {
/* Location of relevant struct in userspace */
__u64 return_pointer;
@@ -181,7 +193,10 @@ struct hl_cs_in {
};
struct hl_cs_out {
- /* this holds the sequence number of the CS to pass to wait ioctl */
+ /*
+ * seq holds the sequence number of the CS to pass to wait ioctl. All
+ * values are valid except for 0 and ULLONG_MAX
+ */
__u64 seq;
/* HL_CS_STATUS_* */
__u32 status;
@@ -320,6 +335,110 @@ union hl_mem_args {
struct hl_mem_out out;
};
+#define HL_DEBUG_MAX_AUX_VALUES 10
+
+struct hl_debug_params_etr {
+ /* Address in memory to allocate buffer */
+ __u64 buffer_address;
+
+ /* Size of buffer to allocate */
+ __u64 buffer_size;
+
+ /* Sink operation mode: SW fifo, HW fifo, Circular buffer */
+ __u32 sink_mode;
+ __u32 pad;
+};
+
+struct hl_debug_params_etf {
+ /* Address in memory to allocate buffer */
+ __u64 buffer_address;
+
+ /* Size of buffer to allocate */
+ __u64 buffer_size;
+
+ /* Sink operation mode: SW fifo, HW fifo, Circular buffer */
+ __u32 sink_mode;
+ __u32 pad;
+};
+
+struct hl_debug_params_stm {
+ /* Two bit masks for HW event and Stimulus Port */
+ __u64 he_mask;
+ __u64 sp_mask;
+
+ /* Trace source ID */
+ __u32 id;
+
+ /* Frequency for the timestamp register */
+ __u32 frequency;
+};
+
+struct hl_debug_params_bmon {
+ /* Two address ranges that the user can request to filter */
+ __u64 start_addr0;
+ __u64 addr_mask0;
+
+ __u64 start_addr1;
+ __u64 addr_mask1;
+
+ /* Capture window configuration */
+ __u32 bw_win;
+ __u32 win_capture;
+
+ /* Trace source ID */
+ __u32 id;
+ __u32 pad;
+};
+
+struct hl_debug_params_spmu {
+ /* Event types selection */
+ __u64 event_types[HL_DEBUG_MAX_AUX_VALUES];
+
+ /* Number of event types selection */
+ __u32 event_types_num;
+ __u32 pad;
+};
+
+/* Opcode for ETR component */
+#define HL_DEBUG_OP_ETR 0
+/* Opcode for ETF component */
+#define HL_DEBUG_OP_ETF 1
+/* Opcode for STM component */
+#define HL_DEBUG_OP_STM 2
+/* Opcode for FUNNEL component */
+#define HL_DEBUG_OP_FUNNEL 3
+/* Opcode for BMON component */
+#define HL_DEBUG_OP_BMON 4
+/* Opcode for SPMU component */
+#define HL_DEBUG_OP_SPMU 5
+/* Opcode for timestamp */
+#define HL_DEBUG_OP_TIMESTAMP 6
+
+struct hl_debug_args {
+ /*
+ * Pointer to user input structure.
+ * This field is relevant to specific opcodes.
+ */
+ __u64 input_ptr;
+ /* Pointer to user output structure */
+ __u64 output_ptr;
+ /* Size of user input structure */
+ __u32 input_size;
+ /* Size of user output structure */
+ __u32 output_size;
+ /* HL_DEBUG_OP_* */
+ __u32 op;
+ /*
+ * Register index in the component, taken from the debug_regs_index enum
+ * in the various ASIC header files
+ */
+ __u32 reg_idx;
+ /* Enable/disable */
+ __u32 enable;
+ /* Context ID - Currently not in use */
+ __u32 ctx_id;
+};
+
/*
* Various information operations such as:
* - H/W IP information
@@ -361,6 +480,12 @@ union hl_mem_args {
* Each JOB will be enqueued on a specific queue, according to the user's input.
* There can be more than one JOB per queue.
*
+ * The CS IOCTL will receive three sets of JOBS. One set is for "restore" phase,
+ * a second set is for "execution" phase and a third set is for "store" phase.
+ * The JOBS on the "restore" phase are enqueued only after context-switch
+ * (or if it's the first CS for this context). The user can also order the
+ * driver to run the "restore" phase explicitly.
+ *
* There are two types of queues - external and internal. External queues
* are DMA queues which transfer data from/to the Host. All other queues are
* internal. The driver will get completion notifications from the device only
@@ -377,19 +502,18 @@ union hl_mem_args {
* relevant queues. Therefore, the user mustn't assume the CS has been completed
* or has even started to execute.
*
- * Upon successful enqueue, the IOCTL returns an opaque handle which the user
+ * Upon successful enqueue, the IOCTL returns a sequence number which the user
* can use with the "Wait for CS" IOCTL to check whether the handle's CS
* external JOBS have been completed. Note that if the CS has internal JOBS
* which can execute AFTER the external JOBS have finished, the driver might
* report that the CS has finished executing BEFORE the internal JOBS have
* actually finish executing.
*
- * The CS IOCTL will receive three sets of JOBS. One set is for "restore" phase,
- * a second set is for "execution" phase and a third set is for "store" phase.
- * The JOBS on the "restore" phase are enqueued only after context-switch
- * (or if its the first CS for this context). The user can also order the
- * driver to run the "restore" phase explicitly
- *
+ * Even though the sequence number increments per CS, the user can NOT
+ * automatically assume that if CS with sequence number N finished, then CS
+ * with sequence number N-1 also finished. The user can make this assumption if
+ * and only if CS N and CS N-1 are exactly the same (same CBs for the same
+ * queues).
*/
#define HL_IOCTL_CS \
_IOWR('H', 0x03, union hl_cs_args)
@@ -444,7 +568,20 @@ union hl_mem_args {
#define HL_IOCTL_MEMORY \
_IOWR('H', 0x05, union hl_mem_args)
+/*
+ * Debug
+ * - Enable/disable the ETR/ETF/FUNNEL/STM/BMON/SPMU debug traces
+ *
+ * This IOCTL allows the user to get debug traces from the chip.
+ *
+ * The user needs to provide the register index and essential data such as
+ * buffer address and size.
+ *
+ */
+#define HL_IOCTL_DEBUG \
+ _IOWR('H', 0x06, struct hl_debug_args)
+
#define HL_COMMAND_START 0x01
-#define HL_COMMAND_END 0x06
+#define HL_COMMAND_END 0x07
#endif /* HABANALABS_H_ */
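
A hedged sketch of querying the new HL_INFO_DEVICE_STATUS opcode; HL_IOCTL_INFO and the return_pointer/return_size/op fields of struct hl_info_args come from the unchanged part of this header, so treat those names as assumptions:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <misc/habanalabs.h>

    /* Returns HL_DEVICE_STATUS_* on success, -1 on error. */
    static int query_device_status(int fd)
    {
            struct hl_info_device_status status = { 0 };
            struct hl_info_args args = { 0 };

            args.op = HL_INFO_DEVICE_STATUS;
            args.return_pointer = (__u64)(uintptr_t)&status;
            args.return_size = sizeof(status);

            if (ioctl(fd, HL_IOCTL_INFO, &args))
                    return -1;
            return status.status;
    }
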
diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h
new file mode 100644
index 000000000000..9599a2a62be8
--- /dev/null
+++ b/include/uapi/rdma/efa-abi.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+/*
+ * Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All rights reserved.
+ */
+
+#ifndef EFA_ABI_USER_H
+#define EFA_ABI_USER_H
+
+#include <linux/types.h>
+
+/*
+ * Increment this value if any changes that break userspace ABI
+ * compatibility are made.
+ */
+#define EFA_UVERBS_ABI_VERSION 1
+
+/*
+ * Keep structs aligned to 8 bytes.
+ * Keep reserved fields as arrays of __u8 named reserved_XXX where XXX is the
+ * hex bit offset of the field.
+ */
+
+enum efa_ibv_user_cmds_supp_udata {
+ EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE = 1 << 0,
+ EFA_USER_CMDS_SUPP_UDATA_CREATE_AH = 1 << 1,
+};
+
+struct efa_ibv_alloc_ucontext_resp {
+ __u32 comp_mask;
+ __u32 cmds_supp_udata_mask;
+ __u16 sub_cqs_per_cq;
+ __u16 inline_buf_size;
+ __u32 max_llq_size; /* bytes */
+};
+
+struct efa_ibv_alloc_pd_resp {
+ __u32 comp_mask;
+ __u16 pdn;
+ __u8 reserved_30[2];
+};
+
+struct efa_ibv_create_cq {
+ __u32 comp_mask;
+ __u32 cq_entry_size;
+ __u16 num_sub_cqs;
+ __u8 reserved_50[6];
+};
+
+struct efa_ibv_create_cq_resp {
+ __u32 comp_mask;
+ __u8 reserved_20[4];
+ __aligned_u64 q_mmap_key;
+ __aligned_u64 q_mmap_size;
+ __u16 cq_idx;
+ __u8 reserved_d0[6];
+};
+
+enum {
+ EFA_QP_DRIVER_TYPE_SRD = 0,
+};
+
+struct efa_ibv_create_qp {
+ __u32 comp_mask;
+ __u32 rq_ring_size; /* bytes */
+ __u32 sq_ring_size; /* bytes */
+ __u32 driver_qp_type;
+};
+
+struct efa_ibv_create_qp_resp {
+ __u32 comp_mask;
+ /* the offset inside the page of the rq db */
+ __u32 rq_db_offset;
+ /* the offset inside the page of the sq db */
+ __u32 sq_db_offset;
+ /* the offset inside the page of descriptors buffer */
+ __u32 llq_desc_offset;
+ __aligned_u64 rq_mmap_key;
+ __aligned_u64 rq_mmap_size;
+ __aligned_u64 rq_db_mmap_key;
+ __aligned_u64 sq_db_mmap_key;
+ __aligned_u64 llq_desc_mmap_key;
+ __u16 send_sub_cq_idx;
+ __u16 recv_sub_cq_idx;
+ __u8 reserved_1e0[4];
+};
+
+struct efa_ibv_create_ah_resp {
+ __u32 comp_mask;
+ __u16 efa_address_handle;
+ __u8 reserved_30[2];
+};
+
+struct efa_ibv_ex_query_device_resp {
+ __u32 comp_mask;
+ __u32 max_sq_wr;
+ __u32 max_rq_wr;
+ __u16 max_sq_sge;
+ __u16 max_rq_sge;
+};
+
+#endif /* EFA_ABI_USER_H */
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index 87b3198f4b5d..624f5b53eb1f 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -238,6 +238,7 @@ enum mlx5_ib_query_dev_resp_flags {
MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP = 1 << 0,
MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD = 1 << 1,
MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE = 1 << 2,
+ MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT = 1 << 3,
};
enum mlx5_ib_tunnel_offloads {
@@ -359,6 +360,7 @@ enum mlx5_ib_create_qp_resp_mask {
MLX5_IB_CREATE_QP_RESP_MASK_TISN = 1UL << 1,
MLX5_IB_CREATE_QP_RESP_MASK_RQN = 1UL << 2,
MLX5_IB_CREATE_QP_RESP_MASK_SQN = 1UL << 3,
+ MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR = 1UL << 4,
};
struct mlx5_ib_create_qp_resp {
@@ -370,6 +372,7 @@ struct mlx5_ib_create_qp_resp {
__u32 rqn;
__u32 sqn;
__u32 reserved1;
+ __u64 tir_icm_addr;
};
struct mlx5_ib_alloc_mw {
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index 8149d224030b..d404c951954c 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -44,6 +44,7 @@ enum mlx5_ib_create_flow_action_attrs {
enum mlx5_ib_alloc_dm_attrs {
MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET = (1U << UVERBS_ID_NS_SHIFT),
MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
+ MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
};
enum mlx5_ib_devx_methods {
@@ -144,6 +145,7 @@ enum mlx5_ib_flow_matcher_create_attrs {
MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE,
MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA,
MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
+ MLX5_IB_ATTR_FLOW_MATCHER_FT_TYPE,
};
enum mlx5_ib_flow_matcher_destroy_attrs {
diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
index 4a701033b93f..a8f34c237458 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
@@ -42,6 +42,7 @@ enum mlx5_ib_uapi_flow_action_flags {
enum mlx5_ib_uapi_flow_table_type {
MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX = 0x0,
MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX = 0x1,
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB = 0x2,
};
enum mlx5_ib_uapi_flow_action_packet_reformat_type {
@@ -56,5 +57,11 @@ struct mlx5_ib_uapi_devx_async_cmd_hdr {
__u8 out_data[];
};
+enum mlx5_ib_uapi_dm_type {
+ MLX5_IB_UAPI_DM_TYPE_MEMIC,
+ MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM,
+ MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM,
+};
+
#endif
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 5cc592728071..42a8bdc40a14 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -49,17 +49,6 @@ enum {
RDMA_NL_IWPM_NUM_OPS
};
-struct rdma_cm_id_stats {
- __u32 qp_num;
- __u32 bound_dev_if;
- __u32 port_space;
- __s32 pid;
- __u8 cm_state;
- __u8 node_type;
- __u8 port_num;
- __u8 qp_type;
-};
-
enum {
IWPM_NLA_REG_PID_UNSPEC = 0,
IWPM_NLA_REG_PID_SEQ,
@@ -261,7 +250,10 @@ enum rdma_nldev_command {
RDMA_NLDEV_CMD_PORT_GET, /* can dump */
- /* 6 - 8 are free to use */
+ RDMA_NLDEV_CMD_SYS_GET, /* can dump */
+ RDMA_NLDEV_CMD_SYS_SET,
+
+ /* 8 is free to use */
RDMA_NLDEV_CMD_RES_GET = 9, /* can dump */
@@ -473,6 +465,21 @@ enum rdma_nldev_attr {
RDMA_NLDEV_ATTR_LINK_TYPE, /* string */
/*
+ * net namespace mode for rdma subsystem:
+ * either shared or exclusive among multiple net namespaces.
+ */
+ RDMA_NLDEV_SYS_ATTR_NETNS_MODE, /* u8 */
+ /*
+ * Device protocol, e.g. ib, iw, usnic, roce and opa
+ */
+ RDMA_NLDEV_ATTR_DEV_PROTOCOL, /* string */
+
+ /*
+ * File descriptor handle of the net namespace object
+ */
+ RDMA_NLDEV_NET_NS_FD, /* u32 */
+
+ /*
* Always the end
*/
RDMA_NLDEV_ATTR_MAX
diff --git a/include/uapi/rdma/rdma_user_ioctl_cmds.h b/include/uapi/rdma/rdma_user_ioctl_cmds.h
index 06c34d99be85..26213f49f5c8 100644
--- a/include/uapi/rdma/rdma_user_ioctl_cmds.h
+++ b/include/uapi/rdma/rdma_user_ioctl_cmds.h
@@ -102,6 +102,7 @@ enum rdma_driver_id {
RDMA_DRIVER_RXE,
RDMA_DRIVER_HFI1,
RDMA_DRIVER_QIB,
+ RDMA_DRIVER_EFA,
};
#endif
diff --git a/include/uapi/scsi/fc/fc_els.h b/include/uapi/scsi/fc/fc_els.h
index b7e0a5ed40de..a81c53508cc6 100644
--- a/include/uapi/scsi/fc/fc_els.h
+++ b/include/uapi/scsi/fc/fc_els.h
@@ -52,6 +52,7 @@ enum fc_els_cmd {
ELS_RRQ = 0x12, /* reinstate recovery qualifier */
ELS_REC = 0x13, /* read exchange concise */
ELS_SRR = 0x14, /* sequence retransmission request */
+ ELS_FPIN = 0x16, /* Fabric Performance Impact Notification */
ELS_PRLI = 0x20, /* process login */
ELS_PRLO = 0x21, /* process logout */
ELS_SCN = 0x22, /* state change notification */
@@ -119,6 +120,7 @@ enum fc_els_cmd {
[ELS_RRQ] = "RRQ", \
[ELS_REC] = "REC", \
[ELS_SRR] = "SRR", \
+ [ELS_FPIN] = "FPIN", \
[ELS_PRLI] = "PRLI", \
[ELS_PRLO] = "PRLO", \
[ELS_SCN] = "SCN", \
@@ -829,4 +831,35 @@ enum fc_els_clid_ic {
ELS_CLID_IC_LIP = 8, /* receiving LIP */
};
+
+/*
+ * Fabric Notification Descriptor Tag values
+ */
+enum fc_fn_dtag {
+ ELS_FN_DTAG_LNK_INTEGRITY = 0x00020001, /* Link Integrity */
+ ELS_FN_DTAG_PEER_CONGEST = 0x00020003, /* Peer Congestion */
+ ELS_FN_DTAG_CONGESTION = 0x00020004, /* Congestion */
+};
+
+/*
+ * Fabric Notification Descriptor
+ */
+struct fc_fn_desc {
+ __be32 fn_desc_tag; /* Notification Descriptor Tag */
+ __be32 fn_desc_value_len; /* Length of Descriptor Value field
+ * (in bytes)
+ */
+ __u8 fn_desc_value[0]; /* Descriptor Value */
+};
+
+/*
+ * ELS_FPIN - Fabric Performance Impact Notification
+ */
+struct fc_els_fpin {
+ __u8 fpin_cmd; /* command (0x16) */
+ __u8 fpin_zero[3]; /* specified as zero - part of cmd */
+ __be32 fpin_desc_cnt; /* count of descriptors */
+ struct fc_fn_desc fpin_desc[0]; /* Descriptor list */
+};
+
#endif /* _FC_ELS_H_ */
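
The FPIN payload is a counted list of variable-length descriptors. A sketch of walking them with the structures added above (it assumes the descriptors are packed back to back and that the frame has already been length-validated):

    #include <stdio.h>
    #include <arpa/inet.h>          /* ntohl() for the __be32 fields */
    #include <scsi/fc/fc_els.h>

    static void walk_fpin(const struct fc_els_fpin *fpin)
    {
            const __u8 *p = (const __u8 *)fpin->fpin_desc;
            __u32 i, count = ntohl(fpin->fpin_desc_cnt);

            for (i = 0; i < count; i++) {
                    const struct fc_fn_desc *d = (const struct fc_fn_desc *)p;
                    __u32 len = ntohl(d->fn_desc_value_len);

                    printf("descriptor tag 0x%08x, %u value bytes\n",
                           ntohl(d->fn_desc_tag), len);
                    p += sizeof(*d) + len;
            }
    }
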
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 404d4b9ffe76..df1153cea0b7 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -32,6 +32,7 @@
#ifndef __KERNEL__
#include <stdlib.h>
+#include <time.h>
#endif
/*
diff --git a/include/uapi/sound/sof/abi.h b/include/uapi/sound/sof/abi.h
new file mode 100644
index 000000000000..37e0a90dc9e6
--- /dev/null
+++ b/include/uapi/sound/sof/abi.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ */
+
+/**
+ * SOF ABI versioning is based on Semantic Versioning where we have a given
+ * MAJOR.MINOR.PATCH version number. See https://semver.org/
+ *
+ * Rules for incrementing or changing version :-
+ *
+ * 1) Increment MAJOR version if you make incompatible API changes. MINOR and
+ * PATCH should be reset to 0.
+ *
+ * 2) Increment MINOR version if you add backwards compatible features or
+ * changes. PATCH should be reset to 0.
+ *
+ * 3) Increment PATCH version if you add backwards compatible bug fixes.
+ */
+
+#ifndef __INCLUDE_UAPI_SOUND_SOF_ABI_H__
+#define __INCLUDE_UAPI_SOUND_SOF_ABI_H__
+
+/* SOF ABI version major, minor and patch numbers */
+#define SOF_ABI_MAJOR 3
+#define SOF_ABI_MINOR 4
+#define SOF_ABI_PATCH 0
+
+/* SOF ABI version number. Format within 32bit word is MMmmmppp */
+#define SOF_ABI_MAJOR_SHIFT 24
+#define SOF_ABI_MAJOR_MASK 0xff
+#define SOF_ABI_MINOR_SHIFT 12
+#define SOF_ABI_MINOR_MASK 0xfff
+#define SOF_ABI_PATCH_SHIFT 0
+#define SOF_ABI_PATCH_MASK 0xfff
+
+#define SOF_ABI_VER(major, minor, patch) \
+ (((major) << SOF_ABI_MAJOR_SHIFT) | \
+ ((minor) << SOF_ABI_MINOR_SHIFT) | \
+ ((patch) << SOF_ABI_PATCH_SHIFT))
+
+#define SOF_ABI_VERSION_MAJOR(version) \
+ (((version) >> SOF_ABI_MAJOR_SHIFT) & SOF_ABI_MAJOR_MASK)
+#define SOF_ABI_VERSION_MINOR(version) \
+ (((version) >> SOF_ABI_MINOR_SHIFT) & SOF_ABI_MINOR_MASK)
+#define SOF_ABI_VERSION_PATCH(version) \
+ (((version) >> SOF_ABI_PATCH_SHIFT) & SOF_ABI_PATCH_MASK)
+
+#define SOF_ABI_VERSION_INCOMPATIBLE(sof_ver, client_ver) \
+ (SOF_ABI_VERSION_MAJOR((sof_ver)) != \
+ SOF_ABI_VERSION_MAJOR((client_ver)) \
+ )
+
+#define SOF_ABI_VERSION SOF_ABI_VER(SOF_ABI_MAJOR, SOF_ABI_MINOR, SOF_ABI_PATCH)
+
+/* SOF ABI magic number "SOF\0". */
+#define SOF_ABI_MAGIC 0x00464F53
+
+#endif
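
A small worked example of the packing: SOF_ABI_VER(3, 4, 0) is (3 << 24) | (4 << 12) | 0 = 0x03004000, and only a MAJOR mismatch counts as incompatible:

    #include <assert.h>
    #include <sound/sof/abi.h>

    int main(void)
    {
            /* 3.4.0 packs into the MMmmmppp layout as 0x03004000. */
            assert(SOF_ABI_VER(3, 4, 0) == 0x03004000);
            assert(SOF_ABI_VERSION_MAJOR(SOF_ABI_VERSION) == SOF_ABI_MAJOR);

            /* MINOR/PATCH differences are backwards compatible... */
            assert(!SOF_ABI_VERSION_INCOMPATIBLE(SOF_ABI_VER(3, 4, 0),
                                                 SOF_ABI_VER(3, 9, 1)));
            /* ...a MAJOR difference is not. */
            assert(SOF_ABI_VERSION_INCOMPATIBLE(SOF_ABI_VER(3, 4, 0),
                                                SOF_ABI_VER(4, 0, 0)));
            return 0;
    }
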
diff --git a/include/uapi/sound/sof/eq.h b/include/uapi/sound/sof/eq.h
new file mode 100644
index 000000000000..666c2b6a3229
--- /dev/null
+++ b/include/uapi/sound/sof/eq.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INCLUDE_UAPI_SOUND_SOF_USER_EQ_H__
+#define __INCLUDE_UAPI_SOUND_SOF_USER_EQ_H__
+
+/* FIR EQ type */
+
+#define SOF_EQ_FIR_IDX_SWITCH 0
+
+#define SOF_EQ_FIR_MAX_SIZE 4096 /* Max size allowed for coef data in bytes */
+
+#define SOF_EQ_FIR_MAX_LENGTH 192 /* Max length for individual filter */
+
+#define SOF_EQ_FIR_MAX_RESPONSES 8 /* A blob can define max 8 FIR EQs */
+
+/*
+ * eq_fir_configuration data structure contains this information
+ * uint32_t size
+ * This is the number of bytes need to store the received EQ
+ * configuration.
+ * uint16_t channels_in_config
+ * This describes the number of channels in this EQ config data. It
+ * can be different from PLATFORM_MAX_CHANNELS.
+ * uint16_t number_of_responses
+ * 0=no responses, 1=one response defined, 2=two responses defined, etc.
+ * int16_t data[]
+ * assign_response[channels_in_config]
+ * 0 = use first response, 1 = use 2nd response, etc.
+ * E.g. {0, 0, 0, 0, 1, 1, 1, 1} would apply to channels 0-3 the
+ * same first defined response and to channels 4-7 the second.
+ * coef_data[]
+ * Repeated data
+ * { filter_length, output_shift, h[] }
+ * for every EQ response defined where vector h has filter_length
+ * number of coefficients. Coefficients in h[] are in Q1.15 format.
+ * E.g. 16384 (Q1.15) = 0.5. The shifts are number of right shifts.
+ *
+ * NOTE: The channels_in_config must be even to have coef_data aligned to
+ * 32 bit word in RAM. Therefore a mono EQ assign must be duplicated to 2ch
+ * even if it would never be used. Similarly a 5ch EQ assign must be increased
+ * to 6ch. EQ init will return an error if this is not met.
+ *
+ * NOTE: The filter_length must be a multiple of four. Therefore the filter must
+ * be padded from the end with zeros to have this condition met.
+ */
+
+struct sof_eq_fir_config {
+ uint32_t size;
+ uint16_t channels_in_config;
+ uint16_t number_of_responses;
+
+ /* reserved */
+ uint32_t reserved[4];
+
+ int16_t data[];
+} __packed;
+
+struct sof_eq_fir_coef_data {
+ int16_t length; /* Number of FIR taps */
+ int16_t out_shift; /* Amount of right shifts at output */
+
+ /* reserved */
+ uint32_t reserved[4];
+
+ int16_t coef[]; /* FIR coefficients */
+} __packed;
+
+/* In the struct above there are two 16-bit words (length, shift) and four
+ * reserved 32 bit words before the actual FIR coefficients. This information
+ * is used in parsing of the configuration blob.
+ */
+#define SOF_EQ_FIR_COEF_NHEADER \
+ (sizeof(struct sof_eq_fir_coef_data) / sizeof(int16_t))
+
+/* IIR EQ type */
+
+#define SOF_EQ_IIR_IDX_SWITCH 0
+
+#define SOF_EQ_IIR_MAX_SIZE 1024 /* Max size allowed for coef data in bytes */
+
+#define SOF_EQ_IIR_MAX_RESPONSES 8 /* A blob can define max 8 IIR EQs */
+
+/* eq_iir_configuration
+ * uint32_t channels_in_config
+ * This describes the number of channels in this EQ config data. It
+ * can be different from PLATFORM_MAX_CHANNELS.
+ * uint32_t number_of_responses_defined
+ * 0=no responses, 1=one response defined, 2=two responses defined, etc.
+ * int32_t data[]
+ * Data consist of two parts. First is the response assign vector that
+ * has length of channels_in_config. The latter part is coefficient
+ * data.
+ * uint32_t assign_response[channels_in_config]
+ * -1 = not defined, 0 = use first response, 1 = use 2nd, etc.
+ * E.g. {0, 0, 0, 0, -1, -1, -1, -1} would apply to channels 0-3 the
+ * same first defined response and leave channels 4-7 unequalized.
+ * coefficient_data[]
+ * <1st EQ>
+ * uint32_t num_biquads
+ * uint32_t num_biquads_in_series
+ * <1st biquad>
+ * int32_t coef_a2 Q2.30 format
+ * int32_t coef_a1 Q2.30 format
+ * int32_t coef_b2 Q2.30 format
+ * int32_t coef_b1 Q2.30 format
+ * int32_t coef_b0 Q2.30 format
+ * int32_t output_shift number of shifts right, shift left is negative
+ * int32_t output_gain Q2.14 format
+ * <2nd biquad>
+ * ...
+ * <2nd EQ>
+ *
+ * Note: A flat response biquad can be made with a section set to
+ * b0 = 1.0, gain = 1.0, and other parameters set to 0
+ * {0, 0, 0, 0, 1073741824, 0, 16384}
+ */
+
+struct sof_eq_iir_config {
+ uint32_t size;
+ uint32_t channels_in_config;
+ uint32_t number_of_responses;
+
+ /* reserved */
+ uint32_t reserved[4];
+
+ int32_t data[]; /* eq_assign[channels], eq 0, eq 1, ... */
+} __packed;
+
+struct sof_eq_iir_header_df2t {
+ uint32_t num_sections;
+ uint32_t num_sections_in_series;
+
+ /* reserved */
+ uint32_t reserved[4];
+
+ int32_t biquads[]; /* Repeated biquad coefficients */
+} __packed;
+
+struct sof_eq_iir_biquad_df2t {
+ int32_t a2; /* Q2.30 */
+ int32_t a1; /* Q2.30 */
+ int32_t b2; /* Q2.30 */
+ int32_t b1; /* Q2.30 */
+ int32_t b0; /* Q2.30 */
+ int32_t output_shift; /* Number of right shifts */
+ int32_t output_gain; /* Q2.14 */
+} __packed;
+
+/* A full 22nd order equalizer with 11 biquads covers octave bands 1-11 in
+ * the 0 - 20 kHz bandwidth.
+ */
+#define SOF_EQ_IIR_DF2T_BIQUADS_MAX 11
+
+/* The number of int32_t words in sof_eq_iir_header_df2t:
+ * num_sections, num_sections_in_series, reserved[4]
+ */
+#define SOF_EQ_IIR_NHEADER_DF2T \
+ (sizeof(struct sof_eq_iir_header_df2t) / sizeof(int32_t))
+
+/* The number of int32_t words in sof_eq_iir_biquad_df2t:
+ * a2, a1, b2, b1, b0, output_shift, output_gain
+ */
+#define SOF_EQ_IIR_NBIQUAD_DF2T \
+ (sizeof(struct sof_eq_iir_biquad_df2t) / sizeof(int32_t))
+
+#endif
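
The flat-response biquad described in the comment above maps directly onto the new struct. A minimal sketch (the __packed fallback is needed because these headers use the kernel spelling, and 16384 is 1.0 in Q2.14):

    #include <stdint.h>

    #ifndef __packed
    #define __packed __attribute__((packed))
    #endif
    #include <sound/sof/eq.h>

    /* Pass-through section: b0 = 1.0 (Q2.30), output gain = 1.0 (Q2.14). */
    static const struct sof_eq_iir_biquad_df2t flat_biquad = {
            .a2 = 0,
            .a1 = 0,
            .b2 = 0,
            .b1 = 0,
            .b0 = 1073741824,       /* 1.0 * 2^30 */
            .output_shift = 0,
            .output_gain = 16384,   /* 1.0 * 2^14 */
    };
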
diff --git a/include/uapi/sound/sof/fw.h b/include/uapi/sound/sof/fw.h
new file mode 100644
index 000000000000..1afca973eb09
--- /dev/null
+++ b/include/uapi/sound/sof/fw.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ */
+
+/*
+ * Firmware file format.
+ */
+
+#ifndef __INCLUDE_UAPI_SOF_FW_H__
+#define __INCLUDE_UAPI_SOF_FW_H__
+
+#define SND_SOF_FW_SIG_SIZE 4
+#define SND_SOF_FW_ABI 1
+#define SND_SOF_FW_SIG "Reef"
+
+/*
+ * A firmware module is made up of 1..N blocks of different types. The block
+ * header is used to determine where and how the block is to be copied into the
+ * DSP/host memory space.
+ */
+enum snd_sof_fw_blk_type {
+ SOF_FW_BLK_TYPE_INVALID = -1,
+ SOF_FW_BLK_TYPE_START = 0,
+ SOF_FW_BLK_TYPE_RSRVD0 = SOF_FW_BLK_TYPE_START,
+ SOF_FW_BLK_TYPE_IRAM = 1, /* local instruction RAM */
+ SOF_FW_BLK_TYPE_DRAM = 2, /* local data RAM */
+ SOF_FW_BLK_TYPE_SRAM = 3, /* system RAM */
+ SOF_FW_BLK_TYPE_ROM = 4,
+ SOF_FW_BLK_TYPE_IMR = 5,
+ SOF_FW_BLK_TYPE_RSRVD6 = 6,
+ SOF_FW_BLK_TYPE_RSRVD7 = 7,
+ SOF_FW_BLK_TYPE_RSRVD8 = 8,
+ SOF_FW_BLK_TYPE_RSRVD9 = 9,
+ SOF_FW_BLK_TYPE_RSRVD10 = 10,
+ SOF_FW_BLK_TYPE_RSRVD11 = 11,
+ SOF_FW_BLK_TYPE_RSRVD12 = 12,
+ SOF_FW_BLK_TYPE_RSRVD13 = 13,
+ SOF_FW_BLK_TYPE_RSRVD14 = 14,
+ /* use SOF_FW_BLK_TYPE_RSVRDX for new block types */
+ SOF_FW_BLK_TYPE_NUM
+};
+
+struct snd_sof_blk_hdr {
+ enum snd_sof_fw_blk_type type;
+ uint32_t size; /* bytes minus this header */
+ uint32_t offset; /* offset from base */
+} __packed;
+
+/*
+ * A firmware file is made up of 1 .. N modules of different types. The module
+ * type is used to determine how to load and parse the module.
+ */
+enum snd_sof_fw_mod_type {
+ SOF_FW_BASE = 0, /* base firmware image */
+ SOF_FW_MODULE = 1, /* firmware module */
+};
+
+struct snd_sof_mod_hdr {
+ enum snd_sof_fw_mod_type type;
+ uint32_t size; /* bytes minus this header */
+ uint32_t num_blocks; /* number of blocks */
+} __packed;
+
+/*
+ * Firmware file header.
+ */
+struct snd_sof_fw_header {
+ unsigned char sig[SND_SOF_FW_SIG_SIZE]; /* "Reef" */
+ uint32_t file_size; /* size of file minus this header */
+ uint32_t num_modules; /* number of modules */
+ uint32_t abi; /* version of header format */
+} __packed;
+
+#endif
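
To make the block/module nesting concrete, here is a hedged sketch of how a loader could walk a firmware image laid out as described above: the file header, then num_modules module headers, each followed by its blocks and their payloads. Bounds checking is omitted for brevity, and walk_sof_fw() is an illustrative name, not the SOF driver's actual loader.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative walk of the layout implied by the structures above. */
static int walk_sof_fw(const uint8_t *buf, size_t len)
{
	const struct snd_sof_fw_header *hdr = (const void *)buf;
	const uint8_t *p = buf + sizeof(*hdr);
	uint32_t i, j;

	if (len < sizeof(*hdr) ||
	    memcmp(hdr->sig, SND_SOF_FW_SIG, SND_SOF_FW_SIG_SIZE))
		return -1;

	for (i = 0; i < hdr->num_modules; i++) {
		const struct snd_sof_mod_hdr *mod = (const void *)p;

		p += sizeof(*mod);
		for (j = 0; j < mod->num_blocks; j++) {
			const struct snd_sof_blk_hdr *blk = (const void *)p;

			printf("module %u block %u: type %d, %u bytes at 0x%x\n",
			       i, j, blk->type, blk->size, blk->offset);
			/* block payload of blk->size bytes follows the header */
			p += sizeof(*blk) + blk->size;
		}
	}
	return 0;
}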
diff --git a/include/uapi/sound/sof/header.h b/include/uapi/sound/sof/header.h
new file mode 100644
index 000000000000..7868990b0d6f
--- /dev/null
+++ b/include/uapi/sound/sof/header.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INCLUDE_UAPI_SOUND_SOF_USER_HEADER_H__
+#define __INCLUDE_UAPI_SOUND_SOF_USER_HEADER_H__
+
+/*
+ * Header for all non-IPC ABI data.
+ *
+ * Identifies data type, size and ABI.
+ * Used by any bespoke component data structures or binary blobs.
+ */
+struct sof_abi_hdr {
+ uint32_t magic; /**< 'S', 'O', 'F', '\0' */
+ uint32_t type; /**< component specific type */
+ uint32_t size; /**< size in bytes of data excl. this struct */
+ uint32_t abi; /**< SOF ABI version */
+ uint32_t reserved[4]; /**< reserved for future use */
+ uint32_t data[0]; /**< Component data - opaque to core */
+} __packed;
+
+#endif
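
A short sketch of the sanity check a consumer of a bespoke blob might perform before trusting it. The magic value 0x00464f53 is simply 'S', 'O', 'F', '\0' read as a little-endian uint32_t; the macro and function names below are hypothetical, and the ABI version check is intentionally left out.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical constant: 'S','O','F','\0' as a little-endian uint32_t. */
#define SOF_ABI_HDR_MAGIC_LE 0x00464f53

static int sof_abi_blob_ok(const struct sof_abi_hdr *hdr, size_t blob_bytes)
{
	if (blob_bytes < sizeof(*hdr))
		return 0;
	if (hdr->magic != SOF_ABI_HDR_MAGIC_LE)
		return 0;
	/* size counts only the payload that follows this header */
	if (hdr->size > blob_bytes - sizeof(*hdr))
		return 0;
	return 1;
}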
diff --git a/include/uapi/sound/sof/manifest.h b/include/uapi/sound/sof/manifest.h
new file mode 100644
index 000000000000..2009ee30fad0
--- /dev/null
+++ b/include/uapi/sound/sof/manifest.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INCLUDE_UAPI_SOUND_SOF_USER_MANIFEST_H__
+#define __INCLUDE_UAPI_SOUND_SOF_USER_MANIFEST_H__
+
+/* start offset for base FW module */
+#define SOF_MAN_ELF_TEXT_OFFSET 0x2000
+
+/* FW Extended Manifest Header id = $AE1 */
+#define SOF_MAN_EXT_HEADER_MAGIC 0x31454124
+
+/* values for sof_man_module_type.load_type */
+#define SOF_MAN_MOD_TYPE_BUILTIN 0
+#define SOF_MAN_MOD_TYPE_MODULE 1
+
+struct sof_man_module_type {
+ uint32_t load_type:4; /* SOF_MAN_MOD_TYPE_ */
+ uint32_t auto_start:1;
+ uint32_t domain_ll:1;
+ uint32_t domain_dp:1;
+ uint32_t rsvd_:25;
+};
+
+/* segment flags.type */
+#define SOF_MAN_SEGMENT_TEXT 0
+#define SOF_MAN_SEGMENT_RODATA 1
+#define SOF_MAN_SEGMENT_DATA 1
+#define SOF_MAN_SEGMENT_BSS 2
+#define SOF_MAN_SEGMENT_EMPTY 15
+
+union sof_man_segment_flags {
+ uint32_t ul;
+ struct {
+ uint32_t contents:1;
+ uint32_t alloc:1;
+ uint32_t load:1;
+ uint32_t readonly:1;
+ uint32_t code:1;
+ uint32_t data:1;
+ uint32_t _rsvd0:2;
+ uint32_t type:4; /* MAN_SEGMENT_ */
+ uint32_t _rsvd1:4;
+ uint32_t length:16; /* of segment in pages */
+ } r;
+} __packed;
+
+/*
+ * Module segment descriptor. Used by ROM - Immutable.
+ */
+struct sof_man_segment_desc {
+ union sof_man_segment_flags flags;
+ uint32_t v_base_addr;
+ uint32_t file_offset;
+} __packed;
+
+/*
+ * The firmware binary can be split into several modules.
+ */
+
+#define SOF_MAN_MOD_ID_LEN 4
+#define SOF_MAN_MOD_NAME_LEN 8
+#define SOF_MAN_MOD_SHA256_LEN 32
+#define SOF_MAN_MOD_ID {'$', 'A', 'M', 'E'}
+
+/*
+ * Each module has an entry in the FW header. Used by ROM - Immutable.
+ */
+struct sof_man_module {
+ uint8_t struct_id[SOF_MAN_MOD_ID_LEN]; /* SOF_MAN_MOD_ID */
+ uint8_t name[SOF_MAN_MOD_NAME_LEN];
+ uint8_t uuid[16];
+ struct sof_man_module_type type;
+ uint8_t hash[SOF_MAN_MOD_SHA256_LEN];
+ uint32_t entry_point;
+ uint16_t cfg_offset;
+ uint16_t cfg_count;
+ uint32_t affinity_mask;
+ uint16_t instance_max_count; /* max number of instances */
+ uint16_t instance_bss_size; /* instance .bss size (pages) */
+ struct sof_man_segment_desc segment[3];
+} __packed;
+
+/*
+ * Each module has a configuration in the FW header. Used by ROM - Immutable.
+ */
+struct sof_man_mod_config {
+ uint32_t par[4]; /* module parameters */
+ uint32_t is_pages; /* actual size of instance .bss (pages) */
+ uint32_t cps; /* cycles per second */
+ uint32_t ibs; /* input buffer size (bytes) */
+ uint32_t obs; /* output buffer size (bytes) */
+ uint32_t module_flags; /* flags, reserved for future use */
+ uint32_t cpc; /* cycles per single run */
+ uint32_t obls; /* output block size, reserved for future use */
+} __packed;
+
+/*
+ * FW Manifest Header
+ */
+
+#define SOF_MAN_FW_HDR_FW_NAME_LEN 8
+#define SOF_MAN_FW_HDR_ID {'$', 'A', 'M', '1'}
+#define SOF_MAN_FW_HDR_NAME "ADSPFW"
+#define SOF_MAN_FW_HDR_FLAGS 0x0
+#define SOF_MAN_FW_HDR_FEATURES 0xff
+
+/*
+ * The firmware has a standard header that is checked by the ROM on firmware
+ * loading. preload_page_count is used by the DMA code loader and on CNL is
+ * the entire image size, i.e. the total size of the binary's .text and
+ * .rodata. Used by ROM - Immutable.
+ */
+struct sof_man_fw_header {
+ uint8_t header_id[4];
+ uint32_t header_len;
+ uint8_t name[SOF_MAN_FW_HDR_FW_NAME_LEN];
+ /* number of pages of preloaded image loaded by driver */
+ uint32_t preload_page_count;
+ uint32_t fw_image_flags;
+ uint32_t feature_mask;
+ uint16_t major_version;
+ uint16_t minor_version;
+ uint16_t hotfix_version;
+ uint16_t build_version;
+ uint32_t num_module_entries;
+ uint32_t hw_buf_base_addr;
+ uint32_t hw_buf_length;
+ /* target address for binary loading as offset in IMR - must be == base offset */
+ uint32_t load_offset;
+} __packed;
+
+/*
+ * Firmware manifest descriptor. This can contain N modules and N module
+ * configs. Used by ROM - Immutable.
+ */
+struct sof_man_fw_desc {
+ struct sof_man_fw_header header;
+
+ * Warning - hack for module arrays. For some unknown reason we
+ * have a variable size array of struct man_module followed by a
+ * variable size array of struct mod_config. These should have been
+ * merged into a variable array of a parent structure. We have to hack
+ * around this in many places....
+ *
+ * struct sof_man_module man_module[];
+ * struct sof_man_mod_config mod_config[];
+ */
+
+} __packed;
+
+/*
+ * Component Descriptor. Used by ROM - Immutable.
+ */
+struct sof_man_component_desc {
+ uint32_t reserved[2]; /* all 0 */
+ uint32_t version;
+ uint8_t hash[SOF_MAN_MOD_SHA256_LEN];
+ uint32_t base_offset;
+ uint32_t limit_offset;
+ uint32_t attributes[4];
+} __packed;
+
+/*
+ * Audio DSP extended metadata. Used by ROM - Immutable.
+ */
+struct sof_man_adsp_meta_file_ext {
+ uint32_t ext_type; /* always 17 for ADSP extension */
+ uint32_t ext_len;
+ uint32_t imr_type;
+ uint8_t reserved[16]; /* all 0 */
+ struct sof_man_component_desc comp_desc[1];
+} __packed;
+
+/*
+ * Module Manifest for rimage module metadata. Not used by ROM.
+ */
+struct sof_man_module_manifest {
+ struct sof_man_module module;
+ uint32_t text_size;
+} __packed;
+
+#endif
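
Given the variable-size module array noted in the sof_man_fw_desc comment, a reader typically indexes the module entries directly behind the descriptor. The sketch below assumes the module array starts immediately after struct sof_man_fw_desc and that the descriptor pointer has already been validated; both helper names are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical accessor: idx-th module entry, assuming modules follow
 * the descriptor directly (see the layout comment in sof_man_fw_desc).
 */
static const struct sof_man_module *
sof_man_get_module(const struct sof_man_fw_desc *desc, uint32_t idx)
{
	const uint8_t *base = (const uint8_t *)desc + sizeof(*desc);

	return (const struct sof_man_module *)
		(base + idx * sizeof(struct sof_man_module));
}

static void dump_manifest(const struct sof_man_fw_desc *desc)
{
	uint32_t i;

	for (i = 0; i < desc->header.num_module_entries; i++) {
		const struct sof_man_module *mod = sof_man_get_module(desc, i);

		printf("module %u: %.8s, entry point 0x%x\n",
		       i, (const char *)mod->name, mod->entry_point);
	}
}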
diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h
new file mode 100644
index 000000000000..53ea94bf1c08
--- /dev/null
+++ b/include/uapi/sound/sof/tokens.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
+ * Keyon Jie <yang.jie@linux.intel.com>
+ */
+
+/*
+ * Topology IDs and tokens.
+ *
+ * ** MUST BE ALIGNED WITH TOPOLOGY CONFIGURATION TOKEN VALUES **
+ */
+
+#ifndef __INCLUDE_UAPI_SOF_TOPOLOGY_H__
+#define __INCLUDE_UAPI_SOF_TOPOLOGY_H__
+
+/*
+ * Kcontrol IDs
+ */
+#define SOF_TPLG_KCTL_VOL_ID 256
+#define SOF_TPLG_KCTL_ENUM_ID 257
+#define SOF_TPLG_KCTL_BYTES_ID 258
+#define SOF_TPLG_KCTL_SWITCH_ID 259
+
+/*
+ * Tokens - must match values in topology configurations
+ */
+
+/* buffers */
+#define SOF_TKN_BUF_SIZE 100
+#define SOF_TKN_BUF_CAPS 101
+
+/* DAI */
+/* Token retired with ABI 3.2, do not use for new capabilities
+ * #define SOF_TKN_DAI_DMAC_CONFIG 153
+ */
+#define SOF_TKN_DAI_TYPE 154
+#define SOF_TKN_DAI_INDEX 155
+#define SOF_TKN_DAI_DIRECTION 156
+
+/* scheduling */
+#define SOF_TKN_SCHED_PERIOD 200
+#define SOF_TKN_SCHED_PRIORITY 201
+#define SOF_TKN_SCHED_MIPS 202
+#define SOF_TKN_SCHED_CORE 203
+#define SOF_TKN_SCHED_FRAMES 204
+#define SOF_TKN_SCHED_TIME_DOMAIN 205
+
+/* volume */
+#define SOF_TKN_VOLUME_RAMP_STEP_TYPE 250
+#define SOF_TKN_VOLUME_RAMP_STEP_MS 251
+
+/* SRC */
+#define SOF_TKN_SRC_RATE_IN 300
+#define SOF_TKN_SRC_RATE_OUT 301
+
+/* PCM */
+#define SOF_TKN_PCM_DMAC_CONFIG 353
+
+/* Generic components */
+#define SOF_TKN_COMP_PERIOD_SINK_COUNT 400
+#define SOF_TKN_COMP_PERIOD_SOURCE_COUNT 401
+#define SOF_TKN_COMP_FORMAT 402
+/* Token retired with ABI 3.2, do not use for new capabilities
+ * #define SOF_TKN_COMP_PRELOAD_COUNT 403
+ */
+
+/* SSP */
+#define SOF_TKN_INTEL_SSP_CLKS_CONTROL 500
+#define SOF_TKN_INTEL_SSP_MCLK_ID 501
+#define SOF_TKN_INTEL_SSP_SAMPLE_BITS 502
+#define SOF_TKN_INTEL_SSP_FRAME_PULSE_WIDTH 503
+#define SOF_TKN_INTEL_SSP_QUIRKS 504
+#define SOF_TKN_INTEL_SSP_TDM_PADDING_PER_SLOT 505
+
+/* DMIC */
+#define SOF_TKN_INTEL_DMIC_DRIVER_VERSION 600
+#define SOF_TKN_INTEL_DMIC_CLK_MIN 601
+#define SOF_TKN_INTEL_DMIC_CLK_MAX 602
+#define SOF_TKN_INTEL_DMIC_DUTY_MIN 603
+#define SOF_TKN_INTEL_DMIC_DUTY_MAX 604
+#define SOF_TKN_INTEL_DMIC_NUM_PDM_ACTIVE 605
+#define SOF_TKN_INTEL_DMIC_SAMPLE_RATE 608
+#define SOF_TKN_INTEL_DMIC_FIFO_WORD_LENGTH 609
+
+/* DMIC PDM */
+#define SOF_TKN_INTEL_DMIC_PDM_CTRL_ID 700
+#define SOF_TKN_INTEL_DMIC_PDM_MIC_A_Enable 701
+#define SOF_TKN_INTEL_DMIC_PDM_MIC_B_Enable 702
+#define SOF_TKN_INTEL_DMIC_PDM_POLARITY_A 703
+#define SOF_TKN_INTEL_DMIC_PDM_POLARITY_B 704
+#define SOF_TKN_INTEL_DMIC_PDM_CLK_EDGE 705
+#define SOF_TKN_INTEL_DMIC_PDM_SKEW 706
+
+/* Tone */
+#define SOF_TKN_TONE_SAMPLE_RATE 800
+
+/* Processing Components */
+#define SOF_TKN_PROCESS_TYPE 900
+
+/* for backward compatibility */
+#define SOF_TKN_EFFECT_TYPE SOF_TKN_PROCESS_TYPE
+
+#endif
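
Since these IDs only have meaning when the kernel and the topology file agree on them, tooling often wants to translate a numeric token back into something readable when debugging a topology. A small, purely illustrative lookup follows; it is not part of the UAPI and is deliberately incomplete.

/* Hypothetical debug helper mapping a few token IDs to names. */
static const char *sof_tkn_name(uint32_t token)
{
	switch (token) {
	case SOF_TKN_BUF_SIZE:		return "buffer size";
	case SOF_TKN_DAI_TYPE:		return "DAI type";
	case SOF_TKN_SCHED_PERIOD:	return "scheduler period";
	case SOF_TKN_SRC_RATE_OUT:	return "SRC output rate";
	case SOF_TKN_PROCESS_TYPE:	return "process type";
	default:			return "unknown token";
	}
}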
diff --git a/include/uapi/sound/sof/tone.h b/include/uapi/sound/sof/tone.h
new file mode 100644
index 000000000000..d7c6e5d8317e
--- /dev/null
+++ b/include/uapi/sound/sof/tone.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INCLUDE_UAPI_SOUND_SOF_USER_TONE_H__
+#define __INCLUDE_UAPI_SOUND_SOF_USER_TONE_H__
+
+#define SOF_TONE_IDX_FREQUENCY 0
+#define SOF_TONE_IDX_AMPLITUDE 1
+#define SOF_TONE_IDX_FREQ_MULT 2
+#define SOF_TONE_IDX_AMPL_MULT 3
+#define SOF_TONE_IDX_LENGTH 4
+#define SOF_TONE_IDX_PERIOD 5
+#define SOF_TONE_IDX_REPEATS 6
+#define SOF_TONE_IDX_LIN_RAMP_STEP 7
+
+#endif
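
These constants are indices into the tone component's control value array rather than values themselves. Below is a hedged sketch of filling such an array; the array length of 8 matches the number of indices above, but the example values, units and scaling in the comments are assumptions for illustration, not UAPI facts.

#include <stdint.h>

/* Hypothetical example of populating a tone control value array. */
static void fill_tone_values(int32_t values[8])
{
	values[SOF_TONE_IDX_FREQUENCY]     = 997;	/* assumed Hz */
	values[SOF_TONE_IDX_AMPLITUDE]     = 1 << 29;	/* assumed fractional gain */
	values[SOF_TONE_IDX_FREQ_MULT]     = 1;
	values[SOF_TONE_IDX_AMPL_MULT]     = 1;
	values[SOF_TONE_IDX_LENGTH]        = 0;
	values[SOF_TONE_IDX_PERIOD]        = 0;
	values[SOF_TONE_IDX_REPEATS]       = 0;
	values[SOF_TONE_IDX_LIN_RAMP_STEP] = 0;
}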
diff --git a/include/uapi/sound/sof/trace.h b/include/uapi/sound/sof/trace.h
new file mode 100644
index 000000000000..ffa7288a0f16
--- /dev/null
+++ b/include/uapi/sound/sof/trace.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INCLUDE_UAPI_SOUND_SOF_USER_TRACE_H__
+#define __INCLUDE_UAPI_SOUND_SOF_USER_TRACE_H__
+
+/*
+ * Host system time.
+ *
+ * This property is used by the driver to pass down information about
+ * current system time. It is expressed in microseconds.
+ * FW translates timestamps (in log entries, probe packets) to this time
+ * domain.
+ *
+ * (cavs: SystemTime).
+ */
+struct system_time {
+ uint32_t val_l; /* Lower dword of current host time value */
+ uint32_t val_u; /* Upper dword of current host time value */
+} __packed;
+
+#define LOG_ENABLE 1 /* Enable logging */
+#define LOG_DISABLE 0 /* Disable logging */
+
+#define LOG_LEVEL_CRITICAL 1 /* (FDK fatal) */
+#define LOG_LEVEL_VERBOSE 2
+
+/*
+ * Layout of a log FIFO.
+ */
+struct log_buffer_layout {
+ uint32_t read_ptr; /* read pointer */
+ uint32_t write_ptr; /* write pointer */
+ uint32_t buffer[0]; /* buffer */
+} __packed;
+
+/*
+ * Log buffer status reported by FW.
+ */
+struct log_buffer_status {
+ uint32_t core_id; /* ID of core that logged to other half */
+} __packed;
+
+#define TRACE_ID_LENGTH 12
+
+/*
+ * Log entry header.
+ *
+ * The header is followed by an array of arguments (uint32_t[]).
+ * Number of arguments is specified by the params_num field of log_entry
+ */
+struct log_entry_header {
+ uint32_t id_0 : TRACE_ID_LENGTH; /* e.g. Pipeline ID */
+ uint32_t id_1 : TRACE_ID_LENGTH; /* e.g. Component ID */
+ uint32_t core_id : 8; /* Reporting core's id */
+
+ uint64_t timestamp; /* Timestamp (in dsp ticks) */
+ uint32_t log_entry_address; /* Address of log entry in ELF */
+} __packed;
+
+#endif
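
Putting the pieces together, a host-side reader pulls struct log_entry_header records out of the log FIFO and then reads the trailing argument words. Because the argument count lives in the dictionary entry of the firmware ELF at log_entry_address, the sketch below takes it as a parameter; dump_log_entry() is illustrative only, not the SOF trace implementation.

#include <stdint.h>
#include <stdio.h>

/* Illustrative reader for one entry already copied out of the log FIFO.
 * params_num comes from the ELF dictionary entry at log_entry_address.
 */
static const uint32_t *dump_log_entry(const struct log_entry_header *hdr,
				      uint32_t params_num)
{
	const uint32_t *args = (const uint32_t *)(hdr + 1);
	uint32_t i;

	printf("pipeline %u comp %u core %u ts %llu dict 0x%x:",
	       hdr->id_0, hdr->id_1, hdr->core_id,
	       (unsigned long long)hdr->timestamp, hdr->log_entry_address);
	for (i = 0; i < params_num; i++)
		printf(" 0x%x", args[i]);
	printf("\n");

	return args + params_num;	/* start of the next entry */
}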
diff --git a/include/video/udlfb.h b/include/video/udlfb.h
index 7d09e54ae54e..58fb5732831a 100644
--- a/include/video/udlfb.h
+++ b/include/video/udlfb.h
@@ -48,6 +48,13 @@ struct dlfb_data {
int base8;
u32 pseudo_palette[256];
int blank_mode; /*one of FB_BLANK_ */
+ struct mutex render_mutex;
+ int damage_x;
+ int damage_y;
+ int damage_x2;
+ int damage_y2;
+ spinlock_t damage_lock;
+ struct work_struct damage_work;
struct fb_ops ops;
/* blit-only rendering path metrics, exposed through sysfs */
atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 19d032373de5..19a72f591e2b 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -43,8 +43,10 @@ extern struct hvm_start_info pvh_start_info;
#endif /* CONFIG_XEN_DOM0 */
struct bio_vec;
+struct page;
+
bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
- const struct bio_vec *vec2);
+ const struct page *page);
#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
extern u64 xen_saved_max_mem_size;